repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
STTS | STTS-main/VideoSwin/tests/test_runtime/test_inference.py | import mmcv
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmaction.apis import inference_recognizer, init_recognizer
video_config_file = 'configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py' # noqa: E501
frame_config_file = 'configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py' # noqa: E501
flow_frame_config_file = 'configs/recognition/tsn/tsn_r50_320p_1x1x3_110e_kinetics400_flow.py' # noqa: E501
label_path = 'demo/label_map_k400.txt'
video_path = 'demo/demo.mp4'
frames_path = 'tests/data/imgs'
def test_init_recognizer():
    """Check ``init_recognizer`` argument validation and device placement."""
    with pytest.raises(TypeError):
        # config must be a filename or Config object
        init_recognizer(dict(config_file=None))

    with pytest.raises(RuntimeError):
        # input data type should be consistent with the dataset type
        init_recognizer(frame_config_file)

    with pytest.raises(RuntimeError):
        # input data type should be consistent with the dataset type
        init_recognizer(video_config_file, use_frames=True)

    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    model = init_recognizer(video_config_file, None, device)

    config = mmcv.Config.fromfile(video_config_file)
    config.model.backbone.pretrained = None

    # BUG FIX: the original called isinstance() without asserting the result,
    # so this check was silently discarded.
    assert isinstance(model, nn.Module)
    if torch.cuda.is_available():
        assert next(model.parameters()).is_cuda is True
    else:
        assert next(model.parameters()).is_cuda is False
    # init_recognizer must blank out the backbone's `pretrained` field so the
    # checkpoint (not an ImageNet init) defines the weights.
    assert model.cfg.model.backbone.pretrained is None
def test_video_inference_recognizer():
    """End-to-end inference on a demo video: top-5 output, intermediate
    feature extraction, and SlowFast's tuple-valued backbone features."""
    if torch.cuda.is_available():
        device = 'cuda:0'
    else:
        device = 'cpu'
    model = init_recognizer(video_config_file, None, device)

    with pytest.raises(RuntimeError):
        # video path doesn't exist
        inference_recognizer(model, 'missing.mp4', label_path)

    with pytest.raises(RuntimeError):
        # ``video_path`` should be consistent with the ``use_frames`` flag
        inference_recognizer(model, video_path, label_path, use_frames=True)

    with pytest.raises(RuntimeError):
        # ``video_path`` should be consistent with the ``use_frames`` flag
        inference_recognizer(model, 'demo/', label_path)

    for ops in model.cfg.data.test.pipeline:
        if ops['type'] in ('TenCrop', 'ThreeCrop'):
            # Use CenterCrop to reduce memory in order to pass CI
            ops['type'] = 'CenterCrop'

    # Top-5 predictions come back sorted by descending score.
    top5_label = inference_recognizer(model, video_path, label_path)
    scores = [item[1] for item in top5_label]
    assert len(top5_label) == 5
    assert scores == sorted(scores, reverse=True)

    # With `outputs` and as_tensor=False, intermediate features are returned
    # as a dict of numpy arrays keyed by module name.
    _, feat = inference_recognizer(
        model,
        video_path,
        label_path,
        outputs=('backbone', 'cls_head'),
        as_tensor=False)
    assert isinstance(feat, dict)
    assert 'backbone' in feat and 'cls_head' in feat
    assert isinstance(feat['backbone'], np.ndarray)
    assert isinstance(feat['cls_head'], np.ndarray)
    # Shapes correspond to TSN with 25 sampled segments / 400 classes.
    assert feat['backbone'].shape == (25, 2048, 7, 7)
    assert feat['cls_head'].shape == (1, 400)

    # Dotted module paths select sub-modules; default as_tensor=True keeps
    # the features as torch.Tensor.
    _, feat = inference_recognizer(
        model,
        video_path,
        label_path,
        outputs=('backbone.layer3', 'backbone.layer3.1.conv1'))
    assert 'backbone.layer3.1.conv1' in feat and 'backbone.layer3' in feat
    assert isinstance(feat['backbone.layer3.1.conv1'], torch.Tensor)
    assert isinstance(feat['backbone.layer3'], torch.Tensor)
    assert feat['backbone.layer3'].size() == (25, 1024, 14, 14)
    assert feat['backbone.layer3.1.conv1'].size() == (25, 256, 14, 14)

    # SlowFast backbone returns a (slow, fast) tuple of feature maps.
    cfg_file = 'configs/recognition/slowfast/slowfast_r50_video_inference_4x16x1_256e_kinetics400_rgb.py'  # noqa: E501
    sf_model = init_recognizer(cfg_file, None, device)
    for ops in sf_model.cfg.data.test.pipeline:
        # Changes to reduce memory in order to pass CI
        if ops['type'] in ('TenCrop', 'ThreeCrop'):
            ops['type'] = 'CenterCrop'
        if ops['type'] == 'SampleFrames':
            ops['num_clips'] = 1
    _, feat = inference_recognizer(
        sf_model, video_path, label_path, outputs=('backbone', 'cls_head'))
    assert isinstance(feat, dict) and isinstance(feat['backbone'], tuple)
    assert 'backbone' in feat and 'cls_head' in feat
    assert len(feat['backbone']) == 2
    assert isinstance(feat['backbone'][0], torch.Tensor)
    assert isinstance(feat['backbone'][1], torch.Tensor)
    assert feat['backbone'][0].size() == (1, 2048, 4, 8, 8)
    assert feat['backbone'][1].size() == (1, 256, 32, 8, 8)
    assert feat['cls_head'].size() == (1, 400)
def test_frames_inference_recognizer():
    """Frame-folder inference for both RGB and flow TSN models."""
    if torch.cuda.is_available():
        device = 'cuda:0'
    else:
        device = 'cpu'
    rgb_model = init_recognizer(
        frame_config_file, None, device, use_frames=True)
    flow_model = init_recognizer(
        flow_frame_config_file, None, device, use_frames=True)

    with pytest.raises(RuntimeError):
        # video path doesn't exist
        inference_recognizer(rgb_model, 'missing_path', label_path)

    with pytest.raises(RuntimeError):
        # ``video_path`` should be consistent with the ``use_frames`` flag
        inference_recognizer(
            flow_model, frames_path, label_path, use_frames=False)

    for ops in rgb_model.cfg.data.test.pipeline:
        if ops['type'] in ('TenCrop', 'ThreeCrop'):
            # Use CenterCrop to reduce memory in order to pass CI
            ops['type'] = 'CenterCrop'
            ops['crop_size'] = 224
    for ops in flow_model.cfg.data.test.pipeline:
        if ops['type'] in ('TenCrop', 'ThreeCrop'):
            # Use CenterCrop to reduce memory in order to pass CI
            ops['type'] = 'CenterCrop'
            ops['crop_size'] = 224

    # Top-5 predictions come back sorted by descending score.
    top5_label = inference_recognizer(
        rgb_model, frames_path, label_path, use_frames=True)
    scores = [item[1] for item in top5_label]
    assert len(top5_label) == 5
    assert scores == sorted(scores, reverse=True)

    # Intermediate features as numpy arrays (as_tensor=False), flow model.
    _, feat = inference_recognizer(
        flow_model,
        frames_path,
        label_path,
        outputs=('backbone', 'cls_head'),
        as_tensor=False,
        use_frames=True)
    assert isinstance(feat, dict)
    assert 'backbone' in feat and 'cls_head' in feat
    assert isinstance(feat['backbone'], np.ndarray)
    assert isinstance(feat['cls_head'], np.ndarray)
    # Shapes correspond to TSN with 25 sampled segments / 400 classes.
    assert feat['backbone'].shape == (25, 2048, 7, 7)
    assert feat['cls_head'].shape == (1, 400)

    # Dotted module paths select sub-modules; tensors by default.
    _, feat = inference_recognizer(
        rgb_model,
        frames_path,
        label_path,
        use_frames=True,
        outputs=('backbone.layer3', 'backbone.layer3.1.conv1'))
    assert 'backbone.layer3.1.conv1' in feat and 'backbone.layer3' in feat
    assert isinstance(feat['backbone.layer3.1.conv1'], torch.Tensor)
    assert isinstance(feat['backbone.layer3'], torch.Tensor)
    assert feat['backbone.layer3'].size() == (25, 1024, 14, 14)
    assert feat['backbone.layer3.1.conv1'].size() == (25, 256, 14, 14)
| 6,972 | 37.313187 | 119 | py |
STTS | STTS-main/VideoSwin/tests/test_runtime/test_lr.py | import logging
import shutil
import sys
import tempfile
from unittest.mock import MagicMock, call
import torch
import torch.nn as nn
from mmcv.runner import IterTimerHook, PaviLoggerHook, build_runner
from torch.utils.data import DataLoader
def test_tin_lr_updater_hook():
    """Run a demo runner with TINLrUpdaterHook variants and verify the
    learning rates logged to a mocked pavi writer.

    NOTE(review): several TINLrUpdaterHook configs are registered in
    sequence (no-warmup, exp, constant, linear); the expected values below
    presumably reflect the effective schedule after all registrations —
    confirm against mmcv's hook-priority semantics before editing.
    """
    # Stub out pavi so PaviLoggerHook can be constructed without the package.
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner()

    hook_cfg = dict(type='TINLrUpdaterHook', min_lr=0.1)
    runner.register_hook_from_cfg(hook_cfg)

    hook_cfg = dict(
        type='TINLrUpdaterHook',
        by_epoch=False,
        min_lr=0.1,
        warmup='exp',
        warmup_iters=2,
        warmup_ratio=0.9)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())

    hook_cfg = dict(
        type='TINLrUpdaterHook',
        by_epoch=False,
        min_lr=0.1,
        warmup='constant',
        warmup_iters=2,
        warmup_ratio=0.9)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())

    hook_cfg = dict(
        type='TINLrUpdaterHook',
        by_epoch=False,
        min_lr=0.1,
        warmup='linear',
        warmup_iters=2,
        warmup_ratio=0.9)
    runner.register_hook_from_cfg(hook_cfg)
    runner.register_hook_from_cfg(dict(type='IterTimerHook'))
    runner.register_hook(IterTimerHook())

    # add pavi hook
    hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader], [('train', 1)])
    shutil.rmtree(runner.work_dir)

    assert hasattr(hook, 'writer')
    # Expected LR values at iterations 1, 6 and 10 of the cosine/TIN schedule.
    calls = [
        call('train', {
            'learning_rate': 0.028544155877284292,
            'momentum': 0.95
        }, 1),
        call('train', {
            'learning_rate': 0.04469266270539641,
            'momentum': 0.95
        }, 6),
        call('train', {
            'learning_rate': 0.09695518130045147,
            'momentum': 0.95
        }, 10)
    ]
    hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
def _build_demo_runner(runner_type='EpochBasedRunner',
                       max_epochs=1,
                       max_iters=None):
    """Build a minimal mmcv runner around a one-layer linear model.

    Args:
        runner_type (str): Registered runner class name.
            Default: 'EpochBasedRunner'.
        max_epochs (int): Epoch budget for epoch-based runners. Default: 1.
        max_iters (int | None): Iteration budget for iteration-based
            runners. Default: None.

    Returns:
        The runner, with checkpoint and text-logger hooks registered. Its
        work dir is a fresh temporary directory the caller should remove.
    """

    class Model(nn.Module):
        """2-in / 1-out linear model exposing the runner step API."""

        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(2, 1)

        def forward(self, x):
            return self.linear(x)

        def train_step(self, x, optimizer, **kwargs):
            # Use the raw output as the "loss" — enough to drive the runner.
            return dict(loss=self(x))

        def val_step(self, x, optimizer, **kwargs):
            return dict(loss=self(x))

    model = Model()
    sgd = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.95)

    runner = build_runner(
        dict(type=runner_type),
        default_args=dict(
            model=model,
            work_dir=tempfile.mkdtemp(),
            optimizer=sgd,
            logger=logging.getLogger(),
            max_epochs=max_epochs,
            max_iters=max_iters))
    runner.register_checkpoint_hook(dict(interval=1))
    runner.register_logger_hooks(
        dict(interval=1, hooks=[dict(type='TextLoggerHook')]))
    return runner
| 3,291 | 26.898305 | 75 | py |
STTS | STTS-main/VideoSwin/tests/test_runtime/test_apis_test.py | import sys
import warnings
from unittest.mock import MagicMock, Mock, patch
import pytest
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
# TODO import test functions from mmcv and delete them from mmaction2
try:
from mmcv.engine import (collect_results_cpu, multi_gpu_test,
single_gpu_test)
pytest.skip(
'Test functions are supported in MMCV', allow_module_level=True)
except (ImportError, ModuleNotFoundError):
warnings.warn(
'DeprecationWarning: single_gpu_test, multi_gpu_test, '
'collect_results_cpu, collect_results_gpu from mmaction2 will be '
'deprecated. Please install mmcv through master branch.')
from mmaction.apis.test import (collect_results_cpu, multi_gpu_test,
single_gpu_test)
class OldStyleModel(nn.Module):
    """Minimal legacy-style model whose forward returns a counter.

    Each call returns ``[k]`` where ``k`` is the zero-based call index, so
    result-collection helpers can be checked deterministically.
    """

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, 1)
        self.cnt = 0

    def forward(self, *args, **kwargs):
        current = self.cnt
        self.cnt = current + 1
        return [current]
class Model(OldStyleModel):
    """OldStyleModel extended with the runner API (train_step/val_step)."""

    def train_step(self):
        # No-op: only the method's existence matters to the tested helpers.
        pass

    def val_step(self):
        # No-op, see train_step.
        pass
class ExampleDataset(Dataset):
    """Tiny dataset of 8 constant tensors with a fixed fake eval result."""

    def __init__(self):
        self.index = 0
        self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6]

    def __getitem__(self, idx):
        # Every item is the same single-element tensor; content is irrelevant
        # to the collection tests.
        return dict(imgs=torch.tensor([1]))

    def __len__(self):
        return len(self.eval_result)
def test_single_gpu_test():
    """single_gpu_test should return one result per dataset sample, in order."""
    test_dataset = ExampleDataset()
    loader = DataLoader(test_dataset, batch_size=1)
    model = Model()
    results = single_gpu_test(model, loader)
    # Model's forward counts calls, so the results are simply 0..7.
    assert results == list(range(8))
def mock_tensor_without_cuda(*args, **kwargs):
    """Stand-in for ``torch.tensor`` that never allocates on CUDA.

    A ``device`` keyword (whatever its value) is redirected to a CPU
    IntTensor; otherwise a plain float Tensor is built.
    """
    if 'device' in kwargs:
        return torch.IntTensor(*args, device='cpu')
    return torch.Tensor(*args)
@patch('mmaction.apis.test.collect_results_gpu',
       Mock(return_value=list(range(8))))
@patch('mmaction.apis.test.collect_results_cpu',
       Mock(return_value=list(range(8))))
def test_multi_gpu_test():
    """multi_gpu_test should gather one result per sample.

    Both collector functions are mocked, so this only exercises the
    dispatch between the gpu_collect / cpu_collect code paths.
    """
    test_dataset = ExampleDataset()
    loader = DataLoader(test_dataset, batch_size=1)
    model = Model()
    # Default path uses the (mocked) GPU collector.
    results = multi_gpu_test(model, loader)
    assert results == list(range(8))
    # gpu_collect=False uses the (mocked) CPU collector.
    results = multi_gpu_test(model, loader, gpu_collect=False)
    assert results == list(range(8))
@patch('mmcv.runner.get_dist_info', Mock(return_value=(0, 1)))
@patch('torch.distributed.broadcast', MagicMock)
@patch('torch.distributed.barrier', Mock)
@pytest.mark.skipif(
    sys.version_info[:2] == (3, 8), reason='Not for python 3.8')
def test_collect_results_cpu():
    """collect_results_cpu should reassemble the full result list.

    Distributed primitives are patched to simulate a single rank-0 process
    (world size 1), so the function's file-based gather becomes a no-op copy.
    """

    def content_for_unittest():
        results_part = list(range(8))
        size = 8
        # Auto-created tmpdir ...
        results = collect_results_cpu(results_part, size)
        assert results == list(range(8))
        # ... and an explicitly named tmpdir.
        results = collect_results_cpu(results_part, size, 'unittest')
        assert results == list(range(8))

    if not torch.cuda.is_available():
        # Without CUDA, patch the tensor factories used internally so the
        # dist helpers stay on CPU instead of requesting a CUDA device.
        with patch(
                'torch.full',
                Mock(
                    return_value=torch.full(
                        (512, ), 32, dtype=torch.uint8, device='cpu'))):
            with patch('torch.tensor', mock_tensor_without_cuda):
                content_for_unittest()
    else:
        content_for_unittest()
| 3,346 | 27.12605 | 74 | py |
STTS | STTS-main/VideoSwin/tests/test_models/base.py | import os.path as osp
import mmcv
import numpy as np
import torch
from mmcv.utils import _BatchNorm
def check_norm_state(modules, train_state):
    """Return True iff every BatchNorm layer matches ``train_state``.

    Non-norm modules are ignored; an iterable with no norm layers passes.
    """
    return all(mod.training == train_state
               for mod in modules
               if isinstance(mod, _BatchNorm))
def generate_backbone_demo_inputs(input_shape=(1, 3, 64, 64)):
    """Create a random float tensor shaped like a backbone input batch.

    Args:
        input_shape (tuple): Input batch dimensions.
            Default: (1, 3, 64, 64).

    Returns:
        torch.FloatTensor: Uniform random values in [0, 1).
    """
    return torch.FloatTensor(np.random.random(input_shape))
def generate_recognizer_demo_inputs(
        input_shape=(1, 3, 3, 224, 224), model_type='2D'):
    """Create a superset of inputs needed to run test or train batches.

    Args:
        input_shape (tuple): Input batch dimensions.
            Default: (1, 3, 3, 224, 224).
        model_type (str): Model type for data generation, one of
            {'2D', '3D', 'audio'}. Default: '2D'.

    Returns:
        dict: ``imgs`` (random FloatTensor of ``input_shape``) and
        ``gt_labels`` (LongTensor of class 2, one label per sample).
    """
    # Pull out the dims that decide how many labels each model type needs.
    if len(input_shape) == 5:
        N, L = input_shape[0], input_shape[1]
    elif len(input_shape) == 6:
        N, M, L = input_shape[0], input_shape[1], input_shape[3]

    imgs = torch.FloatTensor(np.random.random(input_shape))

    if model_type == '2D':
        num_labels = N
    elif model_type == '3D':
        num_labels = M
    elif model_type == 'audio':
        num_labels = L
    else:
        raise ValueError(f'Data type {model_type} is not available')

    return {'imgs': imgs, 'gt_labels': torch.LongTensor([2] * num_labels)}
def generate_detector_demo_inputs(
        input_shape=(1, 3, 4, 224, 224), num_classes=81, train=True,
        device='cpu'):
    """Create random detector inputs: images, proposals, boxes and labels.

    Args:
        input_shape (tuple): Input batch dims (N, C, T, H, W).
            Default: (1, 3, 4, 224, 224).
        num_classes (int): Number of label columns. Default: 81.
        train (bool): Build a train batch (with gt boxes/labels) or a test
            batch (single sample, list-wrapped). Default: True.
        device (str): 'cpu' or 'cuda'. Default: 'cpu'.
    """
    num_samples = input_shape[0]
    if not train:
        # Test batches are single-sample by convention here.
        assert num_samples == 1

    def _rand_boxes(n):
        # (x1, y1, x2, y2) with x2/y2 shifted by 0.5 so they exceed x1/y1,
        # then scaled into image coordinates.
        boxes = torch.rand(n, 4) * 0.5
        boxes[:, 2:] += 0.5
        boxes[:, 0::2] *= input_shape[3]
        boxes[:, 1::2] *= input_shape[4]
        return boxes.cuda() if device == 'cuda' else boxes

    def _rand_labels(n):
        # Sparse multi-hot labels; column 0 is always cleared.
        labels = (torch.randn(n, num_classes) > 0.8).type(torch.float32)
        labels[:, 0] = 0
        return labels.cuda() if device == 'cuda' else labels

    img = torch.FloatTensor(np.random.random(input_shape))
    if device == 'cuda':
        img = img.cuda()

    proposals = [_rand_boxes(2) for _ in range(num_samples)]
    gt_bboxes = [_rand_boxes(2) for _ in range(num_samples)]
    gt_labels = [_rand_labels(2) for _ in range(num_samples)]
    img_metas = [dict(img_shape=input_shape[-2:]) for _ in range(num_samples)]

    if train:
        return dict(
            img=img,
            proposals=proposals,
            gt_bboxes=gt_bboxes,
            gt_labels=gt_labels,
            img_metas=img_metas)
    return dict(img=[img], proposals=[proposals], img_metas=[img_metas])
def generate_gradcam_inputs(input_shape=(1, 3, 3, 224, 224), model_type='2D'):
    """Create a superset of inputs needed to run GradCAM.

    Args:
        input_shape (tuple[int]): Input batch dimensions.
            Default: (1, 3, 3, 224, 224).
        model_type (str): Model type for data generation, from {'2D', '3D'}.
            Default: '2D'.

    Returns:
        dict: model inputs with two keys, ``imgs`` and ``label``.
    """
    if model_type not in ('2D', '3D'):
        raise ValueError(f'Data type {model_type} is not available')

    batch_size = input_shape[0]
    return {
        'imgs': torch.FloatTensor(np.random.random(input_shape)),
        'label': torch.LongTensor([2] * batch_size),
    }
def get_cfg(config_type, fname):
    """Grab configs necessary to create a recognizer.

    These are deep copied to allow for safe modification of parameters
    without influencing other tests.

    Args:
        config_type (str): One of 'recognition', 'recognition_audio',
            'localization', 'detection'.
        fname (str): Config file name inside that config directory.

    Returns:
        mmcv.Config: The loaded config object.

    Raises:
        Exception: If the config directory or the config file is missing.
    """
    config_types = ('recognition', 'recognition_audio', 'localization',
                    'detection')
    assert config_type in config_types

    repo_dpath = osp.dirname(osp.dirname(osp.dirname(__file__)))
    config_dpath = osp.join(repo_dpath, 'configs/' + config_type)
    config_fpath = osp.join(config_dpath, fname)
    if not osp.exists(config_dpath):
        # Include the offending path so a failure is actionable.
        raise Exception(f'Cannot find config path: {config_dpath}')
    if not osp.exists(config_fpath):
        # BUG FIX: the original never validated the file itself, so a typo
        # in `fname` surfaced as an opaque error from Config.fromfile.
        raise Exception(f'Cannot find config file: {config_fpath}')
    return mmcv.Config.fromfile(config_fpath)
def get_recognizer_cfg(fname):
    """Load a config from ``configs/recognition``."""
    return get_cfg('recognition', fname)
def get_audio_recognizer_cfg(fname):
    """Load a config from ``configs/recognition_audio``."""
    return get_cfg('recognition_audio', fname)
def get_localizer_cfg(fname):
    """Load a config from ``configs/localization``."""
    return get_cfg('localization', fname)
def get_detector_cfg(fname):
    """Load a config from ``configs/detection``."""
    return get_cfg('detection', fname)
| 4,723 | 27.981595 | 78 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_roi_extractor.py | import torch
from mmaction.models import SingleRoIExtractor3D
def test_single_roi_extractor3d():
    """Check SingleRoIExtractor3D output shapes for temporal pooling on/off,
    tuple (multi-pathway) inputs, and the with_global concat option."""
    # with_temporal_pool=True collapses the temporal dim of the RoI output.
    roi_extractor = SingleRoIExtractor3D(
        roi_layer_type='RoIAlign',
        featmap_stride=16,
        output_size=8,
        sampling_ratio=0,
        pool_mode='avg',
        aligned=True,
        with_temporal_pool=True)

    feat = torch.randn([4, 64, 8, 16, 16])
    # rois: (batch_idx, x1, y1, x2, y2)
    rois = torch.tensor([[0., 1., 1., 6., 6.], [1., 2., 2., 7., 7.],
                         [3., 2., 2., 9., 9.], [2., 2., 0., 10., 9.]])
    roi_feat, feat = roi_extractor(feat, rois)
    assert roi_feat.shape == (4, 64, 1, 8, 8)
    assert feat.shape == (4, 64, 1, 16, 16)

    # Tuple input (e.g. SlowFast pathways) is concatenated along channels.
    feat = (torch.randn([4, 64, 8, 16, 16]), torch.randn([4, 32, 16, 16, 16]))
    roi_feat, feat = roi_extractor(feat, rois)
    assert roi_feat.shape == (4, 96, 1, 8, 8)
    assert feat.shape == (4, 96, 1, 16, 16)

    # with_temporal_pool=False keeps the temporal dimension.
    feat = torch.randn([4, 64, 8, 16, 16])
    roi_extractor = SingleRoIExtractor3D(
        roi_layer_type='RoIAlign',
        featmap_stride=16,
        output_size=8,
        sampling_ratio=0,
        pool_mode='avg',
        aligned=True,
        with_temporal_pool=False)
    roi_feat, feat = roi_extractor(feat, rois)
    assert roi_feat.shape == (4, 64, 8, 8, 8)
    assert feat.shape == (4, 64, 8, 16, 16)

    feat = (torch.randn([4, 64, 8, 16, 16]), torch.randn([4, 32, 16, 16, 16]))
    roi_feat, feat = roi_extractor(feat, rois)
    assert roi_feat.shape == (4, 96, 16, 8, 8)
    assert feat.shape == (4, 96, 16, 16, 16)

    # with_global=True concatenates a global feature, doubling channels of
    # the RoI output.
    feat = torch.randn([4, 64, 8, 16, 16])
    roi_extractor = SingleRoIExtractor3D(
        roi_layer_type='RoIAlign',
        featmap_stride=16,
        output_size=8,
        sampling_ratio=0,
        pool_mode='avg',
        aligned=True,
        with_temporal_pool=True,
        with_global=True)
    roi_feat, feat = roi_extractor(feat, rois)
    assert roi_feat.shape == (4, 128, 1, 8, 8)
    assert feat.shape == (4, 64, 1, 16, 16)
| 1,945 | 32.551724 | 78 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_common.py | import os.path as osp
import pytest
import torch
from mmaction.models.common import LFB, TAM, Conv2plus1d, ConvAudio
def test_conv2plus1d():
    """Check Conv2plus1d validation, weight init, and output shape."""
    with pytest.raises(AssertionError):
        # Length of kernel size, stride and padding must be the same
        Conv2plus1d(3, 8, (2, 2))

    conv_2plus1d = Conv2plus1d(3, 8, 2)
    conv_2plus1d.init_weights()
    # init_weights must set the spatial BN to identity (weight=1, bias=0).
    assert torch.equal(conv_2plus1d.bn_s.weight,
                       torch.ones_like(conv_2plus1d.bn_s.weight))
    assert torch.equal(conv_2plus1d.bn_s.bias,
                       torch.zeros_like(conv_2plus1d.bn_s.bias))

    x = torch.rand(1, 3, 8, 256, 256)
    output = conv_2plus1d(x)
    # Kernel 2, stride 1, no padding: each spatial/temporal dim shrinks by 1.
    assert output.shape == torch.Size([1, 8, 7, 255, 255])
def test_conv_audio():
    """Check ConvAudio output shapes for both fusion ops."""
    conv_audio = ConvAudio(3, 8, 3)
    conv_audio.init_weights()

    x = torch.rand(1, 3, 8, 8)
    output = conv_audio(x)
    # Default op concatenates the two branches: 2 * out_channels.
    assert output.shape == torch.Size([1, 16, 8, 8])

    # op='sum' adds the branches instead, keeping out_channels.
    conv_audio_sum = ConvAudio(3, 8, 3, op='sum')
    output = conv_audio_sum(x)
    assert output.shape == torch.Size([1, 8, 8, 8])
def test_TAM():
    """Test TAM: argument validation and shape-preserving forward."""
    with pytest.raises(AssertionError):
        # alpha must be a positive integer
        TAM(16, 8, alpha=0, beta=4)

    with pytest.raises(AssertionError):
        # beta must be a positive integer
        TAM(16, 8, alpha=2, beta=0)

    with pytest.raises(AssertionError):
        # the channels number of x should be equal to self.in_channels of TAM
        tam = TAM(16, 8)
        x = torch.rand(64, 8, 112, 112)
        tam(x)

    # TAM preserves the input shape (N*segments, C, H, W).
    tam = TAM(16, 8)
    x = torch.rand(32, 16, 112, 112)
    output = tam(x)
    assert output.shape == torch.Size([32, 16, 112, 112])
def test_LFB():
    """Test LFB (long-term feature bank): validation, CPU and lmdb loading.

    NOTE(review): ``dataset_modes=('unittest')`` is a plain str, not a
    1-tuple — presumably LFB accepts both; confirm before changing.
    """
    with pytest.raises(ValueError):
        # prefix path must exist
        LFB(lfb_prefix_path='./_non_exist_path')

    lfb_prefix_path = osp.normpath(
        osp.join(osp.dirname(__file__), '../data/lfb'))

    with pytest.raises(AssertionError):
        # dataset_modes must be str or sequence of str, not int
        LFB(lfb_prefix_path=lfb_prefix_path, dataset_modes=100)

    with pytest.raises(ValueError):
        # unsupported storage device
        LFB(lfb_prefix_path=lfb_prefix_path, device='ceph')

    # load on cpu
    lfb_cpu = LFB(
        lfb_prefix_path=lfb_prefix_path,
        max_num_sampled_feat=5,
        window_size=60,
        lfb_channels=16,
        dataset_modes=('unittest'),
        device='cpu')
    lt_feat_cpu = lfb_cpu['video_1,930']
    # window_size * max_num_sampled_feat rows of lfb_channels features.
    assert lt_feat_cpu.shape == (5 * 60, 16)
    assert len(lfb_cpu) == 1

    # load on lmdb
    lfb_lmdb = LFB(
        lfb_prefix_path=lfb_prefix_path,
        max_num_sampled_feat=3,
        window_size=30,
        lfb_channels=16,
        dataset_modes=('unittest'),
        device='lmdb',
        lmdb_map_size=1e6)
    lt_feat_lmdb = lfb_lmdb['video_1,930']
    assert lt_feat_lmdb.shape == (3 * 30, 16)
| 2,754 | 26.55 | 77 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_backbones.py | import copy
import pytest
import torch
import torch.nn as nn
from mmcv.utils import _BatchNorm
from mmaction.models import (C3D, X3D, MobileNetV2TSM, ResNet2Plus1d,
ResNet3dCSN, ResNet3dSlowFast, ResNet3dSlowOnly,
ResNetAudio, ResNetTIN, ResNetTSM, TANet)
from mmaction.models.backbones.resnet_tsm import NL3DWrapper
from .base import check_norm_state, generate_backbone_demo_inputs
def test_x3d_backbone():
    """Test x3d backbone: validation, norm_eval, frozen stages, forward."""
    with pytest.raises(AssertionError):
        # In X3D: 1 <= num_stages <= 4
        X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, num_stages=0)

    with pytest.raises(AssertionError):
        # In X3D: 1 <= num_stages <= 4
        X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, num_stages=5)

    with pytest.raises(AssertionError):
        # len(spatial_strides) == num_stages
        X3D(gamma_w=1.0,
            gamma_b=2.25,
            gamma_d=2.2,
            spatial_strides=(1, 2),
            num_stages=4)

    with pytest.raises(AssertionError):
        # se_style in ['half', 'all']
        X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, se_style=None)

    with pytest.raises(AssertionError):
        # se_ratio should be None or > 0
        X3D(gamma_w=1.0,
            gamma_b=2.25,
            gamma_d=2.2,
            se_style='half',
            se_ratio=0)

    # x3d_s, no pretrained, norm_eval True: BN stays in eval mode in train()
    x3d_s = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, norm_eval=True)
    x3d_s.init_weights()
    x3d_s.train()
    assert check_norm_state(x3d_s.modules(), False)

    # x3d_l, no pretrained, norm_eval True
    x3d_l = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=5.0, norm_eval=True)
    x3d_l.init_weights()
    x3d_l.train()
    assert check_norm_state(x3d_l.modules(), False)

    # x3d_s, no pretrained, norm_eval False: BN trains normally
    x3d_s = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=2.2, norm_eval=False)
    x3d_s.init_weights()
    x3d_s.train()
    assert check_norm_state(x3d_s.modules(), True)

    # x3d_l, no pretrained, norm_eval False
    x3d_l = X3D(gamma_w=1.0, gamma_b=2.25, gamma_d=5.0, norm_eval=False)
    x3d_l.init_weights()
    x3d_l.train()
    assert check_norm_state(x3d_l.modules(), True)

    # x3d_s, no pretrained, frozen_stages, norm_eval False:
    # stem and the first `frozen_stages` layers must be frozen.
    frozen_stages = 1
    x3d_s_frozen = X3D(
        gamma_w=1.0,
        gamma_b=2.25,
        gamma_d=2.2,
        norm_eval=False,
        frozen_stages=frozen_stages)
    x3d_s_frozen.init_weights()
    x3d_s_frozen.train()
    assert x3d_s_frozen.conv1_t.bn.training is False
    for param in x3d_s_frozen.conv1_s.parameters():
        assert param.requires_grad is False
    for param in x3d_s_frozen.conv1_t.parameters():
        assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(x3d_s_frozen, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # test zero_init_residual, zero_init_residual is True by default:
    # each block's last BN should start at zero.
    for m in x3d_s_frozen.modules():
        if hasattr(m, 'conv3'):
            assert torch.equal(m.conv3.bn.weight,
                               torch.zeros_like(m.conv3.bn.weight))
            assert torch.equal(m.conv3.bn.bias,
                               torch.zeros_like(m.conv3.bn.bias))

    # x3d_s inference
    input_shape = (1, 3, 13, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            x3d_s_frozen = x3d_s_frozen.cuda()
            imgs_gpu = imgs.cuda()
            feat = x3d_s_frozen(imgs_gpu)
            assert feat.shape == torch.Size([1, 432, 13, 2, 2])
    else:
        feat = x3d_s_frozen(imgs)
        assert feat.shape == torch.Size([1, 432, 13, 2, 2])

    # x3d_m inference
    input_shape = (1, 3, 16, 96, 96)
    imgs = generate_backbone_demo_inputs(input_shape)
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            x3d_s_frozen = x3d_s_frozen.cuda()
            imgs_gpu = imgs.cuda()
            feat = x3d_s_frozen(imgs_gpu)
            assert feat.shape == torch.Size([1, 432, 16, 3, 3])
    else:
        feat = x3d_s_frozen(imgs)
        assert feat.shape == torch.Size([1, 432, 16, 3, 3])
def test_resnet2plus1d_backbone():
    """Test R(2+1)D backbone: validation, frozen stages, forward shapes."""
    with pytest.raises(AssertionError):
        # r2+1d does not support inflation
        ResNet2Plus1d(50, None, pretrained2d=True)

    with pytest.raises(AssertionError):
        # r2+1d requires conv(2+1)d module
        ResNet2Plus1d(
            50, None, pretrained2d=False, conv_cfg=dict(type='Conv3d'))

    # depth-34 variant with the first stage frozen
    frozen_stages = 1
    r2plus1d_34_frozen = ResNet2Plus1d(
        34,
        None,
        conv_cfg=dict(type='Conv2plus1d'),
        pretrained2d=False,
        frozen_stages=frozen_stages,
        conv1_kernel=(3, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(1, 1, 1, 1),
        spatial_strides=(1, 2, 2, 2),
        temporal_strides=(1, 2, 2, 2))
    r2plus1d_34_frozen.init_weights()
    r2plus1d_34_frozen.train()
    # Stem BN (both the inner spatial BN and the outer BN) must stay frozen.
    assert r2plus1d_34_frozen.conv1.conv.bn_s.training is False
    assert r2plus1d_34_frozen.conv1.bn.training is False
    for param in r2plus1d_34_frozen.conv1.parameters():
        assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(r2plus1d_34_frozen, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    input_shape = (1, 3, 8, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            r2plus1d_34_frozen = r2plus1d_34_frozen.cuda()
            imgs_gpu = imgs.cuda()
            feat = r2plus1d_34_frozen(imgs_gpu)
            assert feat.shape == torch.Size([1, 512, 1, 2, 2])
    else:
        feat = r2plus1d_34_frozen(imgs)
        assert feat.shape == torch.Size([1, 512, 1, 2, 2])

    # depth-50 variant (bottleneck blocks -> 2048 output channels)
    r2plus1d_50_frozen = ResNet2Plus1d(
        50,
        None,
        conv_cfg=dict(type='Conv2plus1d'),
        pretrained2d=False,
        conv1_kernel=(3, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(1, 1, 1, 1),
        spatial_strides=(1, 2, 2, 2),
        temporal_strides=(1, 2, 2, 2),
        frozen_stages=frozen_stages)
    r2plus1d_50_frozen.init_weights()
    r2plus1d_50_frozen.train()
    assert r2plus1d_50_frozen.conv1.conv.bn_s.training is False
    assert r2plus1d_50_frozen.conv1.bn.training is False
    for param in r2plus1d_50_frozen.conv1.parameters():
        assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(r2plus1d_50_frozen, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    input_shape = (1, 3, 8, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            r2plus1d_50_frozen = r2plus1d_50_frozen.cuda()
            imgs_gpu = imgs.cuda()
            feat = r2plus1d_50_frozen(imgs_gpu)
            assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
    else:
        feat = r2plus1d_50_frozen(imgs)
        assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
def test_resnet_tsm_backbone():
    """Test resnet_tsm backbone: shift placement, temporal pool, non-local."""
    with pytest.raises(NotImplementedError):
        # shift_place must be block or blockres
        resnet_tsm_50_block = ResNetTSM(50, shift_place='Block')
        resnet_tsm_50_block.init_weights()

    from mmaction.models.backbones.resnet import Bottleneck
    from mmaction.models.backbones.resnet_tsm import TemporalShift

    input_shape = (8, 3, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)

    # resnet_tsm with depth 50: default 'blockres' wraps each conv1 in a
    # TemporalShift module.
    resnet_tsm_50 = ResNetTSM(50)
    resnet_tsm_50.init_weights()
    for layer_name in resnet_tsm_50.res_layers:
        layer = getattr(resnet_tsm_50, layer_name)
        blocks = list(layer.children())
        for block in blocks:
            assert isinstance(block.conv1.conv, TemporalShift)
            assert block.conv1.conv.num_segments == resnet_tsm_50.num_segments
            assert block.conv1.conv.shift_div == resnet_tsm_50.shift_div
            assert isinstance(block.conv1.conv.net, nn.Conv2d)

    # resnet_tsm with depth 50, no pretrained, shift_place is block:
    # the whole bottleneck is wrapped instead of just conv1.
    resnet_tsm_50_block = ResNetTSM(50, shift_place='block')
    resnet_tsm_50_block.init_weights()
    for layer_name in resnet_tsm_50_block.res_layers:
        layer = getattr(resnet_tsm_50_block, layer_name)
        blocks = list(layer.children())
        for block in blocks:
            assert isinstance(block, TemporalShift)
            assert block.num_segments == resnet_tsm_50_block.num_segments
            assert block.num_segments == resnet_tsm_50_block.num_segments
            assert block.shift_div == resnet_tsm_50_block.shift_div
            assert isinstance(block.net, Bottleneck)

    # resnet_tsm with depth 50, no pretrained, use temporal_pool:
    # layer2 gains a MaxPool3d and later stages see half the segments.
    resnet_tsm_50_temporal_pool = ResNetTSM(50, temporal_pool=True)
    resnet_tsm_50_temporal_pool.init_weights()
    for layer_name in resnet_tsm_50_temporal_pool.res_layers:
        layer = getattr(resnet_tsm_50_temporal_pool, layer_name)
        blocks = list(layer.children())
        if layer_name == 'layer2':
            assert len(blocks) == 2
            assert isinstance(blocks[1], nn.MaxPool3d)
            blocks = copy.deepcopy(blocks[0])
        for block in blocks:
            assert isinstance(block.conv1.conv, TemporalShift)
            if layer_name == 'layer1':
                assert block.conv1.conv.num_segments == \
                    resnet_tsm_50_temporal_pool.num_segments
            else:
                assert block.conv1.conv.num_segments == \
                    resnet_tsm_50_temporal_pool.num_segments // 2
            assert block.conv1.conv.shift_div == resnet_tsm_50_temporal_pool.shift_div  # noqa: E501
            assert isinstance(block.conv1.conv.net, nn.Conv2d)

    # resnet_tsm with non-local module on alternating blocks of layers 2-3
    non_local_cfg = dict(
        sub_sample=True,
        use_scale=False,
        norm_cfg=dict(type='BN3d', requires_grad=True),
        mode='embedded_gaussian')
    non_local = ((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0))
    resnet_tsm_nonlocal = ResNetTSM(
        50, non_local=non_local, non_local_cfg=non_local_cfg)
    resnet_tsm_nonlocal.init_weights()
    for layer_name in ['layer2', 'layer3']:
        layer = getattr(resnet_tsm_nonlocal, layer_name)
        for i, _ in enumerate(layer):
            if i % 2 == 0:
                assert isinstance(layer[i], NL3DWrapper)

    resnet_tsm_50_full = ResNetTSM(
        50,
        non_local=non_local,
        non_local_cfg=non_local_cfg,
        temporal_pool=True)
    resnet_tsm_50_full.init_weights()

    # TSM forward
    feat = resnet_tsm_50(imgs)
    assert feat.shape == torch.Size([8, 2048, 2, 2])

    # TSM with non-local forward
    feat = resnet_tsm_nonlocal(imgs)
    assert feat.shape == torch.Size([8, 2048, 2, 2])

    # TSM with temporal pool forward: batch (segments) halved
    feat = resnet_tsm_50_temporal_pool(imgs)
    assert feat.shape == torch.Size([4, 2048, 2, 2])

    # TSM with temporal pool + non-local forward
    input_shape = (16, 3, 32, 32)
    imgs = generate_backbone_demo_inputs(input_shape)
    feat = resnet_tsm_50_full(imgs)
    assert feat.shape == torch.Size([8, 2048, 1, 1])
def test_mobilenetv2_tsm_backbone():
    """Test mobilenetv2_tsm backbone: shift insertion and width multipliers."""
    from mmaction.models.backbones.resnet_tsm import TemporalShift
    from mmaction.models.backbones.mobilenet_v2 import InvertedResidual
    from mmcv.cnn import ConvModule

    input_shape = (8, 3, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)

    # mobilenetv2_tsm with width_mult = 1.0: every residual-connected
    # 3-conv InvertedResidual gets a TemporalShift on its first conv.
    mobilenetv2_tsm = MobileNetV2TSM()
    mobilenetv2_tsm.init_weights()
    for cur_module in mobilenetv2_tsm.modules():
        if isinstance(cur_module, InvertedResidual) and \
                len(cur_module.conv) == 3 and \
                cur_module.use_res_connect:
            assert isinstance(cur_module.conv[0], TemporalShift)
            assert cur_module.conv[0].num_segments == \
                mobilenetv2_tsm.num_segments
            assert cur_module.conv[0].shift_div == mobilenetv2_tsm.shift_div
            assert isinstance(cur_module.conv[0].net, ConvModule)

    # TSM-MobileNetV2 with widen_factor = 1.0 forward
    feat = mobilenetv2_tsm(imgs)
    assert feat.shape == torch.Size([8, 1280, 2, 2])

    # mobilenetv2 with widen_factor = 0.5 forward
    # (output channels stay 1280: the final conv is not narrowed below it)
    mobilenetv2_tsm_05 = MobileNetV2TSM(widen_factor=0.5)
    mobilenetv2_tsm_05.init_weights()
    feat = mobilenetv2_tsm_05(imgs)
    assert feat.shape == torch.Size([8, 1280, 2, 2])

    # mobilenetv2 with widen_factor = 1.5 forward (1280 * 1.5 = 1920)
    mobilenetv2_tsm_15 = MobileNetV2TSM(widen_factor=1.5)
    mobilenetv2_tsm_15.init_weights()
    feat = mobilenetv2_tsm_15(imgs)
    assert feat.shape == torch.Size([8, 1920, 2, 2])
def test_slowfast_backbone():
    """Test SlowFast backbone.

    Covers invalid-config errors, inflated/no-lateral variants, frozen
    stages, and a forward pass with the default config.
    """
    with pytest.raises(TypeError):
        # cfg should be a dict
        ResNet3dSlowFast(None, slow_pathway=list(['foo', 'bar']))
    with pytest.raises(TypeError):
        # pretrained should be a str
        sf_50 = ResNet3dSlowFast(dict(foo='bar'))
        sf_50.init_weights()
    with pytest.raises(KeyError):
        # pathway type should be implemented
        ResNet3dSlowFast(None, slow_pathway=dict(type='resnext'))

    # test slowfast with slow inflated
    sf_50_inflate = ResNet3dSlowFast(
        None,
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained='torchvision://resnet50',
            pretrained2d=True,
            lateral=True,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1)))
    sf_50_inflate.init_weights()
    sf_50_inflate.train()

    # test slowfast with no lateral connection
    sf_50_wo_lateral = ResNet3dSlowFast(
        None,
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1)))
    sf_50_wo_lateral.init_weights()
    sf_50_wo_lateral.train()

    # slowfast w/o lateral connection inference test
    input_shape = (1, 3, 8, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            sf_50_wo_lateral = sf_50_wo_lateral.cuda()
            imgs_gpu = imgs.cuda()
            feat = sf_50_wo_lateral(imgs_gpu)
    else:
        feat = sf_50_wo_lateral(imgs)

    # Output is a (slow, fast) pair of feature maps.
    assert isinstance(feat, tuple)
    assert feat[0].shape == torch.Size([1, 2048, 1, 2, 2])
    assert feat[1].shape == torch.Size([1, 256, 8, 2, 2])

    # test slowfast with frozen stages config
    frozen_slow = 3
    sf_50 = ResNet3dSlowFast(
        None,
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            pretrained2d=True,
            lateral=True,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            frozen_stages=frozen_slow))
    sf_50.init_weights()
    sf_50.train()

    # Lateral connections up to frozen_slow must be frozen (BN in eval
    # mode, params without grad); later stages stay trainable.
    for stage in range(1, sf_50.slow_path.num_stages):
        lateral_name = sf_50.slow_path.lateral_connections[stage - 1]
        conv_lateral = getattr(sf_50.slow_path, lateral_name)
        for mod in conv_lateral.modules():
            if isinstance(mod, _BatchNorm):
                if stage <= frozen_slow:
                    assert mod.training is False
                else:
                    assert mod.training is True
        for param in conv_lateral.parameters():
            if stage <= frozen_slow:
                assert param.requires_grad is False
            else:
                assert param.requires_grad is True

    # test slowfast with normal config
    sf_50 = ResNet3dSlowFast(None)
    sf_50.init_weights()
    sf_50.train()

    # slowfast inference test
    input_shape = (1, 3, 8, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            sf_50 = sf_50.cuda()
            imgs_gpu = imgs.cuda()
            feat = sf_50(imgs_gpu)
    else:
        feat = sf_50(imgs)

    assert isinstance(feat, tuple)
    assert feat[0].shape == torch.Size([1, 2048, 1, 2, 2])
    assert feat[1].shape == torch.Size([1, 256, 8, 2, 2])
def test_slowonly_backbone():
    """Test SlowOnly backbone.

    Checks the lateral-connection assertion, builds the PoseC3D variant
    and the default variant, and runs a forward pass.
    """
    with pytest.raises(AssertionError):
        # SlowOnly should contain no lateral connection
        ResNet3dSlowOnly(50, None, lateral=True)

    # test SlowOnly for PoseC3D
    so_50 = ResNet3dSlowOnly(
        depth=50,
        pretrained=None,
        in_channels=17,
        base_channels=32,
        num_stages=3,
        out_indices=(2, ),
        stage_blocks=(4, 6, 3),
        conv1_stride_s=1,
        pool1_stride_s=1,
        inflate=(0, 1, 1),
        spatial_strides=(2, 2, 2),
        temporal_strides=(1, 1, 2),
        dilations=(1, 1, 1))
    so_50.init_weights()
    so_50.train()

    # test SlowOnly with normal config
    so_50 = ResNet3dSlowOnly(50, None)
    so_50.init_weights()
    so_50.train()

    # SlowOnly inference test
    input_shape = (1, 3, 8, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            so_50 = so_50.cuda()
            imgs_gpu = imgs.cuda()
            feat = so_50(imgs_gpu)
    else:
        feat = so_50(imgs)

    assert feat.shape == torch.Size([1, 2048, 8, 2, 2])
def test_resnet_csn_backbone():
    """Test resnet_csn backbone.

    Exercises the frozen-BN variant, both channel-separated bottleneck
    modes ('ip' and 'ir'), and eval-mode propagation.
    """
    with pytest.raises(ValueError):
        # Bottleneck mode must be "ip" or "ir"
        ResNet3dCSN(152, None, bottleneck_mode='id')

    input_shape = (2, 3, 6, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)

    resnet3d_csn_frozen = ResNet3dCSN(
        152, None, bn_frozen=True, norm_eval=True)
    resnet3d_csn_frozen.train()
    # With bn_frozen all BatchNorm parameters stop requiring gradients,
    # even after train() is called.
    for m in resnet3d_csn_frozen.modules():
        if isinstance(m, _BatchNorm):
            for param in m.parameters():
                assert param.requires_grad is False

    # Interaction-preserved channel-separated bottleneck block
    resnet3d_csn_ip = ResNet3dCSN(152, None, bottleneck_mode='ip')
    resnet3d_csn_ip.init_weights()
    resnet3d_csn_ip.train()
    for i, layer_name in enumerate(resnet3d_csn_ip.res_layers):
        layers = getattr(resnet3d_csn_ip, layer_name)
        num_blocks = resnet3d_csn_ip.stage_blocks[i]
        assert len(layers) == num_blocks
        for layer in layers:
            assert isinstance(layer.conv2, nn.Sequential)
            # 'ip' mode keeps two convs; groups == planes on the second one
            # indicates a channel-separated (depthwise-style) conv.
            assert len(layer.conv2) == 2
            assert layer.conv2[1].groups == layer.planes
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            resnet3d_csn_ip = resnet3d_csn_ip.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_csn_ip(imgs_gpu)
            assert feat.shape == torch.Size([2, 2048, 1, 2, 2])
    else:
        feat = resnet3d_csn_ip(imgs)
        assert feat.shape == torch.Size([2, 2048, 1, 2, 2])

    # Interaction-reduced channel-separated bottleneck block
    resnet3d_csn_ir = ResNet3dCSN(152, None, bottleneck_mode='ir')
    resnet3d_csn_ir.init_weights()
    resnet3d_csn_ir.train()
    for i, layer_name in enumerate(resnet3d_csn_ir.res_layers):
        layers = getattr(resnet3d_csn_ir, layer_name)
        num_blocks = resnet3d_csn_ir.stage_blocks[i]
        assert len(layers) == num_blocks
        for layer in layers:
            assert isinstance(layer.conv2, nn.Sequential)
            # 'ir' mode collapses conv2 to the single separated conv.
            assert len(layer.conv2) == 1
            assert layer.conv2[0].groups == layer.planes
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            resnet3d_csn_ir = resnet3d_csn_ir.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_csn_ir(imgs_gpu)
            assert feat.shape == torch.Size([2, 2048, 1, 2, 2])
    else:
        feat = resnet3d_csn_ir(imgs)
        assert feat.shape == torch.Size([2, 2048, 1, 2, 2])

    # Set training status = False
    resnet3d_csn_ip = ResNet3dCSN(152, None, bottleneck_mode='ip')
    resnet3d_csn_ip.init_weights()
    resnet3d_csn_ip.train(False)
    # train(False) must propagate eval mode to all direct children.
    for module in resnet3d_csn_ip.children():
        assert module.training is False
def test_tanet_backbone():
    """Test tanet backbone.

    Checks that every residual block is wrapped in a TABlock around a
    Bottleneck, and verifies forward shapes for two input sizes.
    """
    with pytest.raises(NotImplementedError):
        # TA-Blocks are only based on Bottleneck block now
        tanet_18 = TANet(18, 8)
        tanet_18.init_weights()

    from mmaction.models.backbones.resnet import Bottleneck
    from mmaction.models.backbones.tanet import TABlock

    # tanet with depth 50
    tanet_50 = TANet(50, 8)
    tanet_50.init_weights()
    for layer_name in tanet_50.res_layers:
        layer = getattr(tanet_50, layer_name)
        blocks = list(layer.children())
        for block in blocks:
            assert isinstance(block, TABlock)
            assert isinstance(block.block, Bottleneck)
            # The temporal attention module mirrors the block's segment
            # count and consumes conv1's output channels.
            assert block.tam.num_segments == block.num_segments
            assert block.tam.in_channels == block.block.conv1.out_channels

    input_shape = (8, 3, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    feat = tanet_50(imgs)
    assert feat.shape == torch.Size([8, 2048, 2, 2])

    input_shape = (16, 3, 32, 32)
    imgs = generate_backbone_demo_inputs(input_shape)
    feat = tanet_50(imgs)
    assert feat.shape == torch.Size([16, 2048, 1, 1])
def test_c3d_backbone():
    """Test c3d backbone.

    Runs the default C3D and the BatchNorm3d variant through an identical
    forward-shape check.
    """
    demo_shape = (1, 3, 16, 112, 112)
    demo_imgs = generate_backbone_demo_inputs(demo_shape)

    # The plain model and the BN variant must both map a 16-frame clip to a
    # flat 4096-dim feature vector.
    for norm_cfg in (None, dict(type='BN3d')):
        model = C3D() if norm_cfg is None else C3D(norm_cfg=norm_cfg)
        model.init_weights()
        model.train()
        out = model(demo_imgs)
        assert out.shape == torch.Size([1, 4096])
def test_resnet_audio_backbone():
    """Test ResNetAudio backbone."""
    # Feed a dummy spectrogram through ResNetAudio-50 and check the
    # resulting feature-map shape.
    spectrogram = generate_backbone_demo_inputs((1, 1, 16, 16))

    model = ResNetAudio(50, None)
    model.init_weights()
    model.train()

    out = model(spectrogram)
    assert out.shape == torch.Size([1, 1024, 2, 2])
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_resnet_tin_backbone():
    """Test resnet_tin backbone.

    GPU-only: the forward pass at the end runs on CUDA. Checks the
    temporal-interlace wrapping of conv1 and the partial-BN behavior.
    """
    with pytest.raises(AssertionError):
        # num_segments should be positive
        resnet_tin = ResNetTIN(50, num_segments=-1)
        resnet_tin.init_weights()

    from mmaction.models.backbones.resnet_tin import (CombineNet,
                                                      TemporalInterlace)

    # resnet_tin with normal config
    resnet_tin = ResNetTIN(50)
    resnet_tin.init_weights()
    for layer_name in resnet_tin.res_layers:
        layer = getattr(resnet_tin, layer_name)
        blocks = list(layer.children())
        for block in blocks:
            # Each block's conv1 is replaced by a CombineNet whose first
            # sub-net performs the temporal interlace shift.
            assert isinstance(block.conv1.conv, CombineNet)
            assert isinstance(block.conv1.conv.net1, TemporalInterlace)
            assert (
                block.conv1.conv.net1.num_segments == resnet_tin.num_segments)
            assert block.conv1.conv.net1.shift_div == resnet_tin.shift_div

    # resnet_tin with partial batchnorm
    resnet_tin_pbn = ResNetTIN(50, partial_bn=True)
    resnet_tin_pbn.train()
    count_bn = 0
    for m in resnet_tin_pbn.modules():
        if isinstance(m, nn.BatchNorm2d):
            count_bn += 1
            # Partial BN: only the first BatchNorm2d stays trainable; every
            # later one is frozen in eval mode with grads disabled.
            if count_bn >= 2:
                assert m.training is False
                assert m.weight.requires_grad is False
                assert m.bias.requires_grad is False
            else:
                assert m.training is True
                assert m.weight.requires_grad is True
                assert m.bias.requires_grad is True

    input_shape = (8, 3, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape).cuda()
    resnet_tin = resnet_tin.cuda()

    # resnet_tin with normal cfg inference
    feat = resnet_tin(imgs)
    assert feat.shape == torch.Size([8, 2048, 2, 2])
| 25,467 | 34.971751 | 100 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_neck.py | import copy
import pytest
import torch
from mmaction.models import TPN
from .base import generate_backbone_demo_inputs
def test_tpn():
    """Test TPN backbone.

    Validates config type-checking, then runs both the 'cascade' and
    'parallel' flow types with and without a target (which toggles the
    auxiliary loss).
    """
    tpn_cfg = dict(
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(
            in_channels=(1024, 2048), out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024),
            mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=400, loss_weight=0.5))

    with pytest.raises(AssertionError):
        # in_channels must be a tuple, not a list
        tpn_cfg_ = copy.deepcopy(tpn_cfg)
        tpn_cfg_['in_channels'] = list(tpn_cfg_['in_channels'])
        TPN(**tpn_cfg_)

    with pytest.raises(AssertionError):
        # out_channels must be an int, not a float
        tpn_cfg_ = copy.deepcopy(tpn_cfg)
        tpn_cfg_['out_channels'] = float(tpn_cfg_['out_channels'])
        TPN(**tpn_cfg_)

    with pytest.raises(AssertionError):
        # unsupported downsample_position is rejected
        tpn_cfg_ = copy.deepcopy(tpn_cfg)
        tpn_cfg_['downsample_cfg']['downsample_position'] = 'unsupport'
        TPN(**tpn_cfg_)

    # Every *_cfg entry must be a dict; anything else raises.
    for k in tpn_cfg:
        if not k.endswith('_cfg'):
            continue
        tpn_cfg_ = copy.deepcopy(tpn_cfg)
        tpn_cfg_[k] = list()
        with pytest.raises(AssertionError):
            TPN(**tpn_cfg_)

    with pytest.raises(ValueError):
        # unknown flow_type is rejected
        tpn_cfg_ = copy.deepcopy(tpn_cfg)
        tpn_cfg_['flow_type'] = 'unsupport'
        TPN(**tpn_cfg_)

    target_shape = (32, 1)
    target = generate_backbone_demo_inputs(target_shape).long().squeeze()
    x0_shape = (32, 1024, 1, 4, 4)
    x1_shape = (32, 2048, 1, 2, 2)
    x0 = generate_backbone_demo_inputs(x0_shape)
    x1 = generate_backbone_demo_inputs(x1_shape)
    x = [x0, x1]

    # ResNetTPN with 'cascade' flow_type
    tpn_cfg_ = copy.deepcopy(tpn_cfg)
    tpn_cascade = TPN(**tpn_cfg_)
    feat, loss_aux = tpn_cascade(x, target)
    assert feat.shape == torch.Size([32, 2048, 1, 2, 2])
    assert len(loss_aux) == 1

    # ResNetTPN with 'parallel' flow_type
    tpn_cfg_ = copy.deepcopy(tpn_cfg)
    tpn_parallel = TPN(flow_type='parallel', **tpn_cfg_)
    feat, loss_aux = tpn_parallel(x, target)
    assert feat.shape == torch.Size([32, 2048, 1, 2, 2])
    assert len(loss_aux) == 1

    # ResNetTPN with 'cascade' flow_type and target is None
    # Without a target no auxiliary loss is produced.
    feat, loss_aux = tpn_cascade(x, None)
    assert feat.shape == torch.Size([32, 2048, 1, 2, 2])
    assert len(loss_aux) == 0

    # ResNetTPN with 'parallel' flow_type and target is None
    feat, loss_aux = tpn_parallel(x, None)
    assert feat.shape == torch.Size([32, 2048, 1, 2, 2])
    assert len(loss_aux) == 0
| 2,850 | 31.770115 | 73 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_gradcam.py | import pytest
import torch
from mmaction.models import build_recognizer
from mmaction.utils.gradcam_utils import GradCAM
from .base import generate_gradcam_inputs, get_recognizer_cfg
def _get_target_shapes(input_shape, num_classes=400, model_type='2D'):
if model_type not in ['2D', '3D']:
raise ValueError(f'Data type {model_type} is not available')
preds_target_shape = (input_shape[0], num_classes)
if model_type == '3D':
# input shape (batch_size, num_crops*num_clips, C, clip_len, H, W)
# target shape (batch_size*num_crops*num_clips, clip_len, H, W, C)
blended_imgs_target_shape = (input_shape[0] * input_shape[1],
input_shape[3], input_shape[4],
input_shape[5], input_shape[2])
else:
# input shape (batch_size, num_segments, C, H, W)
# target shape (batch_size, num_segments, H, W, C)
blended_imgs_target_shape = (input_shape[0], input_shape[1],
input_shape[3], input_shape[4],
input_shape[2])
return blended_imgs_target_shape, preds_target_shape
def _do_test_2D_models(recognizer,
                       target_layer_name,
                       input_shape,
                       num_classes=400,
                       device='cpu'):
    """Run GradCAM over a 2D recognizer and check the output shapes.

    Exercises both call styles (with and without the boolean flag) and
    asserts the blended images and prediction tensors have the expected
    shapes for 2D inputs.
    """
    inputs = generate_gradcam_inputs(input_shape)
    inputs['imgs'] = inputs['imgs'].to(device)
    inputs['label'] = inputs['label'].to(device)
    recognizer = recognizer.to(device)

    gradcam = GradCAM(recognizer, target_layer_name)
    expected_blended, expected_preds = _get_target_shapes(
        input_shape, num_classes=num_classes, model_type='2D')

    blended, preds = gradcam(inputs)
    assert blended.size() == expected_blended
    assert preds.size() == expected_preds

    blended, preds = gradcam(inputs, True)
    assert blended.size() == expected_blended
    assert preds.size() == expected_preds
def _do_test_3D_models(recognizer,
                       target_layer_name,
                       input_shape,
                       num_classes=400):
    """Run GradCAM over a 3D recognizer and check the output shapes.

    On parrots, 3D conv is only implemented on GPU, so the whole check is
    skipped when CUDA is unavailable; otherwise the model and inputs are
    moved to GPU first.

    This de-duplicates the previously repeated gradcam/assert block that
    appeared in both the parrots and the non-parrots branches.
    """
    blended_imgs_target_shape, preds_target_shape = _get_target_shapes(
        input_shape, num_classes=num_classes, model_type='3D')
    demo_inputs = generate_gradcam_inputs(input_shape, '3D')

    if torch.__version__ == 'parrots':
        # parrots 3dconv is only implemented on gpu
        if not torch.cuda.is_available():
            return
        recognizer = recognizer.cuda()
        demo_inputs['imgs'] = demo_inputs['imgs'].cuda()
        demo_inputs['label'] = demo_inputs['label'].cuda()

    gradcam = GradCAM(recognizer, target_layer_name)

    blended_imgs, preds = gradcam(demo_inputs)
    assert blended_imgs.size() == blended_imgs_target_shape
    assert preds.size() == preds_target_shape

    blended_imgs, preds = gradcam(demo_inputs, True)
    assert blended_imgs.size() == blended_imgs_target_shape
    assert preds.size() == preds_target_shape
def test_tsn():
    """Test GradCAM on a TSN recognizer."""
    cfg = get_recognizer_cfg('tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')
    cfg.model['backbone']['pretrained'] = None

    model = build_recognizer(cfg.model)
    model.cfg = cfg

    _do_test_2D_models(model, 'backbone/layer4/1/relu', (1, 25, 3, 32, 32))
def test_i3d():
    """Test GradCAM on an I3D recognizer."""
    config = get_recognizer_cfg('i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py')
    config.model['backbone']['pretrained2d'] = False
    config.model['backbone']['pretrained'] = None

    recognizer = build_recognizer(config.model)
    recognizer.cfg = config

    # Consistency fix: use a tuple for input_shape like every sibling test
    # (it was the only one written as a list).
    input_shape = (1, 1, 3, 32, 32, 32)
    target_layer_name = 'backbone/layer4/1/relu'
    _do_test_3D_models(recognizer, target_layer_name, input_shape)
def test_r2plus1d():
    """Test GradCAM on an R(2+1)D recognizer."""
    cfg = get_recognizer_cfg(
        'r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py')
    cfg.model['backbone']['pretrained2d'] = False
    cfg.model['backbone']['pretrained'] = None
    cfg.model['backbone']['norm_cfg'] = dict(type='BN3d')

    model = build_recognizer(cfg.model)
    model.cfg = cfg

    _do_test_3D_models(model, 'backbone/layer4/1/relu', (1, 3, 3, 8, 32, 32))
def test_slowfast():
    """Test GradCAM on a SlowFast recognizer."""
    cfg = get_recognizer_cfg(
        'slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py')
    model = build_recognizer(cfg.model)
    model.cfg = cfg

    # Hook into the slow pathway's last residual block.
    _do_test_3D_models(model, 'backbone/slow_path/layer4/1/relu',
                       (1, 1, 3, 32, 32, 32))
def test_tsm():
    """Test GradCAM on a TSM recognizer."""
    cfg = get_recognizer_cfg('tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py')
    cfg.model['backbone']['pretrained'] = None
    layer_name = 'backbone/layer4/1/relu'

    # base config
    model = build_recognizer(cfg.model)
    model.cfg = cfg
    _do_test_2D_models(model, layer_name, (1, 8, 3, 32, 32))

    # test twice sample + 3 crops, 2*3*8=48
    cfg.model.test_cfg = dict(average_clips='prob')
    model = build_recognizer(cfg.model)
    model.cfg = cfg
    _do_test_2D_models(model, layer_name, (1, 48, 3, 32, 32))
def test_csn():
    """Test GradCAM on a CSN recognizer."""
    cfg = get_recognizer_cfg(
        'csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py')
    cfg.model['backbone']['pretrained2d'] = False
    cfg.model['backbone']['pretrained'] = None

    model = build_recognizer(cfg.model)
    model.cfg = cfg

    _do_test_3D_models(model, 'backbone/layer4/1/relu', (1, 1, 3, 32, 32, 32))
def test_tpn():
    """Test GradCAM on TPN recognizers (2D TSM-based and 3D SlowOnly-based)."""
    layer_name = 'backbone/layer4/1/relu'

    # 2D variant on Something-Something V1 (174 classes).
    cfg = get_recognizer_cfg('tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py')
    cfg.model['backbone']['pretrained'] = None
    model = build_recognizer(cfg.model)
    model.cfg = cfg
    _do_test_2D_models(model, layer_name, (1, 8, 3, 32, 32), 174)

    # 3D variant on Kinetics.
    cfg = get_recognizer_cfg(
        'tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py')
    cfg.model['backbone']['pretrained'] = None
    model = build_recognizer(cfg.model)
    model.cfg = cfg
    _do_test_3D_models(model, layer_name, (1, 3, 3, 8, 32, 32))
def test_c3d():
    """Test GradCAM on a C3D recognizer (UCF101, 101 classes)."""
    cfg = get_recognizer_cfg('c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py')
    cfg.model['backbone']['pretrained'] = None

    model = build_recognizer(cfg.model)
    model.cfg = cfg

    _do_test_3D_models(model, 'backbone/conv5a/activate',
                       (1, 1, 3, 16, 112, 112), 101)
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_tin():
    """Test GradCAM on a TIN recognizer (GPU only)."""
    cfg = get_recognizer_cfg(
        'tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb.py')
    cfg.model['backbone']['pretrained'] = None

    model = build_recognizer(cfg.model)
    model.cfg = cfg

    _do_test_2D_models(
        model, 'backbone/layer4/1/relu', (1, 8, 3, 64, 64), device='cuda:0')
def test_x3d():
    """Test GradCAM on an X3D recognizer."""
    cfg = get_recognizer_cfg('x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py')
    cfg.model['backbone']['pretrained'] = None

    model = build_recognizer(cfg.model)
    model.cfg = cfg

    _do_test_3D_models(model, 'backbone/layer4/1/relu', (1, 1, 3, 13, 32, 32))
| 8,364 | 35.369565 | 79 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_head.py | import os.path as osp
import tempfile
from unittest.mock import Mock, patch
import numpy as np
import pytest
import torch
import torch.nn as nn
import mmaction
from mmaction.models import (ACRNHead, AudioTSNHead, BBoxHeadAVA, FBOHead,
I3DHead, LFBInferHead, SlowFastHead, TPNHead,
TRNHead, TSMHead, TSNHead, X3DHead)
from .base import generate_backbone_demo_inputs
def test_i3d_head():
    """Test loss method, layer construction, attributes and forward function in
    i3d head."""
    head = I3DHead(num_classes=4, in_channels=2048)
    head.init_weights()

    # Construction-time attributes.
    assert head.num_classes == 4
    assert head.in_channels == 2048
    assert head.dropout_ratio == 0.5
    assert head.init_std == 0.01

    # Layer construction.
    assert isinstance(head.dropout, nn.Dropout)
    assert head.dropout.p == head.dropout_ratio
    assert isinstance(head.fc_cls, nn.Linear)
    assert head.fc_cls.in_features == head.in_channels
    assert head.fc_cls.out_features == head.num_classes
    assert isinstance(head.avg_pool, nn.AdaptiveAvgPool3d)
    assert head.avg_pool.output_size == (1, 1, 1)

    # Forward a dummy 3D feature map: (3, 2048, 4, 7, 7) -> (3, 4).
    feat = torch.rand((3, 2048, 4, 7, 7))
    scores = head(feat)
    assert scores.shape == torch.Size([3, 4])
def test_bbox_head_ava():
    """Test loss method, layer construction, attributes and forward function in
    bbox head."""
    with pytest.raises(TypeError):
        # topk must be None, int or tuple[int]
        BBoxHeadAVA(topk=0.1)

    with pytest.raises(AssertionError):
        # topk should be smaller than num_classes
        BBoxHeadAVA(num_classes=5, topk=(3, 5))

    bbox_head = BBoxHeadAVA(in_channels=10, num_classes=4, topk=1)
    input = torch.randn([3, 10, 2, 2, 2])
    ret, _ = bbox_head(input)
    assert ret.shape == (3, 4)

    bbox_head = BBoxHeadAVA()
    bbox_head.init_weights()
    bbox_head = BBoxHeadAVA(temporal_pool_type='max', spatial_pool_type='avg')
    bbox_head.init_weights()

    # Multi-label loss/metric check: 4 samples, 7 classes, fixed logits and
    # one-hot label matrix; the expected values below are pinned to the
    # current implementation's output.
    cls_score = torch.tensor(
        [[0.568, -0.162, 0.273, -0.390, 0.447, 0.102, -0.409],
         [2.388, 0.609, 0.369, 1.630, -0.808, -0.212, 0.296],
         [0.252, -0.533, -0.644, -0.591, 0.148, 0.963, -0.525],
         [0.134, -0.311, -0.764, -0.752, 0.656, -1.517, 0.185]])
    labels = torch.tensor([[0., 0., 1., 0., 0., 1., 0.],
                           [0., 0., 0., 1., 0., 0., 0.],
                           [0., 1., 0., 0., 1., 0., 1.],
                           [0., 0., 1., 1., 0., 0., 1.]])
    label_weights = torch.tensor([1., 1., 1., 1.])
    losses = bbox_head.loss(
        cls_score=cls_score,
        bbox_pred=None,
        rois=None,
        labels=labels,
        label_weights=label_weights)
    assert torch.isclose(losses['loss_action_cls'], torch.tensor(0.7162495))
    assert torch.isclose(losses['recall@thr=0.5'], torch.tensor(0.6666666))
    assert torch.isclose(losses['prec@thr=0.5'], torch.tensor(0.4791665))
    assert torch.isclose(losses['recall@top3'], torch.tensor(0.75))
    assert torch.isclose(losses['prec@top3'], torch.tensor(0.5))
    assert torch.isclose(losses['recall@top5'], torch.tensor(1.0))
    assert torch.isclose(losses['prec@top5'], torch.tensor(0.45))

    rois = torch.tensor([[0.0, 0.1, 0.2, 0.3, 0.4], [0.0, 0.5, 0.6, 0.7, 0.8]])
    # NOTE(review): with a (2, 5) rois tensor these slices select ROWS
    # (rois[1::2] is the second roi, rois[2::2] is empty) rather than the
    # x/y coordinate columns — presumably intended to scale coordinates.
    # The expected bboxes below match the current behavior, so confirm
    # before changing.
    rois[1::2] *= 380
    rois[2::2] *= 220
    crop_quadruple = np.array([0.1, 0.2, 0.8, 0.7])
    cls_score = torch.tensor([0.995, 0.728])
    img_shape = (320, 480)
    flip = True
    bboxes, scores = bbox_head.get_det_bboxes(
        rois=rois,
        cls_score=cls_score,
        img_shape=img_shape,
        flip=flip,
        crop_quadruple=crop_quadruple)
    assert torch.all(
        torch.isclose(
            bboxes,
            torch.tensor([[0.89783341, 0.20043750, 0.89816672, 0.20087500],
                          [0.45499998, 0.69875002, 0.58166665, 0.86499995]])))
    assert torch.all(
        torch.isclose(scores, torch.tensor([0.73007441, 0.67436624])))
def test_x3d_head():
    """Test loss method, layer construction, attributes and forward function in
    x3d head."""
    head = X3DHead(in_channels=432, num_classes=4, fc1_bias=False)
    head.init_weights()

    # Construction-time attributes.
    assert head.num_classes == 4
    assert head.dropout_ratio == 0.5
    assert head.in_channels == 432
    assert head.init_std == 0.01

    assert isinstance(head.dropout, nn.Dropout)
    assert head.dropout.p == head.dropout_ratio

    # fc1 projects to the mid channels (bias disabled), fc2 classifies.
    assert isinstance(head.fc1, nn.Linear)
    assert head.fc1.in_features == head.in_channels
    assert head.fc1.out_features == head.mid_channels
    assert head.fc1.bias is None
    assert isinstance(head.fc2, nn.Linear)
    assert head.fc2.in_features == head.mid_channels
    assert head.fc2.out_features == head.num_classes

    assert isinstance(head.pool, nn.AdaptiveAvgPool3d)
    assert head.pool.output_size == (1, 1, 1)

    # Forward a dummy 3D feature map: (3, 432, 4, 7, 7) -> (3, 4).
    feat = torch.rand((3, 432, 4, 7, 7))
    scores = head(feat)
    assert scores.shape == torch.Size([3, 4])
def test_slowfast_head():
    """Test loss method, layer construction, attributes and forward function in
    slowfast head."""
    head = SlowFastHead(num_classes=4, in_channels=2304)
    head.init_weights()

    # Construction-time attributes.
    assert head.num_classes == 4
    assert head.dropout_ratio == 0.8
    assert head.in_channels == 2304
    assert head.init_std == 0.01

    # Layer construction.
    assert isinstance(head.dropout, nn.Dropout)
    assert head.dropout.p == head.dropout_ratio
    assert isinstance(head.fc_cls, nn.Linear)
    assert head.fc_cls.in_features == head.in_channels
    assert head.fc_cls.out_features == head.num_classes
    assert isinstance(head.avg_pool, nn.AdaptiveAvgPool3d)
    assert head.avg_pool.output_size == (1, 1, 1)

    # The head consumes a (slow, fast) feature tuple.
    feat_slow = torch.rand((3, 2048, 32, 7, 7))
    feat_fast = torch.rand((3, 256, 4, 7, 7))
    head = SlowFastHead(num_classes=4, in_channels=2304)
    scores = head((feat_slow, feat_fast))
    assert scores.shape == torch.Size([3, 4])
def test_tsn_head():
    """Test loss method, layer construction, attributes and forward function in
    tsn head."""
    tsn_head = TSNHead(num_classes=4, in_channels=2048)
    tsn_head.init_weights()

    assert tsn_head.num_classes == 4
    assert tsn_head.dropout_ratio == 0.4
    assert tsn_head.in_channels == 2048
    assert tsn_head.init_std == 0.01
    assert tsn_head.consensus.dim == 1
    assert tsn_head.spatial_type == 'avg'

    assert isinstance(tsn_head.dropout, nn.Dropout)
    assert tsn_head.dropout.p == tsn_head.dropout_ratio
    assert isinstance(tsn_head.fc_cls, nn.Linear)
    assert tsn_head.fc_cls.in_features == tsn_head.in_channels
    assert tsn_head.fc_cls.out_features == tsn_head.num_classes
    assert isinstance(tsn_head.avg_pool, nn.AdaptiveAvgPool2d)
    assert tsn_head.avg_pool.output_size == (1, 1)

    input_shape = (8, 2048, 7, 7)
    feat = torch.rand(input_shape)

    # tsn head inference: 8 segments are averaged to one clip score.
    num_segs = input_shape[0]
    cls_scores = tsn_head(feat, num_segs)
    assert cls_scores.shape == torch.Size([1, 4])

    # Test multi-class recognition
    multi_tsn_head = TSNHead(
        num_classes=4,
        in_channels=2048,
        loss_cls=dict(type='BCELossWithLogits', loss_weight=160.0),
        multi_class=True,
        label_smooth_eps=0.01)
    multi_tsn_head.init_weights()
    assert multi_tsn_head.num_classes == 4
    assert multi_tsn_head.dropout_ratio == 0.4
    assert multi_tsn_head.in_channels == 2048
    assert multi_tsn_head.init_std == 0.01
    assert multi_tsn_head.consensus.dim == 1

    assert isinstance(multi_tsn_head.dropout, nn.Dropout)
    assert multi_tsn_head.dropout.p == multi_tsn_head.dropout_ratio
    assert isinstance(multi_tsn_head.fc_cls, nn.Linear)
    assert multi_tsn_head.fc_cls.in_features == multi_tsn_head.in_channels
    assert multi_tsn_head.fc_cls.out_features == multi_tsn_head.num_classes
    assert isinstance(multi_tsn_head.avg_pool, nn.AdaptiveAvgPool2d)
    assert multi_tsn_head.avg_pool.output_size == (1, 1)

    input_shape = (8, 2048, 7, 7)
    feat = torch.rand(input_shape)

    # multi-class tsn head inference
    # BUG FIX: forward through the multi-class head — this previously called
    # tsn_head again, so the multi-class path was never exercised.
    num_segs = input_shape[0]
    cls_scores = multi_tsn_head(feat, num_segs)
    assert cls_scores.shape == torch.Size([1, 4])
def test_tsn_head_audio():
    """Test loss method, layer construction, attributes and forward function in
    tsn head."""
    head = AudioTSNHead(num_classes=4, in_channels=5)
    head.init_weights()

    # Construction-time attributes.
    assert head.num_classes == 4
    assert head.dropout_ratio == 0.4
    assert head.in_channels == 5
    assert head.init_std == 0.01
    assert head.spatial_type == 'avg'

    # Layer construction.
    assert isinstance(head.dropout, nn.Dropout)
    assert head.dropout.p == head.dropout_ratio
    assert isinstance(head.fc_cls, nn.Linear)
    assert head.fc_cls.in_features == head.in_channels
    assert head.fc_cls.out_features == head.num_classes
    assert isinstance(head.avg_pool, nn.AdaptiveAvgPool2d)
    assert head.avg_pool.output_size == (1, 1)

    # Forward: one score row per input clip, (8, 5, 7, 7) -> (8, 4).
    feat = torch.rand((8, 5, 7, 7))
    scores = head(feat)
    assert scores.shape == torch.Size([8, 4])
def test_tsm_head():
    """Test loss method, layer construction, attributes and forward function in
    tsm head."""
    head = TSMHead(num_classes=4, in_channels=2048)
    head.init_weights()

    # Construction-time attributes.
    assert head.num_classes == 4
    assert head.dropout_ratio == 0.8
    assert head.in_channels == 2048
    assert head.init_std == 0.001
    assert head.consensus.dim == 1
    assert head.spatial_type == 'avg'

    # Layer construction.
    assert isinstance(head.dropout, nn.Dropout)
    assert head.dropout.p == head.dropout_ratio
    assert isinstance(head.fc_cls, nn.Linear)
    assert head.fc_cls.in_features == head.in_channels
    assert head.fc_cls.out_features == head.num_classes
    assert isinstance(head.avg_pool, nn.AdaptiveAvgPool2d)
    assert head.avg_pool.output_size == 1

    feat = torch.rand((8, 2048, 7, 7))
    num_segs = feat.shape[0]

    # Inference without temporal pooling: 8 segments -> 1 clip score.
    scores = head(feat, num_segs)
    assert scores.shape == torch.Size([1, 4])

    # Inference with temporal pooling: effective segments are halved,
    # yielding 2 clip scores.
    pool_head = TSMHead(num_classes=4, in_channels=2048, temporal_pool=True)
    pool_head.init_weights()
    scores = pool_head(feat, num_segs)
    assert scores.shape == torch.Size([2, 4])
def test_trn_head():
    """Test loss method, layer construction, attributes and forward function in
    trn head."""
    from mmaction.models.heads.trn_head import (RelationModule,
                                                RelationModuleMultiScale)
    trn_head = TRNHead(num_classes=4, in_channels=2048, relation_type='TRN')
    trn_head.init_weights()
    assert trn_head.num_classes == 4
    assert trn_head.dropout_ratio == 0.8
    assert trn_head.in_channels == 2048
    assert trn_head.init_std == 0.001
    assert trn_head.spatial_type == 'avg'

    # Single-scale TRN uses a plain RelationModule whose classifier's last
    # linear layer emits the class scores.
    relation_module = trn_head.consensus
    assert isinstance(relation_module, RelationModule)
    assert relation_module.hidden_dim == 256
    assert isinstance(relation_module.classifier[3], nn.Linear)
    assert relation_module.classifier[3].out_features == trn_head.num_classes

    assert trn_head.dropout.p == trn_head.dropout_ratio
    assert isinstance(trn_head.dropout, nn.Dropout)
    assert isinstance(trn_head.fc_cls, nn.Linear)
    # fc_cls maps backbone features into the relation module's hidden
    # space rather than directly to class scores.
    assert trn_head.fc_cls.in_features == trn_head.in_channels
    assert trn_head.fc_cls.out_features == trn_head.hidden_dim
    assert isinstance(trn_head.avg_pool, nn.AdaptiveAvgPool2d)
    assert trn_head.avg_pool.output_size == 1

    input_shape = (8, 2048, 7, 7)
    feat = torch.rand(input_shape)

    # trn head inference with single-scale relation
    num_segs = input_shape[0]
    cls_scores = trn_head(feat, num_segs)
    assert cls_scores.shape == torch.Size([1, 4])

    # trn head inference with multi-scale relation
    trn_head = TRNHead(
        num_classes=4,
        in_channels=2048,
        num_segments=8,
        relation_type='TRNMultiScale')
    trn_head.init_weights()
    assert isinstance(trn_head.consensus, RelationModuleMultiScale)
    assert trn_head.consensus.scales == range(8, 1, -1)
    cls_scores = trn_head(feat, num_segs)
    assert cls_scores.shape == torch.Size([1, 4])

    with pytest.raises(ValueError):
        # unknown relation_type is rejected
        trn_head = TRNHead(
            num_classes=4,
            in_channels=2048,
            num_segments=8,
            relation_type='RelationModlue')
@patch.object(mmaction.models.LFBInferHead, '__del__', Mock)
def test_lfb_infer_head():
    """Test layer construction, attributes and forward function in lfb infer
    head.

    ``__del__`` is mocked above so the head does not attempt to dump its
    collected features to disk when it is garbage-collected.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        lfb_infer_head = LFBInferHead(
            lfb_prefix_path=tmpdir, use_half_precision=True)
        lfb_infer_head.init_weights()

        st_feat_shape = (3, 16, 1, 8, 8)
        st_feat = generate_backbone_demo_inputs(st_feat_shape)
        # Column 0 of each roi holds the batch index of its sample.
        rois = torch.cat(
            (torch.tensor([0, 1, 0]).float().view(3, 1), torch.randn(3, 4)),
            dim=1)
        img_metas = [dict(img_key='video_1,777'), dict(img_key='video_2, 888')]
        result = lfb_infer_head(st_feat, rois, img_metas)

        # The head is a pass-through: input features come back unchanged,
        # while one pooled feature per roi is accumulated internally.
        assert st_feat.equal(result)
        assert len(lfb_infer_head.all_features) == 3
        assert lfb_infer_head.all_features[0].shape == (16, 1, 1, 1)
def test_fbo_head():
    """Test layer construction, attributes and forward function in fbo head.

    Runs FBOHead with each of the three supported feature-bank operators
    ('non_local', 'avg', 'max') and checks the output shapes.
    """
    lfb_prefix_path = osp.normpath(
        osp.join(osp.dirname(__file__), '../data/lfb'))

    def make_lfb_cfg():
        """Return a fresh LFB config dict (one per head, in case a head
        mutates its config)."""
        # NOTE: 'unittest' is wrapped into a real one-element tuple here;
        # the previous `('unittest')` was just a parenthesized string.
        return dict(
            lfb_prefix_path=lfb_prefix_path,
            max_num_sampled_feat=5,
            window_size=60,
            lfb_channels=16,
            dataset_modes=('unittest', ),
            device='cpu')

    st_feat = generate_backbone_demo_inputs((1, 16, 1, 8, 8))
    rois = torch.randn(1, 5)
    rois[0][0] = 0  # first column is the batch index of the single RoI
    img_metas = [dict(img_key='video_1, 930')]

    # non local fbo
    fbo_head = FBOHead(
        lfb_cfg=make_lfb_cfg(),
        fbo_cfg=dict(
            type='non_local',
            st_feat_channels=16,
            lt_feat_channels=16,
            latent_channels=8,
            num_st_feat=1,
            num_lt_feat=5 * 60,
        ))
    fbo_head.init_weights()
    out = fbo_head(st_feat, rois, img_metas)
    assert out.shape == (1, 24, 1, 1, 1)

    # avg fbo
    fbo_head = FBOHead(lfb_cfg=make_lfb_cfg(), fbo_cfg=dict(type='avg'))
    fbo_head.init_weights()
    out = fbo_head(st_feat, rois, img_metas)
    assert out.shape == (1, 32, 1, 1, 1)

    # max fbo
    fbo_head = FBOHead(lfb_cfg=make_lfb_cfg(), fbo_cfg=dict(type='max'))
    fbo_head.init_weights()
    out = fbo_head(st_feat, rois, img_metas)
    assert out.shape == (1, 32, 1, 1, 1)
def test_tpn_head():
    """Check TPNHead construction and forward with and without ``num_segs``.

    Right after construction ``avg_pool2d`` must be ``None``; after the
    first forward call it becomes a concrete ``nn.AvgPool3d``.
    """
    head = TPNHead(num_classes=4, in_channels=2048)
    head.init_weights()

    assert hasattr(head, 'avg_pool2d')
    assert hasattr(head, 'avg_pool3d')
    assert isinstance(head.avg_pool3d, nn.AdaptiveAvgPool3d)
    assert head.avg_pool3d.output_size == (1, 1, 1)
    assert head.avg_pool2d is None

    # 4D input with num_segs supplied explicitly.
    feat = torch.rand(4, 2048, 7, 7)
    scores = head(feat, 2)
    assert isinstance(head.avg_pool2d, nn.AvgPool3d)
    assert head.avg_pool2d.kernel_size == (1, 7, 7)
    assert scores.shape == torch.Size([2, 4])

    # 5D input without num_segs.
    feat = torch.rand(2, 2048, 3, 7, 7)
    scores = head(feat)
    assert isinstance(head.avg_pool2d, nn.AvgPool3d)
    assert head.avg_pool2d.kernel_size == (1, 7, 7)
    assert scores.shape == torch.Size([2, 4])
def test_acrn_head():
    """Forward ACRNHead with random RoI/global features at several configs."""
    roi_feat = torch.randn(4, 16, 1, 7, 7)
    global_feat = torch.randn(2, 16, 1, 16, 16)
    # Each RoI row: (batch index, x1, y1, x2, y2).
    rois = torch.Tensor([[0, 2.2268, 0.5926, 10.6142, 8.0029],
                         [0, 2.2577, 0.1519, 11.6451, 8.9282],
                         [1, 1.9874, 1.0000, 11.1585, 8.2840],
                         [1, 3.3338, 3.7166, 8.4174, 11.2785]])

    # Default config keeps the spatial size of the global feature map.
    head = ACRNHead(32, 16)
    head.init_weights()
    out = head(roi_feat, global_feat, rois)
    assert out.shape == (4, 16, 1, 16, 16)

    # stride=2 halves the spatial resolution.
    head = ACRNHead(32, 16, stride=2)
    out = head(roi_feat, global_feat, rois)
    assert out.shape == (4, 16, 1, 8, 8)

    # Extra convs must not change the output shape further.
    head = ACRNHead(32, 16, stride=2, num_convs=2)
    out = head(roi_feat, global_feat, rois)
    assert out.shape == (4, 16, 1, 8, 8)
| 17,583 | 34.097804 | 79 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_localizers/test_bmn.py | import numpy as np
import torch
from mmaction.models import build_localizer
from ..base import get_localizer_cfg
def test_bmn():
    """Train- and inference-mode forward of the BMN localizer.

    Runs on GPU when CUDA is available, otherwise on CPU; both branches
    perform the same training forward plus a no-grad inference forward.
    """
    model_cfg = get_localizer_cfg(
        'bmn/bmn_400x100_2x8_9e_activitynet_feature.py')

    if torch.cuda.is_available():
        localizer_bmn = build_localizer(model_cfg.model).cuda()
        raw_feature = torch.rand(8, 400, 100).cuda()
        # NOTE(review): this branch passes gt_bbox as a numpy array while
        # the CPU branch uses a torch.Tensor — presumably both are accepted
        # by the model's forward; confirm the asymmetry is intentional.
        gt_bbox = np.array([[[0.1, 0.3], [0.375, 0.625]]] * 8)
        losses = localizer_bmn(raw_feature, gt_bbox)
        assert isinstance(losses, dict)

        # Test forward test
        video_meta = [
            dict(
                video_name='v_test',
                duration_second=100,
                duration_frame=960,
                feature_frame=960)
        ]
        with torch.no_grad():
            one_raw_feature = torch.rand(1, 400, 100).cuda()
            localizer_bmn(
                one_raw_feature,
                gt_bbox=None,
                video_meta=video_meta,
                return_loss=False)
    else:
        localizer_bmn = build_localizer(model_cfg.model)
        raw_feature = torch.rand(8, 400, 100)
        gt_bbox = torch.Tensor([[[0.1, 0.3], [0.375, 0.625]]] * 8)
        losses = localizer_bmn(raw_feature, gt_bbox)
        assert isinstance(losses, dict)

        # Test forward test
        video_meta = [
            dict(
                video_name='v_test',
                duration_second=100,
                duration_frame=960,
                feature_frame=960)
        ]
        with torch.no_grad():
            one_raw_feature = torch.rand(1, 400, 100)
            localizer_bmn(
                one_raw_feature,
                gt_bbox=None,
                video_meta=video_meta,
                return_loss=False)
| 1,758 | 30.410714 | 66 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_localizers/test_ssn.py | import copy
import mmcv
import pytest
import torch
from mmaction.models import build_localizer
def test_ssn_train():
    """Train-mode forward of the SSN localizer under several configs.

    Covers the base config, a variant with dropout moved from the model
    into the head, and a variant whose head has no regression branch.
    """
    train_cfg = mmcv.ConfigDict(
        dict(
            ssn=dict(
                assigner=dict(
                    positive_iou_threshold=0.7,
                    background_iou_threshold=0.01,
                    incomplete_iou_threshold=0.3,
                    background_coverage_threshold=0.02,
                    incomplete_overlap_threshold=0.01),
                sampler=dict(
                    num_per_video=8,
                    positive_ratio=1,
                    background_ratio=1,
                    incomplete_ratio=6,
                    add_gt_as_proposals=True),
                loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1),
                debug=False)))
    base_model_cfg = dict(
        type='SSN',
        backbone=dict(
            type='ResNet', pretrained=None, depth=18, norm_eval=True),
        spatial_type='avg',
        dropout_ratio=0.8,
        loss_cls=dict(type='SSNLoss'),
        cls_head=dict(
            type='SSNHead',
            dropout_ratio=0.,
            in_channels=512,
            num_classes=20,
            consensus=dict(
                type='STPPTrain',
                stpp_stage=(1, 1, 1),
                num_segments_list=(2, 5, 2)),
            use_regression=True),
        train_cfg=train_cfg)
    # Variant: dropout disabled in the model, enabled inside the head.
    dropout_cfg = copy.deepcopy(base_model_cfg)
    dropout_cfg['dropout_ratio'] = 0
    dropout_cfg['cls_head']['dropout_ratio'] = 0.5
    # Variant: classification head without the regression branch.
    non_regression_cfg = copy.deepcopy(base_model_cfg)
    non_regression_cfg['cls_head']['use_regression'] = False

    imgs = torch.rand(1, 8, 9, 3, 224, 224)
    proposal_scale_factor = torch.Tensor([[[1.0345, 1.0345], [1.0028, 0.0028],
                                           [1.0013, 1.0013], [1.0008, 1.0008],
                                           [0.3357, 1.0006], [1.0006, 1.0006],
                                           [0.0818, 1.0005], [1.0030,
                                                              1.0030]]])
    proposal_type = torch.Tensor([[0, 1, 1, 1, 1, 1, 1, 2]])
    proposal_labels = torch.LongTensor([[8, 8, 8, 8, 8, 8, 8, 0]])
    reg_targets = torch.Tensor([[[0.2929, 0.2694], [0.0000, 0.0000],
                                 [0.0000, 0.0000], [0.0000, 0.0000],
                                 [0.0000, 0.0000], [0.0000, 0.0000],
                                 [0.0000, 0.0000], [0.0000, 0.0000]]])

    localizer_ssn = build_localizer(base_model_cfg)
    localizer_ssn_dropout = build_localizer(dropout_cfg)
    localizer_ssn_non_regression = build_localizer(non_regression_cfg)
    if torch.cuda.is_available():
        localizer_ssn = localizer_ssn.cuda()
        localizer_ssn_dropout = localizer_ssn_dropout.cuda()
        localizer_ssn_non_regression = localizer_ssn_non_regression.cuda()
        imgs = imgs.cuda()
        proposal_scale_factor = proposal_scale_factor.cuda()
        proposal_type = proposal_type.cuda()
        proposal_labels = proposal_labels.cuda()
        reg_targets = reg_targets.cuda()

    # Train normal case
    losses = localizer_ssn(
        imgs,
        proposal_scale_factor=proposal_scale_factor,
        proposal_type=proposal_type,
        proposal_labels=proposal_labels,
        reg_targets=reg_targets)
    assert isinstance(losses, dict)

    # Train SSN without dropout in model, with dropout in head
    losses = localizer_ssn_dropout(
        imgs,
        proposal_scale_factor=proposal_scale_factor,
        proposal_type=proposal_type,
        proposal_labels=proposal_labels,
        reg_targets=reg_targets)
    assert isinstance(losses, dict)

    # Train SSN model without regression
    losses = localizer_ssn_non_regression(
        imgs,
        proposal_scale_factor=proposal_scale_factor,
        proposal_type=proposal_type,
        proposal_labels=proposal_labels,
        reg_targets=reg_targets)
    assert isinstance(losses, dict)
def test_ssn_test():
    """Inference-mode forward of the SSN localizer under several configs.

    Covers average and max spatial pooling, a head without regression, a
    tuple-valued STPP stage config, and an invalid string stage config that
    must raise ``ValueError`` at build time.
    """
    test_cfg = mmcv.ConfigDict(
        dict(
            ssn=dict(
                sampler=dict(test_interval=6, batch_size=16),
                evaluater=dict(
                    top_k=2000,
                    nms=0.2,
                    softmax_before_filter=True,
                    cls_score_dict=None,
                    cls_top_k=2))))
    base_model_cfg = dict(
        type='SSN',
        backbone=dict(
            type='ResNet', pretrained=None, depth=18, norm_eval=True),
        spatial_type='avg',
        dropout_ratio=0.8,
        cls_head=dict(
            type='SSNHead',
            dropout_ratio=0.,
            in_channels=512,
            num_classes=20,
            consensus=dict(type='STPPTest', stpp_stage=(1, 1, 1)),
            use_regression=True),
        test_cfg=test_cfg)
    maxpool_model_cfg = copy.deepcopy(base_model_cfg)
    maxpool_model_cfg['spatial_type'] = 'max'
    non_regression_cfg = copy.deepcopy(base_model_cfg)
    non_regression_cfg['cls_head']['use_regression'] = False
    non_regression_cfg['cls_head']['consensus']['use_regression'] = False
    tuple_stage_cfg = copy.deepcopy(base_model_cfg)
    tuple_stage_cfg['cls_head']['consensus']['stpp_stage'] = (1, (1, 2), 1)
    str_stage_cfg = copy.deepcopy(base_model_cfg)
    str_stage_cfg['cls_head']['consensus']['stpp_stage'] = ('error', )

    imgs = torch.rand(1, 8, 3, 224, 224)
    relative_proposal_list = torch.Tensor([[[0.2500, 0.6250], [0.3750,
                                                               0.7500]]])
    scale_factor_list = torch.Tensor([[[1.0000, 1.0000], [1.0000, 0.2661]]])
    proposal_tick_list = torch.LongTensor([[[1, 2, 5, 7], [20, 30, 60, 80]]])
    reg_norm_consts = torch.Tensor([[[-0.0603, 0.0325], [0.0752, 0.1596]]])

    localizer_ssn = build_localizer(base_model_cfg)
    localizer_ssn_maxpool = build_localizer(maxpool_model_cfg)
    localizer_ssn_non_regression = build_localizer(non_regression_cfg)
    localizer_ssn_tuple_stage_cfg = build_localizer(tuple_stage_cfg)
    # A non-int STPP stage entry is invalid and must be rejected at build.
    with pytest.raises(ValueError):
        build_localizer(str_stage_cfg)

    if torch.cuda.is_available():
        localizer_ssn = localizer_ssn.cuda()
        localizer_ssn_maxpool = localizer_ssn_maxpool.cuda()
        localizer_ssn_non_regression = localizer_ssn_non_regression.cuda()
        localizer_ssn_tuple_stage_cfg = localizer_ssn_tuple_stage_cfg.cuda()
        imgs = imgs.cuda()
        relative_proposal_list = relative_proposal_list.cuda()
        scale_factor_list = scale_factor_list.cuda()
        proposal_tick_list = proposal_tick_list.cuda()
        reg_norm_consts = reg_norm_consts.cuda()

    with torch.no_grad():
        # Test normal case
        localizer_ssn(
            imgs,
            relative_proposal_list=relative_proposal_list,
            scale_factor_list=scale_factor_list,
            proposal_tick_list=proposal_tick_list,
            reg_norm_consts=reg_norm_consts,
            return_loss=False)

        # Test SSN model with max spatial pooling
        localizer_ssn_maxpool(
            imgs,
            relative_proposal_list=relative_proposal_list,
            scale_factor_list=scale_factor_list,
            proposal_tick_list=proposal_tick_list,
            reg_norm_consts=reg_norm_consts,
            return_loss=False)

        # Test SSN model without regression
        localizer_ssn_non_regression(
            imgs,
            relative_proposal_list=relative_proposal_list,
            scale_factor_list=scale_factor_list,
            proposal_tick_list=proposal_tick_list,
            reg_norm_consts=reg_norm_consts,
            return_loss=False)

        # Test SSN model with tuple stage cfg.
        localizer_ssn_tuple_stage_cfg(
            imgs,
            relative_proposal_list=relative_proposal_list,
            scale_factor_list=scale_factor_list,
            proposal_tick_list=proposal_tick_list,
            reg_norm_consts=reg_norm_consts,
            return_loss=False)
| 7,989 | 38.359606 | 78 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_localizers/test_pem.py | import torch
from mmaction.models import build_localizer
from ..base import get_localizer_cfg
def test_pem():
    """Train- and inference-mode forward of the BSN PEM localizer."""
    model_cfg = get_localizer_cfg(
        'bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py')
    pem = build_localizer(model_cfg.model)

    # Training forward returns a loss dict.
    bsp_feature = torch.rand(8, 100, 32)
    reference_temporal_iou = torch.rand(8, 100)
    losses = pem(bsp_feature, reference_temporal_iou)
    assert isinstance(losses, dict)

    # Inference forward, one sample at a time.
    tmin, tmax = torch.rand(100), torch.rand(100)
    tmin_score, tmax_score = torch.rand(100), torch.rand(100)
    video_meta = [
        dict(
            video_name='v_test',
            duration_second=100,
            duration_frame=1000,
            annotations=[{
                'segment': [0.3, 0.6],
                'label': 'Rock climbing'
            }],
            feature_frame=900)
    ]
    with torch.no_grad():
        for single_feature in bsp_feature:
            pem(single_feature.reshape(1, 100, 32),
                tmin=tmin,
                tmax=tmax,
                tmin_score=tmin_score,
                tmax_score=tmax_score,
                video_meta=video_meta,
                return_loss=False)
STTS | STTS-main/VideoSwin/tests/test_models/test_localizers/test_tem.py | import torch
from mmaction.models import build_localizer
from ..base import get_localizer_cfg
def test_tem():
    """Train- and inference-mode forward of the BSN TEM localizer."""
    model_cfg = get_localizer_cfg(
        'bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py')
    tem = build_localizer(model_cfg.model)

    # Training forward returns a loss dict.
    raw_feature = torch.rand(8, 400, 100)
    gt_bbox = torch.Tensor([[[1.0, 3.0], [3.0, 5.0]]] * 8)
    losses = tem(raw_feature, gt_bbox)
    assert isinstance(losses, dict)

    # Inference forward, one sample at a time.
    video_meta = [{'video_name': 'v_test'}]
    with torch.no_grad():
        for single_feature in raw_feature:
            tem(single_feature.reshape(1, 400, 100),
                video_meta=video_meta,
                return_loss=False)
STTS | STTS-main/VideoSwin/tests/test_models/test_recognizers/test_recognizer2d.py | import torch
from mmaction.models import build_recognizer
from ..base import generate_recognizer_demo_inputs, get_recognizer_cfg
def test_tsn():
    """Train/inference forward of the TSN recognizer with several backbones.

    Covers the default mmaction ResNet backbone plus backbones pulled in
    from mmcls, torchvision and timm, and the mixup training config.
    """
    config = get_recognizer_cfg('tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py')
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    input_shape = (1, 3, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

        # Test forward gradcam
        recognizer(imgs, gradcam=True)
        for one_img in img_list:
            recognizer(one_img, gradcam=True)

        # test forward dummy
        recognizer.forward_dummy(imgs, softmax=False)
        res = recognizer.forward_dummy(imgs, softmax=True)[0]
        # With softmax the dummy output must be valid probabilities.
        assert torch.min(res) >= 0
        assert torch.max(res) <= 1

    # Backbone registered under the 'mmcls.' prefix.
    mmcls_backbone = dict(
        type='mmcls.ResNeXt',
        depth=101,
        num_stages=4,
        out_indices=(3, ),
        groups=32,
        width_per_group=4,
        style='pytorch')
    config.model['backbone'] = mmcls_backbone
    recognizer = build_recognizer(config.model)

    input_shape = (1, 3, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

    # test mixup forward
    config = get_recognizer_cfg(
        'tsn/tsn_r50_video_mixup_1x1x8_100e_kinetics400_rgb.py')
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)
    input_shape = (2, 8, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']
    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)

    # test torchvision backbones
    # NOTE(review): pretrained=True downloads weights at test time, making
    # this test network-dependent — consider pretrained=False.
    tv_backbone = dict(type='torchvision.densenet161', pretrained=True)
    config.model['backbone'] = tv_backbone
    config.model['cls_head']['in_channels'] = 2208
    recognizer = build_recognizer(config.model)
    input_shape = (1, 3, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']
    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

    # test timm backbones
    timm_backbone = dict(type='timm.efficientnet_b0', pretrained=False)
    config.model['backbone'] = timm_backbone
    config.model['cls_head']['in_channels'] = 1280
    recognizer = build_recognizer(config.model)

    input_shape = (1, 3, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']
    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)
def test_tsm():
    """Train/inference forward of the TSM recognizer, incl. multi-view test."""
    config = get_recognizer_cfg('tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py')
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    input_shape = (1, 8, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

    # test twice sample + 3 crops
    # 48 frames = 8 segments * 2 samples * 3 crops
    input_shape = (2, 48, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']

    config.model.test_cfg = dict(average_clips='prob')
    recognizer = build_recognizer(config.model)

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

        # Test forward gradcam
        recognizer(imgs, gradcam=True)
        for one_img in img_list:
            recognizer(one_img, gradcam=True)
def test_trn():
    """Train/inference forward of the TRN recognizer, incl. multi-view test."""
    config = get_recognizer_cfg('trn/trn_r50_1x1x8_50e_sthv1_rgb.py')
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    input_shape = (1, 8, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

    # test twice sample + 3 crops
    # 48 frames = 8 segments * 2 samples * 3 crops
    input_shape = (2, 48, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']

    config.model.test_cfg = dict(average_clips='prob')
    recognizer = build_recognizer(config.model)

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

        # Test forward gradcam
        recognizer(imgs, gradcam=True)
        for one_img in img_list:
            recognizer(one_img, gradcam=True)
def test_tpn():
    """Train/inference forward of the TSM-based (2D) TPN recognizer."""
    config = get_recognizer_cfg('tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py')
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    input_shape = (1, 8, 3, 224, 224)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)
    # TPN trains with an auxiliary head, so both loss terms must be present.
    assert 'loss_aux' in losses and 'loss_cls' in losses

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

        # Test forward gradcam
        recognizer(imgs, gradcam=True)
        for one_img in img_list:
            recognizer(one_img, gradcam=True)

    # Test forward dummy
    with torch.no_grad():
        _recognizer = build_recognizer(config.model)
        img_list = [img[None, :] for img in imgs]
        # forward_dummy replaces forward when available.
        if hasattr(_recognizer, 'forward_dummy'):
            _recognizer.forward = _recognizer.forward_dummy
        for one_img in img_list:
            _recognizer(one_img)
def test_tanet():
    """Train/inference forward of the TANet recognizer, incl. multi-view test."""
    config = get_recognizer_cfg(
        'tanet/tanet_r50_dense_1x1x8_100e_kinetics400_rgb.py')
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    input_shape = (1, 8, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

    # test twice sample + 3 crops
    # 48 frames = 8 segments * 2 samples * 3 crops
    input_shape = (2, 48, 3, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape)
    imgs = demo_inputs['imgs']

    config.model.test_cfg = dict(average_clips='prob')
    recognizer = build_recognizer(config.model)

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

        # Test forward gradcam
        recognizer(imgs, gradcam=True)
        for one_img in img_list:
            recognizer(one_img, gradcam=True)
| 8,541 | 29.29078 | 76 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_recognizers/test_recognizer3d.py | import torch
from mmaction.models import build_recognizer
from ..base import generate_recognizer_demo_inputs, get_recognizer_cfg
def test_i3d():
    """Train/inference forward of the I3D recognizer.

    Under parrots the test runs on GPU only (and is silently skipped when
    no GPU is available); under regular PyTorch it runs on CPU.
    """
    config = get_recognizer_cfg('i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py')
    config.model['backbone']['pretrained2d'] = False
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    input_shape = (1, 3, 3, 8, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D')
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            recognizer = recognizer.cuda()
            imgs = imgs.cuda()
            gt_labels = gt_labels.cuda()
            losses = recognizer(imgs, gt_labels)
            assert isinstance(losses, dict)

            # Test forward test
            with torch.no_grad():
                img_list = [img[None, :] for img in imgs]
                for one_img in img_list:
                    recognizer(one_img, None, return_loss=False)

                # Test forward gradcam
                recognizer(imgs, gradcam=True)
                for one_img in img_list:
                    recognizer(one_img, gradcam=True)

                # Test forward dummy
                recognizer.forward_dummy(imgs, softmax=False)
                res = recognizer.forward_dummy(imgs, softmax=True)[0]
                # With softmax the dummy output must be valid probabilities.
                assert torch.min(res) >= 0
                assert torch.max(res) <= 1
    else:
        losses = recognizer(imgs, gt_labels)
        assert isinstance(losses, dict)

        # Test forward test
        with torch.no_grad():
            img_list = [img[None, :] for img in imgs]
            for one_img in img_list:
                recognizer(one_img, None, return_loss=False)

            # Test forward gradcam
            recognizer(imgs, gradcam=True)
            for one_img in img_list:
                recognizer(one_img, gradcam=True)

            # Test forward dummy
            recognizer.forward_dummy(imgs, softmax=False)
            res = recognizer.forward_dummy(imgs, softmax=True)[0]
            # With softmax the dummy output must be valid probabilities.
            assert torch.min(res) >= 0
            assert torch.max(res) <= 1
def test_r2plus1d():
    """Train/inference forward of the R(2+1)D recognizer.

    Under parrots the test runs on GPU only (skipped without a GPU); under
    regular PyTorch it runs on CPU.
    """
    config = get_recognizer_cfg(
        'r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py')
    config.model['backbone']['pretrained2d'] = False
    config.model['backbone']['pretrained'] = None
    # Plain BN3d instead of the config's default norm layer.
    config.model['backbone']['norm_cfg'] = dict(type='BN3d')
    recognizer = build_recognizer(config.model)

    input_shape = (1, 3, 3, 8, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D')
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            recognizer = recognizer.cuda()
            imgs = imgs.cuda()
            gt_labels = gt_labels.cuda()
            losses = recognizer(imgs, gt_labels)
            assert isinstance(losses, dict)

            # Test forward test
            with torch.no_grad():
                img_list = [img[None, :] for img in imgs]
                for one_img in img_list:
                    recognizer(one_img, None, return_loss=False)

                # Test forward gradcam
                recognizer(imgs, gradcam=True)
                for one_img in img_list:
                    recognizer(one_img, gradcam=True)
    else:
        losses = recognizer(imgs, gt_labels)
        assert isinstance(losses, dict)

        # Test forward test
        with torch.no_grad():
            img_list = [img[None, :] for img in imgs]
            for one_img in img_list:
                recognizer(one_img, None, return_loss=False)

            # Test forward gradcam
            recognizer(imgs, gradcam=True)
            for one_img in img_list:
                recognizer(one_img, gradcam=True)
def test_slowfast():
    """Train/inference forward of the SlowFast recognizer.

    Under parrots the test runs on GPU only (skipped without a GPU). The
    CPU branch additionally checks inference with ``max_testing_views``.
    """
    config = get_recognizer_cfg(
        'slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py')
    recognizer = build_recognizer(config.model)

    input_shape = (1, 3, 3, 16, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D')
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            recognizer = recognizer.cuda()
            imgs = imgs.cuda()
            gt_labels = gt_labels.cuda()
            losses = recognizer(imgs, gt_labels)
            assert isinstance(losses, dict)

            # Test forward test
            with torch.no_grad():
                img_list = [img[None, :] for img in imgs]
                for one_img in img_list:
                    recognizer(one_img, None, return_loss=False)

                # Test forward gradcam
                recognizer(imgs, gradcam=True)
                for one_img in img_list:
                    recognizer(one_img, gradcam=True)
    else:
        losses = recognizer(imgs, gt_labels)
        assert isinstance(losses, dict)

        # Test forward test
        with torch.no_grad():
            img_list = [img[None, :] for img in imgs]
            for one_img in img_list:
                recognizer(one_img, None, return_loss=False)

            # Test forward gradcam
            recognizer(imgs, gradcam=True)
            for one_img in img_list:
                recognizer(one_img, gradcam=True)

        # Test the feature max_testing_views
        config.model.test_cfg['max_testing_views'] = 1
        recognizer = build_recognizer(config.model)
        with torch.no_grad():
            img_list = [img[None, :] for img in imgs]
            for one_img in img_list:
                recognizer(one_img, None, return_loss=False)
def test_csn():
    """Train/inference forward of the CSN recognizer.

    Under parrots the test runs on GPU only (skipped without a GPU); under
    regular PyTorch it runs on CPU.
    """
    config = get_recognizer_cfg(
        'csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py')
    config.model['backbone']['pretrained2d'] = False
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    input_shape = (1, 3, 3, 8, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D')
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            recognizer = recognizer.cuda()
            imgs = imgs.cuda()
            gt_labels = gt_labels.cuda()
            losses = recognizer(imgs, gt_labels)
            assert isinstance(losses, dict)

            # Test forward test
            with torch.no_grad():
                img_list = [img[None, :] for img in imgs]
                for one_img in img_list:
                    recognizer(one_img, None, return_loss=False)

                # Test forward gradcam
                recognizer(imgs, gradcam=True)
                for one_img in img_list:
                    recognizer(one_img, gradcam=True)
    else:
        losses = recognizer(imgs, gt_labels)
        assert isinstance(losses, dict)

        # Test forward test
        with torch.no_grad():
            img_list = [img[None, :] for img in imgs]
            for one_img in img_list:
                recognizer(one_img, None, return_loss=False)

            # Test forward gradcam
            recognizer(imgs, gradcam=True)
            for one_img in img_list:
                recognizer(one_img, gradcam=True)
def test_tpn():
    """Train/inference forward of the SlowOnly-based (3D) TPN recognizer."""
    config = get_recognizer_cfg(
        'tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py')
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    input_shape = (1, 8, 3, 1, 32, 32)
    demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D')
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

        # Test forward gradcam
        recognizer(imgs, gradcam=True)
        for one_img in img_list:
            recognizer(one_img, gradcam=True)

    # Test dummy forward
    with torch.no_grad():
        _recognizer = build_recognizer(config.model)
        img_list = [img[None, :] for img in imgs]
        # forward_dummy replaces forward when available.
        if hasattr(_recognizer, 'forward_dummy'):
            _recognizer.forward = _recognizer.forward_dummy
        for one_img in img_list:
            _recognizer(one_img)
def test_c3d():
    """Train/inference forward of the C3D recognizer on CPU."""
    config = get_recognizer_cfg('c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py')
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    input_shape = (1, 3, 3, 16, 112, 112)
    demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D')
    imgs = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    losses = recognizer(imgs, gt_labels)
    assert isinstance(losses, dict)

    # Test forward test
    with torch.no_grad():
        img_list = [img[None, :] for img in imgs]
        for one_img in img_list:
            recognizer(one_img, None, return_loss=False)

        # Test forward gradcam
        recognizer(imgs, gradcam=True)
        for one_img in img_list:
            recognizer(one_img, gradcam=True)
| 9,327 | 31.84507 | 77 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_recognizers/test_audio_recognizer.py | import torch
from mmaction.models import build_recognizer
from ..base import generate_recognizer_demo_inputs, get_audio_recognizer_cfg
def test_audio_recognizer():
    """Train- and inference-mode forward of the audio recognizer."""
    config = get_audio_recognizer_cfg(
        'resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py')
    config.model['backbone']['pretrained'] = None
    recognizer = build_recognizer(config.model)

    demo_inputs = generate_recognizer_demo_inputs(
        (1, 3, 1, 128, 80), model_type='audio')
    audios = demo_inputs['imgs']
    gt_labels = demo_inputs['gt_labels']

    # Training forward returns a loss dict.
    losses = recognizer(audios, gt_labels)
    assert isinstance(losses, dict)

    # Inference forward, one spectrogram at a time.
    with torch.no_grad():
        for spectrogram in audios:
            recognizer(spectrogram[None, :], None, return_loss=False)
| 866 | 28.896552 | 76 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_common_modules/test_resnet.py | import pytest
import torch
import torch.nn as nn
from mmcv.utils import _BatchNorm
from mmaction.models import ResNet
from ..base import check_norm_state, generate_backbone_demo_inputs
def test_resnet_backbone():
    """Test resnet backbone."""
    with pytest.raises(KeyError):
        # ResNet depth should be in [18, 34, 50, 101, 152]
        ResNet(20)

    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=0)

    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=5)

    with pytest.raises(AssertionError):
        # len(strides) == len(dilations) == num_stages
        ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)

    with pytest.raises(TypeError):
        # pretrain must be a str
        resnet50 = ResNet(50, pretrained=0)
        resnet50.init_weights()

    with pytest.raises(AssertionError):
        # style must be in ['pytorch', 'caffe']
        ResNet(18, style='tensorflow')

    with pytest.raises(AssertionError):
        # assert not with_cp
        ResNet(18, with_cp=True)

    # resnet with depth 18, norm_eval False, initial weights
    resnet18 = ResNet(18)
    resnet18.init_weights()

    # resnet with depth 50, norm_eval True
    resnet50 = ResNet(50, norm_eval=True)
    resnet50.init_weights()
    resnet50.train()
    assert check_norm_state(resnet50.modules(), False)

    # resnet with depth 50, norm_eval True, pretrained
    resnet50_pretrain = ResNet(
        pretrained='torchvision://resnet50', depth=50, norm_eval=True)
    resnet50_pretrain.init_weights()
    resnet50_pretrain.train()
    assert check_norm_state(resnet50_pretrain.modules(), False)

    # resnet with depth 50, norm_eval True, frozen_stages 1
    frozen_stages = 1
    resnet50_frozen = ResNet(50, frozen_stages=frozen_stages)
    resnet50_frozen.init_weights()
    resnet50_frozen.train()
    # The stem and every frozen stage must stay in eval mode with all of
    # their parameters detached from the optimizer.
    assert resnet50_frozen.conv1.bn.training is False
    for layer in resnet50_frozen.conv1.modules():
        for param in layer.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(resnet50_frozen, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # resnet with depth 50, partial batchnorm
    resnet_pbn = ResNet(50, partial_bn=True)
    resnet_pbn.train()
    count_bn = 0
    # With partial_bn only the first BN layer keeps training; every BN
    # layer after it is frozen.
    for m in resnet_pbn.modules():
        if isinstance(m, nn.BatchNorm2d):
            count_bn += 1
            if count_bn >= 2:
                assert m.weight.requires_grad is False
                assert m.bias.requires_grad is False
                assert m.training is False
            else:
                assert m.weight.requires_grad is True
                assert m.bias.requires_grad is True
                assert m.training is True

    input_shape = (1, 3, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)

    # resnet with depth 18 inference
    resnet18 = ResNet(18, norm_eval=False)
    resnet18.init_weights()
    resnet18.train()
    feat = resnet18(imgs)
    assert feat.shape == torch.Size([1, 512, 2, 2])

    # resnet with depth 50 inference
    resnet50 = ResNet(50, norm_eval=False)
    resnet50.init_weights()
    resnet50.train()
    feat = resnet50(imgs)
    assert feat.shape == torch.Size([1, 2048, 2, 2])

    # resnet with depth 50 in caffe style inference
    resnet50_caffe = ResNet(50, style='caffe', norm_eval=False)
    resnet50_caffe.init_weights()
    resnet50_caffe.train()
    feat = resnet50_caffe(imgs)
    assert feat.shape == torch.Size([1, 2048, 2, 2])

    # non-RGB input: 10-channel (e.g. stacked flow) forward
    resnet50_flow = ResNet(
        depth=50, pretrained='torchvision://resnet50', in_channels=10)
    input_shape = (1, 10, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    feat = resnet50_flow(imgs)
    assert feat.shape == torch.Size([1, 2048, 2, 2])

    resnet50 = ResNet(
        depth=50, pretrained='torchvision://resnet50', in_channels=3)
    input_shape = (1, 3, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    feat = resnet50(imgs)
    assert feat.shape == torch.Size([1, 2048, 2, 2])
STTS | STTS-main/VideoSwin/tests/test_models/test_common_modules/test_mobilenet_v2.py | import pytest
import torch
from mmcv.utils import _BatchNorm
from mmaction.models import MobileNetV2
from ..base import check_norm_state, generate_backbone_demo_inputs
def test_mobilenetv2_backbone():
    """Test the MobileNetV2 backbone.

    Modified from mmclassification. Covers invalid-argument errors,
    stage freezing, ``norm_eval``, several ``widen_factor`` /
    ``out_indices`` settings, norm-layer configs and checkpointing.
    """
    from torch.nn.modules import GroupNorm

    from mmaction.models.backbones.mobilenet_v2 import InvertedResidual

    def is_norm(modules):
        """Check if is one of the norms."""
        if isinstance(modules, (GroupNorm, _BatchNorm)):
            return True
        return False

    def is_block(modules):
        """Check if is ResNet building block."""
        if isinstance(modules, (InvertedResidual, )):
            return True
        return False

    with pytest.raises(TypeError):
        # pretrained must be a string path
        model = MobileNetV2(pretrained=0)
        model.init_weights()

    with pytest.raises(ValueError):
        # frozen_stages must be in range(1, 8)
        MobileNetV2(frozen_stages=8)

    with pytest.raises(ValueError):
        # out_indices must be in range(-1, 8)
        MobileNetV2(out_indices=[8])

    input_shape = (1, 3, 224, 224)
    imgs = generate_backbone_demo_inputs(input_shape)

    # Test MobileNetV2 with first stage frozen
    frozen_stages = 1
    model = MobileNetV2(frozen_stages=frozen_stages)
    model.init_weights()
    model.train()
    for mod in model.conv1.modules():
        for param in mod.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # Test MobileNetV2 with norm_eval=True
    model = MobileNetV2(norm_eval=True)
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), False)

    # Test MobileNetV2 forward with widen_factor=1.0, pretrained
    model = MobileNetV2(
        widen_factor=1.0,
        out_indices=range(0, 8),
        pretrained='mmcls://mobilenet_v2')
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), True)
    feat = model(imgs)
    assert len(feat) == 8
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
    assert feat[7].shape == torch.Size((1, 1280, 7, 7))

    # Test MobileNetV2 forward with widen_factor=0.5
    model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7))
    model.init_weights()
    model.train()
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 8, 112, 112))
    assert feat[1].shape == torch.Size((1, 16, 56, 56))
    assert feat[2].shape == torch.Size((1, 16, 28, 28))
    assert feat[3].shape == torch.Size((1, 32, 14, 14))
    assert feat[4].shape == torch.Size((1, 48, 14, 14))
    assert feat[5].shape == torch.Size((1, 80, 7, 7))
    assert feat[6].shape == torch.Size((1, 160, 7, 7))

    # Test MobileNetV2 forward with widen_factor=2.0
    model = MobileNetV2(widen_factor=2.0)
    model.init_weights()
    model.train()
    feat = model(imgs)
    assert feat.shape == torch.Size((1, 2560, 7, 7))

    # Test MobileNetV2 forward with out_indices=None
    model = MobileNetV2(widen_factor=1.0)
    model.init_weights()
    model.train()
    feat = model(imgs)
    assert feat.shape == torch.Size((1, 1280, 7, 7))

    # Test MobileNetV2 forward with dict(type='ReLU')
    model = MobileNetV2(
        widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7))
    model.init_weights()
    model.train()
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))

    # Test MobileNetV2 with BatchNorm (default norm) forward
    model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.init_weights()
    model.train()
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))

    # Test MobileNetV2 with GroupNorm forward
    model = MobileNetV2(
        widen_factor=1.0,
        norm_cfg=dict(type='GN', num_groups=2, requires_grad=True),
        out_indices=range(0, 7))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.init_weights()
    model.train()
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))

    # Test MobileNetV2 with layers 1, 3, 5 out forward
    model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4))
    model.init_weights()
    model.train()
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 32, 28, 28))
    assert feat[2].shape == torch.Size((1, 96, 14, 14))

    # Test MobileNetV2 with checkpoint forward
    model = MobileNetV2(
        widen_factor=1.0, with_cp=True, out_indices=range(0, 7))
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.init_weights()
    model.train()
    feat = model(imgs)
    assert len(feat) == 7
    assert feat[0].shape == torch.Size((1, 16, 112, 112))
    assert feat[1].shape == torch.Size((1, 24, 56, 56))
    assert feat[2].shape == torch.Size((1, 32, 28, 28))
    assert feat[3].shape == torch.Size((1, 64, 14, 14))
    assert feat[4].shape == torch.Size((1, 96, 14, 14))
    assert feat[5].shape == torch.Size((1, 160, 7, 7))
    assert feat[6].shape == torch.Size((1, 320, 7, 7))
| 7,014 | 33.219512 | 77 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_common_modules/test_resnet3d.py | import pytest
import torch
import torch.nn as nn
from mmcv.utils import _BatchNorm
from mmaction.models import ResNet3d, ResNet3dLayer
from ..base import check_norm_state, generate_backbone_demo_inputs
def test_resnet3d_backbone():
    """Test resnet3d backbone.

    Covers argument validation, 2D->3D weight inflation from a
    torchvision checkpoint, ``norm_eval``/``frozen_stages``/
    ``zero_init_residual`` behaviour, caffe-style and inflate-style
    variants, and non-local blocks.
    """
    with pytest.raises(AssertionError):
        # In ResNet3d: 1 <= num_stages <= 4
        ResNet3d(34, None, num_stages=0)

    with pytest.raises(AssertionError):
        # In ResNet3d: 1 <= num_stages <= 4
        ResNet3d(34, None, num_stages=5)

    with pytest.raises(AssertionError):
        # In ResNet3d: 1 <= num_stages <= 4
        ResNet3d(50, None, num_stages=0)

    with pytest.raises(AssertionError):
        # In ResNet3d: 1 <= num_stages <= 4
        ResNet3d(50, None, num_stages=5)

    with pytest.raises(AssertionError):
        # len(spatial_strides) == len(temporal_strides)
        # == len(dilations) == num_stages
        ResNet3d(
            50,
            None,
            spatial_strides=(1, ),
            temporal_strides=(1, 1),
            dilations=(1, 1, 1),
            num_stages=4)

    with pytest.raises(AssertionError):
        # len(spatial_strides) == len(temporal_strides)
        # == len(dilations) == num_stages
        ResNet3d(
            34,
            None,
            spatial_strides=(1, ),
            temporal_strides=(1, 1),
            dilations=(1, 1, 1),
            num_stages=4)

    with pytest.raises(TypeError):
        # pretrain must be str or None.
        resnet3d_34 = ResNet3d(34, ['resnet', 'bninception'])
        resnet3d_34.init_weights()

    with pytest.raises(TypeError):
        # pretrain must be str or None.
        resnet3d_50 = ResNet3d(50, ['resnet', 'bninception'])
        resnet3d_50.init_weights()

    # resnet3d with depth 34, no pretrained, norm_eval True
    resnet3d_34 = ResNet3d(34, None, pretrained2d=False, norm_eval=True)
    resnet3d_34.init_weights()
    resnet3d_34.train()
    assert check_norm_state(resnet3d_34.modules(), False)

    # resnet3d with depth 50, no pretrained, norm_eval True
    resnet3d_50 = ResNet3d(50, None, pretrained2d=False, norm_eval=True)
    resnet3d_50.init_weights()
    resnet3d_50.train()
    assert check_norm_state(resnet3d_50.modules(), False)

    # resnet3d with depth 50, pretrained2d, norm_eval True
    resnet3d_50_pretrain = ResNet3d(
        50, 'torchvision://resnet50', norm_eval=True)
    resnet3d_50_pretrain.init_weights()
    resnet3d_50_pretrain.train()
    assert check_norm_state(resnet3d_50_pretrain.modules(), False)

    # Verify that 2D checkpoint weights were correctly inflated:
    # each Conv3d kernel is the Conv2d kernel repeated along the temporal
    # dim and divided by the temporal kernel size; BN stats copy over 1:1.
    from mmcv.runner import _load_checkpoint
    chkp_2d = _load_checkpoint('torchvision://resnet50')
    for name, module in resnet3d_50_pretrain.named_modules():
        if len(name.split('.')) == 4:
            # layer.block.module.submodule
            prefix = name.split('.')[:2]
            module_type = name.split('.')[2]
            submodule_type = name.split('.')[3]

            if module_type == 'downsample':
                name2d = name.replace('conv', '0').replace('bn', '1')
            else:
                layer_id = name.split('.')[2][-1]
                name2d = prefix[0] + '.' + prefix[1] + '.' + \
                    submodule_type + layer_id

            if isinstance(module, nn.Conv3d):
                conv2d_weight = chkp_2d[name2d + '.weight']
                conv3d_weight = getattr(module, 'weight').data
                assert torch.equal(
                    conv3d_weight,
                    conv2d_weight.data.unsqueeze(2).expand_as(conv3d_weight) /
                    conv3d_weight.shape[2])
                if getattr(module, 'bias') is not None:
                    conv2d_bias = chkp_2d[name2d + '.bias']
                    conv3d_bias = getattr(module, 'bias').data
                    assert torch.equal(conv2d_bias, conv3d_bias)

            elif isinstance(module, nn.BatchNorm3d):
                for pname in ['weight', 'bias', 'running_mean', 'running_var']:
                    param_2d = chkp_2d[name2d + '.' + pname]
                    param_3d = getattr(module, pname).data
                    assert torch.equal(param_2d, param_3d)

    conv3d = resnet3d_50_pretrain.conv1.conv
    assert torch.equal(
        conv3d.weight,
        chkp_2d['conv1.weight'].unsqueeze(2).expand_as(conv3d.weight) /
        conv3d.weight.shape[2])
    conv3d = resnet3d_50_pretrain.layer3[2].conv2.conv
    assert torch.equal(
        conv3d.weight, chkp_2d['layer3.2.conv2.weight'].unsqueeze(2).expand_as(
            conv3d.weight) / conv3d.weight.shape[2])

    # resnet3d with depth 34, no pretrained, norm_eval False
    resnet3d_34_no_bn_eval = ResNet3d(
        34, None, pretrained2d=False, norm_eval=False)
    resnet3d_34_no_bn_eval.init_weights()
    resnet3d_34_no_bn_eval.train()
    assert check_norm_state(resnet3d_34_no_bn_eval.modules(), True)

    # resnet3d with depth 50, no pretrained, norm_eval False
    resnet3d_50_no_bn_eval = ResNet3d(
        50, None, pretrained2d=False, norm_eval=False)
    resnet3d_50_no_bn_eval.init_weights()
    resnet3d_50_no_bn_eval.train()
    assert check_norm_state(resnet3d_50_no_bn_eval.modules(), True)

    # resnet3d with depth 34, no pretrained, frozen_stages, norm_eval False
    frozen_stages = 1
    resnet3d_34_frozen = ResNet3d(
        34, None, pretrained2d=False, frozen_stages=frozen_stages)
    resnet3d_34_frozen.init_weights()
    resnet3d_34_frozen.train()
    assert resnet3d_34_frozen.conv1.bn.training is False
    for param in resnet3d_34_frozen.conv1.parameters():
        assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(resnet3d_34_frozen, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # test zero_init_residual
    for m in resnet3d_34_frozen.modules():
        if hasattr(m, 'conv2'):
            assert torch.equal(m.conv2.bn.weight,
                               torch.zeros_like(m.conv2.bn.weight))
            assert torch.equal(m.conv2.bn.bias,
                               torch.zeros_like(m.conv2.bn.bias))

    # resnet3d with depth 50, no pretrained, frozen_stages, norm_eval False
    frozen_stages = 1
    resnet3d_50_frozen = ResNet3d(
        50, None, pretrained2d=False, frozen_stages=frozen_stages)
    resnet3d_50_frozen.init_weights()
    resnet3d_50_frozen.train()
    assert resnet3d_50_frozen.conv1.bn.training is False
    for param in resnet3d_50_frozen.conv1.parameters():
        assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(resnet3d_50_frozen, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # test zero_init_residual
    for m in resnet3d_50_frozen.modules():
        if hasattr(m, 'conv3'):
            assert torch.equal(m.conv3.bn.weight,
                               torch.zeros_like(m.conv3.bn.weight))
            assert torch.equal(m.conv3.bn.bias,
                               torch.zeros_like(m.conv3.bn.bias))

    # resnet3d frozen with depth 34 inference
    input_shape = (1, 3, 6, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            resnet3d_34_frozen = resnet3d_34_frozen.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_34_frozen(imgs_gpu)
            assert feat.shape == torch.Size([1, 512, 3, 2, 2])
    else:
        feat = resnet3d_34_frozen(imgs)
        assert feat.shape == torch.Size([1, 512, 3, 2, 2])

    # resnet3d with depth 50 inference
    input_shape = (1, 3, 6, 64, 64)
    imgs = generate_backbone_demo_inputs(input_shape)
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            resnet3d_50_frozen = resnet3d_50_frozen.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_50_frozen(imgs_gpu)
            assert feat.shape == torch.Size([1, 2048, 3, 2, 2])
    else:
        feat = resnet3d_50_frozen(imgs)
        assert feat.shape == torch.Size([1, 2048, 3, 2, 2])

    # resnet3d with depth 50 in caffe style inference
    resnet3d_50_caffe = ResNet3d(50, None, pretrained2d=False, style='caffe')
    resnet3d_50_caffe.init_weights()
    resnet3d_50_caffe.train()
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            resnet3d_50_caffe = resnet3d_50_caffe.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_50_caffe(imgs_gpu)
            assert feat.shape == torch.Size([1, 2048, 3, 2, 2])
    else:
        feat = resnet3d_50_caffe(imgs)
        assert feat.shape == torch.Size([1, 2048, 3, 2, 2])

    # resnet3d with depth 34 in caffe style inference
    resnet3d_34_caffe = ResNet3d(34, None, pretrained2d=False, style='caffe')
    resnet3d_34_caffe.init_weights()
    resnet3d_34_caffe.train()
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            resnet3d_34_caffe = resnet3d_34_caffe.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_34_caffe(imgs_gpu)
            assert feat.shape == torch.Size([1, 512, 3, 2, 2])
    else:
        feat = resnet3d_34_caffe(imgs)
        assert feat.shape == torch.Size([1, 512, 3, 2, 2])

    # resnet3d with 3x3x3 inflate_style inference
    resnet3d_50_1x1x1 = ResNet3d(
        50, None, pretrained2d=False, inflate_style='3x3x3')
    resnet3d_50_1x1x1.init_weights()
    resnet3d_50_1x1x1.train()
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            resnet3d_50_1x1x1 = resnet3d_50_1x1x1.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_50_1x1x1(imgs_gpu)
            assert feat.shape == torch.Size([1, 2048, 3, 2, 2])
    else:
        feat = resnet3d_50_1x1x1(imgs)
        assert feat.shape == torch.Size([1, 2048, 3, 2, 2])

    resnet3d_34_1x1x1 = ResNet3d(
        34, None, pretrained2d=False, inflate_style='3x3x3')
    resnet3d_34_1x1x1.init_weights()
    resnet3d_34_1x1x1.train()
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            resnet3d_34_1x1x1 = resnet3d_34_1x1x1.cuda()
            imgs_gpu = imgs.cuda()
            feat = resnet3d_34_1x1x1(imgs_gpu)
            assert feat.shape == torch.Size([1, 512, 3, 2, 2])
    else:
        feat = resnet3d_34_1x1x1(imgs)
        assert feat.shape == torch.Size([1, 512, 3, 2, 2])

    # resnet3d with non-local module
    non_local_cfg = dict(
        sub_sample=True,
        use_scale=False,
        norm_cfg=dict(type='BN3d', requires_grad=True),
        mode='embedded_gaussian')
    non_local = ((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0))
    resnet3d_nonlocal = ResNet3d(
        50,
        None,
        pretrained2d=False,
        non_local=non_local,
        non_local_cfg=non_local_cfg)
    resnet3d_nonlocal.init_weights()
    for layer_name in ['layer2', 'layer3']:
        layer = getattr(resnet3d_nonlocal, layer_name)
        for i, _ in enumerate(layer):
            if i % 2 == 0:
                assert hasattr(layer[i], 'non_local_block')

    feat = resnet3d_nonlocal(imgs)
    assert feat.shape == torch.Size([1, 2048, 3, 2, 2])
def test_resnet3d_layer():
    """Test the single-stage ResNet3dLayer backbone wrapper."""
    with pytest.raises(AssertionError):
        # depth must be one of the supported ResNet depths
        ResNet3dLayer(22, None)

    with pytest.raises(AssertionError):
        # stage index must be < 4
        ResNet3dLayer(50, None, stage=4)

    res_layer = ResNet3dLayer(50, None, stage=3, norm_eval=True)
    res_layer.init_weights()
    res_layer.train()
    input_shape = (1, 1024, 1, 4, 4)
    imgs = generate_backbone_demo_inputs(input_shape)
    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            res_layer = res_layer.cuda()
            imgs_gpu = imgs.cuda()
            feat = res_layer(imgs_gpu)
            assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
    else:
        feat = res_layer(imgs)
        assert feat.shape == torch.Size([1, 2048, 1, 2, 2])

    res_layer = ResNet3dLayer(
        50, 'torchvision://resnet50', stage=3, all_frozen=True)
    res_layer.init_weights()
    res_layer.train()
    imgs = generate_backbone_demo_inputs(input_shape)
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            res_layer = res_layer.cuda()
            imgs_gpu = imgs.cuda()
            feat = res_layer(imgs_gpu)
            assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
    else:
        feat = res_layer(imgs)
        assert feat.shape == torch.Size([1, 2048, 1, 2, 2])
| 13,070 | 38.01791 | 79 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_common_modules/test_base_head.py | import torch
import torch.nn.functional as F
from mmcv.utils import assert_dict_has_keys
from mmaction.models import BaseHead
class ExampleHead(BaseHead):
    """Minimal concrete head used to exercise BaseHead's loss logic.

    `init_weights` and `forward` are intentionally no-ops: only the
    model-unrelated methods inherited from BaseHead are under test.
    """

    def init_weights(self):
        pass

    def forward(self, x):
        pass
def test_base_head():
    """Test BaseHead.loss with hard labels, soft labels and multi-class."""
    head = ExampleHead(3, 400, dict(type='CrossEntropyLoss'))

    cls_scores = torch.rand((3, 4))
    # When truth is non-empty then cls loss should be nonzero for random inputs
    gt_labels = torch.LongTensor([2] * 3).squeeze()
    losses = head.loss(cls_scores, gt_labels)
    assert 'loss_cls' in losses.keys()
    assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'

    head = ExampleHead(3, 400, dict(type='CrossEntropyLoss', loss_weight=2.0))

    cls_scores = torch.rand((3, 4))
    # When truth is non-empty then cls loss should be nonzero for random inputs
    gt_labels = torch.LongTensor([2] * 3).squeeze()
    losses = head.loss(cls_scores, gt_labels)
    assert_dict_has_keys(losses, ['loss_cls'])
    assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'

    # Test Soft label with batch size > 1
    cls_scores = torch.rand((3, 3))
    gt_labels = torch.LongTensor([[2] * 3])
    gt_one_hot_labels = F.one_hot(gt_labels, num_classes=3).squeeze()
    losses = head.loss(cls_scores, gt_one_hot_labels)
    assert 'loss_cls' in losses.keys()
    assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'

    # Test Soft label with batch size = 1
    cls_scores = torch.rand((1, 3))
    gt_labels = torch.LongTensor([2])
    gt_one_hot_labels = F.one_hot(gt_labels, num_classes=3).squeeze()
    losses = head.loss(cls_scores, gt_one_hot_labels)
    assert 'loss_cls' in losses.keys()
    assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'

    # test multi-class & label smoothing
    head = ExampleHead(
        3,
        400,
        dict(type='BCELossWithLogits'),
        multi_class=True,
        label_smooth_eps=0.1)

    # batch size > 1
    cls_scores = torch.rand((2, 3))
    gt_labels = torch.LongTensor([[1, 0, 1], [0, 1, 0]]).squeeze()
    losses = head.loss(cls_scores, gt_labels)
    assert 'loss_cls' in losses.keys()
    assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'

    # batch size = 1
    cls_scores = torch.rand((1, 3))
    gt_labels = torch.LongTensor([[1, 0, 1]]).squeeze()
    losses = head.loss(cls_scores, gt_labels)
    assert 'loss_cls' in losses.keys()
    assert losses.get('loss_cls') > 0, 'cls loss should be non-zero'
| 2,538 | 33.780822 | 79 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_common_modules/test_base_recognizers.py | import pytest
import torch
import torch.nn.functional as F
from mmaction.models import BaseRecognizer
class ExampleRecognizer(BaseRecognizer):
    """Stub recognizer for testing model-unrelated BaseRecognizer methods."""

    def __init__(self, train_cfg, test_cfg):
        # Deliberately call the *grandparent* __init__ (skip
        # BaseRecognizer.__init__) to avoid building backbone and head,
        # which are useless to ExampleRecognizer, since it is only used
        # for model-unrelated methods (like `average_clip`) testing.
        super(BaseRecognizer, self).__init__()
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

    def forward_train(self, imgs, labels):
        pass

    def forward_test(self, imgs):
        pass

    def forward_gradcam(self, imgs):
        pass
def test_base_recognizer():
    """Test BaseRecognizer.average_clip for all `average_clips` settings."""
    cls_score = torch.rand(5, 400)
    with pytest.raises(KeyError):
        # "average_clips" must defined in test_cfg keys
        wrong_test_cfg = dict(clip='score')
        recognizer = ExampleRecognizer(None, wrong_test_cfg)
        recognizer.average_clip(cls_score)

    with pytest.raises(ValueError):
        # unsupported average clips type
        wrong_test_cfg = dict(average_clips='softmax')
        recognizer = ExampleRecognizer(None, wrong_test_cfg)
        recognizer.average_clip(cls_score)

    with pytest.raises(ValueError):
        # Label should not be None
        recognizer = ExampleRecognizer(None, None)
        recognizer(torch.tensor(0))

    # average_clips=None
    test_cfg = dict(average_clips=None)
    recognizer = ExampleRecognizer(None, test_cfg)
    score = recognizer.average_clip(cls_score, num_segs=5)
    assert torch.equal(score, cls_score)

    # average_clips='score'
    test_cfg = dict(average_clips='score')
    recognizer = ExampleRecognizer(None, test_cfg)
    score = recognizer.average_clip(cls_score, num_segs=5)
    assert torch.equal(score, cls_score.mean(dim=0, keepdim=True))

    # average_clips='prob'
    test_cfg = dict(average_clips='prob')
    recognizer = ExampleRecognizer(None, test_cfg)
    score = recognizer.average_clip(cls_score, num_segs=5)
    assert torch.equal(score,
                       F.softmax(cls_score, dim=1).mean(dim=0, keepdim=True))
| 2,178 | 32.015152 | 77 | py |
STTS | STTS-main/VideoSwin/tests/test_models/test_detectors/test_detectors.py | import pytest
import torch
from ..base import generate_detector_demo_inputs, get_detector_cfg
try:
from mmaction.models import build_detector
mmdet_imported = True
except (ImportError, ModuleNotFoundError):
mmdet_imported = False
@pytest.mark.skipif(not mmdet_imported, reason='requires mmdet')
def test_ava_detector():
    """Test forward train/test of the AVA spatio-temporal detector."""
    config = get_detector_cfg('ava/slowonly_kinetics_pretrained_r50_'
                              '4x16x1_20e_ava_rgb.py')
    detector = build_detector(config.model)

    # parrots 3dconv is only implemented on gpu
    if torch.__version__ == 'parrots':
        if torch.cuda.is_available():
            train_demo_inputs = generate_detector_demo_inputs(
                train=True, device='cuda')
            test_demo_inputs = generate_detector_demo_inputs(
                train=False, device='cuda')
            detector = detector.cuda()
            losses = detector(**train_demo_inputs)
            assert isinstance(losses, dict)
            # Test forward test
            with torch.no_grad():
                _ = detector(**test_demo_inputs, return_loss=False)
    else:
        train_demo_inputs = generate_detector_demo_inputs(train=True)
        test_demo_inputs = generate_detector_demo_inputs(train=False)
        losses = detector(**train_demo_inputs)
        assert isinstance(losses, dict)
        # Test forward test
        with torch.no_grad():
            _ = detector(**test_demo_inputs, return_loss=False)
| 1,425 | 32.952381 | 69 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_formating.py | import numpy as np
import pytest
import torch
from mmcv.parallel import DataContainer as DC
from mmcv.utils import assert_dict_has_keys
from mmaction.datasets.pipelines import (Collect, FormatAudioShape,
FormatShape, ImageToTensor, Rename,
ToDataContainer, ToTensor, Transpose)
def test_rename():
    """Test the Rename pipeline: maps key 'a' to 'b' and drops 'a'."""
    org_name = 'a'
    new_name = 'b'
    mapping = {org_name: new_name}
    rename = Rename(mapping)
    results = dict(a=2)
    results = rename(results)
    assert results['b'] == 2
    assert 'a' not in results
def test_to_tensor():
    """Test the ToTensor pipeline for supported and unsupported types."""
    to_tensor = ToTensor(['str'])
    with pytest.raises(TypeError):
        # str cannot be converted to tensor
        results = dict(str='0')
        to_tensor(results)

    # convert tensor, numpy, squence, int, float to tensor
    target_keys = ['tensor', 'numpy', 'sequence', 'int', 'float']
    to_tensor = ToTensor(target_keys)
    original_results = dict(
        tensor=torch.randn(2, 3),
        numpy=np.random.randn(2, 3),
        sequence=list(range(10)),
        int=1,
        float=0.1)
    results = to_tensor(original_results)
    assert assert_dict_has_keys(results, target_keys)
    for key in target_keys:
        assert isinstance(results[key], torch.Tensor)
        assert torch.equal(results[key].data, original_results[key])

    # Add an additional key which is not in keys.
    original_results = dict(
        tensor=torch.randn(2, 3),
        numpy=np.random.randn(2, 3),
        sequence=list(range(10)),
        int=1,
        float=0.1,
        str='test')
    results = to_tensor(original_results)
    assert assert_dict_has_keys(results, target_keys)
    for key in target_keys:
        assert isinstance(results[key], torch.Tensor)
        assert torch.equal(results[key].data, original_results[key])

    assert repr(to_tensor) == to_tensor.__class__.__name__ + \
        f'(keys={target_keys})'
def test_to_data_container():
    """Test ToDataContainer: wraps configured keys in DataContainer."""
    # check user-defined fields
    fields = (dict(key='key1', stack=True), dict(key='key2'))
    to_data_container = ToDataContainer(fields=fields)
    target_keys = ['key1', 'key2']
    original_results = dict(key1=np.random.randn(10, 20), key2=['a', 'b'])
    results = to_data_container(original_results.copy())
    assert assert_dict_has_keys(results, target_keys)
    for key in target_keys:
        assert isinstance(results[key], DC)
        assert np.all(results[key].data == original_results[key])
    assert results['key1'].stack
    assert not results['key2'].stack

    # Add an additional key which is not in keys.
    original_results = dict(
        key1=np.random.randn(10, 20), key2=['a', 'b'], key3='value3')
    results = to_data_container(original_results.copy())
    assert assert_dict_has_keys(results, target_keys)
    for key in target_keys:
        assert isinstance(results[key], DC)
        assert np.all(results[key].data == original_results[key])
    assert results['key1'].stack
    assert not results['key2'].stack

    assert repr(to_data_container) == (
        to_data_container.__class__.__name__ + f'(fields={fields})')
def test_image_to_tensor():
    """Test ImageToTensor: HWC ndarray becomes a CHW torch.Tensor."""
    original_results = dict(imgs=np.random.randn(256, 256, 3))
    keys = ['imgs']
    image_to_tensor = ImageToTensor(keys)
    results = image_to_tensor(original_results)
    assert results['imgs'].shape == torch.Size([3, 256, 256])
    assert isinstance(results['imgs'], torch.Tensor)
    assert torch.equal(results['imgs'].data, original_results['imgs'])
    assert repr(image_to_tensor) == image_to_tensor.__class__.__name__ + \
        f'(keys={keys})'
def test_transpose():
    """Test Transpose: permutes the array axes by the given order."""
    results = dict(imgs=np.random.randn(256, 256, 3))
    keys = ['imgs']
    order = [2, 0, 1]
    transpose = Transpose(keys, order)
    results = transpose(results)
    assert results['imgs'].shape == (3, 256, 256)
    assert repr(transpose) == transpose.__class__.__name__ + \
        f'(keys={keys}, order={order})'
def test_collect():
    """Test Collect: gathers keys and packs the rest into img_metas."""
    inputs = dict(
        imgs=np.random.randn(256, 256, 3),
        label=[1],
        filename='test.txt',
        original_shape=(256, 256, 3),
        img_shape=(256, 256, 3),
        pad_shape=(256, 256, 3),
        flip_direction='vertical',
        img_norm_cfg=dict(to_bgr=False))
    keys = ['imgs', 'label']
    collect = Collect(keys)
    results = collect(inputs)
    assert sorted(list(results.keys())) == sorted(
        ['imgs', 'label', 'img_metas'])
    imgs = inputs.pop('imgs')
    assert set(results['img_metas'].data) == set(inputs)
    for key in results['img_metas'].data:
        assert results['img_metas'].data[key] == inputs[key]
    assert repr(collect) == collect.__class__.__name__ + \
        (f'(keys={keys}, meta_keys={collect.meta_keys}, '
         f'nested={collect.nested})')

    # nested=True wraps each collected value in a list
    inputs['imgs'] = imgs
    collect = Collect(keys, nested=True)
    results = collect(inputs)
    assert sorted(list(results.keys())) == sorted(
        ['imgs', 'label', 'img_metas'])
    for k in results:
        assert isinstance(results[k], list)
def test_format_shape():
    """Test FormatShape for NCHW, NCTHW and NPTCHW input formats."""
    with pytest.raises(ValueError):
        # invalid input format
        FormatShape('NHWC')

    # 'NCHW' input format
    results = dict(
        imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3)
    format_shape = FormatShape('NCHW')
    assert format_shape(results)['input_shape'] == (3, 3, 224, 224)

    # `NCTHW` input format with num_clips=1, clip_len=3
    results = dict(
        imgs=np.random.randn(3, 224, 224, 3), num_clips=1, clip_len=3)
    format_shape = FormatShape('NCTHW')
    assert format_shape(results)['input_shape'] == (1, 3, 3, 224, 224)

    # `NCTHW` input format with num_clips=2, clip_len=3
    results = dict(
        imgs=np.random.randn(18, 224, 224, 3), num_clips=2, clip_len=3)
    assert format_shape(results)['input_shape'] == (6, 3, 3, 224, 224)
    target_keys = ['imgs', 'input_shape']
    assert assert_dict_has_keys(results, target_keys)

    assert repr(format_shape) == format_shape.__class__.__name__ + \
        "(input_format='NCTHW')"

    # 'NPTCHW' input format
    results = dict(
        imgs=np.random.randn(72, 224, 224, 3),
        num_clips=9,
        clip_len=1,
        num_proposals=8)
    format_shape = FormatShape('NPTCHW')
    assert format_shape(results)['input_shape'] == (8, 9, 3, 224, 224)
def test_format_audio_shape():
    """Test FormatAudioShape for the NCTF input format."""
    with pytest.raises(ValueError):
        # invalid input format
        FormatAudioShape('XXXX')

    # 'NCTF' input format
    results = dict(audios=np.random.randn(3, 128, 8))
    format_shape = FormatAudioShape('NCTF')
    assert format_shape(results)['input_shape'] == (3, 1, 128, 8)
    assert repr(format_shape) == format_shape.__class__.__name__ + \
        "(input_format='NCTF')"
| 6,791 | 33.830769 | 78 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_sampler.py | from torch.utils.data import DataLoader, Dataset
from mmaction.datasets.samplers import (ClassSpecificDistributedSampler,
DistributedSampler)
class MyDataset(Dataset):
    """Toy map-style dataset: 100 items whose labels cycle through 0-9.

    Args:
        class_prob (dict | None): Mapping from class label to a sampling
            weight, consumed by the class-specific sampler under test.
            Defaults to a uniform weight of 1 for classes 0-9.
    """

    def __init__(self, class_prob=None):
        super().__init__()
        # Use a None sentinel instead of a mutable default argument, so
        # each instance gets its own dict rather than a shared one.
        if class_prob is None:
            class_prob = {i: 1 for i in range(10)}
        self.class_prob = class_prob
        self.video_infos = [
            dict(data=idx, label=idx % 10) for idx in range(100)
        ]

    def __len__(self):
        return len(self.video_infos)

    def __getitem__(self, idx):
        return self.video_infos[idx]
def test_distributed_sampler():
    """Test DistributedSampler partitioning for several replica/rank pairs."""
    dataset = MyDataset()

    # single replica sees the whole dataset
    sampler = DistributedSampler(dataset, num_replicas=1, rank=0)
    data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
    batches = []
    for _, data in enumerate(data_loader):
        batches.append(data)
    assert len(batches) == 25
    assert sum([len(x['data']) for x in batches]) == 100

    # 100 samples split over 4 replicas -> 25 per rank
    sampler = DistributedSampler(dataset, num_replicas=4, rank=2)
    data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
    batches = []
    for i, data in enumerate(data_loader):
        batches.append(data)
    assert len(batches) == 7
    assert sum([len(x['data']) for x in batches]) == 25

    # 100 samples split over 6 replicas -> ceil(100/6) = 17 per rank
    sampler = DistributedSampler(dataset, num_replicas=6, rank=3)
    data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
    batches = []
    for i, data in enumerate(data_loader):
        batches.append(data)
    assert len(batches) == 5
    assert sum([len(x['data']) for x in batches]) == 17
def test_class_specific_distributed_sampler():
    """Test ClassSpecificDistributedSampler with and without dynamic_length.

    Classes 0-4 get weight 1 and classes 5-9 get weight 3, so with
    dynamic_length=True the epoch grows to 5*10*1 + 5*10*3 = 200 samples.
    """
    class_prob = dict(zip(list(range(10)), [1] * 5 + [3] * 5))
    dataset = MyDataset(class_prob=class_prob)

    sampler = ClassSpecificDistributedSampler(
        dataset, num_replicas=1, rank=0, dynamic_length=True)
    data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
    batches = []
    for _, data in enumerate(data_loader):
        batches.append(data)
    assert len(batches) == 50
    assert sum([len(x['data']) for x in batches]) == 200

    # dynamic_length=False keeps the original epoch length of 100
    sampler = ClassSpecificDistributedSampler(
        dataset, num_replicas=1, rank=0, dynamic_length=False)
    data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
    batches = []
    for i, data in enumerate(data_loader):
        batches.append(data)
    assert len(batches) == 25
    assert sum([len(x['data']) for x in batches]) == 100

    # 200 dynamic samples over 6 replicas -> ceil(200/6) = 34 per rank
    sampler = ClassSpecificDistributedSampler(
        dataset, num_replicas=6, rank=2, dynamic_length=True)
    data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
    batches = []
    for i, data in enumerate(data_loader):
        batches.append(data)
    assert len(batches) == 9
    assert sum([len(x['data']) for x in batches]) == 34

    # 100 static samples over 6 replicas -> ceil(100/6) = 17 per rank
    sampler = ClassSpecificDistributedSampler(
        dataset, num_replicas=6, rank=2, dynamic_length=False)
    data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)
    batches = []
    for i, data in enumerate(data_loader):
        batches.append(data)
    assert len(batches) == 5
    assert sum([len(x['data']) for x in batches]) == 17
| 3,148 | 31.802083 | 72 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_blending.py | import torch
from mmaction.datasets import CutmixBlending, MixupBlending
def test_mixup():
    """Test MixupBlending output shapes for NCHW and NCTHW inputs."""
    alpha = 0.2
    num_classes = 10
    label = torch.randint(0, num_classes, (4, ))
    mixup = MixupBlending(num_classes, alpha)

    # NCHW imgs
    imgs = torch.randn(4, 4, 3, 32, 32)
    mixed_imgs, mixed_label = mixup(imgs, label)
    assert mixed_imgs.shape == torch.Size((4, 4, 3, 32, 32))
    assert mixed_label.shape == torch.Size((4, num_classes))

    # NCTHW imgs
    imgs = torch.randn(4, 4, 2, 3, 32, 32)
    mixed_imgs, mixed_label = mixup(imgs, label)
    assert mixed_imgs.shape == torch.Size((4, 4, 2, 3, 32, 32))
    assert mixed_label.shape == torch.Size((4, num_classes))
def test_cutmix():
    """CutMix blending must keep the image shape and emit soft labels of
    shape ``(batch, num_classes)`` for both 2D and 3D recognizer inputs."""
    alpha = 0.2
    num_classes = 10
    gt_labels = torch.randint(0, num_classes, (4, ))
    # Note: the local is named after what it holds (the original reused the
    # name ``mixup`` for the CutMix instance).
    blending = CutmixBlending(num_classes, alpha)

    # Exercise both the NCHW and the NCTHW input layouts.
    for img_shape in ((4, 4, 3, 32, 32), (4, 4, 2, 3, 32, 32)):
        batch = torch.randn(img_shape)
        out_imgs, out_labels = blending(batch, gt_labels)
        assert out_imgs.shape == torch.Size(img_shape)
        assert out_labels.shape == torch.Size((4, num_classes))
| 1,306 | 30.119048 | 63 | py |
STTS | STTS-main/VideoSwin/tests/test_data/test_pipelines/test_loadings/test_load.py | import copy
import numpy as np
import pytest
import torch
from mmcv.utils import assert_dict_has_keys
from numpy.testing import assert_array_almost_equal
from mmaction.datasets.pipelines import (LoadAudioFeature, LoadHVULabel,
LoadLocalizationFeature,
LoadProposals)
from .base import BaseTestLoading
class TestLoad(BaseTestLoading):
    """Tests for label/feature/proposal loading pipeline transforms.

    Fixtures (``hvu_label_example*``, ``action_results``, ``proposals_dir``,
    ``bsp_feature_dir``, ``audio_feature_results``) come from
    ``BaseTestLoading``; the expected index/value constants below are tied to
    those fixtures.
    """

    def test_load_hvu_label(self):
        """LoadHVULabel should build per-tag label/mask vectors plus a
        per-category mask, and flip its repr flag after first use."""
        hvu_label_example1 = copy.deepcopy(self.hvu_label_example1)
        hvu_label_example2 = copy.deepcopy(self.hvu_label_example2)
        categories = hvu_label_example1['categories']
        category_nums = hvu_label_example1['category_nums']
        num_tags = sum(category_nums)
        num_categories = len(categories)

        loader = LoadHVULabel()
        # repr reflects lazy initialisation: False before the first call ...
        assert repr(loader) == (f'{loader.__class__.__name__}('
                                f'hvu_initialized={False})')

        result1 = loader(hvu_label_example1)
        label1 = torch.zeros(num_tags)
        mask1 = torch.zeros(num_tags)
        category_mask1 = torch.zeros(num_categories)
        # ... and True after the loader has been applied once.
        assert repr(loader) == (f'{loader.__class__.__name__}('
                                f'hvu_initialized={True})')

        # Expected tag indices / masks for example1 (fixture-dependent).
        label1[[0, 4, 5, 7, 8]] = 1.
        mask1[:10] = 1.
        category_mask1[:3] = 1.

        assert torch.all(torch.eq(label1, result1['label']))
        assert torch.all(torch.eq(mask1, result1['mask']))
        assert torch.all(torch.eq(category_mask1, result1['category_mask']))

        # Expected tag indices / masks for example2 (fixture-dependent).
        result2 = loader(hvu_label_example2)
        label2 = torch.zeros(num_tags)
        mask2 = torch.zeros(num_tags)
        category_mask2 = torch.zeros(num_categories)
        label2[[1, 8, 9, 11]] = 1.
        mask2[:2] = 1.
        mask2[7:] = 1.
        category_mask2[[0, 2, 3]] = 1.

        assert torch.all(torch.eq(label2, result2['label']))
        assert torch.all(torch.eq(mask2, result2['mask']))
        assert torch.all(torch.eq(category_mask2, result2['category_mask']))

    def test_load_localization_feature(self):
        """LoadLocalizationFeature should reject unsupported extensions and
        load a (400, 5) raw feature for the fixture sample."""
        target_keys = ['raw_feature']

        action_result = copy.deepcopy(self.action_results)

        # test error cases: only the default extension is supported
        with pytest.raises(NotImplementedError):
            load_localization_feature = LoadLocalizationFeature(
                'unsupport_ext')

        # test normal cases
        load_localization_feature = LoadLocalizationFeature()
        load_localization_feature_result = load_localization_feature(
            action_result)
        assert assert_dict_has_keys(load_localization_feature_result,
                                    target_keys)
        assert load_localization_feature_result['raw_feature'].shape == (400,
                                                                         5)
        assert repr(load_localization_feature) == (
            f'{load_localization_feature.__class__.__name__}('
            f'raw_feature_ext=.csv)')

    def test_load_proposals(self):
        """LoadProposals should reject unsupported proposal/feature
        extensions and load top-5 proposals with the fixture's boundary
        coordinates, scores and IoUs."""
        target_keys = [
            'bsp_feature', 'tmin', 'tmax', 'tmin_score', 'tmax_score',
            'reference_temporal_iou'
        ]

        action_result = copy.deepcopy(self.action_results)

        # test error cases: unsupported proposal ext, then feature ext
        with pytest.raises(NotImplementedError):
            load_proposals = LoadProposals(5, self.proposals_dir,
                                           self.bsp_feature_dir,
                                           'unsupport_ext')

        with pytest.raises(NotImplementedError):
            load_proposals = LoadProposals(5, self.proposals_dir,
                                           self.bsp_feature_dir, '.csv',
                                           'unsupport_ext')

        # test normal cases: values below match the fixture proposal files
        load_proposals = LoadProposals(5, self.proposals_dir,
                                       self.bsp_feature_dir)
        load_proposals_result = load_proposals(action_result)
        assert assert_dict_has_keys(load_proposals_result, target_keys)
        assert load_proposals_result['bsp_feature'].shape[0] == 5
        assert load_proposals_result['tmin'].shape == (5, )
        assert_array_almost_equal(
            load_proposals_result['tmin'], np.arange(0.1, 0.6, 0.1), decimal=4)
        assert load_proposals_result['tmax'].shape == (5, )
        assert_array_almost_equal(
            load_proposals_result['tmax'], np.arange(0.2, 0.7, 0.1), decimal=4)
        assert load_proposals_result['tmin_score'].shape == (5, )
        assert_array_almost_equal(
            load_proposals_result['tmin_score'],
            np.arange(0.95, 0.90, -0.01),
            decimal=4)
        assert load_proposals_result['tmax_score'].shape == (5, )
        assert_array_almost_equal(
            load_proposals_result['tmax_score'],
            np.arange(0.96, 0.91, -0.01),
            decimal=4)
        assert load_proposals_result['reference_temporal_iou'].shape == (5, )
        assert_array_almost_equal(
            load_proposals_result['reference_temporal_iou'],
            np.arange(0.85, 0.80, -0.01),
            decimal=4)
        assert repr(load_proposals) == (
            f'{load_proposals.__class__.__name__}('
            f'top_k={5}, '
            f'pgm_proposals_dir={self.proposals_dir}, '
            f'pgm_features_dir={self.bsp_feature_dir}, '
            f'proposal_ext=.csv, '
            f'feature_ext=.npy)')

    def test_load_audio_feature(self):
        """LoadAudioFeature should load the fixture feature, and pad a
        (640, 80) zero feature when the file is missing (pad_method=zero)."""
        target_keys = ['audios']
        inputs = copy.deepcopy(self.audio_feature_results)
        load_audio_feature = LoadAudioFeature()
        results = load_audio_feature(inputs)
        assert assert_dict_has_keys(results, target_keys)

        # test when no audio feature file exists: falls back to padding
        inputs = copy.deepcopy(self.audio_feature_results)
        inputs['audio_path'] = 'foo/foo/bar.npy'
        load_audio_feature = LoadAudioFeature()
        results = load_audio_feature(inputs)
        assert results['audios'].shape == (640, 80)
        assert assert_dict_has_keys(results, target_keys)
        assert repr(load_audio_feature) == (
            f'{load_audio_feature.__class__.__name__}('
            f'pad_method=zero)')
| 6,171 | 39.605263 | 79 | py |
STTS | STTS-main/VideoSwin/tests/test_utils/test_module_hooks.py | import copy
import os.path as osp
import mmcv
import numpy as np
import pytest
import torch
from mmaction.models import build_recognizer
from mmaction.utils import register_module_hooks
from mmaction.utils.module_hooks import GPUNormalize
def test_register_module_hooks():
    """register_module_hooks should attach GPUNormalize's ``normalize_hook``
    at the requested position (forward_pre / forward / backward) of the
    requested submodule, and raise ValueError for unknown positions or
    module names."""
    _module_hooks = [
        dict(
            type='GPUNormalize',
            hooked_module='backbone',
            hook_pos='forward_pre',
            input_format='NCHW',
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375])
    ]

    repo_dpath = osp.dirname(osp.dirname(osp.dirname(__file__)))
    config_fpath = osp.join(repo_dpath, 'configs/_base_/models/tsm_r50.py')
    config = mmcv.Config.fromfile(config_fpath)
    # avoid downloading pretrained weights during the test
    config.model['backbone']['pretrained'] = None

    # case 1: hook registered before backbone.forward
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = 'forward_pre'
    recognizer = build_recognizer(config.model)
    handles = register_module_hooks(recognizer, module_hooks)
    assert recognizer.backbone._forward_pre_hooks[
        handles[0].id].__name__ == 'normalize_hook'

    # case 2: hook registered after backbone.forward
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = 'forward'
    recognizer = build_recognizer(config.model)
    handles = register_module_hooks(recognizer, module_hooks)
    assert recognizer.backbone._forward_hooks[
        handles[0].id].__name__ == 'normalize_hook'

    # case 3: backward hook on a different submodule (cls_head)
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hooked_module'] = 'cls_head'
    module_hooks[0]['hook_pos'] = 'backward'
    recognizer = build_recognizer(config.model)
    handles = register_module_hooks(recognizer, module_hooks)
    assert recognizer.cls_head._backward_hooks[
        handles[0].id].__name__ == 'normalize_hook'

    # case 4: unknown hook position must raise
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = '_other_pos'
    recognizer = build_recognizer(config.model)
    with pytest.raises(ValueError):
        handles = register_module_hooks(recognizer, module_hooks)

    # case 5: unknown hooked module must raise
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hooked_module'] = '_other_module'
    recognizer = build_recognizer(config.model)
    with pytest.raises(ValueError):
        handles = register_module_hooks(recognizer, module_hooks)
def test_gpu_normalize():
    """GPUNormalize should build a mean tensor broadcastable to each
    supported input format and normalize inputs via its forward-pre hook;
    unknown formats must raise ValueError."""

    def check_normalize(origin_imgs, result_imgs, norm_cfg):
        """Check if the origin_imgs are normalized correctly into result_imgs
        in a given norm_cfg."""
        from numpy.testing import assert_array_almost_equal
        # invert (x - mean) / std and compare with the original images
        target_imgs = result_imgs.copy()
        target_imgs *= norm_cfg['std']
        target_imgs += norm_cfg['mean']
        assert_array_almost_equal(origin_imgs, target_imgs, decimal=4)

    _gpu_normalize_cfg = dict(
        input_format='NCTHW',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375])

    # case 1: NCHW — also run the hook end-to-end on random uint8 images
    gpu_normalize_cfg = copy.deepcopy(_gpu_normalize_cfg)
    gpu_normalize_cfg['input_format'] = 'NCHW'
    gpu_normalize = GPUNormalize(**gpu_normalize_cfg)
    assert gpu_normalize._mean.shape == (1, 3, 1, 1)
    imgs = np.random.randint(256, size=(2, 240, 320, 3), dtype=np.uint8)
    _input = (torch.tensor(imgs).permute(0, 3, 1, 2), )
    normalize_hook = gpu_normalize.hook_func()
    _input = normalize_hook(torch.nn.Module, _input)
    result_imgs = np.array(_input[0].permute(0, 2, 3, 1))
    check_normalize(imgs, result_imgs, gpu_normalize_cfg)

    # case 2: NCTHW — mean gets an extra temporal axis
    gpu_normalize_cfg = copy.deepcopy(_gpu_normalize_cfg)
    gpu_normalize_cfg['input_format'] = 'NCTHW'
    gpu_normalize = GPUNormalize(**gpu_normalize_cfg)
    assert gpu_normalize._mean.shape == (1, 3, 1, 1, 1)

    # case 3: NCHW_Flow — same mean shape as NCHW
    gpu_normalize_cfg = copy.deepcopy(_gpu_normalize_cfg)
    gpu_normalize_cfg['input_format'] = 'NCHW_Flow'
    gpu_normalize = GPUNormalize(**gpu_normalize_cfg)
    assert gpu_normalize._mean.shape == (1, 3, 1, 1)

    # case 4: NPTCHW — leading num-proposal and temporal axes
    gpu_normalize_cfg = copy.deepcopy(_gpu_normalize_cfg)
    gpu_normalize_cfg['input_format'] = 'NPTCHW'
    gpu_normalize = GPUNormalize(**gpu_normalize_cfg)
    assert gpu_normalize._mean.shape == (1, 1, 1, 3, 1, 1)

    # case 5: unsupported format must raise
    gpu_normalize_cfg = copy.deepcopy(_gpu_normalize_cfg)
    gpu_normalize_cfg['input_format'] = '_format'
    with pytest.raises(ValueError):
        gpu_normalize = GPUNormalize(**gpu_normalize_cfg)
| 4,387 | 35.264463 | 77 | py |
STTS | STTS-main/VideoSwin/tests/test_utils/test_onnx.py | import os.path as osp
import tempfile
import torch.nn as nn
from tools.deployment.pytorch2onnx import _convert_batchnorm, pytorch2onnx
class TestModel(nn.Module):
    """Tiny 3D conv + SyncBatchNorm model used to exercise ONNX export.

    The SyncBatchNorm layer is what ``_convert_batchnorm`` is expected to
    rewrite into a plain BatchNorm before export.
    """

    def __init__(self):
        super().__init__()
        # 1x1x1 3D convolution mapping 1 input channel to 2, then sync BN.
        self.conv = nn.Conv3d(1, 2, 1)
        self.bn = nn.SyncBatchNorm(2)

    def forward(self, x):
        feat = self.conv(x)
        return self.bn(feat)

    def forward_dummy(self, x):
        """Export-friendly entry point: same computation, tuple-wrapped.

        Kept independent of ``forward`` because the export test rebinds
        ``model.forward = model.forward_dummy``.
        """
        return (self.bn(self.conv(x)), )
def test_onnx_exporting():
    """Export the toy model to ONNX in a throw-away directory and verify
    the exported graph against the PyTorch outputs."""
    with tempfile.TemporaryDirectory() as tmpdir:
        onnx_path = osp.join(tmpdir, 'tmp.onnx')
        net = _convert_batchnorm(TestModel())
        # pytorch2onnx traces ``forward``, so expose the export-friendly
        # ``forward_dummy`` as the forward entry point when available.
        if hasattr(net, 'forward_dummy'):
            net.forward = net.forward_dummy
        pytorch2onnx(net, (2, 1, 1, 1, 1), output_file=onnx_path, verify=True)
| 845 | 25.4375 | 79 | py |
STTS | STTS-main/VideoSwin/tests/test_utils/test_bbox.py | import os.path as osp
from abc import abstractproperty
import numpy as np
import torch
from mmaction.core.bbox import bbox2result, bbox_target
from mmaction.datasets import AVADataset
from mmaction.utils import import_module_error_func
# mmdet is an optional dependency. When it is unavailable, replace the two
# builders with stubs decorated by import_module_error_func, which
# presumably raises an informative import error only when the stub is
# actually called — TODO confirm against mmaction.utils.
try:
    from mmdet.core.bbox import build_assigner, build_sampler
except (ImportError, ModuleNotFoundError):

    @import_module_error_func('mmdet')
    def build_assigner(*args, **kwargs):
        pass

    @import_module_error_func('mmdet')
    def build_sampler(*args, **kwargs):
        pass
def test_assigner_sampler():
    """MaxIoUAssignerAVA + RandomSampler on the bundled AVA detection
    fixture should reproduce the reference assignment (gt indices, IoUs,
    multi-label targets) and return a size-consistent sampling result.

    The hard-coded expected tensors below are tied to the files under
    ``tests/data/eval_detection``.
    """
    data_prefix = osp.normpath(
        osp.join(osp.dirname(__file__), '../data/eval_detection'))
    ann_file = osp.join(data_prefix, 'gt.csv')
    label_file = osp.join(data_prefix, 'action_list.txt')
    proposal_file = osp.join(data_prefix, 'proposal.pkl')
    dataset = AVADataset(
        ann_file=ann_file,
        exclude_file=None,
        pipeline=[],
        label_file=label_file,
        proposal_file=proposal_file,
        num_classes=4)

    # assign each proposal of the first sample to a ground-truth box
    assigner = dict(
        type='MaxIoUAssignerAVA',
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        min_pos_iou=0.5)
    assigner = build_assigner(assigner)
    proposal = torch.tensor(dataset[0]['proposals'])
    gt_bboxes = torch.tensor(dataset[0]['gt_bboxes'])
    gt_labels = torch.tensor(dataset[0]['gt_labels'])
    assign_result = assigner.assign(
        bboxes=proposal,
        gt_bboxes=gt_bboxes,
        gt_bboxes_ignore=None,
        gt_labels=gt_labels)
    assert assign_result.num_gts == 4
    # gt_inds: 0 means unassigned, k > 0 means matched to gt box k-1
    assert torch.all(
        assign_result.gt_inds == torch.tensor([0, 0, 3, 3, 0, 0, 0, 1, 0, 0]))
    assert torch.all(
        torch.isclose(
            assign_result.max_overlaps,
            torch.tensor([
                0.40386841, 0.47127257, 0.53544776, 0.58797631, 0.29281288,
                0.40979504, 0.45902917, 0.50093938, 0.21560125, 0.32948171
            ],
                         dtype=torch.float64)))
    # multi-label (one-hot per class) targets of the assigned proposals
    assert torch.all(
        torch.isclose(
            assign_result.labels,
            torch.tensor([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 1., 0., 0.],
                          [0., 1., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.],
                          [0., 0., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.],
                          [0., 0., 0., 0.]])))

    # sampled pos/neg index and bbox counts must agree pairwise
    sampler = dict(type='RandomSampler', num=32, pos_fraction=1)
    sampler = build_sampler(sampler)
    sampling_result = sampler.sample(assign_result, proposal, gt_bboxes,
                                     gt_labels)
    assert (sampling_result.pos_inds.shape[0] ==
            sampling_result.pos_bboxes.shape[0])
    assert (sampling_result.neg_inds.shape[0] ==
            sampling_result.neg_bboxes.shape[0])
    return sampling_result
def test_bbox2result():
    """bbox2result should turn (bboxes, per-class scores) into one
    ``[x1, y1, x2, y2, score]`` array per non-background class.

    NOTE(review): judging from the expected fixtures, column 0 of ``labels``
    is treated as background and a box appears under class c when its score
    in column c is positive — confirm against the bbox2result docstring.
    """
    bboxes = torch.tensor([[0.072, 0.47, 0.84, 0.898],
                           [0.23, 0.215, 0.781, 0.534],
                           [0.195, 0.128, 0.643, 0.944],
                           [0.236, 0.189, 0.689, 0.74],
                           [0.375, 0.371, 0.726, 0.804],
                           [0.024, 0.398, 0.776, 0.719]])
    labels = torch.tensor([[-1.650, 0.515, 0.798, 1.240],
                           [1.368, -1.128, 0.037, -1.087],
                           [0.481, -1.303, 0.501, -0.463],
                           [-0.356, 0.126, -0.840, 0.438],
                           [0.079, 1.269, -0.263, -0.538],
                           [-0.853, 0.391, 0.103, 0.398]])
    num_classes = 4
    result = bbox2result(bboxes, labels, num_classes)
    assert np.all(
        np.isclose(
            result[0],
            np.array([[0.072, 0.47, 0.84, 0.898, 0.515],
                      [0.236, 0.189, 0.689, 0.74, 0.126],
                      [0.375, 0.371, 0.726, 0.804, 1.269],
                      [0.024, 0.398, 0.776, 0.719, 0.391]])))
    assert np.all(
        np.isclose(
            result[1],
            np.array([[0.072, 0.47, 0.84, 0.898, 0.798],
                      [0.23, 0.215, 0.781, 0.534, 0.037],
                      [0.195, 0.128, 0.643, 0.944, 0.501],
                      [0.024, 0.398, 0.776, 0.719, 0.103]])))
    assert np.all(
        np.isclose(
            result[2],
            np.array([[0.072, 0.47, 0.84, 0.898, 1.24],
                      [0.236, 0.189, 0.689, 0.74, 0.438],
                      [0.024, 0.398, 0.776, 0.719, 0.398]])))
def test_bbox_target():
    """bbox_target should stack gt labels for positive boxes and all-zero
    labels for negative boxes, weighting positives by ``cfg.pos_weight``
    and negatives by 1."""
    pos_bboxes = torch.tensor([[0.072, 0.47, 0.84, 0.898],
                               [0.23, 0.215, 0.781, 0.534],
                               [0.195, 0.128, 0.643, 0.944],
                               [0.236, 0.189, 0.689, 0.74]])
    neg_bboxes = torch.tensor([[0.375, 0.371, 0.726, 0.804],
                               [0.024, 0.398, 0.776, 0.719]])
    pos_gt_labels = torch.tensor([[0., 0., 1., 0.], [0., 0., 0., 1.],
                                  [0., 1., 0., 0.], [0., 1., 0., 0.]])
    # abstractproperty() is (ab)used as a bare attribute container for cfg
    cfg = abstractproperty()
    cfg.pos_weight = 0.8
    labels, label_weights = bbox_target([pos_bboxes], [neg_bboxes],
                                        [pos_gt_labels], cfg)
    # 4 positive rows keep their gt labels, 2 negative rows are all zeros
    assert torch.all(
        torch.isclose(
            labels,
            torch.tensor([[0., 0., 1., 0.], [0., 0., 0., 1.], [0., 1., 0., 0.],
                          [0., 1., 0., 0.], [0., 0., 0., 0.], [0., 0., 0.,
                                                               0.]])))
    assert torch.all(
        torch.isclose(label_weights, torch.tensor([0.8] * 4 + [1.0] * 2)))
| 5,511 | 38.371429 | 79 | py |
STTS | STTS-main/VideoSwin/tests/test_metrics/test_losses.py | import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv import ConfigDict
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from torch.autograd import Variable
from mmaction.models import (BCELossWithLogits, BinaryLogisticRegressionLoss,
BMNLoss, CrossEntropyLoss, HVULoss, NLLLoss,
OHEMHingeLoss, SSNLoss)
def test_hvu_loss():
    """HVULoss must match hand-computed BCE-with-logits references in all
    four (loss_type, with_mask) configurations on a 2-sample, 4-tag,
    2-category ('action', 'scene') toy problem."""
    pred = torch.tensor([[-1.0525, -0.7085, 0.1819, -0.8011],
                         [0.1555, -1.5550, 0.5586, 1.9746]])
    gt = torch.tensor([[1., 0., 0., 0.], [0., 0., 1., 1.]])
    mask = torch.tensor([[1., 1., 0., 0.], [0., 0., 1., 1.]])
    category_mask = torch.tensor([[1., 0.], [0., 1.]])
    categories = ['action', 'scene']
    category_nums = (2, 2)
    category_loss_weights = (1, 1)

    # loss over all tags, no mask, per-sample sum then batch mean
    loss_all_nomask_sum = HVULoss(
        categories=categories,
        category_nums=category_nums,
        category_loss_weights=category_loss_weights,
        loss_type='all',
        with_mask=False,
        reduction='sum')
    loss = loss_all_nomask_sum(pred, gt, mask, category_mask)
    loss1 = F.binary_cross_entropy_with_logits(pred, gt, reduction='none')
    loss1 = torch.sum(loss1, dim=1)
    assert torch.eq(loss['loss_cls'], torch.mean(loss1))

    # loss over all tags, masked: average only over annotated tags
    loss_all_mask = HVULoss(
        categories=categories,
        category_nums=category_nums,
        category_loss_weights=category_loss_weights,
        loss_type='all',
        with_mask=True)
    loss = loss_all_mask(pred, gt, mask, category_mask)
    loss1 = F.binary_cross_entropy_with_logits(pred, gt, reduction='none')
    loss1 = torch.sum(loss1 * mask, dim=1) / torch.sum(mask, dim=1)
    loss1 = torch.mean(loss1)
    assert torch.eq(loss['loss_cls'], loss1)

    # per-category losses, masked: sample 0 counts for 'action' (tags 0-1),
    # sample 1 for 'scene' (tags 2-3); categories are averaged
    loss_ind_mask = HVULoss(
        categories=categories,
        category_nums=category_nums,
        category_loss_weights=category_loss_weights,
        loss_type='individual',
        with_mask=True)
    loss = loss_ind_mask(pred, gt, mask, category_mask)
    action_loss = F.binary_cross_entropy_with_logits(pred[:1, :2], gt[:1, :2])
    scene_loss = F.binary_cross_entropy_with_logits(pred[1:, 2:], gt[1:, 2:])
    loss1 = (action_loss + scene_loss) / 2
    assert torch.eq(loss['loss_cls'], loss1)

    # per-category losses, no mask, per-sample sum then batch mean
    loss_ind_nomask_sum = HVULoss(
        categories=categories,
        category_nums=category_nums,
        category_loss_weights=category_loss_weights,
        loss_type='individual',
        with_mask=False,
        reduction='sum')
    loss = loss_ind_nomask_sum(pred, gt, mask, category_mask)

    action_loss = F.binary_cross_entropy_with_logits(
        pred[:, :2], gt[:, :2], reduction='none')
    action_loss = torch.sum(action_loss, dim=1)
    action_loss = torch.mean(action_loss)

    scene_loss = F.binary_cross_entropy_with_logits(
        pred[:, 2:], gt[:, 2:], reduction='none')
    scene_loss = torch.sum(scene_loss, dim=1)
    scene_loss = torch.mean(scene_loss)

    loss1 = (action_loss + scene_loss) / 2
    assert torch.eq(loss['loss_cls'], loss1)
def test_cross_entropy_loss():
    """CrossEntropyLoss must match torch's functional cross entropy for
    hard (index) and soft (probability) labels, with and without class
    weights. The soft labels here are exact one-hot encodings of the hard
    labels, so the two losses coincide up to float rounding."""
    cls_scores = torch.rand((3, 4))
    hard_gt_labels = torch.LongTensor([0, 1, 2]).squeeze()
    soft_gt_labels = torch.FloatTensor([[1, 0, 0, 0], [0, 1, 0, 0],
                                        [0, 0, 1, 0]]).squeeze()

    # hard label without weight
    cross_entropy_loss = CrossEntropyLoss()
    output_loss = cross_entropy_loss(cls_scores, hard_gt_labels)
    assert torch.equal(output_loss, F.cross_entropy(cls_scores,
                                                    hard_gt_labels))

    # hard label with class weight
    weight = torch.rand(4)
    class_weight = weight.numpy().tolist()
    cross_entropy_loss = CrossEntropyLoss(class_weight=class_weight)
    output_loss = cross_entropy_loss(cls_scores, hard_gt_labels)
    assert torch.equal(
        output_loss,
        F.cross_entropy(cls_scores, hard_gt_labels, weight=weight))

    # soft label without class weight (compared against the hard-label
    # reference, hence the looser decimal=4 tolerance)
    cross_entropy_loss = CrossEntropyLoss()
    output_loss = cross_entropy_loss(cls_scores, soft_gt_labels)
    assert_almost_equal(
        output_loss.numpy(),
        F.cross_entropy(cls_scores, hard_gt_labels).numpy(),
        decimal=4)

    # soft label with class weight
    cross_entropy_loss = CrossEntropyLoss(class_weight=class_weight)
    output_loss = cross_entropy_loss(cls_scores, soft_gt_labels)
    assert_almost_equal(
        output_loss.numpy(),
        F.cross_entropy(cls_scores, hard_gt_labels, weight=weight).numpy(),
        decimal=4)
def test_bce_loss_with_logits():
    """BCELossWithLogits must match torch's functional BCE-with-logits,
    both unweighted and with per-class weights."""
    scores = torch.rand((3, 4))
    targets = torch.rand((3, 4))

    # unweighted
    loss_fn = BCELossWithLogits()
    assert torch.equal(
        loss_fn(scores, targets),
        F.binary_cross_entropy_with_logits(scores, targets))

    # per-class weights, handed to the loss as a plain Python list
    weight = torch.rand(4)
    loss_fn = BCELossWithLogits(class_weight=weight.numpy().tolist())
    assert torch.equal(
        loss_fn(scores, targets),
        F.binary_cross_entropy_with_logits(scores, targets, weight=weight))
def test_nll_loss():
    """NLLLoss must match torch's functional nll_loss on log-probabilities.

    Fix: the log-probabilities are now taken over the class dimension
    (``dim=1``, one distribution per sample). The previous fixture used
    ``nn.Softmax(dim=0)``, normalising over the batch dimension instead —
    the assertion still passed (both sides saw the same tensor), but the
    input was not a valid per-sample log-probability.
    """
    cls_scores = torch.randn(3, 3)
    gt_labels = torch.tensor([0, 2, 1]).squeeze()

    nll_loss = NLLLoss()
    log_probs = F.log_softmax(cls_scores, dim=1)
    output_loss = nll_loss(log_probs, gt_labels)
    assert torch.equal(output_loss, F.nll_loss(log_probs, gt_labels))
def test_binary_logistic_loss():
    """BinaryLogisticRegressionLoss must reproduce reference values: zero
    loss for perfect predictions, 0.231 for a slightly-off pair."""
    loss_fn = BinaryLogisticRegressionLoss()

    cases = [
        # (predicted scores, labels, expected loss)
        (torch.tensor([0., 1.]), torch.tensor([0., 1.]), np.array([0.])),
        (torch.tensor([0.3, 0.9]), torch.tensor([0., 1.]), np.array([0.231])),
    ]
    for reg_score, label, expected in cases:
        output_loss = loss_fn(reg_score, label, 0.5)
        assert_array_almost_equal(output_loss.numpy(), expected, decimal=4)
def test_bmn_loss():
    """BMNLoss components (temporal, proposal regression, proposal
    classification) and the combined loss must reproduce hand-computed
    reference values."""
    bmn_loss = BMNLoss()

    # test tem_loss: sum of binary logistic losses on start/end probs
    pred_start = torch.tensor([0.9, 0.1])
    pred_end = torch.tensor([0.1, 0.9])
    gt_start = torch.tensor([1., 0.])
    gt_end = torch.tensor([0., 1.])
    output_tem_loss = bmn_loss.tem_loss(pred_start, pred_end, gt_start,
                                        gt_end)
    binary_logistic_regression_loss = BinaryLogisticRegressionLoss()
    assert_loss = (
        binary_logistic_regression_loss(pred_start, gt_start) +
        binary_logistic_regression_loss(pred_end, gt_end))
    assert_array_almost_equal(
        output_tem_loss.numpy(), assert_loss.numpy(), decimal=4)

    # test pem_reg_loss (seeded: the loss samples negatives randomly)
    seed = 1
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    pred_bm_reg = torch.tensor([[0.1, 0.99], [0.5, 0.4]])
    gt_iou_map = torch.tensor([[0, 1.], [0, 1.]])
    mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]])
    output_pem_reg_loss = bmn_loss.pem_reg_loss(pred_bm_reg, gt_iou_map, mask)
    assert_array_almost_equal(
        output_pem_reg_loss.numpy(), np.array([0.2140]), decimal=4)

    # test pem_cls_loss
    pred_bm_cls = torch.tensor([[0.1, 0.99], [0.95, 0.2]])
    gt_iou_map = torch.tensor([[0., 1.], [0., 1.]])
    mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]])
    output_pem_cls_loss = bmn_loss.pem_cls_loss(pred_bm_cls, gt_iou_map, mask)
    assert_array_almost_equal(
        output_pem_cls_loss.numpy(), np.array([1.6137]), decimal=4)

    # test bmn_loss: total = tem + 10 * pem_reg + pem_cls, and the three
    # components are returned alongside the total
    pred_bm = torch.tensor([[[[0.1, 0.99], [0.5, 0.4]],
                             [[0.1, 0.99], [0.95, 0.2]]]])
    pred_start = torch.tensor([[0.9, 0.1]])
    pred_end = torch.tensor([[0.1, 0.9]])
    gt_iou_map = torch.tensor([[[0., 2.5], [0., 10.]]])
    gt_start = torch.tensor([[1., 0.]])
    gt_end = torch.tensor([[0., 1.]])
    mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]])
    output_loss = bmn_loss(pred_bm, pred_start, pred_end, gt_iou_map, gt_start,
                           gt_end, mask)
    assert_array_almost_equal(
        output_loss[0].numpy(),
        output_tem_loss + 10 * output_pem_reg_loss + output_pem_cls_loss)
    assert_array_almost_equal(output_loss[1].numpy(), output_tem_loss)
    assert_array_almost_equal(output_loss[2].numpy(), output_pem_reg_loss)
    assert_array_almost_equal(output_loss[3].numpy(), output_pem_cls_loss)
def test_ohem_hinge_loss():
    """OHEMHingeLoss forward must match the reference value, its backward
    pass must route gradient only to the selected class logit, and a
    gt/video size mismatch must raise ValueError."""
    # test normal case
    pred = torch.tensor([[
        0.5161, 0.5228, 0.7748, 0.0573, 0.1113, 0.8862, 0.1752, 0.9448, 0.0253,
        0.1009, 0.4371, 0.2232, 0.0412, 0.3487, 0.3350, 0.9294, 0.7122, 0.3072,
        0.2942, 0.7679
    ]],
                        requires_grad=True)
    gt = torch.tensor([8])
    num_video = 1
    loss = OHEMHingeLoss.apply(pred, gt, 1, 1.0, num_video)
    assert_array_almost_equal(
        loss.detach().numpy(), np.array([0.0552]), decimal=4)

    # only position gt-1 (index 7, 1-based labels) receives gradient
    loss.backward(Variable(torch.ones([1])))
    assert_array_almost_equal(
        np.array(pred.grad),
        np.array([[
            0., 0., 0., 0., 0., 0., 0., -1., 0., 0., 0., 0., 0., 0., 0., 0.,
            0., 0., 0., 0.
        ]]),
        decimal=4)

    # test error case: more gt entries than videos
    with pytest.raises(ValueError):
        gt = torch.tensor([8, 10])
        loss = OHEMHingeLoss.apply(pred, gt, 1, 1.0, num_video)
def test_ssn_loss():
    """Each SSNLoss component (activity, completeness, class-wise
    regression) must match a hand-rolled reference computation, and the
    combined loss must apply the configured 0.1 weights."""
    ssn_loss = SSNLoss()

    # test activity_loss: plain cross entropy over the indexed rows
    activity_score = torch.rand((8, 21))
    labels = torch.LongTensor([8] * 8).squeeze()
    activity_indexer = torch.tensor([0, 7])
    output_activity_loss = ssn_loss.activity_loss(activity_score, labels,
                                                  activity_indexer)
    assert torch.equal(
        output_activity_loss,
        F.cross_entropy(activity_score[activity_indexer, :],
                        labels[activity_indexer]))

    # test completeness_loss: OHEM hinge over positive and incomplete
    # proposals, normalised by the number of kept samples
    completeness_score = torch.rand((8, 20), requires_grad=True)
    labels = torch.LongTensor([8] * 8).squeeze()
    completeness_indexer = torch.tensor([0, 1, 2, 3, 4, 5, 6])
    positive_per_video = 1
    incomplete_per_video = 6
    output_completeness_loss = ssn_loss.completeness_loss(
        completeness_score, labels, completeness_indexer, positive_per_video,
        incomplete_per_video)

    pred = completeness_score[completeness_indexer, :]
    gt = labels[completeness_indexer]
    pred_dim = pred.size(1)
    pred = pred.view(-1, positive_per_video + incomplete_per_video, pred_dim)
    gt = gt.view(-1, positive_per_video + incomplete_per_video)
    # yapf:disable
    positive_pred = pred[:, :positive_per_video, :].contiguous().view(-1, pred_dim)  # noqa:E501
    incomplete_pred = pred[:, positive_per_video:, :].contiguous().view(-1, pred_dim)  # noqa:E501
    # yapf:enable
    ohem_ratio = 0.17
    positive_loss = OHEMHingeLoss.apply(
        positive_pred, gt[:, :positive_per_video].contiguous().view(-1), 1,
        1.0, positive_per_video)
    incomplete_loss = OHEMHingeLoss.apply(
        incomplete_pred, gt[:, positive_per_video:].contiguous().view(-1), -1,
        ohem_ratio, incomplete_per_video)
    num_positives = positive_pred.size(0)
    num_incompletes = int(incomplete_pred.size(0) * ohem_ratio)
    assert_loss = ((positive_loss + incomplete_loss) /
                   float(num_positives + num_incompletes))
    assert torch.equal(output_completeness_loss, assert_loss)

    # test reg_loss: smooth L1 on the (start, end) offsets of the class
    # predicted by the (1-based) label; the loss is scaled by 2
    bbox_pred = torch.rand((8, 20, 2))
    labels = torch.LongTensor([8] * 8).squeeze()
    bbox_targets = torch.rand((8, 2))
    regression_indexer = torch.tensor([0])
    output_reg_loss = ssn_loss.classwise_regression_loss(
        bbox_pred, labels, bbox_targets, regression_indexer)

    pred = bbox_pred[regression_indexer, :, :]
    gt = labels[regression_indexer]
    reg_target = bbox_targets[regression_indexer, :]
    class_idx = gt.data - 1
    classwise_pred = pred[:, class_idx, :]
    classwise_reg_pred = torch.cat((torch.diag(classwise_pred[:, :, 0]).view(
        -1, 1), torch.diag(classwise_pred[:, :, 1]).view(-1, 1)),
                                   dim=1)
    assert torch.equal(
        output_reg_loss,
        F.smooth_l1_loss(classwise_reg_pred.view(-1), reg_target.view(-1)) * 2)

    # test ssn_loss: proposal_type 0 = positive, 1 = incomplete,
    # 2 = background; completeness/regression losses weighted by 0.1
    proposal_type = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 2]])
    train_cfg = ConfigDict(
        dict(
            ssn=dict(
                sampler=dict(
                    num_per_video=8,
                    positive_ratio=1,
                    background_ratio=1,
                    incomplete_ratio=6,
                    add_gt_as_proposals=True),
                loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1))))
    output_loss = ssn_loss(activity_score, completeness_score, bbox_pred,
                           proposal_type, labels, bbox_targets, train_cfg)
    assert torch.equal(output_loss['loss_activity'], output_activity_loss)
    assert torch.equal(output_loss['loss_completeness'],
                       output_completeness_loss * 0.1)
    assert torch.equal(output_loss['loss_reg'], output_reg_loss * 0.1)
| 13,164 | 38.653614 | 98 | py |
STTS | STTS-main/VideoSwin/configs/_base_/models/tsm_r50.py | # model settings
# TSN-style 2D recognizer with a temporal-shift (TSM) ResNet-50 backbone.
model = {
    'type': 'Recognizer2D',
    # TSM-enhanced ResNet-50, initialised from torchvision weights.
    'backbone': {
        'type': 'ResNetTSM',
        'pretrained': 'torchvision://resnet50',
        'depth': 50,
        'norm_eval': False,
        'shift_div': 8,
    },
    # Classification head averaging per-segment scores.
    'cls_head': {
        'type': 'TSMHead',
        'num_classes': 400,
        'in_channels': 2048,
        'spatial_type': 'avg',
        'consensus': {'type': 'AvgConsensus', 'dim': 1},
        'dropout_ratio': 0.5,
        'init_std': 0.001,
        'is_shift': True,
    },
    # model training and testing settings
    'train_cfg': None,
    'test_cfg': {'average_clips': 'prob'},
}
| 563 | 24.636364 | 51 | py |
STTS | STTS-main/VideoSwin/configs/_base_/models/tsn_r50.py | # model settings
# TSN 2D recognizer with a plain ResNet-50 backbone.
model = {
    'type': 'Recognizer2D',
    # ImageNet-pretrained ResNet-50.
    'backbone': {
        'type': 'ResNet',
        'pretrained': 'torchvision://resnet50',
        'depth': 50,
        'norm_eval': False,
    },
    # TSN head averaging per-segment scores.
    'cls_head': {
        'type': 'TSNHead',
        'num_classes': 400,
        'in_channels': 2048,
        'spatial_type': 'avg',
        'consensus': {'type': 'AvgConsensus', 'dim': 1},
        'dropout_ratio': 0.4,
        'init_std': 0.01,
    },
    # model training and testing settings
    'train_cfg': None,
    'test_cfg': {'average_clips': None},
}
| 513 | 24.7 | 51 | py |
STTS | STTS-main/VideoSwin/configs/_base_/models/trn_r50.py | # model settings
# TRN 2D recognizer: ResNet-50 backbone plus a multi-scale relation head.
model = {
    'type': 'Recognizer2D',
    'backbone': {
        'type': 'ResNet',
        'pretrained': 'torchvision://resnet50',
        'depth': 50,
        'norm_eval': False,
        'partial_bn': True,
    },
    # Multi-scale temporal relation head over 8 segments.
    'cls_head': {
        'type': 'TRNHead',
        'num_classes': 400,
        'in_channels': 2048,
        'num_segments': 8,
        'spatial_type': 'avg',
        'relation_type': 'TRNMultiScale',
        'hidden_dim': 256,
        'dropout_ratio': 0.8,
        'init_std': 0.001,
    },
    # model training and testing settings
    'train_cfg': None,
    'test_cfg': {'average_clips': 'prob'},
}
| 576 | 24.086957 | 44 | py |
STTS | STTS-main/VideoSwin/configs/_base_/models/c3d_sports1m_pretrained.py | # model settings
# C3D 3D recognizer initialised from Sports-1M pretrained weights.
model = {
    'type': 'Recognizer3D',
    'backbone': {
        'type': 'C3D',
        'pretrained': 'https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_pretrain_20201016-dcc47ddc.pth',  # noqa: E501
        'style': 'pytorch',
        'conv_cfg': {'type': 'Conv3d'},
        'norm_cfg': None,
        'act_cfg': {'type': 'ReLU'},
        'dropout_ratio': 0.5,
        'init_std': 0.005,
    },
    # Classification head on the 4096-d fc features (no spatial pooling).
    'cls_head': {
        'type': 'I3DHead',
        'num_classes': 101,
        'in_channels': 4096,
        'spatial_type': None,
        'dropout_ratio': 0.5,
        'init_std': 0.01,
    },
    # model training and testing settings
    'train_cfg': None,
    'test_cfg': {'average_clips': 'score'},
}
| 703 | 28.333333 | 124 | py |
STTS | STTS-main/VideoSwin/configs/_base_/models/tpn_slowonly_r50.py | # model settings
# SlowOnly 3D recognizer with a Temporal Pyramid Network (TPN) neck.
model = {
    'type': 'Recognizer3D',
    'backbone': {
        'type': 'ResNet3dSlowOnly',
        'depth': 50,
        'pretrained': 'torchvision://resnet50',
        'lateral': False,
        # expose stage-3 and stage-4 features for the TPN neck
        'out_indices': (2, 3),
        'conv1_kernel': (1, 7, 7),
        'conv1_stride_t': 1,
        'pool1_stride_t': 1,
        'inflate': (0, 0, 1, 1),
        'norm_eval': False,
    },
    # TPN neck fusing the two backbone stages, with an auxiliary head.
    'neck': {
        'type': 'TPN',
        'in_channels': (1024, 2048),
        'out_channels': 1024,
        'spatial_modulation_cfg': {
            'in_channels': (1024, 2048),
            'out_channels': 2048,
        },
        'temporal_modulation_cfg': {'downsample_scales': (8, 8)},
        'upsample_cfg': {'scale_factor': (1, 1, 1)},
        'downsample_cfg': {'downsample_scale': (1, 1, 1)},
        'level_fusion_cfg': {
            'in_channels': (1024, 1024),
            'mid_channels': (1024, 1024),
            'out_channels': 2048,
            'downsample_scales': ((1, 1, 1), (1, 1, 1)),
        },
        'aux_head_cfg': {'out_channels': 400, 'loss_weight': 0.5},
    },
    'cls_head': {
        'type': 'TPNHead',
        'num_classes': 400,
        'in_channels': 2048,
        'spatial_type': 'avg',
        'consensus': {'type': 'AvgConsensus', 'dim': 1},
        'dropout_ratio': 0.5,
        'init_std': 0.01,
    },
    # model training and testing settings
    'train_cfg': None,
    'test_cfg': {'average_clips': 'prob'},
}
| 1,310 | 30.97561 | 63 | py |
STTS | STTS-main/VideoSwin/configs/_base_/models/i3d_r50.py | # model settings
# I3D 3D recognizer: ResNet-50 inflated from 2D ImageNet weights.
model = {
    'type': 'Recognizer3D',
    'backbone': {
        'type': 'ResNet3d',
        # inflate the 2D torchvision checkpoint into 3D filters
        'pretrained2d': True,
        'pretrained': 'torchvision://resnet50',
        'depth': 50,
        'conv1_kernel': (5, 7, 7),
        'conv1_stride_t': 2,
        'pool1_stride_t': 2,
        'conv_cfg': {'type': 'Conv3d'},
        'norm_eval': False,
        # per-block inflation pattern for the four residual stages
        'inflate': ((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        'zero_init_residual': False,
    },
    'cls_head': {
        'type': 'I3DHead',
        'num_classes': 400,
        'in_channels': 2048,
        'spatial_type': 'avg',
        'dropout_ratio': 0.5,
        'init_std': 0.01,
    },
    # model training and testing settings
    'train_cfg': None,
    'test_cfg': {'average_clips': 'prob'},
}
# This setting refers to https://github.com/open-mmlab/mmaction/blob/master/mmaction/models/tenons/backbones/resnet_i3d.py#L329-L332 # noqa: E501
| 870 | 30.107143 | 146 | py |
STTS | STTS-main/VideoSwin/configs/_base_/models/tin_r50.py | # model settings
# TIN 2D recognizer: temporal-interlace ResNet-50 with a TSM-style head.
model = {
    'type': 'Recognizer2D',
    'backbone': {
        'type': 'ResNetTIN',
        'pretrained': 'torchvision://resnet50',
        'depth': 50,
        'norm_eval': False,
        'shift_div': 4,
    },
    # TSMHead reused without the shift (TIN shifts inside the backbone).
    'cls_head': {
        'type': 'TSMHead',
        'num_classes': 400,
        'in_channels': 2048,
        'spatial_type': 'avg',
        'consensus': {'type': 'AvgConsensus', 'dim': 1},
        'dropout_ratio': 0.5,
        'init_std': 0.001,
        'is_shift': False,
    },
    # model training and testing settings
    'train_cfg': None,
    'test_cfg': {'average_clips': None},
}
| 562 | 24.590909 | 51 | py |
STTS | STTS-main/VideoSwin/configs/_base_/models/tanet_r50.py | # model settings
# TANet 2D recognizer: temporal-adaptive ResNet-50 over 8 segments.
model = {
    'type': 'Recognizer2D',
    'backbone': {
        'type': 'TANet',
        'pretrained': 'torchvision://resnet50',
        'depth': 50,
        'num_segments': 8,
        'tam_cfg': {},
    },
    'cls_head': {
        'type': 'TSMHead',
        'num_classes': 400,
        'in_channels': 2048,
        'spatial_type': 'avg',
        'consensus': {'type': 'AvgConsensus', 'dim': 1},
        'dropout_ratio': 0.5,
        'init_std': 0.001,
    },
    # model training and testing settings
    'train_cfg': None,
    'test_cfg': {'average_clips': 'prob'},
}
| 538 | 24.666667 | 51 | py |
STTS | STTS-main/VideoSwin/configs/_base_/models/tpn_tsm_r50.py | # model settings
# TSM 2D recognizer with a Temporal Pyramid Network (TPN) neck.
model = {
    'type': 'Recognizer2D',
    'backbone': {
        'type': 'ResNetTSM',
        'pretrained': 'torchvision://resnet50',
        'depth': 50,
        # expose stage-3 and stage-4 features for the TPN neck
        'out_indices': (2, 3),
        'norm_eval': False,
        'shift_div': 8,
    },
    # TPN neck fusing the two backbone stages, with an auxiliary head.
    'neck': {
        'type': 'TPN',
        'in_channels': (1024, 2048),
        'out_channels': 1024,
        'spatial_modulation_cfg': {
            'in_channels': (1024, 2048),
            'out_channels': 2048,
        },
        'temporal_modulation_cfg': {'downsample_scales': (8, 8)},
        'upsample_cfg': {'scale_factor': (1, 1, 1)},
        'downsample_cfg': {'downsample_scale': (1, 1, 1)},
        'level_fusion_cfg': {
            'in_channels': (1024, 1024),
            'mid_channels': (1024, 1024),
            'out_channels': 2048,
            'downsample_scales': ((1, 1, 1), (1, 1, 1)),
        },
        'aux_head_cfg': {'out_channels': 174, 'loss_weight': 0.5},
    },
    'cls_head': {
        'type': 'TPNHead',
        'num_classes': 174,
        'in_channels': 2048,
        'spatial_type': 'avg',
        'consensus': {'type': 'AvgConsensus', 'dim': 1},
        'dropout_ratio': 0.5,
        'init_std': 0.01,
    },
    # model training and testing settings
    'train_cfg': None,
    'test_cfg': {'average_clips': 'prob', 'fcn_test': True},
}
| 1,202 | 31.513514 | 63 | py |
STTS | STTS-main/VideoSwin/configs/_base_/models/slowonly_r50.py | # model settings
# SlowOnly 3D recognizer (the slow pathway of SlowFast, no lateral links).
model = {
    'type': 'Recognizer3D',
    'backbone': {
        'type': 'ResNet3dSlowOnly',
        'depth': 50,
        'pretrained': 'torchvision://resnet50',
        'lateral': False,
        'conv1_kernel': (1, 7, 7),
        'conv1_stride_t': 1,
        'pool1_stride_t': 1,
        'inflate': (0, 0, 1, 1),
        'norm_eval': False,
    },
    'cls_head': {
        'type': 'I3DHead',
        'in_channels': 2048,
        'num_classes': 400,
        'spatial_type': 'avg',
        'dropout_ratio': 0.5,
    },
    # model training and testing settings
    'train_cfg': None,
    'test_cfg': {'average_clips': 'prob'},
}
| 587 | 24.565217 | 44 | py |
PathomicFusion | PathomicFusion-master/data_loaders.py | ### data_loaders.py
import os
import numpy as np
import pandas as pd
from PIL import Image
from sklearn import preprocessing
import torch
import torch.nn as nn
from torch.utils.data.dataset import Dataset # For custom datasets
from torchvision import datasets, transforms
################
# Dataset Loader
################
class PathgraphomicDatasetLoader(Dataset):
    """Survival dataset yielding (path image, graph, omic, event, time, grade).

    Modalities not required by ``mode`` are returned as the integer
    placeholder 0 so the collated tuple always has a fixed shape.
    """

    # Which modalities each supported mode string requires, in load order.
    _MODALITIES = {
        'path': ('path',), 'pathpath': ('path',),
        'graph': ('graph',), 'graphgraph': ('graph',),
        'omic': ('omic',), 'omicomic': ('omic',),
        'pathomic': ('path', 'omic'),
        'graphomic': ('graph', 'omic'),
        'pathgraph': ('path', 'graph'),
        'pathgraphomic': ('path', 'graph', 'omic'),
    }

    def __init__(self, opt, data, split, mode='omic'):
        """
        Args:
            opt: options object; ``opt.input_size_path`` sets the crop size.
            data: dict of splits, each holding x_path/x_grph/x_omic/e/t/g.
            split: which split of ``data`` to serve.
            mode: modality combination key (see ``_MODALITIES``).
        """
        split_data = data[split]
        self.X_path = split_data['x_path']
        self.X_grph = split_data['x_grph']
        self.X_omic = split_data['x_omic']
        self.e = split_data['e']
        self.t = split_data['t']
        self.g = split_data['g']
        self.mode = mode
        self.transforms = transforms.Compose([
            transforms.RandomHorizontalFlip(0.5),
            transforms.RandomVerticalFlip(0.5),
            transforms.RandomCrop(opt.input_size_path),
            transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.05, hue=0.01),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    def _path_tensor(self, index):
        # Histology ROI read from disk with train-time augmentations applied.
        return self.transforms(Image.open(self.X_path[index]).convert('RGB'))

    def _graph_tensor(self, index):
        # Graphs are stored as serialized torch objects on disk.
        return torch.load(self.X_grph[index])

    def _omic_tensor(self, index):
        return torch.tensor(self.X_omic[index]).type(torch.FloatTensor)

    def __getitem__(self, index):
        event = torch.tensor(self.e[index]).type(torch.FloatTensor)
        time = torch.tensor(self.t[index]).type(torch.FloatTensor)
        grade = torch.tensor(self.g[index]).type(torch.LongTensor)
        wanted = self._MODALITIES.get(self.mode)
        if wanted is None:
            return None  # unknown mode: mirrors the original implicit None
        x_path = self._path_tensor(index) if 'path' in wanted else 0
        x_grph = self._graph_tensor(index) if 'graph' in wanted else 0
        x_omic = self._omic_tensor(index) if 'omic' in wanted else 0
        return (x_path, x_grph, x_omic, event, time, grade)

    def __len__(self):
        return len(self.X_path)
class PathgraphomicFastDatasetLoader(Dataset):
    """Fast variant of PathgraphomicDatasetLoader: path features are
    precomputed embeddings held in memory rather than images read from disk.
    Unused modalities are returned as the integer placeholder 0.
    """

    # Which modalities each supported mode string requires, in load order.
    _MODALITIES = {
        'path': ('path',), 'pathpath': ('path',),
        'graph': ('graph',), 'graphgraph': ('graph',),
        'omic': ('omic',), 'omicomic': ('omic',),
        'pathomic': ('path', 'omic'),
        'graphomic': ('graph', 'omic'),
        'pathgraph': ('path', 'graph'),
        'pathgraphomic': ('path', 'graph', 'omic'),
    }

    def __init__(self, opt, data, split, mode='omic'):
        """
        Args:
            opt: options object (unused here; kept for interface parity).
            data: dict of splits, each holding x_path/x_grph/x_omic/e/t/g.
            split: which split of ``data`` to serve.
            mode: modality combination key (see ``_MODALITIES``).
        """
        split_data = data[split]
        self.X_path = split_data['x_path']
        self.X_grph = split_data['x_grph']
        self.X_omic = split_data['x_omic']
        self.e = split_data['e']
        self.t = split_data['t']
        self.g = split_data['g']
        self.mode = mode

    def _path_tensor(self, index):
        # Precomputed embedding; drop the leading singleton batch dimension.
        return torch.tensor(self.X_path[index]).type(torch.FloatTensor).squeeze(0)

    def _graph_tensor(self, index):
        return torch.load(self.X_grph[index])

    def _omic_tensor(self, index):
        return torch.tensor(self.X_omic[index]).type(torch.FloatTensor)

    def __getitem__(self, index):
        event = torch.tensor(self.e[index]).type(torch.FloatTensor)
        time = torch.tensor(self.t[index]).type(torch.FloatTensor)
        grade = torch.tensor(self.g[index]).type(torch.LongTensor)
        wanted = self._MODALITIES.get(self.mode)
        if wanted is None:
            return None  # unknown mode: mirrors the original implicit None
        x_path = self._path_tensor(index) if 'path' in wanted else 0
        x_grph = self._graph_tensor(index) if 'graph' in wanted else 0
        x_omic = self._omic_tensor(index) if 'omic' in wanted else 0
        return (x_path, x_grph, x_omic, event, time, grade)

    def __len__(self):
        return len(self.X_path)
| 6,141 | 46.984375 | 111 | py |
PathomicFusion | PathomicFusion-master/fusion.py | import torch
import torch.nn as nn
from utils import init_max_weights
class BilinearFusion(nn.Module):
    """Gated bilinear (Kronecker) fusion of two unimodal embeddings.

    Each modality is optionally gated by a sigmoid attention computed from a
    bilinear interaction with the other modality; a constant 1 is appended to
    both gated vectors so unimodal terms survive the outer product; the
    per-sample outer product is then encoded down to ``mmhid`` features,
    optionally skip-concatenated with the gated unimodal vectors.
    """

    def __init__(self, skip=1, use_bilinear=1, gate1=1, gate2=1, dim1=32, dim2=32, scale_dim1=1, scale_dim2=1, mmhid=64, dropout_rate=0.25):
        super(BilinearFusion, self).__init__()
        self.skip = skip
        self.use_bilinear = use_bilinear
        self.gate1 = gate1
        self.gate2 = gate2

        dim1_og, dim2_og, dim1, dim2 = dim1, dim2, dim1//scale_dim1, dim2//scale_dim2
        skip_dim = dim1+dim2+2 if skip else 0  # +2 for the appended constant 1s

        self.linear_h1 = nn.Sequential(nn.Linear(dim1_og, dim1), nn.ReLU())
        self.linear_z1 = nn.Bilinear(dim1_og, dim2_og, dim1) if use_bilinear else nn.Sequential(nn.Linear(dim1_og+dim2_og, dim1))
        self.linear_o1 = nn.Sequential(nn.Linear(dim1, dim1), nn.ReLU(), nn.Dropout(p=dropout_rate))

        self.linear_h2 = nn.Sequential(nn.Linear(dim2_og, dim2), nn.ReLU())
        self.linear_z2 = nn.Bilinear(dim1_og, dim2_og, dim2) if use_bilinear else nn.Sequential(nn.Linear(dim1_og+dim2_og, dim2))
        self.linear_o2 = nn.Sequential(nn.Linear(dim2, dim2), nn.ReLU(), nn.Dropout(p=dropout_rate))

        self.post_fusion_dropout = nn.Dropout(p=dropout_rate)
        self.encoder1 = nn.Sequential(nn.Linear((dim1+1)*(dim2+1), mmhid), nn.ReLU(), nn.Dropout(p=dropout_rate))
        self.encoder2 = nn.Sequential(nn.Linear(mmhid+skip_dim, mmhid), nn.ReLU(), nn.Dropout(p=dropout_rate))
        init_max_weights(self)

    def forward(self, vec1, vec2):
        ### Gated Multimodal Units
        if self.gate1:
            h1 = self.linear_h1(vec1)
            z1 = self.linear_z1(vec1, vec2) if self.use_bilinear else self.linear_z1(torch.cat((vec1, vec2), dim=1))
            o1 = self.linear_o1(nn.Sigmoid()(z1)*h1)
        else:
            o1 = self.linear_o1(vec1)

        if self.gate2:
            h2 = self.linear_h2(vec2)
            z2 = self.linear_z2(vec1, vec2) if self.use_bilinear else self.linear_z2(torch.cat((vec1, vec2), dim=1))
            o2 = self.linear_o2(nn.Sigmoid()(z2)*h2)
        else:
            o2 = self.linear_o2(vec2)

        ### Fusion
        # BUGFIX: use device-/dtype-agnostic ones instead of
        # torch.cuda.FloatTensor, which crashed on CPU-only machines.
        o1 = torch.cat((o1, torch.ones(o1.shape[0], 1, device=o1.device, dtype=o1.dtype)), 1)
        o2 = torch.cat((o2, torch.ones(o2.shape[0], 1, device=o2.device, dtype=o2.dtype)), 1)
        o12 = torch.bmm(o1.unsqueeze(2), o2.unsqueeze(1)).flatten(start_dim=1)  # outer product per sample
        out = self.post_fusion_dropout(o12)
        out = self.encoder1(out)
        if self.skip: out = torch.cat((out, o1, o2), 1)
        out = self.encoder2(out)
        return out
class TrilinearFusion_A(nn.Module):
    """Gated trilinear (Kronecker) fusion of path, graph and omic embeddings.

    Gating scheme A: path is gated with omic, graph is gated with omic, and
    omic is gated with path.  A constant 1 is appended to each gated vector so
    lower-order interaction terms survive the three-way outer product, which
    is then encoded down to ``mmhid`` features.
    """

    def __init__(self, skip=1, use_bilinear=1, gate1=1, gate2=1, gate3=1, dim1=32, dim2=32, dim3=32, scale_dim1=1, scale_dim2=1, scale_dim3=1, mmhid=96, dropout_rate=0.25):
        super(TrilinearFusion_A, self).__init__()
        self.skip = skip
        self.use_bilinear = use_bilinear
        self.gate1 = gate1
        self.gate2 = gate2
        self.gate3 = gate3

        dim1_og, dim2_og, dim3_og, dim1, dim2, dim3 = dim1, dim2, dim3, dim1//scale_dim1, dim2//scale_dim2, dim3//scale_dim3
        skip_dim = dim1+dim2+dim3+3 if skip else 0  # +3 for the appended constant 1s

        ### Path
        self.linear_h1 = nn.Sequential(nn.Linear(dim1_og, dim1), nn.ReLU())
        self.linear_z1 = nn.Bilinear(dim1_og, dim3_og, dim1) if use_bilinear else nn.Sequential(nn.Linear(dim1_og+dim3_og, dim1))
        self.linear_o1 = nn.Sequential(nn.Linear(dim1, dim1), nn.ReLU(), nn.Dropout(p=dropout_rate))

        ### Graph
        self.linear_h2 = nn.Sequential(nn.Linear(dim2_og, dim2), nn.ReLU())
        self.linear_z2 = nn.Bilinear(dim2_og, dim3_og, dim2) if use_bilinear else nn.Sequential(nn.Linear(dim2_og+dim3_og, dim2))
        self.linear_o2 = nn.Sequential(nn.Linear(dim2, dim2), nn.ReLU(), nn.Dropout(p=dropout_rate))

        ### Omic
        self.linear_h3 = nn.Sequential(nn.Linear(dim3_og, dim3), nn.ReLU())
        self.linear_z3 = nn.Bilinear(dim1_og, dim3_og, dim3) if use_bilinear else nn.Sequential(nn.Linear(dim1_og+dim3_og, dim3))
        self.linear_o3 = nn.Sequential(nn.Linear(dim3, dim3), nn.ReLU(), nn.Dropout(p=dropout_rate))

        # CONSISTENCY FIX: honor dropout_rate (was hard-coded p=0.25, unlike
        # every other dropout layer in this class; default is unchanged).
        self.post_fusion_dropout = nn.Dropout(p=dropout_rate)
        self.encoder1 = nn.Sequential(nn.Linear((dim1+1)*(dim2+1)*(dim3+1), mmhid), nn.ReLU(), nn.Dropout(p=dropout_rate))
        self.encoder2 = nn.Sequential(nn.Linear(mmhid+skip_dim, mmhid), nn.ReLU(), nn.Dropout(p=dropout_rate))
        init_max_weights(self)

    def forward(self, vec1, vec2, vec3):
        ### Gated Multimodal Units
        if self.gate1:
            h1 = self.linear_h1(vec1)
            z1 = self.linear_z1(vec1, vec3) if self.use_bilinear else self.linear_z1(torch.cat((vec1, vec3), dim=1))  # Gate Path with Omic
            o1 = self.linear_o1(nn.Sigmoid()(z1)*h1)
        else:
            o1 = self.linear_o1(vec1)

        if self.gate2:
            h2 = self.linear_h2(vec2)
            z2 = self.linear_z2(vec2, vec3) if self.use_bilinear else self.linear_z2(torch.cat((vec2, vec3), dim=1))  # Gate Graph with Omic
            o2 = self.linear_o2(nn.Sigmoid()(z2)*h2)
        else:
            o2 = self.linear_o2(vec2)

        if self.gate3:
            h3 = self.linear_h3(vec3)
            z3 = self.linear_z3(vec1, vec3) if self.use_bilinear else self.linear_z3(torch.cat((vec1, vec3), dim=1))  # Gate Omic with Path
            o3 = self.linear_o3(nn.Sigmoid()(z3)*h3)
        else:
            o3 = self.linear_o3(vec3)

        ### Fusion
        # BUGFIX: device-/dtype-agnostic ones (torch.cuda.FloatTensor broke CPU runs).
        o1 = torch.cat((o1, torch.ones(o1.shape[0], 1, device=o1.device, dtype=o1.dtype)), 1)
        o2 = torch.cat((o2, torch.ones(o2.shape[0], 1, device=o2.device, dtype=o2.dtype)), 1)
        o3 = torch.cat((o3, torch.ones(o3.shape[0], 1, device=o3.device, dtype=o3.dtype)), 1)
        o12 = torch.bmm(o1.unsqueeze(2), o2.unsqueeze(1)).flatten(start_dim=1)
        o123 = torch.bmm(o12.unsqueeze(2), o3.unsqueeze(1)).flatten(start_dim=1)
        out = self.post_fusion_dropout(o123)
        out = self.encoder1(out)
        if self.skip: out = torch.cat((out, o1, o2, o3), 1)
        out = self.encoder2(out)
        return out
class TrilinearFusion_B(nn.Module):
    """Gated trilinear (Kronecker) fusion of path, graph and omic embeddings.

    Gating scheme B: path is gated with omic, graph is gated with path, and
    omic is gated with path.  A constant 1 is appended to each gated vector so
    lower-order interaction terms survive the three-way outer product, which
    is then encoded down to ``mmhid`` features.
    """

    def __init__(self, skip=1, use_bilinear=1, gate1=1, gate2=1, gate3=1, dim1=32, dim2=32, dim3=32, scale_dim1=1, scale_dim2=1, scale_dim3=1, mmhid=96, dropout_rate=0.25):
        super(TrilinearFusion_B, self).__init__()
        self.skip = skip
        self.use_bilinear = use_bilinear
        self.gate1 = gate1
        self.gate2 = gate2
        self.gate3 = gate3

        dim1_og, dim2_og, dim3_og, dim1, dim2, dim3 = dim1, dim2, dim3, dim1//scale_dim1, dim2//scale_dim2, dim3//scale_dim3
        skip_dim = dim1+dim2+dim3+3 if skip else 0  # +3 for the appended constant 1s

        ### Path
        self.linear_h1 = nn.Sequential(nn.Linear(dim1_og, dim1), nn.ReLU())
        self.linear_z1 = nn.Bilinear(dim1_og, dim3_og, dim1) if use_bilinear else nn.Sequential(nn.Linear(dim1_og+dim3_og, dim1))
        self.linear_o1 = nn.Sequential(nn.Linear(dim1, dim1), nn.ReLU(), nn.Dropout(p=dropout_rate))

        ### Graph (gated against the path modality in this variant)
        self.linear_h2 = nn.Sequential(nn.Linear(dim2_og, dim2), nn.ReLU())
        self.linear_z2 = nn.Bilinear(dim2_og, dim1_og, dim2) if use_bilinear else nn.Sequential(nn.Linear(dim2_og+dim1_og, dim2))
        self.linear_o2 = nn.Sequential(nn.Linear(dim2, dim2), nn.ReLU(), nn.Dropout(p=dropout_rate))

        ### Omic
        self.linear_h3 = nn.Sequential(nn.Linear(dim3_og, dim3), nn.ReLU())
        self.linear_z3 = nn.Bilinear(dim1_og, dim3_og, dim3) if use_bilinear else nn.Sequential(nn.Linear(dim1_og+dim3_og, dim3))
        self.linear_o3 = nn.Sequential(nn.Linear(dim3, dim3), nn.ReLU(), nn.Dropout(p=dropout_rate))

        # CONSISTENCY FIX: honor dropout_rate (was hard-coded p=0.25, unlike
        # every other dropout layer in this class; default is unchanged).
        self.post_fusion_dropout = nn.Dropout(p=dropout_rate)
        self.encoder1 = nn.Sequential(nn.Linear((dim1+1)*(dim2+1)*(dim3+1), mmhid), nn.ReLU(), nn.Dropout(p=dropout_rate))
        self.encoder2 = nn.Sequential(nn.Linear(mmhid+skip_dim, mmhid), nn.ReLU(), nn.Dropout(p=dropout_rate))
        init_max_weights(self)

    def forward(self, vec1, vec2, vec3):
        ### Gated Multimodal Units
        if self.gate1:
            h1 = self.linear_h1(vec1)
            z1 = self.linear_z1(vec1, vec3) if self.use_bilinear else self.linear_z1(torch.cat((vec1, vec3), dim=1))  # Gate Path with Omic
            o1 = self.linear_o1(nn.Sigmoid()(z1)*h1)
        else:
            o1 = self.linear_o1(vec1)

        if self.gate2:
            h2 = self.linear_h2(vec2)
            z2 = self.linear_z2(vec2, vec1) if self.use_bilinear else self.linear_z2(torch.cat((vec2, vec1), dim=1))  # Gate Graph with Path
            o2 = self.linear_o2(nn.Sigmoid()(z2)*h2)
        else:
            o2 = self.linear_o2(vec2)

        if self.gate3:
            h3 = self.linear_h3(vec3)
            z3 = self.linear_z3(vec1, vec3) if self.use_bilinear else self.linear_z3(torch.cat((vec1, vec3), dim=1))  # Gate Omic with Path
            o3 = self.linear_o3(nn.Sigmoid()(z3)*h3)
        else:
            o3 = self.linear_o3(vec3)

        ### Fusion
        # BUGFIX: device-/dtype-agnostic ones (torch.cuda.FloatTensor broke CPU runs).
        o1 = torch.cat((o1, torch.ones(o1.shape[0], 1, device=o1.device, dtype=o1.dtype)), 1)
        o2 = torch.cat((o2, torch.ones(o2.shape[0], 1, device=o2.device, dtype=o2.dtype)), 1)
        o3 = torch.cat((o3, torch.ones(o3.shape[0], 1, device=o3.device, dtype=o3.dtype)), 1)
        o12 = torch.bmm(o1.unsqueeze(2), o2.unsqueeze(1)).flatten(start_dim=1)
        o123 = torch.bmm(o12.unsqueeze(2), o3.unsqueeze(1)).flatten(start_dim=1)
        out = self.post_fusion_dropout(o123)
        out = self.encoder1(out)
        if self.skip: out = torch.cat((out, o1, o2, o3), 1)
        out = self.encoder2(out)
        return out
PathomicFusion | PathomicFusion-master/utils.py | # Base / Native
import math
import os
import pickle
import re
import warnings
warnings.filterwarnings('ignore')
# Numerical / Array
import lifelines
from lifelines.utils import concordance_index
from lifelines import CoxPHFitter
from lifelines.datasets import load_regression_dataset
from lifelines.utils import k_fold_cross_validation
from lifelines.statistics import logrank_test
from imblearn.over_sampling import RandomOverSampler
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import numpy as np
import pandas as pd
from PIL import Image
import pylab
import scipy
import seaborn as sns
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import average_precision_score, auc, f1_score, roc_curve, roc_auc_score
from sklearn.preprocessing import LabelBinarizer
from scipy import interp
mpl.rcParams['axes.linewidth'] = 3 #set the value globally
# Torch
import torch
import torch.nn as nn
from torch.nn import init, Parameter
from torch.utils.data._utils.collate import *
from torch.utils.data.dataloader import default_collate
import torch_geometric
from torch_geometric.data import Batch
################
# Regularization
################
def regularize_weights(model, reg_type=None):
    """Return the L1 norm of all parameters of ``model`` (None if it has none)."""
    total = None
    for param in model.parameters():
        term = torch.abs(param).sum()  # equivalent to param.norm(1)
        total = term if total is None else total + term
    return total
def regularize_path_weights(model, reg_type=None):
    """L1 norm over the classifier and final linear layers of a wrapped path model."""
    total = None
    for layer in (model.module.classifier, model.module.linear):
        for W in layer.parameters():
            term = torch.abs(W).sum()  # equivalent to W.norm(1)
            total = term if total is None else total + term
    return total
def regularize_MM_weights(model, reg_type=None):
    """L1 penalty over the fusion submodules of a DataParallel-wrapped model.

    Sums |W| over every parameter of each fusion component present on
    ``model.module`` (omic net, gating/bilinear layers, encoders, classifier).
    Returns None if none of the components exist.

    BUGFIX: the original called ``model.module.__hasattr__(name)``; Python
    objects (including nn.Module) define no ``__hasattr__`` method, so that
    attribute lookup itself raises AttributeError.  Use builtin ``hasattr``.
    Also dedupes the thirteen hand-unrolled copies of the same loop.
    """
    # Same component order as the original unrolled version.
    component_names = (
        'omic_net',
        'linear_h_path', 'linear_h_omic', 'linear_h_grph',
        'linear_z_path', 'linear_z_omic', 'linear_z_grph',
        'linear_o_path', 'linear_o_omic', 'linear_o_grph',
        'encoder1', 'encoder2', 'classifier',
    )
    l1_reg = None
    for name in component_names:
        if not hasattr(model.module, name):
            continue
        for W in getattr(model.module, name).parameters():
            term = torch.abs(W).sum()  # equivalent to W.norm(1)
            l1_reg = term if l1_reg is None else l1_reg + term
    return l1_reg
def regularize_MM_omic(model, reg_type=None):
    """L1 penalty over just the genomic subnetwork, if present (else None).

    BUGFIX: replaced ``model.module.__hasattr__('omic_net')`` — objects do
    not define a ``__hasattr__`` method, so the lookup itself raises — with
    the builtin ``hasattr``.
    """
    l1_reg = None
    if hasattr(model.module, 'omic_net'):
        for W in model.module.omic_net.parameters():
            term = torch.abs(W).sum()  # equivalent to W.norm(1)
            l1_reg = term if l1_reg is None else l1_reg + term
    return l1_reg
################
# Network Initialization
################
def init_weights(net, init_type='orthogonal', init_gain=0.02):
    """Initialize network weights.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    'normal' was used in the original pix2pix/CycleGAN paper; xavier and
    kaiming may work better for some applications.
    """
    def init_func(m):
        cls_name = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in cls_name or 'Linear' in cls_name):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if getattr(m, 'bias', None) is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in cls_name:
            # BatchNorm's weight is not a matrix; only normal init applies.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)  # apply <init_func> to every submodule
def init_max_weights(module):
    """Self-normalizing init: N(0, 1/sqrt(fan_in)) weights and zero biases
    for every (exact) nn.Linear submodule of ``module``."""
    for layer in module.modules():
        if type(layer) is not nn.Linear:
            continue
        stdv = 1. / math.sqrt(layer.weight.size(1))
        layer.weight.data.normal_(0, stdv)
        layer.bias.data.zero_()
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Register a network on CPU/GPU (with multi-GPU support) and init weights.

    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- normal | xavier | kaiming | orthogonal | max | none
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns the (possibly DataParallel-wrapped) network.
    """
    if len(gpu_ids) > 0:
        assert(torch.cuda.is_available())
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs

    if init_type == 'none':
        print("Init Type: Not initializing networks.")
    elif init_type == 'max':
        # 'max' layers are assumed pre-initialized via init_max_weights.
        print("Init Type: Self-Normalizing Weights")
    else:
        print("Init Type:", init_type)
        init_weights(net, init_type, init_gain=init_gain)
    return net
################
# Freeze / Unfreeze
################
def unfreeze_unimodal(opt, model, epoch):
    """At epoch 5, unfreeze the unimodal subnetwork(s) used by ``opt.mode``."""
    # Which submodules of model.module each fusion mode warms up.
    schedule = {
        'graphomic': ('omic_net', 'grph_net'),
        'pathomic': ('omic_net',),
        'pathgraph': ('grph_net',),
        'pathgraphomic': ('omic_net', 'grph_net'),
        'omicomic': ('omic_net',),
        'graphgraph': ('grph_net',),
    }
    if epoch != 5:
        return
    labels = {'omic_net': 'Omic', 'grph_net': 'Graph'}
    for attr in schedule.get(opt.mode, ()):
        dfs_unfreeze(getattr(model.module, attr))
        print("Unfreezing %s" % labels[attr])
def dfs_freeze(model):
    """Recursively disable gradients for every parameter below ``model``."""
    for _, child in model.named_children():
        for p in child.parameters():
            p.requires_grad = False
        dfs_freeze(child)
def dfs_unfreeze(model):
    """Recursively re-enable gradients for every parameter below ``model``."""
    for _, child in model.named_children():
        for p in child.parameters():
            p.requires_grad = True
        dfs_unfreeze(child)
def print_if_frozen(module):
    """Log, per direct child of ``module``, whether its parameters are trainable."""
    for idx, child in enumerate(module.children()):
        for param in child.parameters():
            if param.requires_grad:
                print("Learnable!!! %d:" % idx, child)
            else:
                print("Still Frozen %d:" % idx, child)
def unfreeze_vgg_features(model, epoch):
    """Unfreeze the tail of ``model.features`` according to an epoch schedule.

    Only epoch 30 is scheduled (layers past index 45); any other epoch
    raises KeyError, as in the original.
    """
    epoch_schedule = {30: 45}
    threshold = epoch_schedule[epoch]
    for idx, child in enumerate(model.features.children()):
        if idx <= threshold:
            print("Still Frozen %d:" % idx, child)
        else:
            print("Unfreezing %d:" % idx, child)
            for param in child.parameters():
                param.requires_grad = True
################
# Collate Utils
################
def mixed_collate(batch):
    """Collate a batch whose tuple slots mix torch_geometric graphs with
    plain tensors/scalars: graphs go through Batch.from_data_list, everything
    else through the default torch collate."""
    collated = []
    for samples in zip(*batch):
        if type(samples[0]) is torch_geometric.data.data.Data:
            collated.append(Batch.from_data_list(samples, []))
        else:
            collated.append(default_collate(samples))
    return collated
################
# Survival Utils
################
def CoxLoss(survtime, censor, hazard_pred, device):
    """Negative partial log-likelihood of the Cox proportional hazards model.

    Credit: Travers Ching, cox-nnet (https://github.com/traversc/cox-nnet).

    Args:
        survtime: per-sample survival times.
        censor: per-sample event indicators (1 = event observed).
        hazard_pred: predicted log-hazards, any shape flattenable to (N,).
        device: device to place the risk matrix on.
    """
    n = len(survtime)
    # risk_rows[i][j] == 1 iff sample j is still at risk at sample i's time.
    risk_rows = [[int(survtime[j] >= survtime[i]) for j in range(n)] for i in range(n)]
    R_mat = torch.FloatTensor(risk_rows).to(device)
    theta = hazard_pred.reshape(-1)
    partial_ll = theta - torch.log(torch.sum(torch.exp(theta) * R_mat, dim=1))
    return -torch.mean(partial_ll * censor)
def accuracy(output, labels):
    """Fraction of rows of ``output`` whose argmax matches ``labels``."""
    predictions = output.max(1)[1].type_as(labels)
    n_correct = predictions.eq(labels).double().sum()
    return n_correct / len(labels)
def accuracy_cox(hazardsdata, labels):
    """Accuracy of median-dichotomized hazards against true survival events."""
    cutoff = np.median(hazardsdata)
    # High-risk group: hazards strictly above the median.
    predicted = (hazardsdata > cutoff).astype(int)
    return np.sum(predicted == labels) / len(labels)
def cox_log_rank(hazardsdata, labels, survtime_all):
    """P-value of a log-rank test between low- and high-hazard groups,
    split at the median predicted hazard."""
    cutoff = np.median(hazardsdata)
    high_risk = hazardsdata > cutoff
    low_risk = ~high_risk
    results = logrank_test(survtime_all[low_risk], survtime_all[high_risk],
                           event_observed_A=labels[low_risk],
                           event_observed_B=labels[high_risk])
    return results.p_value
def CIndex(hazards, labels, survtime_all):
    """Harrell's concordance index computed by pairwise comparison.

    For every comparable pair (i with an observed event, j surviving longer),
    a concordant hazard ordering scores 1 and a tie scores 0.5.

    BUGFIX: the tie branch was ``elif hazards[j] < hazards[i]`` — identical to
    the preceding condition and therefore unreachable; ties in the predicted
    hazard now correctly contribute 0.5, per the standard c-index definition.

    Raises ZeroDivisionError when no comparable pair exists (as before).
    """
    concord = 0.
    total = 0.
    N_test = labels.shape[0]
    for i in range(N_test):
        if labels[i] != 1:
            continue  # only observed events anchor comparable pairs
        for j in range(N_test):
            if survtime_all[j] > survtime_all[i]:
                total += 1
                if hazards[j] < hazards[i]:
                    concord += 1
                elif hazards[j] == hazards[i]:
                    concord += 0.5
    return (concord / total)
def CIndex_lifeline(hazards, labels, survtime_all):
    # Concordance index via lifelines; hazards are negated because
    # concordance_index expects higher scores to mean longer survival.
    return(concordance_index(survtime_all, -hazards, labels))
################
# Data Utils
################
def addHistomolecularSubtype(data):
    """
    Molecular Subtype: IDHwt == 0, IDHmut-non-codel == 1, IDHmut-codel == 2
    Histology Subtype: astrocytoma == 0, oligoastrocytoma == 1, oligodendroglioma == 2, glioblastoma == 3
    Prepends a 'Histomolecular subtype' column; rows matching no rule keep 1.0.
    """
    labeled = data.copy()
    labeled.insert(loc=0, column='Histomolecular subtype', value=np.ones(len(data)))
    # Astrocytoma or glioblastoma histology, split by IDH status.
    astro_or_gbm = np.logical_or(data['Histology'] == 0, data['Histology'] == 3)
    labeled.loc[np.logical_and(data['Molecular subtype'] == 0, astro_or_gbm), 'Histomolecular subtype'] = 'idhwt_ATC'
    labeled.loc[np.logical_and(data['Molecular subtype'] == 1, astro_or_gbm), 'Histomolecular subtype'] = 'idhmut_ATC'
    # Oligodendroglioma histology with IDHmut-codel.
    labeled.loc[np.logical_and(data['Molecular subtype'] == 2, data['Histology'] == 2), 'Histomolecular subtype'] = 'ODG'
    return labeled
def changeHistomolecularSubtype(data):
    """
    Molecular Subtype: IDHwt == 0, IDHmut-non-codel == 1, IDHmut-codel == 2
    Histology Subtype: astrocytoma == 0, oligoastrocytoma == 1, oligodendroglioma == 2, glioblastoma == 3
    Drops any existing 'Histomolecular subtype' column and recomputes it.
    """
    stripped = data.drop(['Histomolecular subtype'], axis=1)
    relabeled = stripped.copy()
    relabeled.insert(loc=0, column='Histomolecular subtype', value=np.ones(len(stripped)))
    astro_or_gbm = np.logical_or(stripped['Histology'] == 0, stripped['Histology'] == 3)
    relabeled.loc[np.logical_and(stripped['Molecular subtype'] == 0, astro_or_gbm), 'Histomolecular subtype'] = 'idhwt_ATC'
    relabeled.loc[np.logical_and(stripped['Molecular subtype'] == 1, astro_or_gbm), 'Histomolecular subtype'] = 'idhmut_ATC'
    relabeled.loc[np.logical_and(stripped['Molecular subtype'] == 2, stripped['Histology'] == 2), 'Histomolecular subtype'] = 'ODG'
    return relabeled
def getCleanAllDataset(dataroot='./data/TCGA_GBMLGG/', ignore_missing_moltype=False, ignore_missing_histype=False, use_rnaseq=False):
    """Load and clean the merged TCGA GBM/LGG dataset.

    Joins all_dataset.csv (features) with grade_data.csv (labels), optionally
    merges RNA-seq z-scores, imputes or drops rows with missing molecular /
    histological labels, and appends the histomolecular-subtype column.

    Args:
        dataroot: directory holding all_dataset.csv / grade_data.csv (and the
            RNA-seq text exports when use_rnaseq is True).
        ignore_missing_moltype: drop patients missing the molecular subtype
            instead of imputing.
        ignore_missing_histype: drop patients missing histology/grade
            instead of imputing.
        use_rnaseq: join per-gene RNA-seq z-scores (columns suffixed
            '_rnaseq') onto the feature table.

    Returns:
        (metadata, all_dataset): list of non-feature column names, and the
        cleaned DataFrame indexed by 'TCGA ID'.
    """
    ### 1. Joining all_dataset.csv with grade data. Looks at columns with missing samples
    metadata = ['Histology', 'Grade', 'Molecular subtype', 'TCGA ID', 'censored', 'Survival months']
    all_dataset = pd.read_csv(os.path.join(dataroot, 'all_dataset.csv')).drop('indexes', axis=1)
    all_dataset.index = all_dataset['TCGA ID']
    all_grade = pd.read_csv(os.path.join(dataroot, 'grade_data.csv'))
    # Normalize the verbose histology label before joining.
    all_grade['Histology'] = all_grade['Histology'].str.replace('astrocytoma (glioblastoma)', 'glioblastoma', regex=False)
    all_grade.index = all_grade['TCGA ID']
    assert pd.Series(all_dataset.index).equals(pd.Series(sorted(all_grade.index)))
    all_dataset = all_dataset.join(all_grade[['Histology', 'Grade', 'Molecular subtype']], how='inner')
    # Move the three newly-joined label columns to the front.
    cols = all_dataset.columns.tolist()
    cols = cols[-3:] + cols[:-3]
    all_dataset = all_dataset[cols]
    if use_rnaseq:
        # GBM and LGG z-scored expression tables: genes as rows, patients as columns.
        gbm = pd.read_csv(os.path.join(dataroot, 'mRNA_Expression_z-Scores_RNA_Seq_RSEM.txt'), sep='\t', skiprows=1, index_col=0)
        lgg = pd.read_csv(os.path.join(dataroot, 'mRNA_Expression_Zscores_RSEM.txt'), sep='\t', skiprows=1, index_col=0)
        gbm = gbm[gbm.columns[~gbm.isnull().all()]]
        lgg = lgg[lgg.columns[~lgg.isnull().all()]]
        glioma_RNAseq = gbm.join(lgg, how='inner').T
        glioma_RNAseq = glioma_RNAseq.dropna(axis=1)
        glioma_RNAseq.columns = [gene+'_rnaseq' for gene in glioma_RNAseq.columns]
        # Truncate barcodes to the 12-char patient ID and drop duplicate patients.
        glioma_RNAseq.index = [patname[:12] for patname in glioma_RNAseq.index]
        glioma_RNAseq = glioma_RNAseq.iloc[~glioma_RNAseq.index.duplicated()]
        glioma_RNAseq.index.name = 'TCGA ID'
        all_dataset = all_dataset.join(glioma_RNAseq, how='inner')
    # Missingness bookkeeping: the asserts below require molecular subtype,
    # IDH and 1p19q to be missing for exactly the same patients.
    pat_missing_moltype = all_dataset[all_dataset['Molecular subtype'].isna()].index
    pat_missing_idh = all_dataset[all_dataset['idh mutation'].isna()].index
    pat_missing_1p19q = all_dataset[all_dataset['codeletion'].isna()].index
    print("# Missing Molecular Subtype:", len(pat_missing_moltype))
    print("# Missing IDH Mutation:", len(pat_missing_idh))
    print("# Missing 1p19q Codeletion:", len(pat_missing_1p19q))
    assert pat_missing_moltype.equals(pat_missing_idh)
    assert pat_missing_moltype.equals(pat_missing_1p19q)
    pat_missing_grade = all_dataset[all_dataset['Grade'].isna()].index
    pat_missing_histype = all_dataset[all_dataset['Histology'].isna()].index
    print("# Missing Histological Subtype:", len(pat_missing_histype))
    print("# Missing Grade:", len(pat_missing_grade))
    assert pat_missing_histype.equals(pat_missing_grade)
    ### 2. Impute Missing Genomic Data: Removes patients with missing molecular subtype / idh mutation / 1p19q. Else imputes with median value of each column. Fills missing Molecular subtype with "Missing"
    if ignore_missing_moltype:
        all_dataset = all_dataset[all_dataset['Molecular subtype'].isna() == False]
    for col in all_dataset.drop(metadata, axis=1).columns:
        all_dataset['Molecular subtype'] = all_dataset['Molecular subtype'].fillna('Missing')
        all_dataset[col] = all_dataset[col].fillna(all_dataset[col].median())
    ### 3. Impute Missing Histological Data: Removes patients with missing histological subtype / grade. Else imputes with "missing" / grade -1
    if ignore_missing_histype:
        all_dataset = all_dataset[all_dataset['Histology'].isna() == False]
    else:
        all_dataset['Grade'] = all_dataset['Grade'].fillna(1)
        all_dataset['Histology'] = all_dataset['Histology'].fillna('Missing')
    # Shift grade labels down by 2 (the imputed placeholder 1 becomes -1).
    all_dataset['Grade'] = all_dataset['Grade'] - 2
    ### 4. Adds Histomolecular subtype
    ms2int = {'Missing':-1, 'IDHwt':0, 'IDHmut-non-codel':1, 'IDHmut-codel':2}
    all_dataset[['Molecular subtype']] = all_dataset[['Molecular subtype']].applymap(lambda s: ms2int.get(s) if s in ms2int else s)
    hs2int = {'Missing':-1, 'astrocytoma':0, 'oligoastrocytoma':1, 'oligodendroglioma':2, 'glioblastoma':3}
    all_dataset[['Histology']] = all_dataset[['Histology']].applymap(lambda s: hs2int.get(s) if s in hs2int else s)
    all_dataset = addHistomolecularSubtype(all_dataset)
    metadata.extend(['Histomolecular subtype'])
    # Invert the stored 'censored' flag (1 - x); downstream code appears to
    # treat the result as the event indicator — TODO confirm against loaders.
    all_dataset['censored'] = 1 - all_dataset['censored']
    return metadata, all_dataset
################
# Analysis Utils
################
def count_parameters(model):
    """Number of trainable (requires_grad) parameters in ``model``."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def hazard2grade(hazard, p):
    """Map a hazard score to risk group 0/1/2 using the two cutoffs p[0], p[1]."""
    for grade, cutoff in enumerate(p[:2]):
        if hazard < cutoff:
            return grade
    return 2
def p(n):
    """Build a percentile-``n`` aggregator named ``p<n>`` (for pandas .agg)."""
    def _agg(values):
        return np.percentile(values, n)
    _agg.__name__ = 'p%s' % n
    return _agg
def natural_sort(l):
    """Sort strings so embedded integers compare numerically (file2 < file10)."""
    def token(text):
        return int(text) if text.isdigit() else text.lower()
    def sort_key(item):
        return [token(chunk) for chunk in re.split('([0-9]+)', item)]
    return sorted(l, key=sort_key)
def CI_pm(data, confidence=0.95):
    """Format "mean ± half-width" of the Student-t confidence interval of ``data``."""
    arr = 1.0 * np.array(data)
    dof = len(arr) - 1
    mean = np.mean(arr)
    half = scipy.stats.sem(arr) * scipy.stats.t.ppf((1 + confidence) / 2., dof)
    return str("{0:.4f} ± ".format(mean) + "{0:.3f}".format(half))
def CI_interval(data, confidence=0.95):
    """Format the t-based confidence interval of *data*'s mean as 'lo, hi'."""
    arr = 1.0 * np.array(data)
    count = len(arr)
    mean = np.mean(arr)
    sem = scipy.stats.sem(arr)
    half = sem * scipy.stats.t.ppf((1 + confidence) / 2., count - 1)
    return str("{0:.3f}, ".format(mean - half) + "{0:.3f}".format(mean + half))
def poolSurvTestPD(ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='pathgraphomic_fusion', split='test', zscore=False, agg_type='Hazard_mean'):
    """Pool per-patient hazard predictions for `model` across the 15 CV folds.

    For each fold k, loads the prediction pickle and the matching CV-split
    pickle, verifies that predictions and clinical metadata are row-aligned,
    aggregates (patch-level) hazards per patient with the statistic named by
    `agg_type` ('Hazard_mean'/'Hazard_median'/'Hazard_max'/'Hazard_p25'/'Hazard_p75'),
    and joins them back onto the metadata.  With zscore=True hazards are
    z-scored within each fold.  Returns the fold-concatenated DataFrame with
    histomolecular subtypes relabeled by changeHistomolecularSubtype().
    """
    all_dataset_regstrd_pooled = []
    # These flags reconstruct, from the model/checkpoint names, how the CV-split
    # pickle was built at train time, so the same file can be located here.
    ignore_missing_moltype = 1 if 'omic' in model else 0
    ignore_missing_histype = 1 if 'grad' in ckpt_name else 0
    use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if ((('path' in model) or ('graph' in model)) and ('cox' not in model)) else ('_', 'all_st', 0)
    use_rnaseq = '_rnaseq' if ('rnaseq' in ckpt_name and 'path' != model and 'pathpath' not in model and 'graph' != model and 'graphgraph' not in model) else ''
    for k in range(1,16):
        pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
        if 'cox' not in model:
            # pred layout (per the asserts below): [0]=hazard, [1]=survival months,
            # [2]=censor status, [4]=grade; index 3 is dropped here (presumably the
            # grade logits -- confirm against the test script that wrote the pickle).
            surv_all = pd.DataFrame(np.stack(np.delete(np.array(pred), 3))).T
            surv_all.columns = ['Hazard', 'Survival months', 'censored', 'Grade']
        data_cv = pickle.load(open('./data/TCGA_GBMLGG/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (roi_dir, ignore_missing_moltype, ignore_missing_histype, use_vgg_features, use_rnaseq), 'rb'))
        data_cv_splits = data_cv['cv_splits']
        data_cv_split_k = data_cv_splits[k]
        assert np.all(data_cv_split_k[split]['t'] == pred[1]) # Data is correctly registered
        all_dataset = data_cv['data_pd'].drop('TCGA ID', axis=1)
        all_dataset_regstrd = all_dataset.loc[data_cv_split_k[split]['x_patname']] # Subset of "all_datasets" (metadata) that is registered with "pred" (predictions)
        assert np.all(np.array(all_dataset_regstrd['Survival months']) == pred[1])
        assert np.all(np.array(all_dataset_regstrd['censored']) == pred[2])
        assert np.all(np.array(all_dataset_regstrd['Grade']) == pred[4])
        all_dataset_regstrd.insert(loc=0, column='Hazard', value = np.array(surv_all['Hazard']))
        all_dataset_regstrd.index.name = 'TCGA ID'
        # Collapse multiple rows (image patches / ROIs) per patient into a single
        # hazard via the aggregate selected by `agg_type`.
        hazard_agg = all_dataset_regstrd.groupby('TCGA ID').agg({'Hazard': ['mean', 'median', max, p(0.25), p(0.75)]})
        hazard_agg.columns = ["_".join(x) for x in hazard_agg.columns.ravel()]
        hazard_agg = hazard_agg[[agg_type]]
        hazard_agg.columns = ['Hazard']
        pred = hazard_agg.join(all_dataset, how='inner')
        if zscore: pred['Hazard'] = scipy.stats.zscore(np.array(pred['Hazard']))
        all_dataset_regstrd_pooled.append(pred)
    all_dataset_regstrd_pooled = pd.concat(all_dataset_regstrd_pooled)
    all_dataset_regstrd_pooled = changeHistomolecularSubtype(all_dataset_regstrd_pooled)
    return all_dataset_regstrd_pooled
def getAggHazardCV(ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='pathgraphomic_fusion', split='test', agg_type='Hazard_mean'):
    """Per-fold concordance indices for `model`, with patient-level hazard aggregation.

    Mirrors poolSurvTestPD()'s registration logic, but instead of pooling folds
    it scores each fold's aggregated hazards with CIndex_lifeline and returns
    the 15 C-index values as a list.
    """
    result = []
    # Reconstruct the CV-split pickle naming flags from the model/checkpoint names.
    ignore_missing_moltype = 1 if 'omic' in model else 0
    ignore_missing_histype = 1 if 'grad' in ckpt_name else 0
    use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if (('path' in model) or ('graph' in model)) else ('_', 'all_st', 0)
    use_rnaseq = '_rnaseq' if ('rnaseq' in ckpt_name and 'path' != model and 'pathpath' not in model and 'graph' != model and 'graphgraph' not in model) else ''
    for k in range(1,16):
        pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
        # pred layout (per the asserts below): [0]=hazard, [1]=survival months,
        # [2]=censor status, [4]=grade; index 3 is dropped.
        surv_all = pd.DataFrame(np.stack(np.delete(np.array(pred), 3))).T
        surv_all.columns = ['Hazard', 'Survival months', 'censored', 'Grade']
        data_cv = pickle.load(open('./data/TCGA_GBMLGG/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (roi_dir, ignore_missing_moltype, ignore_missing_histype, use_vgg_features, use_rnaseq), 'rb'))
        data_cv_splits = data_cv['cv_splits']
        data_cv_split_k = data_cv_splits[k]
        assert np.all(data_cv_split_k[split]['t'] == pred[1]) # Data is correctly registered
        all_dataset = data_cv['data_pd'].drop('TCGA ID', axis=1)
        all_dataset_regstrd = all_dataset.loc[data_cv_split_k[split]['x_patname']] # Subset of "all_datasets" (metadata) that is registered with "pred" (predictions)
        assert np.all(np.array(all_dataset_regstrd['Survival months']) == pred[1])
        assert np.all(np.array(all_dataset_regstrd['censored']) == pred[2])
        assert np.all(np.array(all_dataset_regstrd['Grade']) == pred[4])
        all_dataset_regstrd.insert(loc=0, column='Hazard', value = np.array(surv_all['Hazard']))
        all_dataset_regstrd.index.name = 'TCGA ID'
        # Only mean/max/p75 are computed here, so `agg_type` must be one of
        # 'Hazard_mean' / 'Hazard_max' / 'Hazard_p75' (narrower than poolSurvTestPD).
        hazard_agg = all_dataset_regstrd.groupby('TCGA ID').agg({'Hazard': ['mean', max, p(0.75)]})
        hazard_agg.columns = ["_".join(x) for x in hazard_agg.columns.ravel()]
        hazard_agg = hazard_agg[[agg_type]]
        hazard_agg.columns = ['Hazard']
        all_dataset_hazard = hazard_agg.join(all_dataset, how='inner')
        cin = CIndex_lifeline(all_dataset_hazard['Hazard'], all_dataset_hazard['censored'], all_dataset_hazard['Survival months'])
        result.append(cin)
    return result
def calcGradMetrics(ckpt_name='./checkpoints/grad_15/', model='pathgraphomic_fusion', split='test', avg='micro'):
    """Grade-classification metrics (AUC, AP, F1, grade-IV F1) across the 15 CV folds.

    Returns a numpy array of four "mean ± half-width" strings (see CI_pm).
    """
    auc_all = []
    ap_all = []
    f1_all = []
    f1_gradeIV_all = []
    ignore_missing_moltype = 1 if 'omic' in model else 0
    ignore_missing_histype = 1 if 'grad' in ckpt_name else 0
    use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if (('path' in model) or ('graph' in model)) else ('_', 'all_st', 0)
    for k in range(1,16):
        pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
        # pred[3]: per-class grade scores, pred[4]: ground-truth grade labels.
        grade_pred, grade = np.array(pred[3]), np.array(pred[4])
        # One-hot encode ground truth for the multi-class AUC/AP scores.
        enc = LabelBinarizer()
        enc.fit(grade)
        grade_oh = enc.transform(grade)
        # NOTE(review): `avg` is passed positionally; in recent scikit-learn
        # releases roc_auc_score's `average` is keyword-only -- confirm the
        # pinned sklearn version, or pass average=avg.
        rocauc = roc_auc_score(grade_oh, grade_pred, avg)
        ap = average_precision_score(grade_oh, grade_pred, average=avg)
        # NOTE(review): arguments are (pred, true), reversed from sklearn's
        # (y_true, y_pred) convention; F1 is invariant to the swap (precision
        # and recall trade places), but the order should be confirmed/normalized.
        f1 = f1_score(grade_pred.argmax(axis=1), grade, average=avg)
        # Index 2 corresponds to the third grade class (grade IV under the 0/1/2 coding).
        f1_gradeIV = f1_score(grade_pred.argmax(axis=1), grade, average=None)[2]
        auc_all.append(rocauc)
        ap_all.append(ap)
        f1_all.append(f1)
        f1_gradeIV_all.append(f1_gradeIV)
    return np.array([CI_pm(auc_all), CI_pm(ap_all), CI_pm(f1_all), CI_pm(f1_gradeIV_all)])
################
# Plot Utils
################
def makeKaplanMeierPlot(ckpt_name='./checkpoints/surv_15_rnaseq/', model='omic', split='test', zscore=False, agg_type='Hazard_mean'):
    """Kaplan-Meier curves comparing predicted risk strata against true grade.

    Pools hazards via poolSurvTestPD(), stratifies patients at the 33rd/66th
    hazard percentiles (low/mid/high), and overlays the KM curves of those
    strata (solid) on the grade II/III/IV curves (dashed).  Saves one figure
    per histomolecular subtype plus one for the full cohort, as
    <ckpt_name>/<model>_KM_<subtype>.png.
    """
    def hazard2KMCurve(data, subtype):
        # Tertile cutoffs of the pooled hazards; this local `p` shadows the
        # module-level percentile-factory of the same name.
        p = np.percentile(data['Hazard'], [33, 66])
        # Degenerate-split guard: 2.99997 pushes the first cutoff to the top of
        # the hazard range -- magic value, presumably tuned for z-scored hazards
        # (TODO confirm).
        if p[0] == p[1]: p[0] = 2.99997
        data.insert(0, 'grade_pred', [hazard2grade(hazard, p) for hazard in data['Hazard']])
        kmf_pred = lifelines.KaplanMeierFitter()
        kmf_gt = lifelines.KaplanMeierFitter()
        def get_name(model):
            # Map a model identifier to its display name (first matching key wins).
            mode2name = {'pathgraphomic':'Pathomic F.', 'pathomic':'Pathomic F.', 'graphomic':'Pathomic F.', 'path':'Histology CNN', 'graph':'Histology GCN', 'omic':'Genomic SNN'}
            for mode in mode2name.keys():
                if mode in model: return mode2name[mode]
            return 'N/A'
        fig = plt.figure(figsize=(10, 10), dpi=600)
        ax = plt.subplot()
        censor_style = {'ms': 20, 'marker': '+'}
        # Grade II (ground truth, dashed) vs. low-risk stratum (solid), green.
        temp = data[data['Grade']==0]
        kmf_gt.fit(temp['Survival months']/365, temp['censored'], label="Grade II")
        kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='g', linewidth=3, ls='--', markerfacecolor='black', censor_styles=censor_style)
        temp = data[data['grade_pred']==0]
        kmf_pred.fit(temp['Survival months']/365, temp['censored'], label="%s (Low)" % get_name(model))
        kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c='g', linewidth=4, ls='-', markerfacecolor='black', censor_styles=censor_style)
        # Grade III vs. mid-risk stratum, blue.
        temp = data[data['Grade']==1]
        kmf_gt.fit(temp['Survival months']/365, temp['censored'], label="Grade III")
        kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='b', linewidth=3, ls='--', censor_styles=censor_style)
        temp = data[data['grade_pred']==1]
        kmf_pred.fit(temp['Survival months']/365, temp['censored'], label="%s (Mid)" % get_name(model))
        kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c='b', linewidth=4, ls='-', censor_styles=censor_style)
        # Oligodendrogliomas have no grade IV, so the red pair is skipped for 'ODG'.
        if subtype != 'ODG':
            temp = data[data['Grade']==2]
            kmf_gt.fit(temp['Survival months']/365, temp['censored'], label="Grade IV")
            kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='r', linewidth=3, ls='--', censor_styles=censor_style)
            temp = data[data['grade_pred']==2]
            kmf_pred.fit(temp['Survival months']/365, temp['censored'], label="%s (High)" % get_name(model))
            kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c='r', linewidth=4, ls='-', censor_styles=censor_style)
        ax.set_xlabel('')
        ax.set_ylim(0, 1)
        ax.set_yticks(np.arange(0, 1.001, 0.5))
        ax.tick_params(axis='both', which='major', labelsize=40)
        plt.legend(fontsize=32, prop=font_manager.FontProperties(family='Arial', style='normal', size=32))
        # Only the idhwt_ATC panel keeps its legend; the others share it implicitly.
        if subtype != 'idhwt_ATC': ax.get_legend().remove()
        return fig
    data = poolSurvTestPD(ckpt_name, model, split, zscore, agg_type)
    for subtype in ['idhwt_ATC', 'idhmut_ATC', 'ODG']:
        fig = hazard2KMCurve(data[data['Histomolecular subtype'] == subtype], subtype)
        fig.savefig(ckpt_name+'/%s_KM_%s.png' % (model, subtype))
    fig = hazard2KMCurve(data, 'all')
    fig.savefig(ckpt_name+'/%s_KM_%s.png' % (model, 'all'))
def makeHazardSwarmPlot(ckpt_name='./checkpoints/surv_15_rnaseq/', model='path', split='test', zscore=True, agg_type='Hazard_mean'):
    """Swarm plot of pooled (by default z-scored) hazards per histomolecular subtype,
    colored by grade.  Saves the figure as <ckpt_name>/<model>_HSP.png.
    """
    mpl.rcParams['font.family'] = "arial"
    data = poolSurvTestPD(ckpt_name=ckpt_name, model=model, split=split, zscore=zscore, agg_type=agg_type)
    # Drop rows whose grade / subtype were imputed as missing (-1).
    data = data[data['Grade'] != -1]
    data = data[data['Histomolecular subtype'] != -1]
    # Re-label integer codes with display names for the legend / x-axis.
    data['Grade'] = data['Grade'].astype(int).astype(str)
    data['Grade'] = data['Grade'].str.replace('0', 'Grade II', regex=False)
    data['Grade'] = data['Grade'].str.replace('1', 'Grade III', regex=False)
    data['Grade'] = data['Grade'].str.replace('2', 'Grade IV', regex=False)
    data['Histomolecular subtype'] = data['Histomolecular subtype'].str.replace('idhwt_ATC', 'IDH-wt \n astryocytoma', regex=False)
    data['Histomolecular subtype'] = data['Histomolecular subtype'].str.replace('idhmut_ATC', 'IDH-mut \n astrocytoma', regex=False)
    data['Histomolecular subtype'] = data['Histomolecular subtype'].str.replace('ODG', 'Oligodendroglioma', regex=False)
    fig, ax = plt.subplots(dpi=600)
    ax.set_ylim([-2, 2.5]) # plt.ylim(-2, 2)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.set_yticks(np.arange(-2, 2.001, 1))
    sns.swarmplot(x = 'Histomolecular subtype', y='Hazard', data=data, hue='Grade',
                  palette={"Grade II":"#AFD275" , "Grade III":"#7395AE", "Grade IV":"#E7717D"},
                  size = 4, alpha = 0.9, ax=ax)
    ax.set_xlabel('') # ax.set_xlabel('Histomolecular subtype', size=16)
    ax.set_ylabel('') # ax.set_ylabel('Hazard (Z-Score)', size=16)
    ax.tick_params(axis='y', which='both', labelsize=20)
    ax.tick_params(axis='x', which='both', labelsize=15)
    # NOTE(review): modern matplotlib expects labelbottom=False, not 'off' --
    # which is why this line "doesn't work".
    ax.tick_params(axis='x', which='both', labelbottom='off') # doesn't work??
    ax.legend(prop={'size': 8})
    fig.savefig(ckpt_name+'/%s_HSP.png' % (model))
def makeHazardBoxPlot(ckpt_name='./checkpoints/surv_15_rnaseq/', model='omic', split='test', zscore=True, agg_type='Hazard_mean'):
    """Box + strip plot of pooled hazards by grade, one panel per histomolecular subtype.

    Args:
        ckpt_name: checkpoint directory holding the per-fold prediction pickles.
        model: model identifier used to locate predictions.
        split: which CV split to pool ('test' by default).
        zscore: z-score hazards within each fold before pooling.
        agg_type: per-patient hazard aggregate passed through to poolSurvTestPD().

    Saves the figure as <ckpt_name>/<model>_HBP.png.
    """
    mpl.rcParams['font.family'] = "arial"
    # BUG FIX: `agg_type` was previously ignored -- the call hard-coded
    # 'Hazard_mean', so the parameter had no effect.  Default behavior is
    # unchanged (the default is still 'Hazard_mean').
    data = poolSurvTestPD(ckpt_name, model, split, zscore, agg_type)
    # Re-label integer grade codes with roman-numeral display names.
    data['Grade'] = data['Grade'].astype(int).astype(str)
    data['Grade'] = data['Grade'].str.replace('0', 'II', regex=False)
    data['Grade'] = data['Grade'].str.replace('1', 'III', regex=False)
    data['Grade'] = data['Grade'].str.replace('2', 'IV', regex=False)
    # Three side-by-side panels; the ODG panel is narrower (only grades II/III).
    fig, axes = plt.subplots(nrows=1, ncols=3, gridspec_kw={'width_ratios': [3, 3, 2]}, dpi=600)
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.ylim(-2, 2)
    plt.yticks(np.arange(-2, 2.001, 1))
    #color_dict = {0: '#CF9498', 1: '#8CC7C8', 2: '#AAA0C6'}
    #color_dict = {0: '#F76C6C', 1: '#A8D0E6', 2: '#F8E9A1'}
    color_dict = ['#F76C6C', '#A8D0E6', '#F8E9A1']
    subtypes = ['idhwt_ATC', 'idhmut_ATC', 'ODG']
    for i in range(len(subtypes)):
        axes[i].spines["top"].set_visible(False)
        axes[i].spines["right"].set_visible(False)
        axes[i].xaxis.grid(False)
        axes[i].yaxis.grid(False)
        if i > 0:
            # Panels share the y-axis of the first one.
            axes[i].get_yaxis().set_visible(False)
            axes[i].spines["left"].set_visible(False)
        # Oligodendrogliomas have no grade IV.
        order = ["II","III","IV"] if subtypes[i] != 'ODG' else ["II", "III"]
        axes[i].xaxis.label.set_visible(False)
        axes[i].yaxis.label.set_visible(False)
        axes[i].tick_params(axis='y', which='both', labelsize=20)
        axes[i].tick_params(axis='x', which='both', labelsize=15)
        datapoints = data[data['Histomolecular subtype'] == subtypes[i]]
        sns.boxplot(y='Hazard', x="Grade", data=datapoints, ax = axes[i], color=color_dict[i], order=order)
        sns.stripplot(y='Hazard', x='Grade', data=datapoints, alpha=0.2, jitter=0.2, color='k', ax = axes[i], order=order)
        axes[i].set_ylim(-2.5, 2.5)
        axes[i].set_yticks(np.arange(-2.0, 2.1, 1))
    #axes[2].legend(prop={'size': 10})
    fig.savefig(ckpt_name+'/%s_HBP.png' % (model))
def makeAUROCPlot(ckpt_name='./checkpoints/grad_15/', model_list=['path', 'omic', 'pathgraphomic_fusion'], split='test', avg='micro', use_zoom=False):
    """Plot mean ROC curves (with a ±1 std band) per grade class for each model.

    For every class in {0, 1, 2, `avg`} a figure is created; each model's curve
    is the mean TPR over the 15 CV folds, interpolated onto a shared FPR grid.
    With use_zoom=True the axes are cropped to the per-class windows in
    `zoom_params`.  Figures are saved as <ckpt_name>/AUC_<class><zoom>.png.

    BUG FIX: the final savefig line previously carried trailing garbage
    (" | 41,241 | ..."), extraction residue that made the function a syntax
    error; it has been removed.  Note `model_list` uses a mutable default,
    which is safe here because it is only read.
    """
    mpl.rcParams['font.family'] = "arial"
    colors = {'path':'dodgerblue', 'graph':'orange', 'omic':'green', 'pathgraphomic_fusion':'crimson'}
    names = {'path':'Histology CNN', 'graph':'Histology GCN', 'omic':'Genomic SNN', 'pathgraphomic_fusion':'Pathomic F.'}
    # Per-class (x-window, y-window) crops used when use_zoom=True.
    zoom_params = {0:([0.2, 0.4], [0.8, 1.0]),
                   1:([0.25, 0.45], [0.75, 0.95]),
                   2:([0.0, 0.2], [0.8, 1.0]),
                   'micro':([0.15, 0.35], [0.8, 1.0])}
    mean_fpr = np.linspace(0, 1, 100)
    classes = [0, 1, 2, avg]
    ### 1. Looping over classes
    for i in classes:
        print("Class: " + str(i))
        fi = pylab.figure(figsize=(10,10), dpi=600, linewidth=0.2)
        axi = plt.subplot()
        ### 2. Looping over models
        for m, model in enumerate(model_list):
            ignore_missing_moltype = 1 if 'omic' in model else 0
            ignore_missing_histype = 1 if 'grad' in ckpt_name else 0
            use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if (('path' in model) or ('graph' in model)) else ('_', 'all_st', 0)
            ###. 3. Looping over all splits
            tprs, pres, aucrocs, rocaucs = [], [], [], []
            for k in range(1,16):
                pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
                grade_pred, grade = np.array(pred[3]), np.array(pred[4])
                enc = LabelBinarizer()
                enc.fit(grade)
                grade_oh = enc.transform(grade)
                if i != avg:
                    # One-vs-rest metrics for a single grade class.
                    pres.append(average_precision_score(grade_oh[:, i], grade_pred[:, i])) # from https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html
                    fpr, tpr, thresh = roc_curve(grade_oh[:,i], grade_pred[:,i], drop_intermediate=False)
                    aucrocs.append(auc(fpr, tpr)) # https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
                    rocaucs.append(roc_auc_score(grade_oh[:,i], grade_pred[:,i])) # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score
                    tprs.append(interp(mean_fpr, fpr, tpr))
                    tprs[-1][0] = 0.0
                else:
                    # A "micro-average": quantifying score on all classes jointly
                    pres.append(average_precision_score(grade_oh, grade_pred, average=avg))
                    fpr, tpr, thresh = roc_curve(grade_oh.ravel(), grade_pred.ravel())
                    aucrocs.append(auc(fpr, tpr))
                    rocaucs.append(roc_auc_score(grade_oh, grade_pred, avg))
                    tprs.append(interp(mean_fpr, fpr, tpr))
                    tprs[-1][0] = 0.0
            mean_tpr = np.mean(tprs, axis=0)
            mean_tpr[-1] = 1.0
            #mean_auc = auc(mean_fpr, mean_tpr)
            mean_auc = np.mean(aucrocs)
            std_auc = np.std(aucrocs)
            print('\t'+'%s - AUC: %0.3f ± %0.3f' % (model, mean_auc, std_auc))
            if use_zoom:
                # Thicker, more opaque line for the headline fusion model.
                alpha, lw = (0.8, 6) if model =='pathgraphomic_fusion' else (0.5, 6)
                plt.plot(mean_fpr, mean_tpr, color=colors[model],
                         label=r'%s (AUC = %0.3f $\pm$ %0.3f)' % (names[model], mean_auc, std_auc), lw=lw, alpha=alpha)
                std_tpr = np.std(tprs, axis=0)
                tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
                tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
                plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color=colors[model], alpha=0.1)
                plt.xlim([zoom_params[i][0][0]-0.005, zoom_params[i][0][1]+0.005])
                plt.ylim([zoom_params[i][1][0]-0.005, zoom_params[i][1][1]+0.005])
                axi.set_xticks(np.arange(zoom_params[i][0][0], zoom_params[i][0][1]+0.001, 0.05))
                axi.set_yticks(np.arange(zoom_params[i][1][0], zoom_params[i][1][1]+0.001, 0.05))
                axi.tick_params(axis='both', which='major', labelsize=26)
            else:
                alpha, lw = (0.8, 4) if model =='pathgraphomic_fusion' else (0.5, 3)
                plt.plot(mean_fpr, mean_tpr, color=colors[model],
                         label=r'%s (AUC = %0.3f $\pm$ %0.3f)' % (names[model], mean_auc, std_auc), lw=lw, alpha=alpha)
                std_tpr = np.std(tprs, axis=0)
                tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
                tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
                plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color=colors[model], alpha=0.1)
                plt.xlim([-0.05, 1.05])
                plt.ylim([-0.05, 1.05])
                axi.set_xticks(np.arange(0, 1.001, 0.2))
                axi.set_yticks(np.arange(0, 1.001, 0.2))
                axi.legend(loc="lower right", prop={'size': 20})
                axi.tick_params(axis='both', which='major', labelsize=30)
    #plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='navy', alpha=.8)
    # Save every open figure (one per class), in creation order.
    figures = [manager.canvas.figure
               for manager in mpl._pylab_helpers.Gcf.get_all_fig_managers()]
    zoom = '_zoom' if use_zoom else ''
    for i, fig in enumerate(figures):
        fig.savefig(ckpt_name+'/AUC_%s%s.png' % (classes[i], zoom))
# ============================================================================
# PathomicFusion-master/networks.py
# ============================================================================
# Base / Native
import csv
from collections import Counter
import copy
import json
import functools
import gc
import logging
import math
import os
import pdb
import pickle
import random
import sys
import tables
import time
from tqdm import tqdm
# Numerical / Array
import numpy as np
# Torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.autograd import Variable
from torch.nn import init, Parameter
from torch.utils.data import DataLoader
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from torchvision import datasets, transforms
import torch.optim.lr_scheduler as lr_scheduler
from torch_geometric.nn import GCNConv, SAGEConv, GraphConv, GatedGraphConv, GATConv
from torch_geometric.nn import GraphConv, TopKPooling, SAGPooling
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
from torch_geometric.transforms.normalize_features import NormalizeFeatures
# Env
from fusion import *
from options import parse_args
from utils import *
################
# Network Utils
################
def define_net(opt, k):
    """Instantiate the network selected by `opt.mode` and hand it to init_net().

    Args:
        opt: parsed options namespace; `opt.mode` picks the architecture and
            the remaining attributes configure it.
        k: CV-fold index used by the fusion networks to locate their pretrained
            unimodal checkpoints (may be None).

    Returns:
        The network wrapped by init_net() (weight init + device placement).

    Raises:
        NotImplementedError: if `opt.mode` names an unknown architecture.
    """
    net = None
    act = define_act_layer(act_type=opt.act_type)
    init_max = True if opt.init_type == "max" else False

    if opt.mode == "path":
        net = get_vgg(path_dim=opt.path_dim, act=act, label_dim=opt.label_dim)
    elif opt.mode == "graph":
        net = GraphNet(grph_dim=opt.grph_dim, dropout_rate=opt.dropout_rate, GNN=opt.GNN, use_edges=opt.use_edges, pooling_ratio=opt.pooling_ratio, act=act, label_dim=opt.label_dim, init_max=init_max)
    elif opt.mode == "omic":
        net = MaxNet(input_dim=opt.input_size_omic, omic_dim=opt.omic_dim, dropout_rate=opt.dropout_rate, act=act, label_dim=opt.label_dim, init_max=init_max)
    elif opt.mode == "graphomic":
        net = GraphomicNet(opt=opt, act=act, k=k)
    elif opt.mode == "pathomic":
        net = PathomicNet(opt=opt, act=act, k=k)
    elif opt.mode == "pathgraphomic":
        net = PathgraphomicNet(opt=opt, act=act, k=k)
    elif opt.mode == "pathpath":
        net = PathpathNet(opt=opt, act=act, k=k)
    elif opt.mode == "graphgraph":
        net = GraphgraphNet(opt=opt, act=act, k=k)
    elif opt.mode == "omicomic":
        net = OmicomicNet(opt=opt, act=act, k=k)
    else:
        # BUG FIX: this formatted `opt.model`, an attribute the options object
        # does not define (every branch above dispatches on `opt.mode`), so an
        # unknown mode raised AttributeError instead of this message.
        raise NotImplementedError('model [%s] is not implemented' % opt.mode)
    return init_net(net, opt.init_type, opt.init_gain, opt.gpu_ids)
def define_optimizer(opt, model):
    """Build the optimizer selected by `opt.optimizer_type` over `model`'s parameters.

    Supported values: 'adabound', 'adam', 'adagrad'.

    Raises:
        NotImplementedError: for any other `opt.optimizer_type`.
    """
    optimizer = None
    if opt.optimizer_type == 'adabound':
        # NOTE(review): `adabound` is a third-party package that is not imported
        # at the top of this file -- confirm it is available in the training env.
        optimizer = adabound.AdaBound(model.parameters(), lr=opt.lr, final_lr=opt.final_lr)
    elif opt.optimizer_type == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2), weight_decay=opt.weight_decay)
    elif opt.optimizer_type == 'adagrad':
        optimizer = torch.optim.Adagrad(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay, initial_accumulator_value=0.1)
    else:
        # BUG FIX: previously formatted `opt.optimizer`, an attribute the
        # branches above show does not exist (they read `opt.optimizer_type`),
        # so the intended message was masked by an AttributeError.
        raise NotImplementedError('initialization method [%s] is not implemented' % opt.optimizer_type)
    return optimizer
def define_reg(opt, model):
    """Return the weight-regularization term selected by `opt.reg_type`.

    'none' yields 0; the other choices delegate to the regularize_* helpers,
    which inspect `model`'s parameters.

    Raises:
        NotImplementedError: for an unrecognized `opt.reg_type`.
    """
    reg_type = opt.reg_type
    if reg_type == 'none':
        return 0
    if reg_type == 'path':
        return regularize_path_weights(model=model)
    if reg_type == 'mm':
        return regularize_MM_weights(model=model)
    if reg_type == 'all':
        return regularize_weights(model=model)
    if reg_type == 'omic':
        return regularize_MM_omic(model=model)
    raise NotImplementedError('reg method [%s] is not implemented' % opt.reg_type)
def define_scheduler(opt, optimizer):
    """Wrap `optimizer` in the learning-rate schedule named by `opt.lr_policy`.

    Supported policies: 'linear', 'exp', 'step', 'plateau', 'cosine'.

    Raises:
        NotImplementedError: for an unknown `opt.lr_policy`.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            # Constant LR for `niter` epochs, then linear decay to 0 over
            # `niter_decay` additional epochs.
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'exp':
        scheduler = lr_scheduler.ExponentialLR(optimizer, 0.1, last_epoch=-1)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        # BUG FIX: this used to `return NotImplementedError(...)` -- handing the
        # exception object back to the caller instead of raising it -- and
        # passed `opt.lr_policy` as a second constructor argument, so the '%s'
        # placeholder was never interpolated.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def define_act_layer(act_type='Tanh'):
    """Map an activation name to a module instance ('none' maps to None).

    Raises:
        NotImplementedError: for an unrecognized `act_type`.
    """
    simple = {'Tanh': nn.Tanh, 'ReLU': nn.ReLU, 'Sigmoid': nn.Sigmoid}
    if act_type in simple:
        return simple[act_type]()
    if act_type == 'LSM':
        return nn.LogSoftmax(dim=1)
    if act_type == "none":
        return None
    raise NotImplementedError('activation layer [%s] is not found' % act_type)
def define_bifusion(fusion_type, skip=1, use_bilinear=1, gate1=1, gate2=1, dim1=32, dim2=32, scale_dim1=1, scale_dim2=1, mmhid=64, dropout_rate=0.25):
    """Build the two-modality BilinearFusion module.

    'pofusion' is the only supported `fusion_type`; everything else raises
    NotImplementedError.  All other arguments are forwarded unchanged.
    """
    if fusion_type != 'pofusion':
        raise NotImplementedError('fusion type [%s] is not found' % fusion_type)
    return BilinearFusion(skip=skip, use_bilinear=use_bilinear, gate1=gate1, gate2=gate2, dim1=dim1, dim2=dim2, scale_dim1=scale_dim1, scale_dim2=scale_dim2, mmhid=mmhid, dropout_rate=dropout_rate)
def define_trifusion(fusion_type, skip=1, use_bilinear=1, gate1=1, gate2=1, gate3=3, dim1=32, dim2=32, dim3=32, scale_dim1=1, scale_dim2=1, scale_dim3=1, mmhid=96, dropout_rate=0.25):
    """Build a three-modality trilinear fusion module ('pofusion_A' or 'pofusion_B').

    NOTE(review): the default gate3=3 stands out next to the other 0/1 gate
    flags -- confirm with upstream callers whether 1 was intended.

    Raises:
        NotImplementedError: for an unrecognized `fusion_type`.
    """
    if fusion_type == 'pofusion_A':
        return TrilinearFusion_A(skip=skip, use_bilinear=use_bilinear, gate1=gate1, gate2=gate2, gate3=gate3, dim1=dim1, dim2=dim2, dim3=dim3, scale_dim1=scale_dim1, scale_dim2=scale_dim2, scale_dim3=scale_dim3, mmhid=mmhid, dropout_rate=dropout_rate)
    if fusion_type == 'pofusion_B':
        return TrilinearFusion_B(skip=skip, use_bilinear=use_bilinear, gate1=gate1, gate2=gate2, gate3=gate3, dim1=dim1, dim2=dim2, dim3=dim3, scale_dim1=scale_dim1, scale_dim2=scale_dim2, scale_dim3=scale_dim3, mmhid=mmhid, dropout_rate=dropout_rate)
    raise NotImplementedError('fusion type [%s] is not found' % fusion_type)
############
# Omic Model
############
class MaxNet(nn.Module):
    """Genomic SNN: stacked (Linear -> ELU -> AlphaDropout) encoder + linear head.

    forward(x_omic=...) returns (features, out); when `act` is a Sigmoid the
    output is additionally rescaled from (0, 1) to (-3, 3) by the fixed
    output_range / output_shift parameters.
    """
    def __init__(self, input_dim=80, omic_dim=32, dropout_rate=0.25, act=None, label_dim=1, init_max=True):
        super(MaxNet, self).__init__()
        hidden = [64, 48, 32, 32]
        self.act = act
        # Four encoder stages; AlphaDropout preserves the self-normalizing
        # property of the ELU activations (SNN-style).
        widths = [input_dim, hidden[0], hidden[1], hidden[2], omic_dim]
        stages = [
            nn.Sequential(
                nn.Linear(n_in, n_out),
                nn.ELU(),
                nn.AlphaDropout(p=dropout_rate, inplace=False))
            for n_in, n_out in zip(widths[:-1], widths[1:])
        ]
        self.encoder = nn.Sequential(*stages)
        self.classifier = nn.Sequential(nn.Linear(omic_dim, label_dim))
        if init_max: init_max_weights(self)
        # Fixed affine used to stretch sigmoid outputs from (0, 1) to (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

    def forward(self, **kwargs):
        feats = self.encoder(kwargs['x_omic'])
        preds = self.classifier(feats)
        if self.act is not None:
            preds = self.act(preds)
            if isinstance(self.act, nn.Sigmoid):
                preds = preds * self.output_range + self.output_shift
        return feats, preds
############
# Graph Model
############
class NormalizeFeaturesV2(object):
    r"""Column-normalizes node features to sum-up to one."""
    # NOTE(review): dead code -- this class is immediately shadowed by the
    # second `NormalizeFeaturesV2` definition below, so this version is never
    # used.  (The docstring is also inaccurate: the division is by the
    # per-column max, not a sum.)
    def __call__(self, data):
        # Scale every feature column by its column-wise maximum.
        data.x = data.x / data.x.max(0, keepdim=True)[0]#.type(torch.cuda.FloatTensor)
        return data

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
class NormalizeFeaturesV2(object):
    """Max-scale the first 12 node-feature columns and cast features to CUDA floats.

    Each of the first 12 columns of ``data.x`` is divided by its column-wise
    maximum; the remaining columns are left untouched.  The cast requires a
    CUDA device (`torch.cuda.FloatTensor`).
    """
    def __call__(self, data):
        cont = data.x[:, :12]
        data.x[:, :12] = cont / cont.max(0, keepdim=True)[0]
        data.x = data.x.type(torch.cuda.FloatTensor)
        return data

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
class NormalizeEdgesV2(object):
    """Cast edge attributes to CUDA floats and max-scale each attribute column.

    Requires a CUDA device (`torch.cuda.FloatTensor`).
    """
    def __call__(self, data):
        attrs = data.edge_attr.type(torch.cuda.FloatTensor)
        data.edge_attr = attrs / attrs.max(0, keepdim=True)[0]
        return data

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
class GraphNet(torch.nn.Module):
    """Histology GCN: three SAGEConv + SAGPooling stages with global readouts.

    After each stage the graph is down-sampled (keeping `pooling_ratio` of the
    nodes) and summarized by concatenated global max/mean pooling; the three
    summaries are summed and pushed through a two-layer MLP head.

    forward(x_grph=...) returns (features, out), matching the other unimodal nets.
    Requires torch_geometric and (via the Normalize*V2 transforms) a CUDA device.
    """
    # NOTE(review): `nonlinearity` and `use_edges` are stored/accepted but never
    # consulted in forward(); edge_attr is threaded through the pooling calls only.
    def __init__(self, features=1036, nhid=128, grph_dim=32, nonlinearity=torch.tanh,
                 dropout_rate=0.25, GNN='GCN', use_edges=0, pooling_ratio=0.20, act=None, label_dim=1, init_max=True):
        super(GraphNet, self).__init__()

        self.dropout_rate = dropout_rate
        self.use_edges = use_edges
        self.act = act

        self.conv1 = SAGEConv(features, nhid)
        # NOTE(review): `gnn=GNN` passes the string 'GCN'; this matches older
        # torch_geometric SAGPooling signatures -- confirm the pinned version.
        self.pool1 = SAGPooling(nhid, ratio=pooling_ratio, gnn=GNN)#, nonlinearity=nonlinearity)
        self.conv2 = SAGEConv(nhid, nhid)
        self.pool2 = SAGPooling(nhid, ratio=pooling_ratio, gnn=GNN)#, nonlinearity=nonlinearity)
        self.conv3 = SAGEConv(nhid, nhid)
        self.pool3 = SAGPooling(nhid, ratio=pooling_ratio, gnn=GNN)#, nonlinearity=nonlinearity)

        self.lin1 = torch.nn.Linear(nhid*2, nhid)
        self.lin2 = torch.nn.Linear(nhid, grph_dim)
        self.lin3 = torch.nn.Linear(grph_dim, label_dim)

        # Fixed affine used elsewhere to stretch sigmoid outputs from (0, 1) to (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

        if init_max:
            init_max_weights(self)
            print("Initialzing with Max")

    def forward(self, **kwargs):
        data = kwargs['x_grph']
        # In-place normalization + CUDA cast of node features and edge attributes.
        data = NormalizeFeaturesV2()(data)
        data = NormalizeEdgesV2()(data)
        x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch
        #x, edge_index, edge_attr, batch = data.x.type(torch.cuda.FloatTensor), data.edge_index.type(torch.cuda.LongTensor), data.edge_attr.type(torch.cuda.FloatTensor), data.batch

        # NOTE(review): the 5-tuple unpack matches older torch_geometric
        # SAGPooling (x, edge_index, edge_attr, batch, perm); newer versions
        # also return scores -- confirm the pinned version.
        x = F.relu(self.conv1(x, edge_index))
        x, edge_index, edge_attr, batch, _ = self.pool1(x, edge_index, edge_attr, batch)
        # Per-graph readout: concatenated global max-pool and mean-pool.
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index))
        x, edge_index, edge_attr, batch, _ = self.pool2(x, edge_index, edge_attr, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))
        x, edge_index, edge_attr, batch, _ = self.pool3(x, edge_index, edge_attr, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        # Sum the three stage readouts (jumping-knowledge-style aggregation).
        x = x1 + x2 + x3

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        features = F.relu(self.lin2(x))
        out = self.lin3(features)
        if self.act is not None:
            out = self.act(out)

            if isinstance(self.act, nn.Sigmoid):
                out = out * self.output_range + self.output_shift

        return features, out
############
# Path Model
############
# Download locations of the ImageNet-pretrained VGG weights that get_vgg()
# fetches via load_state_dict_from_url.
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class PathNet(nn.Module):
    """Histology CNN: frozen VGG-style trunk, 3-layer MLP embedding, hazard head.

    forward(x_path=...) returns (features, hazard); when `act` is a Sigmoid the
    hazard is rescaled from (0, 1) to (-3, 3) via output_range / output_shift.
    """
    def __init__(self, features, path_dim=32, act=None, num_classes=1):
        super(PathNet, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        # MLP head mapping the pooled 512x7x7 conv features to a path_dim embedding.
        mlp = [
            nn.Linear(512 * 7 * 7, 1024),
            nn.ReLU(True),
            nn.Dropout(0.25),
            nn.Linear(1024, 1024),
            nn.ReLU(True),
            nn.Dropout(0.25),
            nn.Linear(1024, path_dim),
            nn.ReLU(True),
            nn.Dropout(0.05),
        ]
        self.classifier = nn.Sequential(*mlp)
        self.linear = nn.Linear(path_dim, num_classes)
        self.act = act

        # Fixed affine used to stretch sigmoid outputs from (0, 1) to (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

        # Convolutional trunk stays frozen; only the MLP head trains.
        dfs_freeze(self.features)

    def forward(self, **kwargs):
        out = self.avgpool(self.features(kwargs['x_path']))
        flat = out.view(out.size(0), -1)
        features = self.classifier(flat)
        hazard = self.linear(features)

        if self.act is not None:
            hazard = self.act(hazard)

            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift

        return features, hazard
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an nn.Sequential feature extractor.

    Integers in *cfg* are 3x3-conv output-channel counts (with optional
    BatchNorm) followed by ReLU; the string 'M' inserts a 2x2 max-pool.
    Input is assumed to be 3-channel.
    """
    modules = []
    channels = 3
    for spec in cfg:
        if spec == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        modules.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(spec))
        modules.append(nn.ReLU(inplace=True))
        channels = spec
    return nn.Sequential(*modules)
# VGG layer specifications consumed by make_layers(): integers are conv
# output-channel counts, 'M' marks a 2x2 max-pool.  A/B/D/E correspond to the
# standard VGG-11/13/16/19 configurations (cf. the vgg11..vgg19 URLs above).
cfgs = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def get_vgg(arch='vgg19_bn', cfg='E', act=None, batch_norm=True, label_dim=1, pretrained=True, progress=True, **kwargs):
    """Build a PathNet on a VGG trunk, optionally seeded with ImageNet weights.

    The pretrained classifier weights are discarded (their shapes differ from
    PathNet's MLP head); only the convolutional weights are loaded,
    non-strictly, from the torchvision URL for `arch`.
    """
    model = PathNet(make_layers(cfgs[cfg], batch_norm=batch_norm), act=act, num_classes=label_dim, **kwargs)
    if not pretrained:
        return model
    state = load_state_dict_from_url(model_urls[arch], progress=progress)
    # Drop the ImageNet classifier head; keep only the conv-trunk weights.
    for key in [k for k in state if 'classifier' in k]:
        state.pop(key)
    model.load_state_dict(state, strict=False)
    print("Initializing Path Weights")
    return model
##############################################################################
# Graph + Omic
##############################################################################
class GraphomicNet(nn.Module):
    """Graph + Omic late fusion: frozen pretrained unimodal nets + trainable head.

    When `k` is not None, the fold-k pretrained GraphNet and MaxNet checkpoints
    are loaded from <checkpoints_dir>/<exp_name>/{graph,omic}/.  Both unimodal
    nets are frozen via dfs_freeze, so only the bifusion module and the final
    classifier receive gradients.
    """
    def __init__(self, opt, act, k):
        super(GraphomicNet, self).__init__()
        self.grph_net = GraphNet(grph_dim=opt.grph_dim, dropout_rate=opt.dropout_rate, use_edges=1, pooling_ratio=0.20, label_dim=opt.label_dim, init_max=False)
        self.omic_net = MaxNet(input_dim=opt.input_size_omic, omic_dim=opt.omic_dim, dropout_rate=opt.dropout_rate, act=act, label_dim=opt.label_dim, init_max=False)

        if k is not None:
            # Load the fold-specific pretrained unimodal checkpoints.
            pt_fname = '_%d.pt' % k
            best_grph_ckpt = torch.load(os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname), map_location=torch.device('cpu'))
            best_omic_ckpt = torch.load(os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic'+pt_fname), map_location=torch.device('cpu'))
            self.grph_net.load_state_dict(best_grph_ckpt['model_state_dict'])
            self.omic_net.load_state_dict(best_omic_ckpt['model_state_dict'])
            print("Loading Models:\n", os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname), "\n", os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic'+pt_fname))

        self.fusion = define_bifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear, gate1=opt.grph_gate, gate2=opt.omic_gate, dim1=opt.grph_dim, dim2=opt.omic_dim, scale_dim1=opt.grph_scale, scale_dim2=opt.omic_scale, mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act

        # Freeze both unimodal branches; only fusion + classifier train.
        dfs_freeze(self.grph_net)
        dfs_freeze(self.omic_net)

        # Fixed affine used to stretch sigmoid outputs from (0, 1) to (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

    def forward(self, **kwargs):
        # Unimodal embeddings (their own heads are discarded), then fusion.
        grph_vec, _ = self.grph_net(x_grph=kwargs['x_grph'])
        omic_vec, _ = self.omic_net(x_omic=kwargs['x_omic'])
        features = self.fusion(grph_vec, omic_vec)
        hazard = self.classifier(features)
        if self.act is not None:
            hazard = self.act(hazard)

            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return features, hazard

    # NOTE(review): `__hasattr__` is not a recognized Python special method --
    # the built-in hasattr() never invokes it, so this override appears to be
    # dead code (it mirrors nn.Module's lookup over _parameters/_buffers/_modules).
    def __hasattr__(self, name):
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in _parameters:
                return True
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                return True
        if '_modules' in self.__dict__:
            modules = self.__dict__['_modules']
            if name in modules:
                return True
        return False
##############################################################################
# Path + Omic
##############################################################################
class PathomicNet(nn.Module):
    """Bimodal fusion of precomputed histology (path) embeddings with the omic branch.

    The path modality arrives as an already-extracted feature vector via
    ``kwargs['x_path']``; only the omic network is instantiated here and,
    when ``k`` is given, restored from its fold checkpoint and frozen.
    """
    def __init__(self, opt, act, k):
        super(PathomicNet, self).__init__()
        self.omic_net = MaxNet(input_dim=opt.input_size_omic, omic_dim=opt.omic_dim, dropout_rate=opt.dropout_rate,
                               act=act, label_dim=opt.label_dim, init_max=False)
        if k is not None:
            # Restore the pretrained omic weights for this CV fold.
            omic_path = os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic' + '_%d.pt' % k)
            self.omic_net.load_state_dict(torch.load(omic_path, map_location=torch.device('cpu'))['model_state_dict'])
            print("Loading Models:\n", omic_path)
        self.fusion = define_bifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear,
                                      gate1=opt.path_gate, gate2=opt.omic_gate, dim1=opt.path_dim, dim2=opt.omic_dim,
                                      scale_dim1=opt.path_scale, scale_dim2=opt.omic_scale, mmhid=opt.mmhid,
                                      dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        dfs_freeze(self.omic_net)
        # Affine rescaling applied to Sigmoid outputs: (0, 1) -> (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

    def forward(self, **kwargs):
        """Fuse the supplied path embedding with the omic embedding."""
        path_vec = kwargs['x_path']  # already an extracted feature vector
        omic_vec, _ = self.omic_net(x_omic=kwargs['x_omic'])
        fused = self.fusion(path_vec, omic_vec)
        hazard = self.classifier(fused)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return fused, hazard

    def __hasattr__(self, name):
        # NOTE(review): Python has no ``__hasattr__`` protocol, so the
        # interpreter never calls this; kept only for any code that invokes
        # it explicitly.
        for store in ('_parameters', '_buffers', '_modules'):
            if store in self.__dict__ and name in self.__dict__[store]:
                return True
        return False
#############################################################################
# Path + Graph + Omic
##############################################################################
class PathgraphomicNet(nn.Module):
    """Trimodal fusion of path (precomputed), graph, and omic branches.

    With a fold index ``k``, the graph and omic networks are restored from
    their per-fold checkpoints and frozen; only the trifusion module and
    the final linear classifier remain trainable.
    """
    def __init__(self, opt, act, k):
        super(PathgraphomicNet, self).__init__()
        self.grph_net = GraphNet(grph_dim=opt.grph_dim, dropout_rate=opt.dropout_rate, use_edges=1, pooling_ratio=0.20, label_dim=opt.label_dim, init_max=False)
        self.omic_net = MaxNet(input_dim=opt.input_size_omic, omic_dim=opt.omic_dim, dropout_rate=opt.dropout_rate, act=act, label_dim=opt.label_dim, init_max=False)
        if k is not None:
            # Restore the pretrained unimodal weights for this CV fold.
            pt_fname = '_%d.pt' % k
            best_grph_ckpt = torch.load(os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname), map_location=torch.device('cpu'))
            best_omic_ckpt = torch.load(os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic'+pt_fname), map_location=torch.device('cpu'))
            self.grph_net.load_state_dict(best_grph_ckpt['model_state_dict'])
            self.omic_net.load_state_dict(best_omic_ckpt['model_state_dict'])
            print("Loading Models:\n", os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname), "\n", os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic'+pt_fname))
        self.fusion = define_trifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear, gate1=opt.path_gate, gate2=opt.grph_gate, gate3=opt.omic_gate, dim1=opt.path_dim, dim2=opt.grph_dim, dim3=opt.omic_dim, scale_dim1=opt.path_scale, scale_dim2=opt.grph_scale, scale_dim3=opt.omic_scale, mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        # Unimodal branches stay frozen while the fusion head trains.
        dfs_freeze(self.grph_net)
        dfs_freeze(self.omic_net)
        # Affine rescaling for Sigmoid outputs: (0, 1) -> (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)
    def forward(self, **kwargs):
        """Fuse path, graph and omic embeddings; return (features, hazard)."""
        path_vec = kwargs['x_path']  # precomputed histology embedding
        grph_vec, _ = self.grph_net(x_grph=kwargs['x_grph'])
        omic_vec, _ = self.omic_net(x_omic=kwargs['x_omic'])
        features = self.fusion(path_vec, grph_vec, omic_vec)
        hazard = self.classifier(features)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return features, hazard
    def __hasattr__(self, name):
        # NOTE(review): Python has no ``__hasattr__`` protocol, so this is
        # never called implicitly; kept for any code that invokes it directly.
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in _parameters:
                return True
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                return True
        if '_modules' in self.__dict__:
            modules = self.__dict__['_modules']
            if name in modules:
                return True
        return False
##############################################################################
# Ensembling Effects
##############################################################################
class PathgraphNet(nn.Module):
    """Bimodal fusion of precomputed path embeddings with the graph branch.

    With a fold index ``k``, the graph network is restored from its per-fold
    checkpoint and frozen; only the fusion module and classifier train.
    """
    def __init__(self, opt, act, k):
        super(PathgraphNet, self).__init__()
        self.grph_net = GraphNet(grph_dim=opt.grph_dim, dropout_rate=opt.dropout_rate, use_edges=1, pooling_ratio=0.20, label_dim=opt.label_dim, init_max=False)
        if k is not None:
            # Restore the pretrained graph weights for this CV fold.
            pt_fname = '_%d.pt' % k
            best_grph_ckpt = torch.load(os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname), map_location=torch.device('cpu'))
            self.grph_net.load_state_dict(best_grph_ckpt['model_state_dict'])
            print("Loading Models:\n", os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname))
        self.fusion = define_bifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear, gate1=opt.path_gate, gate2=opt.grph_gate, dim1=opt.path_dim, dim2=opt.grph_dim, scale_dim1=opt.path_scale, scale_dim2=opt.grph_scale, mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        dfs_freeze(self.grph_net)
        # Affine rescaling for Sigmoid outputs: (0, 1) -> (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)
    def forward(self, **kwargs):
        """Fuse the supplied path embedding with the graph embedding."""
        path_vec = kwargs['x_path']  # precomputed histology embedding
        grph_vec, _ = self.grph_net(x_grph=kwargs['x_grph'])
        features = self.fusion(path_vec, grph_vec)
        hazard = self.classifier(features)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return features, hazard
    def __hasattr__(self, name):
        # NOTE(review): Python has no ``__hasattr__`` protocol, so this is
        # never called implicitly; kept for any code that invokes it directly.
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in _parameters:
                return True
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                return True
        if '_modules' in self.__dict__:
            modules = self.__dict__['_modules']
            if name in modules:
                return True
        return False
class PathpathNet(nn.Module):
    """Ablation model that fuses the path embedding with itself.

    ``k`` is accepted for signature parity with the other fusion models but
    is unused (no unimodal checkpoint needs loading).
    """
    def __init__(self, opt, act, k):
        super(PathpathNet, self).__init__()
        # gate2 evaluates to (1 - path_gate) when path_gate is truthy, else 0;
        # for a binary gate the second gate is therefore always disabled.
        self.fusion = define_bifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear,
                                      gate1=opt.path_gate, gate2=1-opt.path_gate if opt.path_gate else 0,
                                      dim1=opt.path_dim, dim2=opt.path_dim, scale_dim1=opt.path_scale,
                                      scale_dim2=opt.path_scale, mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        # Affine rescaling applied to Sigmoid outputs: (0, 1) -> (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

    def forward(self, **kwargs):
        """Fuse the path embedding with itself; return (features, hazard)."""
        path_vec = kwargs['x_path']
        fused = self.fusion(path_vec, path_vec)
        hazard = self.classifier(fused)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return fused, hazard

    def __hasattr__(self, name):
        # NOTE(review): Python has no ``__hasattr__`` protocol, so the
        # interpreter never calls this; kept only for any code that invokes
        # it explicitly.
        for store in ('_parameters', '_buffers', '_modules'):
            if store in self.__dict__ and name in self.__dict__[store]:
                return True
        return False
class GraphgraphNet(nn.Module):
    """Ablation model fusing the graph embedding with itself.

    With a fold index ``k``, the graph network is restored from its per-fold
    checkpoint and frozen.
    """
    def __init__(self, opt, act, k):
        super(GraphgraphNet, self).__init__()
        self.grph_net = GraphNet(grph_dim=opt.grph_dim, dropout_rate=opt.dropout_rate, use_edges=1,
                                 pooling_ratio=0.20, label_dim=opt.label_dim, init_max=False)
        if k is not None:
            # Restore the pretrained graph weights for this CV fold.
            grph_path = os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph' + '_%d.pt' % k)
            self.grph_net.load_state_dict(torch.load(grph_path, map_location=torch.device('cpu'))['model_state_dict'])
            print("Loading Models:\n", grph_path)
        # gate2 evaluates to (1 - grph_gate) when grph_gate is truthy, else 0.
        self.fusion = define_bifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear,
                                      gate1=opt.grph_gate, gate2=1-opt.grph_gate if opt.grph_gate else 0,
                                      dim1=opt.grph_dim, dim2=opt.grph_dim, scale_dim1=opt.grph_scale,
                                      scale_dim2=opt.grph_scale, mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        dfs_freeze(self.grph_net)
        # Affine rescaling applied to Sigmoid outputs: (0, 1) -> (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

    def forward(self, **kwargs):
        """Fuse the graph embedding with itself; return (features, hazard)."""
        grph_vec, _ = self.grph_net(x_grph=kwargs['x_grph'])
        fused = self.fusion(grph_vec, grph_vec)
        hazard = self.classifier(fused)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return fused, hazard

    def __hasattr__(self, name):
        # NOTE(review): Python has no ``__hasattr__`` protocol, so the
        # interpreter never calls this; kept only for any code that invokes
        # it explicitly.
        for store in ('_parameters', '_buffers', '_modules'):
            if store in self.__dict__ and name in self.__dict__[store]:
                return True
        return False
class OmicomicNet(nn.Module):
    """Ablation model fusing the omic embedding with itself.

    With a fold index ``k``, the omic network is restored from its per-fold
    checkpoint and frozen.
    """
    def __init__(self, opt, act, k):
        super(OmicomicNet, self).__init__()
        self.omic_net = MaxNet(input_dim=opt.input_size_omic, omic_dim=opt.omic_dim, dropout_rate=opt.dropout_rate,
                               act=act, label_dim=opt.label_dim, init_max=False)
        if k is not None:
            # Restore the pretrained omic weights for this CV fold.
            omic_path = os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic' + '_%d.pt' % k)
            self.omic_net.load_state_dict(torch.load(omic_path, map_location=torch.device('cpu'))['model_state_dict'])
            print("Loading Models:\n", omic_path)
        # gate2 evaluates to (1 - omic_gate) when omic_gate is truthy, else 0.
        self.fusion = define_bifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear,
                                      gate1=opt.omic_gate, gate2=1-opt.omic_gate if opt.omic_gate else 0,
                                      dim1=opt.omic_dim, dim2=opt.omic_dim, scale_dim1=opt.omic_scale,
                                      scale_dim2=opt.omic_scale, mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        dfs_freeze(self.omic_net)
        # Affine rescaling applied to Sigmoid outputs: (0, 1) -> (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

    def forward(self, **kwargs):
        """Fuse the omic embedding with itself; return (features, hazard)."""
        omic_vec, _ = self.omic_net(x_omic=kwargs['x_omic'])
        fused = self.fusion(omic_vec, omic_vec)
        hazard = self.classifier(fused)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return fused, hazard

    def __hasattr__(self, name):
        # NOTE(review): Python has no ``__hasattr__`` protocol, so the
        # interpreter never calls this; kept only for any code that invokes
        # it explicitly.
        for store in ('_parameters', '_buffers', '_modules'):
            if store in self.__dict__ and name in self.__dict__[store]:
                return True
        return False
PathomicFusion | PathomicFusion-master/make_splits.py | ### data_loaders.py
import argparse
import os
import pickle
import numpy as np
import pandas as pd
from PIL import Image
from sklearn import preprocessing
# Env
from networks import define_net
from utils import getCleanAllDataset
import torch
from torchvision import transforms
from options import parse_gpuids
### Initializes parser and data
"""
all_st
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 0 --use_vgg_features 0 --roi_dir all_st # for training Surv Path, Surv Graph, and testing Surv Graph
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 1 --use_vgg_features 0 --roi_dir all_st # for training Grad Path, Grad Graph, and testing Surv_graph
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 0 --roi_dir all_st # for training Surv Omic, Surv Graphomic
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 0 --roi_dir all_st # for training Grad Omic, Grad Graphomic
all_st_patches_512 (no VGG)
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 0 --use_vgg_features 0 --roi_dir all_st_patches_512 # for testing Surv Path
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 1 --use_vgg_features 0 --roi_dir all_st_patches_512 # for testing Grad Path
all_st_patches_512 (use VGG)
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 0 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name surv_15 --gpu_ids 0 # for Surv Pathgraph
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 1 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name grad_15 --act_type LSM --label_dim 3 --gpu_ids 1 # for Grad Pathgraph
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name surv_15 --gpu_ids 2 # for Surv Pathomic, Pathgraphomic
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name grad_15 --act_type LSM --label_dim 3 --gpu_ids 3 # for Grad Pathomic, Pathgraphomic
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 1 --make_all_train 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 0 --roi_dir all_st --use_rnaseq 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 0 --roi_dir all_st --use_rnaseq 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name surv_15 --use_rnaseq 1 --gpu_ids 2
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name grad_15 --use_rnaseq 1 --act_type LSM --label_dim 3 --gpu_ids 3
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 0 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name surv_15_rnaseq --gpu_ids 0
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name surv_15_rnaseq --use_rnaseq 1 --gpu_ids 0
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 1 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name grad_15 --act_type LSM --label_dim 3 --gpu_ids 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name grad_15 --use_rnaseq 1 --act_type LSM --label_dim 3 --gpu_ids 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 0 --roi_dir all_st --use_rnaseq 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name surv_15_rnaseq --gpu_ids 2
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 0 --roi_dir all_st --use_rnaseq 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name grad_15 --act_type LSM --label_dim 3 --gpu_ids 3
"""
def parse_args():
    """Build the option namespace for split generation.

    Unknown command-line arguments are ignored (``parse_known_args``), and
    ``gpu_ids`` is normalized from a comma-separated string to a list of ints.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--dataroot', type=str, default='./data/TCGA_GBMLGG/', help="datasets")
    ap.add_argument('--roi_dir', type=str, default='all_st')
    ap.add_argument('--graph_feat_type', type=str, default='cpc', help="graph features to use")
    ap.add_argument('--ignore_missing_moltype', type=int, default=0, help="Ignore data points with missing molecular subtype")
    ap.add_argument('--ignore_missing_histype', type=int, default=0, help="Ignore data points with missign histology subtype")
    ap.add_argument('--make_all_train', type=int, default=0)
    ap.add_argument('--use_vgg_features', type=int, default=0)
    ap.add_argument('--use_rnaseq', type=int, default=0)
    ap.add_argument('--checkpoints_dir', type=str, default='./checkpoints/TCGA_GBMLGG/', help='models are saved here')
    ap.add_argument('--exp_name', type=str, default='surv_15_rnaseq', help='name of the project. It decides where to store samples and models')
    ap.add_argument('--gpu_ids', type=str, default='0,1,2,3', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
    ap.add_argument('--mode', type=str, default='path', help='mode')
    ap.add_argument('--model_name', type=str, default='path', help='mode')
    ap.add_argument('--task', type=str, default='surv', help='surv | grad')
    ap.add_argument('--act_type', type=str, default='Sigmoid', help='activation function')
    ap.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
    ap.add_argument('--label_dim', type=int, default=1, help='size of output')
    ap.add_argument('--batch_size', type=int, default=32, help="Number of batches to train/test for. Default: 256")
    ap.add_argument('--path_dim', type=int, default=32)
    ap.add_argument('--init_type', type=str, default='none', help='network initialization [normal | xavier | kaiming | orthogonal | max]. Max seems to work well')
    ap.add_argument('--dropout_rate', default=0.25, type=float, help='0 - 0.25. Increasing dropout_rate helps overfitting. Some people have gone as high as 0.5. You can try adding more regularization')
    opt, _ = ap.parse_known_args()
    return parse_gpuids(opt)
opt = parse_args()
device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')
# metadata: column names dropped when extracting omic feature vectors below;
# all_dataset: the cleaned clinical + omic table.
metadata, all_dataset = getCleanAllDataset(opt.dataroot, opt.ignore_missing_moltype, opt.ignore_missing_histype, opt.use_rnaseq)
### Creates a mapping from TCGA ID -> Image ROI
# The first 12 characters of each ROI filename are used as the patient key
# (presumably the TCGA barcode -- confirm against the ROI naming scheme).
img_fnames = os.listdir(os.path.join(opt.dataroot, opt.roi_dir))
pat2img = {}
for pat, img_fname in zip([img_fname[:12] for img_fname in img_fnames], img_fnames):
    if pat not in pat2img.keys(): pat2img[pat] = []
    pat2img[pat].append(img_fname)
### Dictionary file containing split information
data_dict = {}
data_dict['data_pd'] = all_dataset
#data_dict['pat2img'] = pat2img
#data_dict['img_fnames'] = img_fnames
cv_splits = {}
### Extracting K-Fold Splits
# pnas_splits.csv: one row per patient; columns '1'..'15' mark each fold's
# 'Train'/'Test' membership (see the loop below).
pnas_splits = pd.read_csv(opt.dataroot+'pnas_splits.csv')
pnas_splits.columns = ['TCGA ID']+[str(k) for k in range(1, 16)]
pnas_splits.index = pnas_splits['TCGA ID']
pnas_splits = pnas_splits.drop(['TCGA ID'], axis=1)
### get path_feats
def get_vgg_features(model, device, img_path):
    """Return the model's embedding for ``img_path``.

    When ``model`` is None the path itself is returned unchanged, so callers
    can defer feature extraction to load time.
    """
    if model is None:
        return img_path
    image = Image.open(img_path).convert('RGB')
    preprocess = transforms.Compose([transforms.ToTensor(),
                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    batch = preprocess(image).unsqueeze(0)  # add a leading batch dimension
    features, _ = model(x_path=batch.to(device))
    return features.cpu().detach().numpy()
### method for constructing aligned
def getAlignedMultimodalData(opt, model, device, all_dataset, pat_split, pat2img):
    """Collect aligned (path, graph, omic, label) samples for ``pat_split``.

    For every ROI image of every patient present in ``all_dataset``, gathers
    the VGG feature (or just the image path when ``model`` is None), the
    matching graph ``.pt`` file path, the omic feature vector (all non-metadata
    columns), and the censoring / survival-time / grade labels.

    Returns:
        Tuple of parallel lists (patname, path, graph, omic, e, t, g), one
        entry per ROI image.
    """
    x_patname, x_path, x_grph, x_omic, e, t, g = [], [], [], [], [], [], []
    for pat_name in pat_split:
        if pat_name not in all_dataset.index: continue
        for img_fname in pat2img[pat_name]:
            # BUG FIX: the original used img_fname.rstrip('.png'), which strips
            # *characters* from the set {'.', 'p', 'n', 'g'} and therefore also
            # eats trailing 'p'/'n'/'g' letters of the stem itself. Replace the
            # extension explicitly instead. (If the graph files on disk were
            # generated with the same rstrip bug, regenerate them to match.)
            grph_fname = os.path.splitext(img_fname)[0] + '.pt'
            assert grph_fname in os.listdir(os.path.join(opt.dataroot, '%s_%s' % (opt.roi_dir, opt.graph_feat_type)))
            # Each patient must map to exactly one row of the clinical table.
            assert all_dataset[all_dataset['TCGA ID'] == pat_name].shape[0] == 1
            x_patname.append(pat_name)
            x_path.append(get_vgg_features(model, device, os.path.join(opt.dataroot, opt.roi_dir, img_fname)))
            x_grph.append(os.path.join(opt.dataroot, '%s_%s' % (opt.roi_dir, opt.graph_feat_type), grph_fname))
            # Drop the metadata columns (module-level ``metadata``) to keep only omic features.
            x_omic.append(np.array(all_dataset[all_dataset['TCGA ID'] == pat_name].drop(metadata, axis=1)))
            e.append(int(all_dataset[all_dataset['TCGA ID']==pat_name]['censored']))
            t.append(int(all_dataset[all_dataset['TCGA ID']==pat_name]['Survival months']))
            g.append(int(all_dataset[all_dataset['TCGA ID']==pat_name]['Grade']))
    return x_patname, x_path, x_grph, x_omic, e, t, g
print(all_dataset.shape)
# For each of the 15 folds: optionally load the fold's trained path model so
# VGG features are pre-extracted, align all modalities for the train/test
# patients, standardize omic features on the train fold, and store both splits.
for k in pnas_splits.columns:
    print('Creating Split %s' % k)
    pat_train = pnas_splits.index[pnas_splits[k] == 'Train'] if opt.make_all_train == 0 else pnas_splits.index
    pat_test = pnas_splits.index[pnas_splits[k] == 'Test']
    cv_splits[int(k)] = {}
    model = None
    if opt.use_vgg_features:
        # Load this fold's trained CNN so x_path holds embeddings, not paths.
        load_path = os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%s.pt' % (opt.model_name, k))
        model_ckpt = torch.load(load_path, map_location=device)
        model_state_dict = model_ckpt['model_state_dict']
        if hasattr(model_state_dict, '_metadata'): del model_state_dict._metadata
        model = define_net(opt, None)
        if isinstance(model, torch.nn.DataParallel): model = model.module
        print('Loading the model from %s' % load_path)
        model.load_state_dict(model_state_dict)
        model.eval()
    train_x_patname, train_x_path, train_x_grph, train_x_omic, train_e, train_t, train_g = getAlignedMultimodalData(opt, model, device, all_dataset, pat_train, pat2img)
    test_x_patname, test_x_path, test_x_grph, test_x_omic, test_e, test_t, test_g = getAlignedMultimodalData(opt, model, device, all_dataset, pat_test, pat2img)
    train_x_omic, train_e, train_t = np.array(train_x_omic).squeeze(axis=1), np.array(train_e, dtype=np.float64), np.array(train_t, dtype=np.float64)
    test_x_omic, test_e, test_t = np.array(test_x_omic).squeeze(axis=1), np.array(test_e, dtype=np.float64), np.array(test_t, dtype=np.float64)
    # Standardize omic features with statistics from the train fold only.
    scaler = preprocessing.StandardScaler().fit(train_x_omic)
    train_x_omic = scaler.transform(train_x_omic)
    test_x_omic = scaler.transform(test_x_omic)
    train_data = {'x_patname': train_x_patname,
                  'x_path':np.array(train_x_path),
                  'x_grph':train_x_grph,
                  'x_omic':train_x_omic,
                  'e':np.array(train_e, dtype=np.float64),
                  't':np.array(train_t, dtype=np.float64),
                  'g':np.array(train_g, dtype=np.float64)}
    test_data = {'x_patname': test_x_patname,
                 'x_path':np.array(test_x_path),
                 'x_grph':test_x_grph,
                 'x_omic':test_x_omic,
                 'e':np.array(test_e, dtype=np.float64),
                 't':np.array(test_t, dtype=np.float64),
                 'g':np.array(test_g, dtype=np.float64)}
    dataset = {'train':train_data, 'test':test_data}
    cv_splits[int(k)] = dataset
    if opt.make_all_train: break
data_dict['cv_splits'] = cv_splits
# Pickle filename encodes the ROI dir, filtering flags, and feature options.
pickle.dump(data_dict, open('%s/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (opt.dataroot, opt.roi_dir, opt.ignore_missing_moltype, opt.ignore_missing_histype, opt.use_vgg_features, '_rnaseq' if opt.use_rnaseq else ''), 'wb'))
PathomicFusion | PathomicFusion-master/train_cv.py | import os
import logging
import numpy as np
import random
import pickle
import torch
# Env
from data_loaders import *
from options import parse_args
from train_test import train, test
### 1. Initializes parser and device
opt = parse_args()
device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')
print("Using device:", device)
if not os.path.exists(opt.checkpoints_dir): os.makedirs(opt.checkpoints_dir)
if not os.path.exists(os.path.join(opt.checkpoints_dir, opt.exp_name)): os.makedirs(os.path.join(opt.checkpoints_dir, opt.exp_name))
if not os.path.exists(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name)): os.makedirs(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name))

### 2. Initializes Data
# Grade prediction requires histology labels; omic modes require molecular subtype labels.
ignore_missing_histype = 1 if 'grad' in opt.task else 0
ignore_missing_moltype = 1 if 'omic' in opt.mode else 0
use_patch, roi_dir = ('_patch_', 'all_st_patches_512') if opt.use_vgg_features else ('_', 'all_st')
use_rnaseq = '_rnaseq' if opt.use_rnaseq else ''
data_cv_path = '%s/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (opt.dataroot, roi_dir, ignore_missing_moltype, ignore_missing_histype, opt.use_vgg_features, use_rnaseq)
print("Loading %s" % data_cv_path)
data_cv = pickle.load(open(data_cv_path, 'rb'))
data_cv_splits = data_cv['cv_splits']
results = []

### 3. Sets-Up Main Loop
for k, data in data_cv_splits.items():
    print("*******************************************")
    print("************** SPLIT (%d/%d) **************" % (k, len(data_cv_splits.items())))
    print("*******************************************")
    # Skip folds whose prediction pickles were already written out.
    if os.path.exists(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d_patch_pred_train.pkl' % (opt.model_name, k))):
        print("Train-Test Split already made.")
        continue

    ### 3.1 Trains Model
    model, optimizer, metric_logger = train(opt, data, device, k)

    ### 3.2 Evaluates Train + Test Error, and Saves Model
    loss_train, cindex_train, pvalue_train, surv_acc_train, grad_acc_train, pred_train = test(opt, model, data, 'train', device)
    loss_test, cindex_test, pvalue_test, surv_acc_test, grad_acc_test, pred_test = test(opt, model, data, 'test', device)

    if opt.task == 'surv':
        print("[Final] Apply model to training set: C-Index: %.10f, P-Value: %.10e" % (cindex_train, pvalue_train))
        logging.info("[Final] Apply model to training set: C-Index: %.10f, P-Value: %.10e" % (cindex_train, pvalue_train))
        print("[Final] Apply model to testing set: C-Index: %.10f, P-Value: %.10e" % (cindex_test, pvalue_test))
        # Fixed typo: this log line previously read "cC-Index".
        logging.info("[Final] Apply model to testing set: C-Index: %.10f, P-Value: %.10e" % (cindex_test, pvalue_test))
        results.append(cindex_test)
    elif opt.task == 'grad':
        print("[Final] Apply model to training set: Loss: %.10f, Acc: %.4f" % (loss_train, grad_acc_train))
        logging.info("[Final] Apply model to training set: Loss: %.10f, Acc: %.4f" % (loss_train, grad_acc_train))
        print("[Final] Apply model to testing set: Loss: %.10f, Acc: %.4f" % (loss_test, grad_acc_test))
        logging.info("[Final] Apply model to testing set: Loss: %.10f, Acc: %.4f" % (loss_test, grad_acc_test))
        results.append(grad_acc_test)

    ### 3.3 Saves Model
    # Checkpoints are saved from CPU tensors so they load on any device.
    if len(opt.gpu_ids) > 0 and torch.cuda.is_available():
        model_state_dict = model.module.cpu().state_dict()
    else:
        model_state_dict = model.cpu().state_dict()
    torch.save({
        'split':k,
        'opt': opt,
        'epoch': opt.niter+opt.niter_decay,
        'data': data,
        'model_state_dict': model_state_dict,
        'optimizer_state_dict': optimizer.state_dict(),
        'metrics': metric_logger},
        os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d.pt' % (opt.model_name, k)))
    print()

    pickle.dump(pred_train, open(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d%spred_train.pkl' % (opt.model_name, k, use_patch)), 'wb'))
    pickle.dump(pred_test, open(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d%spred_test.pkl' % (opt.model_name, k, use_patch)), 'wb'))

print('Split Results:', results)
print("Average:", np.array(results).mean())
pickle.dump(results, open(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_results.pkl' % opt.model_name), 'wb'))
PathomicFusion | PathomicFusion-master/options.py | import argparse
import os
import torch
### Parser
def parse_args():
    """Parse command-line options for training/testing.

    Unknown arguments are ignored (``parse_known_args``). The resulting
    options are printed and saved via ``print_options`` and ``gpu_ids`` is
    normalized from a comma-separated string to a list of ints.
    """
    parser = argparse.ArgumentParser()
    # --- Paths / experiment bookkeeping ---
    parser.add_argument('--dataroot', default='./data/TCGA_GBMLGG', help="datasets")
    parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints/TCGA_GBMLGG', help='models are saved here')
    parser.add_argument('--exp_name', type=str, default='exp_name', help='name of the project. It decides where to store samples and models')
    parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
    # --- Task / modality selection ---
    parser.add_argument('--mode', type=str, default='omic', help='mode')
    parser.add_argument('--model_name', type=str, default='omic', help='mode')
    parser.add_argument('--use_vgg_features', type=int, default=0, help='Use pretrained embeddings')
    parser.add_argument('--use_rnaseq', type=int, default=0, help='Use RNAseq data.')
    parser.add_argument('--task', type=str, default='surv', help='surv | grad')
    parser.add_argument('--useRNA', type=int, default=0) # Doesn't work at the moment...:(
    parser.add_argument('--useSN', type=int, default=1)
    # --- Model / output shape ---
    parser.add_argument('--act_type', type=str, default='Sigmoid', help='activation function')
    parser.add_argument('--input_size_omic', type=int, default=80, help="input_size for omic vector")
    parser.add_argument('--input_size_path', type=int, default=512, help="input_size for path images")
    parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
    parser.add_argument('--save_at', type=int, default=20, help="adsfasdf")
    parser.add_argument('--label_dim', type=int, default=1, help='size of output')
    parser.add_argument('--measure', default=1, type=int, help='disables measure while training (make program faster)')
    parser.add_argument('--verbose', default=1, type=int)
    parser.add_argument('--print_every', default=0, type=int)
    # --- Optimization ---
    parser.add_argument('--optimizer_type', type=str, default='adam')
    parser.add_argument('--beta1', type=float, default=0.9, help='0.9, 0.5 | 0.25 | 0')
    parser.add_argument('--beta2', type=float, default=0.999, help='0.9, 0.5 | 0.25 | 0')
    parser.add_argument('--lr_policy', default='linear', type=str, help='5e-4 for Adam | 1e-3 for AdaBound')
    parser.add_argument('--finetune', default=1, type=int, help='5e-4 for Adam | 1e-3 for AdaBound')
    parser.add_argument('--final_lr', default=0.1, type=float, help='Used for AdaBound')
    parser.add_argument('--reg_type', default='omic', type=str, help="regularization type")
    parser.add_argument('--niter', type=int, default=0, help='# of iter at starting learning rate')
    parser.add_argument('--niter_decay', type=int, default=25, help='# of iter to linearly decay learning rate to zero')
    parser.add_argument('--epoch_count', type=int, default=1, help='start of epoch')
    parser.add_argument('--batch_size', type=int, default=32, help="Number of batches to train/test for. Default: 256")
    parser.add_argument('--lambda_cox', type=float, default=1)
    parser.add_argument('--lambda_reg', type=float, default=3e-4)
    parser.add_argument('--lambda_nll', type=float, default=1)
    # --- Fusion architecture ---
    parser.add_argument('--fusion_type', type=str, default="pofusion", help='concat | pofusion')
    parser.add_argument('--skip', type=int, default=0)
    parser.add_argument('--use_bilinear', type=int, default=1)
    parser.add_argument('--path_gate', type=int, default=1)
    parser.add_argument('--grph_gate', type=int, default=1)
    parser.add_argument('--omic_gate', type=int, default=1)
    parser.add_argument('--path_dim', type=int, default=32)
    parser.add_argument('--grph_dim', type=int, default=32)
    parser.add_argument('--omic_dim', type=int, default=32)
    parser.add_argument('--path_scale', type=int, default=1)
    parser.add_argument('--grph_scale', type=int, default=1)
    parser.add_argument('--omic_scale', type=int, default=1)
    parser.add_argument('--mmhid', type=int, default=64)
    parser.add_argument('--init_type', type=str, default='none', help='network initialization [normal | xavier | kaiming | orthogonal | max]. Max seems to work well')
    parser.add_argument('--dropout_rate', default=0.25, type=float, help='0 - 0.25. Increasing dropout_rate helps overfitting. Some people have gone as high as 0.5. You can try adding more regularization')
    # --- Graph network ---
    parser.add_argument('--use_edges', default=1, type=float, help='Using edge_attr')
    parser.add_argument('--pooling_ratio', default=0.2, type=float, help='pooling ratio for SAGPOOl')
    parser.add_argument('--lr', default=2e-3, type=float, help='5e-4 for Adam | 1e-3 for AdaBound')
    parser.add_argument('--weight_decay', default=4e-4, type=float, help='Used for Adam. L2 Regularization on weights. I normally turn this off if I am using L1. You should try')
    parser.add_argument('--GNN', default='GCN', type=str, help='GCN | GAT | SAG. graph conv mode for pooling')
    parser.add_argument('--patience', default=0.005, type=float)

    opt = parser.parse_known_args()[0]
    print_options(parser, opt)
    opt = parse_gpuids(opt)
    return opt
def print_options(parser, opt):
    """Pretty-print all parsed options and persist them to disk.

    Options whose value differs from the parser default are annotated with
    that default.  The same text is written to
    [checkpoints_dir]/[exp_name]/[model_name]/train_opt.txt.
    """
    lines = ['----------------- Options ---------------']
    for key, value in sorted(vars(opt).items()):
        default = parser.get_default(key)
        # Flag any option the user overrode relative to the parser default.
        suffix = '' if value == default else '\t[default: %s]' % str(default)
        lines.append('{:>25}: {:<30}{}'.format(str(key), str(value), suffix))
    message = '\n'.join(lines) + '\n----------------- End -------------------'
    print(message)

    # Persist the options next to the experiment's checkpoints.
    expr_dir = os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name)
    mkdirs(expr_dir)
    file_name = os.path.join(expr_dir, '{}_opt.txt'.format('train'))
    with open(file_name, 'wt') as opt_file:
        opt_file.write(message)
        opt_file.write('\n')
def parse_gpuids(opt):
    """Parse the comma-separated ``opt.gpu_ids`` string into a list of ints.

    Negative ids are dropped (so ``'-1'`` selects CPU-only execution).  When
    at least one valid id remains, the first one becomes the active CUDA
    device.  Returns ``opt`` with ``opt.gpu_ids`` replaced by the parsed list.
    """
    # Comprehension instead of the original append loop; also avoids
    # shadowing the builtin `id`.
    opt.gpu_ids = [int(str_id) for str_id in opt.gpu_ids.split(',') if int(str_id) >= 0]
    if len(opt.gpu_ids) > 0:
        torch.cuda.set_device(opt.gpu_ids[0])
    return opt
def mkdirs(paths):
    """Create every directory in *paths* if it does not already exist.

    Parameters:
        paths (str or str list) -- one directory path, or a list of them
    """
    # Normalize to a list so a single path and a list of paths share one loop.
    targets = paths if (isinstance(paths, list) and not isinstance(paths, str)) else [paths]
    for entry in targets:
        mkdir(entry)
def mkdir(path):
    """Create a single directory (including parents) if it does not exist.

    Parameters:
        path (str) -- a single directory path
    """
    # exist_ok avoids the check-then-create race of the original
    # `os.path.exists` + `os.makedirs` pair.
    os.makedirs(path, exist_ok=True)
| 6,901 | 49.014493 | 205 | py |
PathomicFusion | PathomicFusion-master/test_cv.py | import os
import logging
import numpy as np
import random
import pickle
import torch
# Env
from networks import define_net
from data_loaders import *
from options import parse_args
from train_test import train, test
### 1. Initializes parser and device
# Flat evaluation script: reloads the per-split checkpoints produced during
# training and re-runs the held-out test set for every cross-validation split.
opt = parse_args()
device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')
print("Using device:", device)
# Make sure the checkpoint directory tree exists before reading/writing anything.
if not os.path.exists(opt.checkpoints_dir): os.makedirs(opt.checkpoints_dir)
if not os.path.exists(os.path.join(opt.checkpoints_dir, opt.exp_name)): os.makedirs(os.path.join(opt.checkpoints_dir, opt.exp_name))
if not os.path.exists(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name)): os.makedirs(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name))
### 2. Initializes Data
# The dataset pickle filename encodes which samples were filtered and which
# precomputed features are in use.
ignore_missing_histype = 1 if 'grad' in opt.task else 0
ignore_missing_moltype = 1 if 'omic' in opt.mode else 0
use_patch, roi_dir = ('_patch_', 'all_st_patches_512') if opt.use_vgg_features else ('_', 'all_st')
use_rnaseq = '_rnaseq' if opt.use_rnaseq else ''
data_cv_path = '%s/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (opt.dataroot, roi_dir, ignore_missing_moltype, ignore_missing_histype, opt.use_vgg_features, use_rnaseq)
print("Loading %s" % data_cv_path)
data_cv = pickle.load(open(data_cv_path, 'rb'))
data_cv_splits = data_cv['cv_splits']
results = []  # one C-Index (surv) or accuracy (grad) per CV split
### 3. Sets-Up Main Loop
for k, data in data_cv_splits.items():
	print("*******************************************")
	print("************** SPLIT (%d/%d) **************" % (k, len(data_cv_splits.items())))
	print("*******************************************")
	# Checkpoint written by the training script for this split.
	load_path = os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d.pt' % (opt.model_name, k))
	model_ckpt = torch.load(load_path, map_location=device)
	#### Loading Env
	model_state_dict = model_ckpt['model_state_dict']
	# Drop serialization metadata so load_state_dict does not choke on it.
	if hasattr(model_state_dict, '_metadata'): del model_state_dict._metadata
	model = define_net(opt, None)
	# Unwrap DataParallel so the saved (unwrapped) state dict keys line up.
	if isinstance(model, torch.nn.DataParallel): model = model.module
	print('Loading the model from %s' % load_path)
	model.load_state_dict(model_state_dict)
	### 3.2 Evalutes Train + Test Error, and Saves Model
	loss_test, cindex_test, pvalue_test, surv_acc_test, grad_acc_test, pred_test = test(opt, model, data, 'test', device)
	if opt.task == 'surv':
		print("[Final] Apply model to testing set: C-Index: %.10f, P-Value: %.10e" % (cindex_test, pvalue_test))
		logging.info("[Final] Apply model to testing set: cC-Index: %.10f, P-Value: %.10e" % (cindex_test, pvalue_test))
		results.append(cindex_test)
	elif opt.task == 'grad':
		print("[Final] Apply model to testing set: Loss: %.10f, Acc: %.4f" % (loss_test, grad_acc_test))
		logging.info("[Final] Apply model to testing set: Loss: %.10f, Acc: %.4f" % (loss_test, grad_acc_test))
		results.append(grad_acc_test)
	### 3.3 Saves Model
	# Raw per-sample predictions for this split, for downstream analysis.
	pickle.dump(pred_test, open(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d%spred_test.pkl' % (opt.model_name, k, use_patch)), 'wb'))
print('Split Results:', results)
print("Average:", np.array(results).mean())
pickle.dump(results, open(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_results.pkl' % opt.model_name), 'wb')) | 3,233 | 43.30137 | 164 | py |
PathomicFusion | PathomicFusion-master/train_test.py | import random
from tqdm import tqdm
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torch.utils.data import RandomSampler
from data_loaders import PathgraphomicDatasetLoader, PathgraphomicFastDatasetLoader
from networks import define_net, define_reg, define_optimizer, define_scheduler
from utils import unfreeze_unimodal, CoxLoss, CIndex_lifeline, cox_log_rank, accuracy_cox, mixed_collate, count_parameters
#from GPUtil import showUtilization as gpu_usage
import pdb
import pickle
import os
def train(opt, data, device, k):
	"""Train a network on one cross-validation split.

	Seeds all RNGs for reproducibility, builds the model / optimizer /
	scheduler from `opt`, then optimizes the survival ('surv') or grading
	('grad') objective plus a regularization penalty.  When `opt.measure` is
	set (or on the final epoch) train/test metrics are appended to
	`metric_logger` and per-epoch test predictions are pickled under the
	experiment's checkpoint directory.

	Returns (model, optimizer, metric_logger).
	"""
	# Fix all RNG seeds so each split trains identically given the same data.
	cudnn.deterministic = True
	torch.cuda.manual_seed_all(2019)
	torch.manual_seed(2019)
	random.seed(2019)
	model = define_net(opt, k)
	optimizer = define_optimizer(opt, model)
	scheduler = define_scheduler(opt, optimizer)
	print(model)
	print("Number of Trainable Parameters: %d" % count_parameters(model))
	print("Activation Type:", opt.act_type)
	print("Optimizer Type:", opt.optimizer_type)
	print("Regularization Type:", opt.reg_type)
	use_patch, roi_dir = ('_patch_', 'all_st_patches_512') if opt.use_vgg_features else ('_', 'all_st')
	# "Fast" loader consumes precomputed VGG features; the plain loader reads raw inputs.
	custom_data_loader = PathgraphomicFastDatasetLoader(opt, data, split='train', mode=opt.mode) if opt.use_vgg_features else PathgraphomicDatasetLoader(opt, data, split='train', mode=opt.mode)
	train_loader = torch.utils.data.DataLoader(dataset=custom_data_loader, batch_size=opt.batch_size, shuffle=True, collate_fn=mixed_collate)
	metric_logger = {'train':{'loss':[], 'pvalue':[], 'cindex':[], 'surv_acc':[], 'grad_acc':[]},
	                  'test':{'loss':[], 'pvalue':[], 'cindex':[], 'surv_acc':[], 'grad_acc':[]}}
	for epoch in tqdm(range(opt.epoch_count, opt.niter+opt.niter_decay+1)):
		# Thaw pretrained unimodal branches on the scheduled epoch.
		if opt.finetune == 1:
			unfreeze_unimodal(opt, model, epoch)
		model.train()
		risk_pred_all, censor_all, survtime_all = np.array([]), np.array([]), np.array([])   # Used for calculating the C-Index
		loss_epoch, grad_acc_epoch = 0, 0
		for batch_idx, (x_path, x_grph, x_omic, censor, survtime, grade) in enumerate(train_loader):
			censor = censor.to(device) if "surv" in opt.task else censor
			grade = grade.to(device) if "grad" in opt.task else grade
			_, pred = model(x_path=x_path.to(device), x_grph=x_grph.to(device), x_omic=x_omic.to(device))
			# Composite objective: Cox partial likelihood (surv) or NLL (grad),
			# each weighted, plus a weighted L1-style penalty.
			loss_cox = CoxLoss(survtime, censor, pred, device) if opt.task == "surv" else 0
			loss_reg = define_reg(opt, model)
			loss_nll = F.nll_loss(pred, grade) if opt.task == "grad" else 0
			loss = opt.lambda_cox*loss_cox + opt.lambda_nll*loss_nll + opt.lambda_reg*loss_reg
			loss_epoch += loss.data.item()
			optimizer.zero_grad()
			loss.backward()
			optimizer.step()
			if opt.task == "surv":
				risk_pred_all = np.concatenate((risk_pred_all, pred.detach().cpu().numpy().reshape(-1)))   # Logging Information
				censor_all = np.concatenate((censor_all, censor.detach().cpu().numpy().reshape(-1)))   # Logging Information
				survtime_all = np.concatenate((survtime_all, survtime.detach().cpu().numpy().reshape(-1)))   # Logging Information
			elif opt.task == "grad":
				# `pred` is reused here as the argmax class prediction.
				pred = pred.argmax(dim=1, keepdim=True)
				grad_acc_epoch += pred.eq(grade.view_as(pred)).sum().item()
			if opt.verbose > 0 and opt.print_every > 0 and (batch_idx % opt.print_every == 0 or batch_idx+1 == len(train_loader)):
				print("Epoch {:02d}/{:02d} Batch {:04d}/{:d}, Loss {:9.4f}".format(
					epoch+1, opt.niter+opt.niter_decay, batch_idx+1, len(train_loader), loss.item()))
		scheduler.step()
		# lr = optimizer.param_groups[0]['lr']
		#print('learning rate = %.7f' % lr)
		# Evaluate + log either every epoch (opt.measure) or on the last epoch only.
		if opt.measure or epoch == (opt.niter+opt.niter_decay - 1):
			loss_epoch /= len(train_loader)
			cindex_epoch = CIndex_lifeline(risk_pred_all, censor_all, survtime_all) if opt.task == 'surv' else None
			pvalue_epoch = cox_log_rank(risk_pred_all, censor_all, survtime_all) if opt.task == 'surv' else None
			surv_acc_epoch = accuracy_cox(risk_pred_all, censor_all) if opt.task == 'surv' else None
			grad_acc_epoch = grad_acc_epoch / len(train_loader.dataset) if opt.task == 'grad' else None
			loss_test, cindex_test, pvalue_test, surv_acc_test, grad_acc_test, pred_test = test(opt, model, data, 'test', device)
			metric_logger['train']['loss'].append(loss_epoch)
			metric_logger['train']['cindex'].append(cindex_epoch)
			metric_logger['train']['pvalue'].append(pvalue_epoch)
			metric_logger['train']['surv_acc'].append(surv_acc_epoch)
			metric_logger['train']['grad_acc'].append(grad_acc_epoch)
			metric_logger['test']['loss'].append(loss_test)
			metric_logger['test']['cindex'].append(cindex_test)
			metric_logger['test']['pvalue'].append(pvalue_test)
			metric_logger['test']['surv_acc'].append(surv_acc_test)
			metric_logger['test']['grad_acc'].append(grad_acc_test)
			pickle.dump(pred_test, open(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d%s%d_pred_test.pkl' % (opt.model_name, k, use_patch, epoch)), 'wb'))
			if opt.verbose > 0:
				if opt.task == 'surv':
					print('[{:s}]\t\tLoss: {:.4f}, {:s}: {:.4f}'.format('Train', loss_epoch, 'C-Index', cindex_epoch))
					print('[{:s}]\t\tLoss: {:.4f}, {:s}: {:.4f}\n'.format('Test', loss_test, 'C-Index', cindex_test))
				elif opt.task == 'grad':
					print('[{:s}]\t\tLoss: {:.4f}, {:s}: {:.4f}'.format('Train', loss_epoch, 'Accuracy', grad_acc_epoch))
					print('[{:s}]\t\tLoss: {:.4f}, {:s}: {:.4f}\n'.format('Test', loss_test, 'Accuracy', grad_acc_test))
			# Early stop the grading task once the train loss drops below `patience`.
			if opt.task == 'grad' and loss_epoch < opt.patience:
				print("Early stopping at Epoch %d" % epoch)
				break
	return model, optimizer, metric_logger
def test(opt, model, data, split, device):
	"""Evaluate *model* on `split` of one cross-validation fold.

	Returns (loss, C-Index, p-value, survival accuracy, grade accuracy,
	pred_test) where pred_test = [risk_pred_all, survtime_all, censor_all,
	probs_all, gt_all]; metrics not relevant to `opt.task` are None.

	NOTE(review): this loop runs without torch.no_grad(), so gradients are
	computed and discarded during evaluation -- confirm before optimizing.
	"""
	model.eval()
	custom_data_loader = PathgraphomicFastDatasetLoader(opt, data, split, mode=opt.mode) if opt.use_vgg_features else PathgraphomicDatasetLoader(opt, data, split=split, mode=opt.mode)
	test_loader = torch.utils.data.DataLoader(dataset=custom_data_loader, batch_size=opt.batch_size, shuffle=False, collate_fn=mixed_collate)
	risk_pred_all, censor_all, survtime_all = np.array([]), np.array([]), np.array([])
	probs_all, gt_all = None, np.array([])
	loss_test, grad_acc_test = 0, 0
	for batch_idx, (x_path, x_grph, x_omic, censor, survtime, grade) in enumerate(test_loader):
		censor = censor.to(device) if "surv" in opt.task else censor
		grade = grade.to(device) if "grad" in opt.task else grade
		_, pred = model(x_path=x_path.to(device), x_grph=x_grph.to(device), x_omic=x_omic.to(device))
		# Same composite objective as in train(), so losses are comparable.
		loss_cox = CoxLoss(survtime, censor, pred, device) if opt.task == "surv" else 0
		loss_reg = define_reg(opt, model)
		loss_nll = F.nll_loss(pred, grade) if opt.task == "grad" else 0
		loss = opt.lambda_cox*loss_cox + opt.lambda_nll*loss_nll + opt.lambda_reg*loss_reg
		loss_test += loss.data.item()
		gt_all = np.concatenate((gt_all, grade.detach().cpu().numpy().reshape(-1)))   # Logging Information
		if opt.task == "surv":
			risk_pred_all = np.concatenate((risk_pred_all, pred.detach().cpu().numpy().reshape(-1)))   # Logging Information
			censor_all = np.concatenate((censor_all, censor.detach().cpu().numpy().reshape(-1)))   # Logging Information
			survtime_all = np.concatenate((survtime_all, survtime.detach().cpu().numpy().reshape(-1)))   # Logging Information
		elif opt.task == "grad":
			grade_pred = pred.argmax(dim=1, keepdim=True)
			grad_acc_test += grade_pred.eq(grade.view_as(grade_pred)).sum().item()
			probs_np = pred.detach().cpu().numpy()
			probs_all = probs_np if probs_all is None else np.concatenate((probs_all, probs_np), axis=0)   # Logging Information
	###################################################
	# ==== Measuring Test Loss, C-Index, P-Value ==== #
	###################################################
	loss_test /= len(test_loader)
	cindex_test = CIndex_lifeline(risk_pred_all, censor_all, survtime_all) if opt.task == 'surv' else None
	pvalue_test = cox_log_rank(risk_pred_all, censor_all, survtime_all) if opt.task == 'surv' else None
	surv_acc_test = accuracy_cox(risk_pred_all, censor_all) if opt.task == 'surv' else None
	grad_acc_test = grad_acc_test / len(test_loader.dataset) if opt.task == 'grad' else None
	pred_test = [risk_pred_all, survtime_all, censor_all, probs_all, gt_all]
	return loss_test, cindex_test, pvalue_test, surv_acc_test, grad_acc_test, pred_test
| 9,077 | 54.018182 | 193 | py |
PathomicFusion | PathomicFusion-master/core/utils_models.py | # Base / Native
import math
import os
import pickle
import re
import warnings
warnings.filterwarnings('ignore')
# Numerical / Array
import lifelines
from lifelines.utils import concordance_index
from lifelines import CoxPHFitter
from lifelines.datasets import load_regression_dataset
from lifelines.utils import k_fold_cross_validation
from lifelines.statistics import logrank_test
from imblearn.over_sampling import RandomOverSampler
import numpy as np
# Torch
import torch
import torch.nn as nn
from torch.nn import init, Parameter
from torch.utils.data._utils.collate import *
from torch.utils.data.dataloader import default_collate
import torch_geometric
from torch_geometric.data import Batch
################
# Regularization
################
def regularize_weights(model, reg_type=None):
    """Return the L1 norm (sum of absolute values) of all model parameters.

    Returns None when the model has no parameters, matching the original.
    """
    penalty = None
    for param in model.parameters():
        term = torch.abs(param).sum()  # equivalent to param.norm(1)
        penalty = term if penalty is None else penalty + term
    return penalty
def regularize_path_weights(model, reg_type=None):
    """L1 penalty over the model's `classifier` and `linear` layers only."""
    penalty = None
    for submodule in (model.classifier, model.linear):
        for param in submodule.parameters():
            term = torch.abs(param).sum()  # equivalent to param.norm(1)
            penalty = term if penalty is None else penalty + term
    return penalty
def regularize_MM_weights(model, reg_type=None):
    """L1 penalty over every fusion-related submodule the wrapped model defines.

    Parameters:
        model    -- a torch.nn.DataParallel-wrapped fusion network
        reg_type -- unused; kept for signature compatibility with the other
                    regularizers
    Returns:
        A scalar tensor with the summed L1 norm, or None if none of the
        listed submodules exist.

    BUGFIX: the original tested `model.module.__hasattr__('name')`, but
    objects have no `__hasattr__` attribute, so every call raised
    AttributeError; the builtin `hasattr()` is the correct spelling.
    The hand-unrolled per-submodule loops are also collapsed into one loop
    over the attribute names, preserving the original accumulation order.
    """
    submodule_names = (
        'omic_net',
        'linear_h_path', 'linear_h_omic', 'linear_h_grph',
        'linear_z_path', 'linear_z_omic', 'linear_z_grph',
        'linear_o_path', 'linear_o_omic', 'linear_o_grph',
        'encoder1', 'encoder2', 'classifier',
    )
    l1_reg = None
    net = model.module
    for name in submodule_names:
        if not hasattr(net, name):
            continue
        for W in getattr(net, name).parameters():
            term = torch.abs(W).sum()  # torch.abs(W).sum() is equivalent to W.norm(1)
            l1_reg = term if l1_reg is None else l1_reg + term
    return l1_reg
def regularize_MM_omic(model, reg_type=None):
    """L1 penalty restricted to the genomic (omic) subnetwork, if present.

    Returns a scalar tensor, or None when the wrapped model has no
    `omic_net` attribute.

    BUGFIX: the original called `model.module.__hasattr__('omic_net')`;
    objects have no `__hasattr__` attribute, so that raised AttributeError.
    The builtin `hasattr()` is the correct spelling.
    """
    l1_reg = None
    if hasattr(model.module, 'omic_net'):
        for W in model.module.omic_net.parameters():
            term = torch.abs(W).sum()  # torch.abs(W).sum() is equivalent to W.norm(1)
            l1_reg = term if l1_reg is None else l1_reg + term
    return l1_reg
################
# Network Initialization
################
def init_weights(net, init_type='orthogonal', init_gain=0.02):
    """Initialize network weights in place.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- one of: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    'normal' was used in the original pix2pix/CycleGAN work, but xavier and
    kaiming may work better for some applications.  Raises
    NotImplementedError for an unknown init_type.
    """
    def init_func(m):
        # Dispatch on the class name, mirroring the pix2pix initializer.
        cls = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in cls or 'Linear' in cls):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if getattr(m, 'bias', None) is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in cls:
            # BatchNorm weight is a vector, not a matrix; only a normal draw applies.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(init_func)  # recursively apply <init_func> to every submodule
def init_max_weights(module):
    """Self-normalizing init: each nn.Linear gets N(0, 1/sqrt(fan_in)) weights
    and a zero bias."""
    for layer in module.modules():
        if type(layer) is not nn.Linear:  # exact type check, as in the original
            continue
        fan_in = layer.weight.size(1)
        layer.weight.data.normal_(0, 1. / math.sqrt(fan_in))
        layer.bias.data.zero_()
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Register a network on GPU(s) (with DataParallel) and initialize weights.

    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- normal | xavier | kaiming | orthogonal | max | none
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns the (possibly DataParallel-wrapped) initialized network.
    """
    if gpu_ids:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPU wrapper

    if init_type == 'none':
        print("Init Type: Not initializing networks.")
    elif init_type == 'max':
        # 'max' layers are expected to self-normalize (see init_max_weights).
        print("Init Type: Self-Normalizing Weights")
    else:
        print("Init Type:", init_type)
        init_weights(net, init_type, init_gain=init_gain)
    return net
################
# Freeze / Unfreeze
################
def unfreeze_unimodal(opt, model, epoch):
    """Unfreeze the pretrained unimodal branches of a fusion model at epoch 5.

    Which branches thaw depends on the fusion mode; epochs other than 5 are
    a no-op, exactly as in the original per-mode if/elif chain.
    """
    # Unimodal subnetworks each fusion mode should thaw (order preserved).
    schedule = {
        'graphomic': ('omic', 'grph'),
        'pathomic': ('omic',),
        'pathgraph': ('grph',),
        'pathgraphomic': ('omic', 'grph'),
        'omicomic': ('omic',),
        'graphgraph': ('grph',),
    }
    if epoch != 5:
        return
    for branch in schedule.get(opt.mode, ()):
        if branch == 'omic':
            dfs_unfreeze(model.module.omic_net)
            print("Unfreezing Omic")
        else:
            dfs_unfreeze(model.module.grph_net)
            print("Unfreezing Graph")
def dfs_freeze(model):
    """Recursively disable gradients for every parameter below *model*.

    Note: parameters registered directly on *model* itself (not on a child)
    are left untouched, matching the original traversal.
    """
    for child in model.children():
        for param in child.parameters():
            param.requires_grad = False
        dfs_freeze(child)
def dfs_unfreeze(model):
    """Recursively enable gradients for every parameter below *model*.

    Mirror image of dfs_freeze; parameters held directly by *model* itself
    are not visited, matching the original traversal.
    """
    for child in model.children():
        for param in child.parameters():
            param.requires_grad = True
        dfs_unfreeze(child)
def print_if_frozen(module):
    """Debug helper: report, per parameter of each direct child, whether it trains."""
    for idx, child in enumerate(module.children()):
        for param in child.parameters():
            # One line per parameter, labelled with the child's index.
            label = "Learnable!!! %d:" % idx if param.requires_grad else "Still Frozen %d:" % idx
            print(label, child)
def unfreeze_vgg_features(model, epoch):
    """Progressively unfreeze the tail of a VGG feature extractor.

    The schedule maps an epoch to the last frozen layer index: at epoch 30,
    every layer of ``model.features`` past index 45 becomes trainable.
    Raises KeyError for epochs not in the schedule.
    """
    unfreeze_index = {30: 45}[epoch]
    for idx, child in enumerate(model.features.children()):
        if idx <= unfreeze_index:
            print("Still Frozen %d:" % idx, child)
        else:
            print("Unfreezing %d:" % idx, child)
            for param in child.parameters():
                param.requires_grad = True
################
# Collate Utils
################
def mixed_collate(batch):
    """Collate a batch whose sample fields mix tensors and torch_geometric graphs.

    Each sample is a tuple (e.g. (x_path, x_grph, x_omic, censor, survtime,
    grade)); samples are transposed field-wise, then each field is collated:
    graph `Data` objects are merged into a torch_geometric `Batch`, everything
    else goes through torch's `default_collate`.
    """
    elem = batch[0]          # NOTE(review): `elem`/`elem_type` appear unused
    elem_type = type(elem)
    transposed = zip(*batch)
    return [Batch.from_data_list(samples, []) if type(samples[0]) is torch_geometric.data.data.Data else default_collate(samples) for samples in transposed]
################
# Survival Utils
################
def CoxLoss(survtime, censor, hazard_pred, device):
    """Negative partial log-likelihood of the Cox proportional-hazards model.

    Calculation credit to Travers Ching https://github.com/traversc/cox-nnet
    (Cox-nnet: An artificial neural network method for prognosis prediction
    of high-throughput omics data).

    Parameters:
        survtime    -- survival/follow-up times, shape [batch] (tensor or array)
        censor      -- event indicators (1 = event observed), tensor
        hazard_pred -- predicted log-hazards, shape [batch, 1] or [batch]
        device      -- torch device the risk-set matrix should live on
    Returns a scalar loss tensor.
    """
    times = survtime.detach().cpu().numpy() if torch.is_tensor(survtime) else np.asarray(survtime)
    # R_mat[i, j] = 1 iff subject j is still at risk when subject i fails
    # (times[j] >= times[i]); vectorized instead of the original O(n^2)
    # Python double loop.
    R_mat = (times.reshape(1, -1) >= times.reshape(-1, 1)).astype(int)
    R_mat = torch.FloatTensor(R_mat).to(device)
    theta = hazard_pred.reshape(-1)
    exp_theta = torch.exp(theta)
    # Only uncensored subjects (censor == 1) contribute to the likelihood.
    loss_cox = -torch.mean((theta - torch.log(torch.sum(exp_theta * R_mat, dim=1))) * censor)
    return loss_cox
def accuracy(output, labels):
    """Fraction of rows where the argmax of *output* matches *labels*."""
    predicted = output.max(1)[1].type_as(labels)
    hits = predicted.eq(labels).double().sum()
    return hits / len(labels)
def accuracy_cox(hazardsdata, labels):
    """Accuracy of median-dichotomized hazard predictions vs. true events.

    Every subject whose predicted hazard exceeds the cohort median is
    treated as an estimated event; the result is the fraction agreeing
    with the observed event labels.
    """
    cutoff = np.median(hazardsdata)
    predicted = (hazardsdata > cutoff).astype(int)
    return np.sum(predicted == labels) / len(labels)
def cox_log_rank(hazardsdata, labels, survtime_all):
    """Log-rank test p-value between low- and high-hazard groups.

    Patients are split at the median predicted hazard; the two groups'
    survival curves are compared with the lifelines log-rank test.
    """
    cutoff = np.median(hazardsdata)
    high = hazardsdata > cutoff   # group B: above-median predicted risk
    low = ~high                   # group A: at or below the median
    results = logrank_test(survtime_all[low], survtime_all[high],
                           event_observed_A=labels[low], event_observed_B=labels[high])
    return results.p_value
def CIndex(hazards, labels, survtime_all):
    """Concordance index computed by exhaustive pairwise comparison.

    For every comparable pair (an observed event i and a subject j surviving
    longer), the pair is concordant when the event case received the higher
    hazard; tied hazard predictions count as half-concordant, per the
    standard definition.

    Parameters:
        hazards      -- predicted risk scores, indexable by position
        labels       -- event indicators (1 = event observed), numpy array
        survtime_all -- survival times, indexable by position
    Returns the fraction of concordant (+ 0.5 * tied) comparable pairs.
    Raises ZeroDivisionError if no pair is comparable.
    """
    concord = 0.
    total = 0.
    N_test = labels.shape[0]
    for i in range(N_test):
        if labels[i] == 1:
            for j in range(N_test):
                if survtime_all[j] > survtime_all[i]:
                    total += 1
                    if hazards[j] < hazards[i]:
                        concord += 1
                    # BUGFIX: the original repeated `hazards[j] < hazards[i]`
                    # here, so the 0.5 tie credit was unreachable dead code;
                    # ties must compare equal.
                    elif hazards[j] == hazards[i]:
                        concord += 0.5
    return concord / total
def CIndex_lifeline(hazards, labels, survtime_all):
    """Concordance index via lifelines (higher hazard implies shorter survival,
    hence the negated scores)."""
    return concordance_index(survtime_all, -hazards, labels)
PathomicFusion | PathomicFusion-master/CellGraph/pixelcnn.py | import torch.nn as nn
from layers_custom import maskConv0, MaskConvBlock
import torch
class MaskCNN(nn.Module):
    """PixelCNN-style masked convolutional aggregator.

    A large 7x7 masked entry convolution (with image downshift, so each
    output neuron's receptive field only sees what is above it), a stack of
    10 gated residual masked conv blocks, and a 1x1 conv that maps the
    hidden width back to the requested channel count.  Fully convolutional:
    the spatial dimensions are preserved throughout.
    """

    def __init__(self, n_channel=1024, h=128):
        super(MaskCNN, self).__init__()
        # 7x7 masked filter; padding 3 keeps the feature-map size constant.
        self.MaskConv0 = maskConv0(n_channel, h, k_size=7, stride=1, pad=3)
        # Stack of 10 gated residual masked conv blocks.
        self.MaskConv = nn.Sequential(
            *[MaskConvBlock(h, k_size=3, stride=1, pad=1) for _ in range(10)]
        )
        # 1x1 conv to upsample to the required feature (channel) length.
        self.out = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(h, n_channel, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(n_channel),
            nn.ReLU()
        )

    def forward(self, x):
        """Map [batch_size, channel, height, width] -> same shape."""
        return self.out(self.MaskConv(self.MaskConv0(x)))
if __name__ == '__main__':
    from torchsummary import summary
    # BUGFIX: the class defined above is MaskCNN; the original instantiated a
    # non-existent `PixelCNN`, raising NameError when run as a script.
    model = MaskCNN(1024, 128)
    summary(model, (1024, 7, 7))
    # Quick shape sanity check: output spatial/channel dims match the input.
    x = torch.rand(2, 1024, 7, 7)
    x = model(x)
    print(x.shape)
| 1,544 | 27.090909 | 149 | py |
PathomicFusion | PathomicFusion-master/CellGraph/resnet.py | '''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web are copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) have the following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an nn.Module (used for the parameter-free
    option-A shortcut in BasicBlock)."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        # Simply delegate to the stored callable.
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Two-conv residual block for the CIFAR-style ResNet.

    Option 'A' (used by the CIFAR10 ResNet paper) implements the
    downsampling shortcut as stride-2 subsampling plus zero channel padding
    (parameter-free); option 'B' uses a 1x1 projection convolution.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()  # identity when shapes already match
        if stride != 1 or in_planes != planes:
            if option == 'A':
                # Subsample spatially by 2 and zero-pad the channel dimension.
                self.shortcut = LambdaLayer(
                    lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes // 4, planes // 4), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                    nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(self.expansion * planes)
                )

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class ResNet(nn.Module):
    """CIFAR-style ResNet trunk used here as a feature extractor.

    NOTE: forward() returns the pooled 64-d feature vector; the final
    `self.linear` classifier head is constructed but never applied.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        # Three stages of widths 16/32/64; stages 2 and 3 downsample by 2.
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_classes)
        self.apply(_weights_init)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first may downsample."""
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        # Global average pool over the full spatial extent, then flatten.
        out = F.avg_pool2d(out, out.size()[3])
        return out.view(out.size(0), -1)
# Depth-N factory helpers: a CIFAR ResNet with 6n+2 layers uses n blocks per stage.
def resnet20():
    return ResNet(BasicBlock, [3, 3, 3])
def resnet32():
    return ResNet(BasicBlock, [5, 5, 5])
def resnet44():
    return ResNet(BasicBlock, [7, 7, 7])
def resnet56():
    return ResNet(BasicBlock, [9, 9, 9])
def resnet110():
    return ResNet(BasicBlock, [18, 18, 18])
def resnet1202():
    return ResNet(BasicBlock, [200, 200, 200])
def test(net):
    """Print the trainable-parameter count and the number of >1-dim trainable
    parameters ("layers") of *net*."""
    import numpy as np
    trainable = [p for p in net.parameters() if p.requires_grad]
    total_params = sum(np.prod(p.data.numpy().shape) for p in trainable)
    print("Total number of params", total_params)
    print("Total layers", len([p for p in trainable if len(p.data.size()) > 1]))
if __name__ == "__main__":
    # Smoke test: build every exported resnet* factory and report its size.
    for net_name in __all__:
        if net_name.startswith('resnet'):  # skips the 'ResNet' class entry
            print(net_name)
            test(globals()[net_name]())
print()
| 4,971 | 29.881988 | 120 | py |
PathomicFusion | PathomicFusion-master/CellGraph/model.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from resnet_custom import *
import pdb
import math
from pixelcnn import MaskCNN
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
def initialize_weights(module):
    """Kaiming-initialize every Conv2d/Linear in *module*; BatchNorm2d layers
    get weight=1 and bias=0.

    args:
        module: any pytorch module with trainable parameters
    """
    for layer in module.modules():
        if isinstance(layer, nn.Conv2d):
            nn.init.kaiming_normal_(layer.weight, nonlinearity='relu')
            if layer.bias is not None:
                layer.bias.data.zero_()
        if isinstance(layer, nn.Linear):
            nn.init.kaiming_normal_(layer.weight, mode='fan_out', nonlinearity='relu')
            layer.bias.data.zero_()
        elif isinstance(layer, nn.BatchNorm2d):
            nn.init.constant_(layer.weight, 1)
            nn.init.constant_(layer.bias, 0)
class CPC_model(nn.Module):
    """Contrastive Predictive Coding over a 7x7 grid of image patches.

    Each 256x256 image is pre-cropped (see ``cropdata``) into 49
    overlapping 64x64 patches. A ResNet encoder maps each patch to a
    feature vector; a masked CNN aggregates rows top-down into a context;
    independent linear heads predict the features of the next ``k`` rows,
    trained with the InfoNCE loss against in-batch negatives.
    """
    def __init__(self, input_size = 1024, hidden_size = 128, k = 3, ln = False):
        """
        args:
            input_size: input size to autoregresser (encoding size)
            hidden_size: number of hidden units in MaskedCNN
            num_layers: number of hidden layers in MaskedCNN
            k: prediction length
            ln: if True, use the layer-norm ResNet-50 encoder variant
        """
        super(CPC_model, self).__init__()
        ### Settings
        self.seq_len = 49 # 7 x 7 grid of overlapping 64 x 64 patches extracted from each 256 x 256 image
        self.k = k
        self.input_size = input_size
        self.hidden_size=hidden_size
        ### Networks
        if ln:
            self.encoder = resnet50_ln(pretrained=False)
        else:
            self.encoder = resnet50(pretrained=False)
        self.reg = MaskCNN(n_channel=self.input_size, h=self.hidden_size)
        network_pred = [nn.Linear(input_size, input_size) for i in range(self.k)] #use an indepdent linear layer to predict each future row
        self.network_pred= nn.ModuleList(network_pred)
        # initialize linear network and context network
        initialize_weights(self.network_pred)
        initialize_weights(self.reg)
        ### Activation functions
        self.softmax = nn.Softmax(dim=1)
        self.lsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        """Return (nce_loss, accuracy) where accuracy is a length-k array
        of contrastive top-1 matching rates, one per predicted row."""
        # input = [bs * 7 * 7, 3, 64, 64]
        # compute batch_size
        bs = x.size(0) // (self.seq_len)
        rows = int(math.sqrt(self.seq_len))
        cols = int(math.sqrt(self.seq_len))
        # compute latent representation for each patch
        z = self.encoder(x)
        # z.shape: [bs * 7 * 7, 1024]
        # reshape z into feature grid: [bs, 7, 7, 1024]
        z = z.contiguous().view(bs, rows, cols, self.input_size)
        device = z.device
        #randomly draw a row to predict what is k rows below it, using information in current row and above
        if self.training:
            pred_id = torch.randint(rows - self.k, size=(1,)).long() #low is 0, high is 3 (predicts row 4, 5, 6)
        else:
            # deterministic choice at eval time: always condition on row 4
            pred_id = torch.tensor([3]).long()
        # feature predictions for the next k rows e.g. pred[i] is [bs * cols, 1024] for i in k
        pred = [torch.empty(bs * cols, self.input_size).float().to(device) for i in range(self.k)]
        # ground truth encodings for the next k rows e.g. encode_samples[i] is [bs * cols, 1024] for i in k
        encode_samples = [torch.empty(bs * cols, self.input_size).float().to(device) for i in range(self.k)]
        for i in np.arange(self.k):
            # add ground truth encodings
            start_row = pred_id.item()+i+1
            encode_samples[i] = z[:,start_row, :, :].contiguous().view(bs * cols, self.input_size)
        # reshape feature grid to channel first (required by Pytorch convolution convention)
        z = z.permute(0, 3, 1, 2)
        # z.shape: from [bs, 7, 7, 1024] --> [bs, 1024, 7, 7]
        # apply aggregation to compute context
        output = self.reg(z)
        # reg is fully convolutional --> output size is [bs, 1024, 7, 7]
        output = output.permute(0, 2, 3, 1) # reshape back to feature grid
        # output.shape: [bs, row, col, 1024]
        # context for each patch in the row
        # NOTE: the context row index equals the first target row index
        # (pred_id + 1); presumably the masked CNN in self.reg is causal
        # (down-shifted) so this position only aggregates rows above it --
        # confirm against MaskCNN's masking.
        c_t = output[:,pred_id + 1,:, :]
        # c_t.shape: [bs, 1, 7, 1024]
        # reshape for linear classification:
        c_t = c_t.contiguous().view(bs * cols, self.input_size)
        # c_t.shape: [bs * cols, 1024]
        # linear prediction: Wk*c_t
        for i in np.arange(0, self.k):
            if type(self.network_pred) == nn.DataParallel:
                pred[i] = self.network_pred.module[i](c_t)
            else:
                pred[i] = self.network_pred[i](c_t) #e.g. size [bs * cols, 1024]
        nce = 0 # average over prediction length, cols, and batch
        accuracy = np.zeros((self.k,))
        for i in np.arange(0, self.k):
            """
            goal: can network correctly match predicted features with ground truth features among negative targets
            i.e. match z_i+k,j with W_k * c_i,j
            postivie target: patch with the correct groundtruth encoding
            negative targets: patches with wrong groundtruth encodings (sampled from other patches in the same image, or other images in the minibatch)
            1) dot product for each k to obtain raw prediction logits
               total = (a_ij) = [bs * col, bs * col], where a_ij is the logit of ith patch prediction matching jth patch encoding
            2) apply softmax along each row to get probability that ith patch prediction matches jth patch encoding
               we want ith patch prediction to correctly match ith patch encoding, therefore target has 1s along diagnol, and 0s off diagnol
            3) we take the argmax along softmaxed rows to get the patch prediction for the ith patch, this value should be i
            4) compute nce loss as the cross-entropy of classifying the positive sample correctly (sum of logsoftmax along diagnol)
            5) normalize loss by batchsize and k and number of patches in a row
            """
            total = torch.mm(pred[i], torch.transpose(encode_samples[i],0,1)) # e.g. size [bs * col, bs * col]
            accuracy[i] = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=1), torch.arange(0, bs * cols).to(device))).item()
            accuracy[i] /= 1. * (bs * cols)
            nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
        nce /= -1. * bs * cols * self.k
        # accuracy = 1.*correct.item() / (bs * cols * self.k)
        return nce, np.array(accuracy)
def cropdata(data, num_channels=3, kernel_size=64, stride=32):
    """Split image tensor(s) into overlapping square patches.

    A (C, H, W) input is promoted to a batch of one. Patches of side
    ``kernel_size`` are taken every ``stride`` pixels along height and
    width (defaults: 64px patches with 32px overlap), then flattened to
    (num_patches, num_channels, kernel_size, kernel_size), ordered
    row-major per image.
    """
    if data.dim() == 3:
        data = data.unsqueeze(0)
    patches = data.unfold(2, kernel_size, stride).unfold(3, kernel_size, stride)
    # (B, C, rows, cols, k, k) -> (B, rows, cols, C, k, k)
    patches = patches.permute(0, 2, 3, 1, 4, 5)
    return patches.contiguous().view(-1, num_channels, kernel_size, kernel_size)
if __name__ == '__main__':
    # Smoke test: crop two random 256x256 images into 64x64 patches and
    # run them through a freshly constructed CPC model.
    torch.set_printoptions(threshold=1e6)
    x = torch.rand(2, 3, 256, 256)
    x = cropdata(x)
    # expected: [2 * 49, 3, 64, 64]
    print(x.shape)
    model = CPC_model(1024, 256)
    nce, accuracy = model(x)
| 6,484 | 32.427835 | 142 | py |
PathomicFusion | PathomicFusion-master/CellGraph/layers_custom.py | import torch
import torch.nn as nn
import pdb
def down_shift(x, pad=None):
    """Shift a NCHW tensor one row downward.

    The bottom row is dropped and a zero row is inserted at the top, so
    output row i holds input row i-1.  A custom padding module may be
    supplied via ``pad``; by default zero padding is used.
    """
    height = x.size(2)
    trimmed = x[:, :, :height - 1, :]
    if pad is None:
        # ZeroPad2d takes (left, right, top, bottom): pad one row on top
        pad = nn.ZeroPad2d((0, 0, 1, 0))
    return pad(trimmed)
class MaskedConv2d(nn.Conv2d):
    """Conv2d whose kernel rows strictly below the center row are zeroed.

    Only the top half of the kernel (center row included) is kept, so an
    output location never reads pixels from rows below it.  When
    ``use_down_shift`` is set, the input is additionally shifted one row
    down first, hiding the current row as well.
    """

    def __init__(self, c_in, c_out, k_size, stride, pad, use_down_shift=False):
        super(MaskedConv2d, self).__init__(
            c_in, c_out, k_size, stride, pad, bias=False)
        out_ch, in_ch, kh, kw = self.weight.size()
        # visible region: kernel rows 0 .. kh//2; everything below stays 0
        mask = torch.zeros(out_ch, in_ch, kh, kw)
        mask[:, :, :kh // 2 + 1] = 1
        self.register_buffer('mask', mask)
        self.use_down_shift = use_down_shift

    def forward(self, x):
        # zero the forbidden kernel entries in place before convolving
        self.weight.data *= self.mask
        if self.use_down_shift:
            x = down_shift(x)
        return super(MaskedConv2d, self).forward(x)
def maskConv0(c_in=3, c_out=256, k_size=7, stride=1, pad=3):
    """First PixelCNN layer: down-shifted masked conv, batch norm, gate.

    The convolution emits 2*c_out channels, which the Gate halves back to
    c_out via tanh/sigmoid gating.
    """
    stages = [
        MaskedConv2d(c_in, c_out * 2, k_size, stride, pad, use_down_shift=True),
        nn.BatchNorm2d(c_out * 2),
        Gate(),
    ]
    return nn.Sequential(*stages)
class Gate(nn.Module):
    """Gated activation: split channels in half, return tanh(a) * sigmoid(b)."""

    def __init__(self):
        super(Gate, self).__init__()

    def forward(self, x):
        # first chunk drives the features, second chunk the gates
        features, gates = torch.chunk(x, 2, dim=1)
        return torch.tanh(features) * torch.sigmoid(gates)
class MaskConvBlock(nn.Module):
    """Residual PixelCNN block: masked conv -> batch norm -> gate, plus skip.

    The masked convolution doubles the channel count and the Gate halves it
    again, so input and output both carry ``h`` channels.
    """

    def __init__(self, h=128, k_size=3, stride=1, pad=1):
        super(MaskConvBlock, self).__init__()
        stages = [
            MaskedConv2d(h, 2 * h, k_size, stride, pad),
            nn.BatchNorm2d(2 * h),
            Gate(),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        # residual connection around the gated masked convolution
        return self.net(x) + x
if __name__ == '__main__':
    # Demo: visualize the "blind spot" behavior of masked convolutions by
    # planting a large value (1000) at pixel (1, 0) and watching how far it
    # propagates under (a) an unmasked kernel, (b) top-row-only masks, and
    # (c) a down-shifted kernel that also sees the current row.
    def conv(x, kernel):
        return nn.functional.conv2d(x, kernel, padding=1)
    x = torch.ones((1, 1, 5, 5)) * 0.1
    x[:,:,1,0] = 1000
    print("blindspot experiment")
    normal_kernel = torch.ones(1, 1, 3, 3)
    # type-A style mask: only the row above is visible
    mask_kernel = torch.zeros(1, 1, 3, 3)
    mask_kernel[:,:,0,:] = 1
    # type-B style mask: additionally include the center tap
    mask_b = mask_kernel.clone()
    mask_b[:,:,1,1] = 1
    # mask_kernel[:,:,1,1] = 1
    print("unmasked kernel:", "\n",normal_kernel.squeeze(), "\n")
    print("masked kernel:", "\n", mask_kernel.squeeze(), "\n")
    print("normal conv")
    print("orig image", "\n", x.squeeze(), "\n")
    y = conv(x, normal_kernel)
    print(y[:,0, :,:], "\n")
    y = conv(y, normal_kernel)
    print(y[:,0, :,:], "\n")
    print("with mask")
    print("orig image", "\n", x.squeeze(), "\n")
    y = conv(x, mask_kernel)
    print(y[:,0, :,:], "\n")
    y = conv(y, mask_b)
    print(y[:,0, :,:], "\n")
    y = conv(y, mask_b)
    print(y[:,0, :,:],"\n")
    print("with down_shift")
    print("orig image", x.squeeze(), "\n")
    # NOTE(review): this aliases mask_kernel (no clone), so the in-place
    # write below also mutates mask_kernel -- harmless here because
    # mask_kernel is not used afterwards, but fragile if the script grows.
    c_kernel = mask_kernel
    c_kernel[:,:,1,:] = 1
    print("custom kernel:", "\n", c_kernel.squeeze(), "\n")
    y = conv(down_shift(x), c_kernel)
    print(y[:,0, :,:],"\n")
    y = conv(y, c_kernel)
    print(y[:,0, :,:],"\n")
    y = conv(y, c_kernel)
    print(y[:,0, :,:],"\n")
    y = conv(y, c_kernel)
    print(y[:,0, :,:],"\n")
| 3,879 | 28.172932 | 80 | py |
PathomicFusion | PathomicFusion-master/CellGraph/resnet_custom.py | # modified from Pytorch official resnet.py
# oops
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
from torchsummary import summary
import torch.nn.functional as F
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1 and no bias (spatial size preserved at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two 3x3 convolutions with a residual connection.

    Batch-norm layers were deliberately removed from this variant; only
    conv -> relu -> conv remains on the main path.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # project the identity path when the shape changes
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.conv1(x))
        out = self.conv2(out)
        return self.relu(out + identity)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (batch-norm removed).

    The final 1x1 conv expands channels by ``expansion`` (4); the 3x3 conv
    carries any stride.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # project the identity path when the shape changes
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.conv1(x))
        out = self.relu(self.conv2(out))
        out = self.conv3(out)
        return self.relu(out + identity)
class LayerNorm(nn.Module):
    """Parameter-free layer norm over all non-batch dimensions of the input."""

    def __init__(self):
        super(LayerNorm, self).__init__()

    def forward(self, x):
        # normalize each sample over every dimension except the batch axis;
        # no learnable affine parameters are used
        normalized_shape = tuple(x.shape[1:])
        return F.layer_norm(x, normalized_shape)
class Bottleneck_LN(nn.Module):
    """Bottleneck residual block that layer-normalizes after every conv.

    Same topology as ``Bottleneck`` (1x1 -> 3x3 -> 1x1, expansion 4) but a
    parameter-free LayerNorm replaces batch norm, including on the
    projected shortcut.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck_LN, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        # stateless, so a single instance is safely shared by every call site
        self.ln = LayerNorm()
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x
        out = self.relu(self.ln(self.conv1(x)))
        out = self.relu(self.ln(self.conv2(out)))
        out = self.ln(self.conv3(out))
        if self.downsample is not None:
            # the projected shortcut is layer-normalized as well
            identity = self.ln(self.downsample(x))
        return self.relu(out + identity)
class ResNet(nn.Module):
    """Truncated ResNet feature encoder.

    Compared to torchvision's reference implementation: batch-norm layers
    are commented out everywhere (normalization, if any, lives inside the
    block type), ``layer4`` and the classification head are removed, and
    global average pooling flattens the output of ``layer3`` to a
    [batch, 256 * block.expansion] embedding.
    """
    def __init__(self, block, layers
                 # num_classes=1000
                 ):
        # running channel count consumed/updated by _make_layer
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        # self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1) # was 7, if have layer4
        # remove the final fc
        # self.fc = nn.Linear(512 * block.expansion, num_classes)
        # default init: Kaiming for convs, unit/zero for any batch norms
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first one may downsample
        the identity path with a 1x1 conv when stride/width changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                # nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        # x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        # x = self.layer4(x)
        x = self.avgpool(x)
        # flatten [B, C, 1, 1] -> [B, C]
        x = x.view(x.size(0), -1)
        # x = self.fc(x)
        return x
def resnet18(pretrained=False, **kwargs):
    """Build a ResNet-18 encoder (BasicBlock x [2, 2, 2, 2]).

    Args:
        pretrained (bool): If True, load ImageNet weights for whichever
            parameters still exist in this truncated architecture.
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    # neq_load copies only the checkpoint keys that match this model
    return neq_load(net, 'resnet18') if pretrained else net
def resnet34(pretrained=False, **kwargs):
    """Build a ResNet-34 encoder (BasicBlock x [3, 4, 6, 3]).

    Args:
        pretrained (bool): If True, load ImageNet weights for whichever
            parameters still exist in this truncated architecture.
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    # neq_load copies only the checkpoint keys that match this model
    return neq_load(net, 'resnet34') if pretrained else net
def resnet50(pretrained=False, **kwargs):
    """Build a ResNet-50 encoder (Bottleneck x [3, 4, 6, 3]).

    Args:
        pretrained (bool): If True, load ImageNet weights for whichever
            parameters still exist in this truncated architecture.
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    # neq_load copies only the checkpoint keys that match this model
    return neq_load(net, 'resnet50') if pretrained else net
def resnet50_ln(pretrained=False, **kwargs):
    """Build a ResNet-50 encoder with layer norm (Bottleneck_LN x [3, 4, 6, 3]).

    Args:
        pretrained (bool): If True, load ImageNet weights for whichever
            parameters still exist in this truncated architecture.
        **kwargs: forwarded to the ResNet constructor.

    Note: the original signature omitted ``**kwargs`` while the body used
    it, so every call raised NameError; accepting (and forwarding) kwargs
    fixes that and matches the sibling builders.
    """
    model = ResNet(Bottleneck_LN, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model = neq_load(model, 'resnet50')
        # model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model
def resnet101(pretrained=False, **kwargs):
    """Build a ResNet-101 encoder (Bottleneck x [3, 4, 23, 3]).

    Args:
        pretrained (bool): If True, load ImageNet weights for whichever
            parameters still exist in this truncated architecture.
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    # neq_load copies only the checkpoint keys that match this model
    return neq_load(net, 'resnet101') if pretrained else net
def resnet101_wide(pretrained=False, ln=False):
    """Constructs a wide ResNet-101 model.

    NOTE(review): ``ResNet_Wide_LN``, ``ResNet_Wide``, ``Bottleneck_Wide_LN``
    and ``Bottleneck_Wide`` are not defined anywhere in this module --
    presumably they lived in an earlier revision -- so calling this function
    currently raises NameError. Restore those classes or remove this builder.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        ln (bool): If True, use the layer-norm variants of the wide blocks
    """
    if ln:
        model = ResNet_Wide_LN(Bottleneck_LN, Bottleneck_Wide_LN, [3, 4, 46, 3])
    else:
        model = ResNet_Wide(Bottleneck, Bottleneck_Wide, [3, 4, 46, 3])
    if pretrained:
        model = neq_load(model, 'resnet101')
        # model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model
def resnet152(pretrained=False, **kwargs):
    """Build a ResNet-152 encoder (Bottleneck x [3, 8, 36, 3]).

    Args:
        pretrained (bool): If True, load ImageNet weights for whichever
            parameters still exist in this truncated architecture.
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    # neq_load copies only the checkpoint keys that match this model
    return neq_load(net, 'resnet152') if pretrained else net
def neq_load(model, name):
    """Partially load pretrained weights, skipping keys the model lacks.

    Useful after the architecture has been modified (layers removed or
    renamed): only checkpoint entries whose names still exist in ``model``
    are copied; everything else keeps its current initialization.
    """
    checkpoint = model_zoo.load_url(model_urls[name])
    own_state = model.state_dict()
    compatible = {key: value for key, value in checkpoint.items() if key in own_state}
    own_state.update(compatible)
    model.load_state_dict(own_state)
    return model
if __name__ == '__main__':
    # Smoke test: build the layer-norm ResNet-50 and run one batch of
    # 49 patches (a full 7x7 grid) through it.
    # FIX: the original called resnet50_wide(ln=True), a function that does
    # not exist anywhere in this module (NameError at runtime); the
    # layer-norm variant defined here is resnet50_ln.
    model = resnet50_ln(pretrained=False)
    print(model)
    # summary(model, (3, 64, 64))
    x = torch.rand(49, 3, 64, 64)
    x = model(x).squeeze()
    print(x.shape)
    print(len(x.shape))
| 10,078 | 30.794953 | 90 | py |
cc | cc-master/models.py | import tensorflow as tf
class CNN:
    """TextCNN clickbait scorer over post-text token ids (TensorFlow 1.x graph).

    Multi-width convolution + max-pooling features are concatenated,
    optionally fused with a compressed image-feature vector (when
    ``x3_size`` > 0) and a dense hidden layer, then fed to one of three
    heads selected by ``y_len``:

    * 1 -- sigmoid regression trained against the mean judgement ``input_z``
    * 2 -- binary softmax classification against ``input_y``
    * 4 -- 4-class softmax whose prediction is the expected value of the
      class distribution over the grid {0, 1/3, 2/3, 1}

    ``input_x2``/``input_x2_len`` and ``state_size`` are accepted for feed
    compatibility with the other models in this file but are not used here.
    """
    def __init__(self, x1_maxlen, x2_maxlen, y_len, embedding, filter_sizes, num_filters, hidden_size, state_size, x3_size):
        # --- graph inputs (placeholders) ---
        self.input_x1 = tf.placeholder(tf.int32, [None, x1_maxlen], name="post_text")
        self.input_x1_len = tf.placeholder(tf.int32, [None, ], name="post_text_len")
        self.input_x2 = tf.placeholder(tf.int32, [None, x2_maxlen], name="target_description")
        self.input_x2_len = tf.placeholder(tf.int32, [None, ], name="target_description_len")
        self.input_x3 = tf.placeholder(tf.float32, [None, x3_size], name="image_feature")
        self.input_y = tf.placeholder(tf.float32, [None, y_len], name="truth_class")
        self.input_z = tf.placeholder(tf.float32, [None, 1], name="truth_mean")
        self.dropout_rate_embedding = tf.placeholder(tf.float32, name="dropout_rate_embedding")
        self.dropout_rate_hidden = tf.placeholder(tf.float32, name="dropout_rate_hidden")
        self.dropout_rate_cell = tf.placeholder(tf.float32, name="dropout_rate_cell")
        self.batch_size = tf.placeholder(tf.int32, name="batch_size")
        # --- embedding lookup (initialized from the pretrained matrix) ---
        self.W = tf.get_variable(shape=embedding.shape, initializer=tf.constant_initializer(embedding), name="embedding")
        self.embedded_input_x1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        # NOTE(review): tf.layers.dropout's `rate` is the fraction DROPPED;
        # `1 - dropout_rate_*` only makes sense if the fed value is a keep
        # probability -- confirm against the training feed_dict.
        self.embedded_input_x1 = tf.layers.dropout(self.embedded_input_x1, rate=1-self.dropout_rate_embedding)
        # add a channel axis so 2D convolution can be applied to the text
        self.embedded_input_x1_expanded = tf.expand_dims(self.embedded_input_x1, -1)
        # --- one conv + max-pool branch per filter width ---
        pooled_outputs1 = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("1-conv-maxpool-%s" % filter_size):
                filter_shape = [filter_size, embedding.shape[1], 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="filter_weights")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="filter_biases")
                conv = tf.nn.conv2d(self.embedded_input_x1_expanded, W, strides=[1, 1, 1, 1], padding="VALID", name="conv")
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # max over all valid positions -> one feature per filter
                pooled = tf.nn.max_pool(h, ksize=[1, x1_maxlen-filter_size+1, 1, 1], strides=[1, 1, 1, 1], padding="VALID", name="pool")
                pooled_outputs1.append(pooled)
        num_features = num_filters*len(filter_sizes)
        self.h_pool1 = tf.concat(pooled_outputs1, 3)
        self.h_pool_flat1 = tf.reshape(self.h_pool1, [-1, num_features])
        if x3_size:
            # compress the raw image feature (x3_size -> 1024 -> 256) and fuse
            self.compressed_input_x3 = tf.layers.dense(tf.layers.dense(self.input_x3, 1024, activation=tf.nn.relu), 256, activation=tf.nn.relu)
            self.h_pool_flat1 = tf.concat([self.h_pool_flat1, self.compressed_input_x3], axis=-1)
        if hidden_size:
            self.h_pool_flat1 = tf.layers.dense(self.h_pool_flat1, hidden_size, activation=tf.nn.relu)
        self.h_drop1 = tf.layers.dropout(self.h_pool_flat1, rate=1-self.dropout_rate_hidden)
        self.scores = tf.layers.dense(inputs=self.h_drop1, units=y_len)
        if y_len == 1:
            # regression head: sigmoid score trained with MSE against input_z
            self.predictions = tf.nn.sigmoid(self.scores, name="prediction")
            self.loss = tf.losses.mean_squared_error(self.input_z, self.predictions)
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.cast(tf.round(self.predictions), tf.int32), tf.cast(tf.round(self.input_y), tf.int32))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 2:
            # binary classification head; prediction = P(class 0)
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y))
            self.predictions = tf.slice(tf.nn.softmax(self.scores), [0, 0], [-1, 1], name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(self.scores, 1), tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 4:
            # 4-class head; prediction = E[class] over {0, 1/3, 2/3, 1};
            # the [4,2] constant collapses classes {0,1} vs {2,3} for accuracy
            self.normalised_scores = tf.nn.softmax(self.scores, name="distribution")
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.scores))
            self.predictions = tf.matmul(self.normalised_scores, tf.constant([0, 0.3333333333, 0.6666666666, 1.0], shape=[4, 1]), name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(tf.matmul(self.normalised_scores, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1), tf.argmax(tf.matmul(self.input_y, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
class DAN:
    """Deep Averaging Network over post-text token ids (TensorFlow 1.x graph).

    Token embeddings are mean-pooled over the true sequence length (padding
    positions are masked out of the average), optionally passed through a
    dense hidden layer, then fed to the same three output heads as the other
    models in this file (y_len in {1, 2, 4}).

    ``input_x2``/``input_x2_len``, ``input_x3``, ``filter_sizes``,
    ``num_filters`` and ``state_size`` are accepted for feed compatibility
    but are not used by this model.
    """
    def __init__(self, x1_maxlen, x2_maxlen, y_len, embedding, filter_sizes, num_filters, hidden_size, state_size, x3_size):
        # --- graph inputs (placeholders) ---
        self.input_x1 = tf.placeholder(tf.int32, [None, x1_maxlen], name="post_text")
        self.input_x1_len = tf.placeholder(tf.int32, [None, ], name="post_text_len")
        self.input_x2 = tf.placeholder(tf.int32, [None, x2_maxlen], name="target_description")
        self.input_x2_len = tf.placeholder(tf.int32, [None, ], name="target_description_len")
        self.input_x3 = tf.placeholder(tf.float32, [None, x3_size], name="image_feature")
        self.input_y = tf.placeholder(tf.float32, [None, y_len], name="truth_class")
        self.input_z = tf.placeholder(tf.float32, [None, 1], name="truth_mean")
        self.dropout_rate_embedding = tf.placeholder(tf.float32, name="dropout_rate_embedding")
        self.dropout_rate_hidden = tf.placeholder(tf.float32, name="dropout_rate_hidden")
        self.dropout_rate_cell = tf.placeholder(tf.float32, name="dropout_rate_cell")
        self.batch_size = tf.placeholder(tf.int32, name="batch_size")
        self.W = tf.get_variable(shape=embedding.shape, initializer=tf.constant_initializer(embedding), name="embedding")
        self.embedded_input_x1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        # NOTE(review): tf.layers.dropout's `rate` is the fraction DROPPED;
        # `1 - dropout_rate_*` only makes sense if the fed value is a keep
        # probability -- confirm against the training feed_dict.
        self.embedded_input_x1 = tf.layers.dropout(self.embedded_input_x1, rate=1-self.dropout_rate_embedding)
        # self.avg_input_x1 = tf.reduce_mean(self.embedded_input_x1, axis=1)
        # length-aware mean: zero out padding positions, then divide by the
        # number of real tokens instead of x1_maxlen
        mask = tf.cast(tf.contrib.keras.backend.repeat_elements(tf.expand_dims(tf.sequence_mask(self.input_x1_len, x1_maxlen), axis=-1), embedding.shape[1], axis=2), tf.float32)
        masked_embedded_input_x1 = tf.multiply(self.embedded_input_x1, mask)
        self.avg_input_x1 = tf.reduce_sum(masked_embedded_input_x1, axis=1)/tf.reduce_sum(mask, axis=1)
        if hidden_size:
            self.avg_input_x1 = tf.layers.dense(self.avg_input_x1, hidden_size, activation=tf.nn.relu)
        self.h_drop1 = tf.layers.dropout(self.avg_input_x1, rate=1-self.dropout_rate_hidden)
        self.scores = tf.layers.dense(inputs=self.h_drop1, units=y_len)
        if y_len == 1:
            # regression head: sigmoid score trained with MSE against input_z
            self.predictions = tf.nn.sigmoid(self.scores, name="prediction")
            self.loss = tf.losses.mean_squared_error(self.input_z, self.predictions)
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.cast(tf.round(self.predictions), tf.int32), tf.cast(tf.round(self.input_y), tf.int32))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 2:
            # binary classification head; prediction = P(class 0)
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y))
            self.predictions = tf.slice(tf.nn.softmax(self.scores), [0, 0], [-1, 1], name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(self.scores, 1), tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 4:
            # 4-class head; prediction = E[class] over {0, 1/3, 2/3, 1};
            # the [4,2] constant collapses classes {0,1} vs {2,3} for accuracy
            self.normalised_scores = tf.nn.softmax(self.scores, name="distribution")
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.scores))
            self.predictions = tf.matmul(self.normalised_scores, tf.constant([0, 0.3333333333, 0.6666666666, 1.0], shape=[4, 1]), name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(tf.matmul(self.normalised_scores, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1), tf.argmax(tf.matmul(self.input_y, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
def extract_last(output, lengths):
    """Gather each sequence's output at its last valid time step.

    Args:
        output: [batch, time, features] RNN outputs.
        lengths: [batch] int tensor of true sequence lengths.

    Returns:
        [batch, features] tensor equal to output[b, lengths[b] - 1, :].
    """
    row_ids = tf.range(tf.shape(output)[0])
    gather_idx = tf.stack([row_ids, lengths - 1], axis=-1)
    return tf.gather_nd(output, gather_idx)
class BiRNN:
    """Bidirectional GRU over post-text token ids (TensorFlow 1.x graph).

    The sentence representation is the concatenation of the forward GRU's
    output at each sequence's last valid step (via ``extract_last``) and
    the backward GRU's output at position 0; it then feeds the same three
    output heads as the other models in this file (y_len in {1, 2, 4}).

    ``input_x2``/``input_x2_len`` and ``input_x3`` are accepted for feed
    compatibility but are not used by this model.
    """
    def __init__(self, x1_maxlen, x2_maxlen, y_len, embedding, filter_sizes, num_filters, hidden_size, state_size, x3_size):
        # --- graph inputs (placeholders) ---
        self.input_x1 = tf.placeholder(tf.int32, [None, x1_maxlen], name="post_text")
        self.input_x1_len = tf.placeholder(tf.int32, [None, ], name="post_text_len")
        self.input_x2 = tf.placeholder(tf.int32, [None, x2_maxlen], name="target_description")
        self.input_x2_len = tf.placeholder(tf.int32, [None, ], name="target_description_len")
        self.input_x3 = tf.placeholder(tf.float32, [None, x3_size], name="image_feature")
        self.input_y = tf.placeholder(tf.float32, [None, y_len], name="truth_class")
        self.input_z = tf.placeholder(tf.float32, [None, 1], name="truth_mean")
        self.dropout_rate_embedding = tf.placeholder(tf.float32, name="dropout_rate_embedding")
        self.dropout_rate_hidden = tf.placeholder(tf.float32, name="dropout_rate_hidden")
        self.dropout_rate_cell = tf.placeholder(tf.float32, name="dropout_rate_cell")
        self.batch_size = tf.placeholder(tf.int32, [], name="batch_size")
        self.W = tf.get_variable(shape=embedding.shape, initializer=tf.constant_initializer(embedding), name="embedding")
        self.embedded_input_x1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        # NOTE(review): tf.layers.dropout's `rate` is the fraction DROPPED
        # while DropoutWrapper's output_keep_prob is a KEEP probability; the
        # two `1 - dropout_rate_*` usages below therefore interpret their
        # placeholders oppositely -- confirm against the training feed_dict.
        self.embedded_input_x1 = tf.layers.dropout(self.embedded_input_x1, rate=1-self.dropout_rate_embedding)
        # forward GRU with output dropout
        cell_fw = tf.contrib.rnn.GRUCell(state_size)
        cell_dropout_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1-self.dropout_rate_cell)
        initial_state_fw = cell_fw.zero_state(self.batch_size, tf.float32)
        # backward GRU with output dropout
        cell_bw = tf.contrib.rnn.GRUCell(state_size)
        cell_dropout_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1-self.dropout_rate_cell)
        initial_state_bw = cell_bw.zero_state(self.batch_size, tf.float32)
        outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_dropout_fw, cell_bw=cell_dropout_bw, inputs=self.embedded_input_x1, sequence_length=self.input_x1_len, initial_state_bw=initial_state_bw, initial_state_fw=initial_state_fw)
        bi_outputs = tf.concat(outputs, 2)
        # `mask` and `bi_outputs` are only consumed by the commented-out
        # mean/max pooling variants below; the active path uses last-fw +
        # first-bw outputs instead.
        mask = tf.cast(tf.contrib.keras.backend.repeat_elements(tf.expand_dims(tf.sequence_mask(self.input_x1_len, x1_maxlen), axis=-1), 2*state_size, axis=2), tf.float32)
        self.h_drop = tf.layers.dropout(tf.concat([extract_last(outputs[0], self.input_x1_len), outputs[1][:, 0, :]], -1), rate=1-self.dropout_rate_hidden)
        # self.h_drop = tf.layers.dropout(tf.reduce_sum(bi_outputs, axis=1)/tf.reduce_sum(mask, axis=1), rate=1-self.dropout_rate_hidden)
        #
        # self.h_drop = tf.layers.dropout(tf.reduce_max(bi_outputs, axis=1), rate=1-self.dropout_rate_hidden)
        self.scores = tf.layers.dense(inputs=self.h_drop, units=y_len)
        if y_len == 1:
            # regression head: sigmoid score trained with MSE against input_z
            self.predictions = tf.nn.sigmoid(self.scores, name="prediction")
            self.loss = tf.losses.mean_squared_error(self.input_z, self.predictions)
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.cast(tf.round(self.predictions), tf.int32), tf.cast(tf.round(self.input_y), tf.int32))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 2:
            # binary classification head; prediction = P(class 0)
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y))
            self.predictions = tf.slice(tf.nn.softmax(self.scores), [0, 0], [-1, 1], name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(self.scores, 1), tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 4:
            # 4-class head; prediction = E[class] over {0, 1/3, 2/3, 1};
            # the [4,2] constant collapses classes {0,1} vs {2,3} for accuracy
            self.normalised_scores = tf.nn.softmax(self.scores, name="distribution")
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.scores))
            self.predictions = tf.matmul(self.normalised_scores, tf.constant([0, 0.3333333333, 0.6666666666, 1.0], shape=[4, 1]), name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(tf.matmul(self.normalised_scores, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1), tf.argmax(tf.matmul(self.input_y, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
class SAN:
    """Self-attentive bidirectional GRU (TensorFlow 1.x graph).

    A BiGRU encodes the post text; ``view_size`` attention heads pool the
    outputs into a fixed-size vector (optionally conditioned on a compressed
    image feature when ``x3_size`` > 0). A squared-Frobenius-norm penalty
    ||A^T A - I||^2, weighted by ``beta``, pushes the attention views apart;
    it is forced to 0 when there is only one view. The pooled vector feeds
    the same three output heads as the other models (y_len in {1, 2, 4}).

    NOTE(review): ``alpha`` is accepted but never used anywhere in this
    constructor.
    """
    def __init__(self, x1_maxlen, x2_maxlen, y_len, embedding, filter_sizes, num_filters, hidden_size, state_size, x3_size, attention_size, view_size=1, alpha=0, beta=0):
        if view_size == 1:
            # a single view cannot be redundant, so disable the penalty
            beta = 0
        # --- graph inputs (placeholders) ---
        self.input_x1 = tf.placeholder(tf.int32, [None, x1_maxlen], name="post_text")
        self.input_x1_len = tf.placeholder(tf.int32, [None, ], name="post_text_len")
        self.input_x2 = tf.placeholder(tf.int32, [None, x2_maxlen], name="target_description")
        self.input_x2_len = tf.placeholder(tf.int32, [None, ], name="target_description_len")
        self.input_x3 = tf.placeholder(tf.float32, [None, x3_size], name="image_feature")
        self.input_y = tf.placeholder(tf.float32, [None, y_len], name="truth_class")
        self.input_z = tf.placeholder(tf.float32, [None, 1], name="truth_mean")
        self.dropout_rate_embedding = tf.placeholder(tf.float32, name="dropout_rate_embedding")
        self.dropout_rate_hidden = tf.placeholder(tf.float32, name="dropout_rate_hidden")
        self.dropout_rate_cell = tf.placeholder(tf.float32, name="dropout_rate_cell")
        self.batch_size = tf.placeholder(tf.int32, [], name="batch_size")
        with tf.variable_scope("embedding"):
            self.W = tf.get_variable(shape=embedding.shape, initializer=tf.constant_initializer(embedding), name="embedding")
            self.embedded_input_x1 = tf.nn.embedding_lookup(self.W, self.input_x1)
            # NOTE(review): tf.layers.dropout's `rate` is the fraction
            # DROPPED while DropoutWrapper's output_keep_prob is a KEEP
            # probability; the `1 - dropout_rate_*` usages below interpret
            # their placeholders oppositely -- confirm against the feed_dict.
            self.embedded_input_x1 = tf.layers.dropout(self.embedded_input_x1, rate=1-self.dropout_rate_embedding)
        with tf.variable_scope("biRNN"):
            cell_fw = tf.contrib.rnn.GRUCell(state_size)
            cell_dropout_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1-self.dropout_rate_cell)
            initial_state_fw = cell_fw.zero_state(self.batch_size, tf.float32)
            cell_bw = tf.contrib.rnn.GRUCell(state_size)
            cell_dropout_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1-self.dropout_rate_cell)
            initial_state_bw = cell_bw.zero_state(self.batch_size, tf.float32)
            outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_dropout_fw, cell_bw=cell_dropout_bw, inputs=self.embedded_input_x1, sequence_length=self.input_x1_len, initial_state_bw=initial_state_bw, initial_state_fw=initial_state_fw)
            bi_outputs = tf.concat(outputs, 2)
        with tf.variable_scope("attention"):
            # two-layer attention scorer: tanh(H W_1 [+ image bias]) W_2,
            # softmax over time -> [batch, x1_maxlen, view_size]
            W_1 = tf.get_variable(shape=[2*state_size, attention_size], initializer=tf.contrib.layers.xavier_initializer(), name="W_1")
            W_2 = tf.get_variable(shape=[attention_size, view_size], initializer=tf.contrib.layers.xavier_initializer(), name="W_2")
            reshaped_bi_outputs = tf.reshape(bi_outputs, shape=[-1, 2*state_size])
            if x3_size:
                # self.compressed_input_x3 = tf.contrib.keras.backend.repeat(tf.layers.dense(tf.layers.dense(self.input_x3, 1024, activation=tf.nn.tanh), attention_size, activation=tf.nn.tanh), x1_maxlen)
                # the image feature is projected once and broadcast over time,
                # biasing every attention score
                self.compressed_input_x3 = tf.contrib.keras.backend.repeat(tf.layers.dense(self.input_x3, attention_size, activation=tf.nn.tanh), x1_maxlen)
                self.compressed_input_x3 = tf.reshape(self.compressed_input_x3, shape=[-1, attention_size])
                self.attention = tf.nn.softmax(tf.reshape(tf.matmul(tf.nn.tanh(tf.matmul(reshaped_bi_outputs, W_1)+self.compressed_input_x3), W_2), shape=[self.batch_size, x1_maxlen, view_size]), dim=1)
            else:
                self.attention = tf.nn.softmax(tf.reshape(tf.matmul(tf.nn.tanh(tf.matmul(reshaped_bi_outputs, W_1)), W_2), shape=[self.batch_size, x1_maxlen, view_size]), dim=1)
            # weighted sum of BiGRU outputs per view, flattened to
            # [batch, view_size * 2 * state_size]
            attention_output = tf.reshape(tf.matmul(tf.transpose(bi_outputs, perm=[0, 2, 1]), self.attention), shape=[self.batch_size, view_size*2*state_size])
        with tf.variable_scope("penalty"):
            # redundancy penalty: squared Frobenius norm of (A^T A - I)
            attention_t = tf.transpose(self.attention, perm=[0, 2, 1])
            attention_t_attention = tf.matmul(attention_t, self.attention)
            identity = tf.reshape(tf.tile(tf.diag(tf.ones([view_size])), [self.batch_size, 1]), shape=[self.batch_size, view_size, view_size])
            self.penalised_term = tf.square(tf.norm(attention_t_attention-identity, ord="euclidean", axis=[1, 2]))
        self.h_drop = tf.layers.dropout(attention_output, rate=1-self.dropout_rate_hidden)
        self.scores = tf.layers.dense(inputs=self.h_drop, units=y_len)
        if y_len == 1:
            # regression head: sigmoid score, MSE + beta * attention penalty
            self.predictions = tf.nn.sigmoid(self.scores, name="prediction")
            self.loss = tf.reduce_mean(tf.square(tf.subtract(self.input_z, self.predictions))+beta*self.penalised_term)
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.cast(tf.round(self.predictions), tf.int32), tf.cast(tf.round(self.input_y), tf.int32))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 2:
            # binary classification head; prediction = P(class 0)
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)+beta*self.penalised_term)
            self.predictions = tf.slice(tf.nn.softmax(self.scores), [0, 0], [-1, 1], name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(self.scores, 1), tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 4:
            # 4-class head; prediction = E[class] over {0, 1/3, 2/3, 1};
            # the [4,2] constant collapses classes {0,1} vs {2,3} for accuracy
            self.normalised_scores = tf.nn.softmax(self.scores, name="distribution")
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.scores)+beta*self.penalised_term)
            self.predictions = tf.matmul(self.normalised_scores, tf.constant([0, 0.3333333333, 0.6666666666, 1.0], shape=[4, 1]), name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(tf.matmul(self.normalised_scores, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1), tf.argmax(tf.matmul(self.input_y, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
| 20,233 | 80.58871 | 272 | py |
cl4ctr | cl4ctr-main/main_ml_base.py | import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from model.FM import FactorizationMachineModel, FM_CL4CTR
from model.DeepFM import DeepFM, DeepFM_CL4CTR
import numpy as np
import random
import sys
import tqdm
import time
import argparse
import torch
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
from sklearn.metrics import log_loss, roc_auc_score
sys.path.append("../..")
from dataloader.frappe.dataloader import getdataloader_ml, getdataloader_frappe
from utils.utils_de import *
from utils.earlystoping import EarlyStopping
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_model(
        name,
        field_dims,
        batch_size=1024,
        pratio=0.5,
        embed_dim=20,
        mlp_layers=(400, 400, 400)):
    """Factory for CL4CTR models.

    :param name: model key, either ``"fm_cl4ctr"`` or ``"dfm_cl4ctr"``
    :param field_dims: number of distinct feature ids per field
    :param batch_size: training batch size (forwarded to the model)
    :param pratio: dropout ratio used by the contrastive module
    :param embed_dim: feature embedding size
    :param mlp_layers: hidden layer sizes of the DeepFM tower
    :raises ValueError: for an unrecognised model name
    """
    if name == "fm_cl4ctr":
        return FM_CL4CTR(field_dims, embed_dim, batch_size=batch_size, pratio=pratio, fi_type="att")
    if name == "dfm_cl4ctr":
        return DeepFM_CL4CTR(field_dims, embed_dim, mlp_layers=mlp_layers, batch_size=batch_size,
                             pratio=pratio, fi_type="att")
    raise ValueError('unknown model name: ' + name)
def count_params(model):
    """Return the total number of scalar parameters in *model*."""
    return sum(p.numel() for p in model.parameters())
def train(model,
          optimizer,
          data_loader,
          criterion,
          alpha=1.0,
          beta=1e-2):
    """Run one training epoch and return the mean per-batch loss.

    The total loss per batch is the BCE criterion on the sigmoid scores plus
    the CL4CTR regularisation term produced by ``model.compute_cl_loss``.
    """
    model.train()
    predictions, labels = [], []
    running_loss = 0.0
    batch_count = 0
    for batch_count, (user_item, label) in enumerate(tqdm.tqdm(data_loader), start=1):
        user_item = user_item.long().cuda()
        label = label.float().cuda()
        model.zero_grad()
        pred_y = torch.sigmoid(model(user_item).squeeze(1))
        base_loss = criterion(pred_y, label)
        # Simplified CL4CTR regularisation (contrastive + alignment/uniformity);
        # the full variant `compute_cl_loss_all` is slower and not used here.
        loss = base_loss + model.compute_cl_loss(user_item, alpha=alpha, beta=beta)
        loss.backward()
        optimizer.step()
        predictions.extend(pred_y.tolist())
        labels.extend(label.tolist())
        running_loss += loss.item()
    return running_loss / batch_count
def test_roc(model, data_loader):
    """Evaluate *model* on *data_loader*; return ``(auc, logloss)``."""
    model.eval()
    all_targets, all_scores = [], []
    with torch.no_grad():
        for fields, target in tqdm.tqdm(
                data_loader, smoothing=0, mininterval=1.0):
            fields = fields.long().cuda()
            target = target.float().cuda()
            scores = torch.sigmoid(model(fields).squeeze(1))
            all_targets.extend(target.tolist())
            all_scores.extend(scores.tolist())
    return roc_auc_score(all_targets, all_scores), log_loss(all_targets, all_scores)
def main(dataset_name, model_name, epoch, embed_dim, learning_rate,
         batch_size, weight_decay, save_dir, path,
         pratio, alpha, beta):
    """Train and evaluate one CL4CTR model on the ml-tag data loaders.

    Runs ``epoch`` training epochs, appends metrics to a log file under
    ``save_dir/dataset_name/model_name/<embed_dim>``, saves the model whenever
    validation loss improves, schedules the LR on validation AUC, and applies
    early stopping on validation AUC.
    """
    # NOTE(review): the `path` argument is immediately overridden here, so the
    # CLI --path option has no effect — confirm this is intended.
    path = "./data/"
    field_dims, trainLoader, validLoader, testLoader = \
        getdataloader_ml(path=path, batch_size=batch_size)
    print(field_dims)
    # Timestamp used to disambiguate log/checkpoint file names across runs.
    time_fix = time.strftime("%m%d%H%M%S", time.localtime())
    for K in [embed_dim]:
        paths = os.path.join(save_dir, dataset_name, model_name, str(K))
        if not os.path.exists(paths):
            os.makedirs(paths)
        with open(paths + f"/{model_name}_{K}_{batch_size}_{alpha}_{beta}_{pratio}_{time_fix}.p",
                  "a+") as fout:
            fout.write("Batch_size:{}\tembed_dim:{}\tlearning_rate:{}\tStartTime:{}\tweight_decay:{}\tpratio:{}\t"
                       "\talpha:{}\tbeta:{}\t\n"
                       .format(batch_size, K, learning_rate, time.strftime("%d%H%M%S", time.localtime()), weight_decay,
                               pratio, alpha, beta))
            print("Start train -- K : {}".format(K))
            criterion = torch.nn.BCELoss()
            model = get_model(
                name=model_name,
                field_dims=field_dims,
                batch_size=batch_size,
                embed_dim=K,
                pratio=pratio).cuda()
            params = count_params(model)
            fout.write("count_params:{}\n".format(params))
            print(params)
            optimizer = torch.optim.Adam(
                params=model.parameters(),
                lr=learning_rate,
                weight_decay=weight_decay)
            # Initial EarlyStopping (monitors validation AUC with patience 8).
            early_stopping = EarlyStopping(patience=8, verbose=True, prefix=path)
            # Reduce the learning rate when validation AUC plateaus.
            scheduler = ReduceLROnPlateau(optimizer, 'max', verbose=True, patience=4)
            val_auc_best = 0
            auc_index_record = ""
            val_loss_best = 1000
            loss_index_record = ""
            for epoch_i in range(epoch):
                print(__file__, model_name, K, epoch_i, "/", epoch)
                print("Batch_size:{}\tembed_dim:{}\tlearning_rate:{}\tStartTime:{}\tweight_decay:{}\tpratio:{}\t"
                      "\talpha:{}\tbeta:{}\t"
                      .format(batch_size, K, learning_rate, time.strftime("%d%H%M%S", time.localtime()), weight_decay,
                              pratio, alpha, beta))
                start = time.time()
                train_loss = train(model, optimizer, trainLoader, criterion, alpha=alpha, beta=beta)
                val_auc, val_loss = test_roc(model, validLoader)
                test_auc, test_loss = test_roc(model, testLoader)
                scheduler.step(val_auc)
                end = time.time()
                # Persist the full model whenever validation loss improves
                # (checked again below to update the tracked best value).
                if val_loss < val_loss_best:
                    # torch.save({"state_dict": model.state_dict(), "best_auc": val_auc_best},
                    #            paths + f"/{model_name}_final_{K}_{time_fix}.pt")
                    torch.save(model, paths + f"/{model_name}_best_auc_{K}_{pratio}_{time_fix}.pkl")
                # Record test metrics at the best-validation-AUC epoch ...
                if val_auc > val_auc_best:
                    val_auc_best = val_auc
                    auc_index_record = "epoch_i:{}\t{:.6f}\t{:.6f}".format(epoch_i, test_auc, test_loss)
                # ... and at the best-validation-loss epoch.
                if val_loss < val_loss_best:
                    val_loss_best = val_loss
                    loss_index_record = "epoch_i:{}\t{:.6f}\t{:.6f}".format(epoch_i, test_auc, test_loss)
                print(
                    "Train K:{}\tEpoch:{}\ttrain_loss:{:.6f}\tval_loss:{:.6f}\tval_auc:{:.6f}\ttime:{:.6f}\ttest_loss:{:.6f}\ttest_auc:{:.6f}\n"
                    .format(K, epoch_i, train_loss, val_loss, val_auc, end - start, test_loss, test_auc))
                fout.write(
                    "Train K:{}\tEpoch:{}\ttrain_loss:{:.6f}\tval_loss:{:.6f}\tval_auc:{:.6f}\ttime:{:.6f}\ttest_loss:{:.6f}\ttest_auc:{:.6f}\n"
                    .format(K, epoch_i, train_loss, val_loss, val_auc, end - start, test_loss, test_auc))
                early_stopping(val_auc)
                if early_stopping.early_stop:
                    print("Early stopping")
                    break
            print("Test:{}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\n"
                  .format(K, val_auc, val_auc_best, val_loss, val_loss_best, test_loss, test_auc))
            fout.write("Test:{}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\n"
                       .format(K, val_auc, val_auc_best, val_loss, val_loss_best, test_loss, test_auc))
            fout.write("auc_best:\t{}\nloss_best:\t{}".format(auc_index_record, loss_index_record))
def setup_seed(seed):
    """Seed every RNG used in training (python, numpy, torch) reproducibly."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force deterministic cuDNN kernels for reproducibility.
    torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
    # CUDA_VISIBLE_DEVICES=1 python main_ml_base.py --choice 0
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_name', default='ml_tag', help="")
    parser.add_argument('--save_dir', default='chkpt_ml_tag', help="")
    parser.add_argument('--path', default="../data/", help="")
    parser.add_argument('--model_name', default='fm', help="")
    parser.add_argument('--epoch', type=int, default=5, help="")
    parser.add_argument('--learning_rate', type=float, default=0.01, help="learning rate")
    parser.add_argument('--batch_size', type=int, default=1024, help="batch_size")
    parser.add_argument('--weight_decay', type=float, default=1e-5, help="")
    parser.add_argument('--device', default='cuda:0', help="cuda:0")
    parser.add_argument('--choice', default=0, type=int, help="choice")
    parser.add_argument('--hint', default="CL4CTR", help="")
    parser.add_argument('--embed_dim', default=5, type=int, help="the size of feature dimension")
    parser.add_argument('--pratio', default=0.5, type=float, help="pratio")
    parser.add_argument('--alpha', default=1e-0, type=float, help="alpha")
    parser.add_argument('--beta', default=1e-2, type=float, help="beta")
    args = parser.parse_args()
    # Select the model family for this run.
    if args.choice == 0:
        model_names = ["fm_cl4ctr"] * 1
    elif args.choice == 1:
        model_names = ["dfm_cl4ctr"] * 1
    else:
        # BUG FIX: `model_names` used to be undefined for any other value of
        # --choice, crashing below with a NameError; fail fast instead.
        raise ValueError("unsupported --choice {}; expected 0 or 1".format(args.choice))
    print(model_names)
    for name in model_names:
        # Draw a fresh random seed per run and fix all RNGs with it.
        seed = np.random.randint(0, 100000)
        setup_seed(seed)
        main(dataset_name=args.dataset_name,
             model_name=name,
             epoch=args.epoch,
             learning_rate=args.learning_rate,
             batch_size=args.batch_size,
             weight_decay=args.weight_decay,
             save_dir=args.save_dir,
             path=args.path,
             pratio=args.pratio,
             embed_dim=args.embed_dim,
             alpha=args.alpha,
             beta=args.beta
             )
| 9,982 | 37.693798 | 145 | py |
cl4ctr | cl4ctr-main/utils/earlystoping.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
import torch
class EarlyStopping:
    """Early stops the training if validation AUC doesn't improve after a given patience."""

    def __init__(self, patience=7, verbose=False, delta=0, prefix=None):
        """
        Args:
            patience (int): How long to wait after last time the validation metric improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                           Default: 0
            prefix (str): Directory prefix used when saving checkpoints.
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # BUG FIX: the np.Inf alias was removed in NumPy 2.0; np.inf is the
        # canonical spelling.
        self.val_loss_min = np.inf
        self.delta = delta
        self.prefix_path = prefix

    def __call__(self, val_auc):
        # Higher AUC is better: reset the counter on improvement, otherwise
        # count a strike and stop once `patience` strikes accumulate.
        score = val_auc
        if self.best_score is None:
            self.best_score = score
        elif score < self.best_score + self.delta:
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            print("Now auc:{}\tBest_auc:{}".format(val_auc, self.best_score))
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        '''Saves model when validation loss decrease.'''
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        # Persist the parameters of the best model seen so far.
        torch.save(model.state_dict(), self.prefix_path + '/es_checkpoint.pt')
        self.val_loss_min = val_loss
class EarlyStoppingLoss:
    """Early stops the training if validation loss doesn't improve after a given patience."""

    def __init__(self, patience=7, verbose=False, delta=0, prefix=None):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                           Default: 0
            prefix (str): Directory prefix used when saving checkpoints.
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # BUG FIX: the np.Inf alias was removed in NumPy 2.0; np.inf is the
        # canonical spelling.
        self.val_loss_min = np.inf
        self.delta = delta
        self.prefix_path = prefix

    def __call__(self, val_loss):
        # Lower loss is better: reset the counter on improvement, otherwise
        # count a strike and stop once `patience` strikes accumulate.
        score = val_loss
        if self.best_score is None:
            self.best_score = score
        elif score > self.best_score + self.delta:
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            print("Now loss:{}\tBest_loss:{}".format(val_loss, self.best_score))
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        '''Saves model when validation loss decrease.'''
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        # Persist the parameters of the best model seen so far.
        torch.save(model.state_dict(), self.prefix_path + '/es_checkpoint.pt')
        self.val_loss_min = val_loss
cl4ctr | cl4ctr-main/model/data_aug.py | import torch
def maskrandom(x_emb, mask_ratio):
    """Create two independently element-masked views of an embedding batch.

    Each element of the (B, F, E) tensor is kept with probability
    ``mask_ratio`` (Bernoulli sampling), independently for the two views.

    :param x_emb: embedding tensor of shape (B, F, E)
    :param mask_ratio: probability of keeping an element
    :return: two masked copies of ``x_emb``
    """
    B, F, E = x_emb.size()
    # GENERALIZATION: sample masks on the input's own device instead of the
    # previous hard-coded .cuda(), so CPU tensors also work; behaviour on
    # CUDA tensors is unchanged.
    mask1 = torch.bernoulli(torch.full((B, F, E), mask_ratio, device=x_emb.device))
    mask2 = torch.bernoulli(torch.full((B, F, E), mask_ratio, device=x_emb.device))
    return x_emb * mask1, x_emb * mask2
def maskdimension(x_emb, mask_ratio):
    """Create two views with whole embedding dimensions masked out.

    A (B, 1, E) Bernoulli mask is broadcast over all fields, so a dropped
    dimension is dropped for every field of a sample.

    :param x_emb: embedding tensor of shape (B, F, E)
    :param mask_ratio: probability of keeping a dimension
    :return: two masked copies of ``x_emb``
    """
    B, F, E = x_emb.size()
    # GENERALIZATION: sample masks on the input's own device instead of the
    # previous hard-coded .cuda(); CUDA behaviour is unchanged.
    mask1 = torch.bernoulli(torch.full((B, 1, E), mask_ratio, device=x_emb.device))
    mask2 = torch.bernoulli(torch.full((B, 1, E), mask_ratio, device=x_emb.device))
    return x_emb * mask1, x_emb * mask2
def maskfeature(x_emb, mask_ratio):
    """Create two views with whole feature (field) embeddings masked out.

    A (B, F, 1) Bernoulli mask is broadcast over the embedding axis, so a
    dropped field loses its entire embedding vector.

    :param x_emb: embedding tensor of shape (B, F, E)
    :param mask_ratio: probability of keeping a field
    :return: two masked copies of ``x_emb``
    """
    B, F, E = x_emb.size()
    # GENERALIZATION: sample masks on the input's own device instead of the
    # previous hard-coded .cuda(); CUDA behaviour is unchanged.
    mask1 = torch.bernoulli(torch.full((B, F, 1), mask_ratio, device=x_emb.device))
    mask2 = torch.bernoulli(torch.full((B, F, 1), mask_ratio, device=x_emb.device))
    return x_emb * mask1, x_emb * mask2
| 863 | 28.793103 | 68 | py |
cl4ctr | cl4ctr-main/model/BasiclLayer.py | import torch.nn as nn
import numpy as np
from .data_aug import *
class BasicCTR(nn.Module):
    """Base class for CTR models: owns the shared feature-embedding layer."""

    def __init__(self, field_dims, embed_dim):
        super(BasicCTR, self).__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)

    def forward(self, x):
        # BUG FIX: `raise NotImplemented` raises a TypeError at call time,
        # because NotImplemented is a constant, not an exception class.
        raise NotImplementedError
class BasicCL4CTR(nn.Module):
    """
    The core implement of CL4CTR, in which three SSL losses(L_cl, L_ali and L_uni) are computed to regularize
    feature representation.
    """

    def __init__(self, field_dims, embed_dim, batch_size=1024, pratio=0.5, fi_type="att"):
        super(BasicCL4CTR, self).__init__()
        # 1. Embedding layer shared with the downstream CTR model.
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.field_dims = field_dims
        self.num_field = len(field_dims)
        self.input_dim = self.num_field * embed_dim
        self.batch_size = batch_size
        # Pre-computed intra-batch index pairs (i, j), i < j, used by the
        # batch-wise feature-alignment loss.
        self.row, self.col = list(), list()
        for i in range(batch_size - 1):
            for j in range(i + 1, batch_size):
                self.row.append(i), self.col.append(j)
        # 2.1 Random mask: two independent dropouts produce the two views.
        self.pratio = pratio
        self.dp1 = nn.Dropout(p=pratio)
        self.dp2 = nn.Dropout(p=pratio)
        # 2.2 FI_encoder. In most cases, we utilize three layer transformer layers.
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=embed_dim, nhead=1, dim_feedforward=128,
                                                        dropout=0.2)
        self.fi_cl = nn.TransformerEncoder(self.encoder_layer, num_layers=3)
        # 2.3 Projection heads mapping the flattened encoder output to embed_dim.
        self.projector1 = nn.Linear(self.input_dim, embed_dim)
        self.projector2 = nn.Linear(self.input_dim, embed_dim)

    def forward(self, x):
        # BUG FIX: `raise NotImplemented` raises a TypeError at call time,
        # because NotImplemented is a constant, not an exception class.
        raise NotImplementedError

    def compute_cl_loss(self, x, alpha=1.0, beta=0.01):
        """
        :param x: embedding
        :param alpha:
        :param beta: beta = gamma
        :return: L_cl * alpha + (L_ali+L_uni) * beta

        # This is a simplified computation based only on the embedding of each batch,
        # which can accelerate the training process.
        """
        x_emb = self.embedding(x)
        # 1. Compute feature alignment loss (L_ali) and feature uniformity loss (L_uni).
        cl_align_loss = self.compute_alignment_loss(x_emb)
        cl_uniform_loss = self.compute_uniformity_loss(x_emb)
        if alpha == 0.0:
            return (cl_align_loss + cl_uniform_loss) * beta
        # 2. Compute contrastive loss.
        x_emb1, x_emb2 = self.dp1(x_emb), self.dp2(x_emb)
        x_h1 = self.fi_cl(x_emb1.transpose(0, 1)).view(-1, self.input_dim)  # B,E
        x_h2 = self.fi_cl(x_emb2.transpose(0, 1)).view(-1, self.input_dim)  # B,E
        x_h1 = self.projector1(x_h1)
        x_h2 = self.projector2(x_h2)
        cl_loss = torch.norm(x_h1.sub(x_h2), dim=1).pow_(2).mean()
        # 3. Combine L_cl and (L_ali + L_uni) with two loss weights (alpha and beta)
        loss = cl_loss * alpha + (cl_align_loss + cl_uniform_loss) * beta
        return loss

    def compute_cl_loss_all(self, x, alpha=1.0, beta=0.01):
        """
        :param x: embedding
        :param alpha:
        :param beta: beta
        :return: L_cl * alpha + (L_ali+L_uni) * beta

        This is the full version of Cl4CTR, which computes L_ali and L_uni with full feature representations.
        """
        x_emb = self.embedding(x)
        # 1. Compute feature alignment loss (L_ali) and feature uniformity loss (L_uni).
        cl_align_loss = self.compute_all_alignment_loss()
        cl_uniform_loss = self.compute_all_uniformity_loss()
        if alpha == 0.0:
            return (cl_align_loss + cl_uniform_loss) * beta
        # 2. Compute contrastive loss (L_cl).
        x_emb1, x_emb2 = self.dp1(x_emb), self.dp2(x_emb)
        x_h1 = self.fi_cl(x_emb1.transpose(0, 1)).view(-1, self.input_dim)  # B,E
        x_h2 = self.fi_cl(x_emb2.transpose(0, 1)).view(-1, self.input_dim)  # B,E
        x_h1 = self.projector1(x_h1)
        x_h2 = self.projector2(x_h2)
        cl_loss = torch.norm(x_h1.sub(x_h2), dim=1).pow_(2).mean()
        # 3. Combine L_cl and (L_ali + L_uni) with two loss weights (alpha and beta)
        loss = cl_loss * alpha + (cl_align_loss + cl_uniform_loss) * beta
        return loss

    def compute_alignment_loss(self, x_emb):
        """Mean squared distance between all intra-batch embedding pairs."""
        alignment_loss = torch.norm(x_emb[self.row].sub(x_emb[self.col]), dim=2).pow(2).mean()
        return alignment_loss

    def compute_uniformity_loss(self, x_emb):
        """Mean pairwise cosine similarity of field embeddings within each sample."""
        frac = torch.matmul(x_emb, x_emb.transpose(2, 1))  # B,F,F
        denom = torch.matmul(torch.norm(x_emb, dim=2).unsqueeze(2), torch.norm(x_emb, dim=2).unsqueeze(1))  # 64,30,30
        res = torch.div(frac, denom + 1e-4)
        uniformity_loss = res.mean()
        return uniformity_loss

    def compute_all_uniformity_loss(self):
        """
        Calculate field uniformity loss based on all feature representation.
        """
        embedds = self.embedding.embedding.weight
        field_dims = self.field_dims
        field_dims_cum = np.array((0, *np.cumsum(field_dims)))
        field_len = embedds.size()[0]
        field_index = np.array(range(field_len))
        uniformity_loss = 0.0
        pairs = 0
        for i, (start, end) in enumerate(zip(field_dims_cum[:-1], field_dims_cum[1:])):
            # Features of field i live in the half-open id interval [start, end).
            index_f = np.logical_and(field_index >= start, field_index < end)
            embed_f = embedds[index_f, :]
            embed_not_f = embedds[~index_f, :]
            frac = torch.matmul(embed_f, embed_not_f.transpose(1, 0))  # f1,f2
            denom = torch.matmul(torch.norm(embed_f, dim=1).unsqueeze(1),
                                 torch.norm(embed_not_f, dim=1).unsqueeze(0))  # f1,f2
            res = torch.div(frac, denom + 1e-4)
            uniformity_loss += res.sum()
            pairs += (field_len - field_dims[i]) * field_dims[i]
        uniformity_loss /= pairs
        return uniformity_loss

    def compute_all_alignment_loss(self):
        """
        Calculate feature alignment loss based on all feature representation.
        """
        embedds = self.embedding.embedding.weight
        field_dims = self.field_dims
        field_dims_cum = np.array((0, *np.cumsum(field_dims)))
        alignment_loss = 0.0
        pairs = 0
        for i, (start, end) in enumerate(zip(field_dims_cum[:-1], field_dims_cum[1:])):
            embed_f = embedds[start:end, :]
            loss_f = 0.0
            for j in range(field_dims[i]):
                loss_f += torch.norm(embed_f[j, :].sub(embed_f), dim=1).pow(2).sum()
            pairs += field_dims[i] * field_dims[i]
            alignment_loss += loss_f
        alignment_loss /= pairs
        return alignment_loss
class FeaturesLinear(torch.nn.Module):
    """
    Linear regression layer for CTR prediction.

    Each feature id gets a scalar weight; the output is the sum of looked-up
    weights over all fields plus a global bias.
    """

    def __init__(self, field_dims, output_dim=1):
        super().__init__()
        self.fc = torch.nn.Embedding(sum(field_dims), output_dim)
        self.bias = torch.nn.Parameter(torch.zeros((output_dim,)))
        # Offsets map per-field ids into the concatenated weight table.
        # BUG FIX: np.long was removed in NumPy 1.24+; np.int64 is the
        # equivalent dtype.
        self.offsets = np.array(
            (0, *np.cumsum(field_dims)[:-1]), dtype=np.int64)

    def forward(self, x):
        """
        :param x: B,F
        :return: B,1
        """
        x = x + x.new_tensor(self.offsets).unsqueeze(0)
        return torch.sum(self.fc(x), dim=1) + self.bias
class FactorizationMachine(torch.nn.Module):
    """Second-order factorization-machine interaction term."""

    def __init__(self, reduce_sum=True):
        super().__init__()
        self.reduce_sum = reduce_sum

    def forward(self, x):
        """
        :param x: B,F,E
        """
        # 0.5 * ((sum_i v_i)^2 - sum_i v_i^2), summed over the field axis.
        sum_then_square = x.sum(dim=1).pow(2)      # B,embed_dim
        square_then_sum = x.pow(2).sum(dim=1)      # B,embed_dim
        interaction = sum_then_square - square_then_sum
        if self.reduce_sum:
            interaction = interaction.sum(dim=1, keepdim=True)
        return 0.5 * interaction
class FeaturesEmbedding(torch.nn.Module):
    """Shared embedding table over the concatenation of all feature fields."""

    def __init__(self, field_dims, embed_dim):
        """
        :param field_dims: list
        :param embed_dim
        """
        super().__init__()
        self.embedding = torch.nn.Embedding(sum(field_dims), embed_dim)
        # Offsets map per-field ids into the concatenated embedding table.
        # BUG FIX: np.long was removed in NumPy 1.24+; np.int64 is the
        # equivalent dtype.
        self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]), dtype=np.int64)
        self._init_weight_()

    def _init_weight_(self):
        nn.init.normal_(self.embedding.weight, std=0.01)
        # nn.init.xavier_normal_nn.init.xavier_normal_(self.embedding.weight)

    def forward(self, x):
        """
        :param x: B,F
        :return: B,F,E
        """
        x = x + x.new_tensor(self.offsets).unsqueeze(0)
        return self.embedding(x)
class MultiLayerPerceptron(torch.nn.Module):
    """Stack of Linear -> BatchNorm -> ReLU -> Dropout blocks, optionally
    topped with a scalar output layer."""

    def __init__(self, input_dim, embed_dims, dropout=0.5, output_layer=False):
        super().__init__()
        blocks = []
        in_features = input_dim
        for out_features in embed_dims:
            blocks.extend([
                torch.nn.Linear(in_features, out_features),
                torch.nn.BatchNorm1d(out_features),
                torch.nn.ReLU(),
                torch.nn.Dropout(p=dropout),
            ])
            in_features = out_features
        if output_layer:
            blocks.append(torch.nn.Linear(in_features, 1))
        self.mlp = torch.nn.Sequential(*blocks)
        self._init_weight_()

    def _init_weight_(self):
        # Xavier initialisation for all linear layers.
        for layer in self.mlp:
            if isinstance(layer, nn.Linear):
                nn.init.xavier_uniform_(layer.weight)

    def forward(self, x):
        return self.mlp(x)
| 9,456 | 35.513514 | 118 | py |
cl4ctr | cl4ctr-main/dataloader/frappe/dataloader.py | import numpy as np
import pandas as pd
import torch
import os
import tqdm
import pickle
class LoadData():
    """Load the libfm-format train/validation/test splits of a dataset and
    re-index every feature field to contiguous ids starting at 0.

    After construction: ``data_train``/``data_valid``/``data_test`` are
    DataFrames with the label in column 0 and re-indexed feature ids in the
    remaining columns; ``field_dims`` holds the vocabulary size per field and
    ``features_M`` the raw-id -> new-id mapping per field.
    """

    def __init__(self, path="./data/", dataset="frappe"):
        self.dataset = dataset
        self.path = path + dataset + "/"
        self.trainfile = self.path + dataset + ".train.libfm"
        self.testfile = self.path + dataset + ".test.libfm"
        self.validationfile = self.path + dataset + ".validation.libfm"
        self.features_M = {}
        self.construct_df()
        # self.Train_data, self.Validation_data, self.Test_data = self.construct_data( loss_type )

    def construct_df(self):
        """Read the three splits and normalise labels and feature ids."""
        self.data_train = pd.read_table(self.trainfile, sep=" ", header=None, engine='python')
        self.data_test = pd.read_table(self.testfile, sep=" ", header=None, engine="python")
        self.data_valid = pd.read_table(self.validationfile, sep=" ", header=None, engine="python")
        # libfm cells look like "id:value"; keep only the feature id part.
        for i in self.data_test.columns[1:]:
            self.data_test[i] = self.data_test[i].apply(lambda x: int(x.split(":")[0]))
            self.data_train[i] = self.data_train[i].apply(lambda x: int(x.split(":")[0]))
            self.data_valid[i] = self.data_valid[i].apply(lambda x: int(x.split(":")[0]))
        self.all_data = pd.concat([self.data_train, self.data_test, self.data_valid])
        self.field_dims = []
        # Re-index each field over the union of all splits so ids are dense.
        for i in self.all_data.columns[1:]:
            maps = {val: k for k, val in enumerate(set(self.all_data[i]))}
            self.data_test[i] = self.data_test[i].map(maps)
            self.data_train[i] = self.data_train[i].map(maps)
            self.data_valid[i] = self.data_valid[i].map(maps)
            self.features_M[i] = maps
            self.field_dims.append(len(set(self.all_data[i])))
        # Map -1 labels to 0 (binary targets).
        self.data_test[0] = self.data_test[0].apply(lambda x: max(x, 0))
        self.data_train[0] = self.data_train[0].apply(lambda x: max(x, 0))
        self.data_valid[0] = self.data_valid[0].apply(lambda x: max(x, 0))
class RecData():
    """Dataset wrapper: column 0 of the DataFrame is the label, the remaining
    columns are the feature ids of one sample."""

    def __init__(self, all_data):
        self.data_df = all_data

    def __len__(self):
        return len(self.data_df)

    def __getitem__(self, idx):
        row = self.data_df.iloc[idx].values
        # (features, label)
        return row[1:], row[0]
def getdataloader_frappe(path="../data/", dataset="frappe", batch_size=256):
    """Build train/valid/test DataLoaders for the frappe dataset.

    :return: ``(field_dims, trainLoader, validLoader, testLoader)``
    """
    print("Load frappe dataset.")
    data = LoadData(path=path, dataset=dataset)
    train_set = RecData(data.data_train)
    valid_set = RecData(data.data_valid)
    test_set = RecData(data.data_test)
    print("datatrain", len(train_set))
    print("datavalid", len(valid_set))
    print("datatest", len(test_set))
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last=True,
                                               num_workers=8, pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False, drop_last=True,
                                               num_workers=4, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4,
                                              pin_memory=True)
    return data.field_dims, train_loader, valid_loader, test_loader
def getdataloader_ml(path="../.././data/", dataset="ml-tag", batch_size=256):
    """Build train/valid/test DataLoaders for ml-tag, caching the
    preprocessed splits in a pickle next to the raw data.

    :return: ``(field_dims, trainLoader, validLoader, testLoader)``
    """
    cache_file = path + 'preprocess-ml.p'
    # First run: preprocess the raw libfm files and persist the result.
    if not os.path.exists(cache_file):
        raw = LoadData(path=path, dataset=dataset)
        pickle.dump((raw.data_test, raw.data_train, raw.data_valid, raw.field_dims), open(cache_file, 'wb'))
        print("success")
    print("start load ml_tag data")
    data_test, data_train, data_valid, field_dims = pickle.load(open(cache_file, mode='rb'))
    train_set = RecData(data_train)
    valid_set = RecData(data_valid)
    test_set = RecData(data_test)
    print("ml-datatrain", len(train_set))
    print("ml-datavalid", len(valid_set))
    print("ml-datatest", len(test_set))
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last=True,
                                               num_workers=8, pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False, drop_last=True,
                                               num_workers=4, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4,
                                              pin_memory=True)
    return field_dims, train_loader, valid_loader, test_loader
if __name__ == '__main__':
    # Smoke test: build the ml-tag loaders and iterate one full epoch of batches.
    field_dims,trainLoader,validLoader,testLoader = getdataloader_ml(batch_size=256)
    for _ in tqdm.tqdm(trainLoader):
        pass
    # Peek at the first batch of features and the per-field vocabulary sizes.
    it = iter(trainLoader)
    print(next(it)[0])
    print(field_dims)
| 4,921 | 44.155963 | 113 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/modules/losses.py | # coding=utf-8
"""Loss functions for entity alignment and link prediction."""
import enum
import logging
from typing import Any, Callable, Mapping, Optional
import torch
from torch import nn
from torch.nn import functional
from .similarity import Similarity
from ..data import MatchSideEnum, SIDES
from ..utils.common import get_subclass_by_name
from ..utils.types import IDAlignment, NodeIDs
#: Module-level logger.
logger = logging.getLogger(name=__name__)

# NOTE(review): several names listed below (e.g. ContrastiveLoss,
# FullMatchingLoss, OrderPreservationLoss, SampledLinkPredictionLoss) do not
# appear to be defined in this module — a wildcard import would fail; verify.
__all__ = [
    'BaseLoss',
    'ContrastiveLoss',
    'FullMatchingLoss',
    'MarginLoss',
    'MatchingLoss',
    'OrderPreservationLoss',
    'SampledLinkPredictionLoss',
    'SampledMatchingLoss',
    'get_matching_loss',
    'get_pairwise_loss',
]
# pylint: disable=abstract-method
class BaseLoss(nn.Module):
    """Base class for losses evaluated on a full similarity matrix.

    Subclasses implement :meth:`forward`, which scores each row's true choice
    against all other candidates in that row.
    """

    # pylint: disable=arguments-differ
    def forward(self, similarities: torch.FloatTensor, true_indices: torch.LongTensor) -> torch.FloatTensor:
        r"""Compute the loss for a batch of similarity rows.

        Conceptually evaluates

        .. math::
            \frac{1}{n(m-1))} \sum_{b=1}^{n} \sum_{j \neq true[b]} pairloss(sim[b, true[b]], sim[b, j])

        :param similarities: shape: (n, m)
            A batch of similarity values.
        :param true_indices: shape (n,)
            The index of the unique true choice in each batch.
        """
        raise NotImplementedError
class MarginLoss(BaseLoss):
    r"""Margin-based loss over a similarity matrix.

    Each (positive, negative) pair contributes

    .. math::
        baseloss(pos\_sim, neg\_sim) = g(neg\_sim + margin - pos\_sim)

    where ``g`` is an activation function — ReLU gives the classical hard
    margin, softplus a soft margin.
    """

    def __init__(
        self,
        margin: float = 1.0,
        exact_loss_value: bool = False,
        activation: Callable[[torch.FloatTensor], torch.FloatTensor] = functional.relu,
    ):
        """
        Initialize the loss.

        :param margin: >0
            The margin which should be between positive and negative similarity values.
        :param exact_loss_value:
            Can be disabled to compute the loss up to a constant additive term for improved performance.
        :param activation:
            The activation function to use. Typical examples:

            - hard margin: torch.functional.relu
            - soft margin: torch.functional.softplus
        """
        super().__init__()
        self.margin = margin
        self.exact_loss_value = exact_loss_value
        self.activation = activation

    def forward(self, similarities: torch.FloatTensor, true_indices: torch.LongTensor) -> torch.FloatTensor:  # noqa: D102
        num_choices = similarities.shape[1]
        # Gather each row's true-choice similarity as a column for broadcasting.
        pos_sim = similarities.gather(dim=1, index=true_indices.unsqueeze(dim=1))
        # The positive column contributes the constant g(margin) per row, since
        # pos + margin - pos = margin; there is no gradient through it.
        loss_value = self.activation(similarities + self.margin - pos_sim).mean()
        if self.exact_loss_value:
            # Subtract g(margin)/num_choices to cancel the positive column's
            # constant contribution; requested explicitly since the gradient
            # is unaffected either way.
            margin_tensor = torch.as_tensor(data=self.margin, dtype=torch.float, device=loss_value.device)
            loss_value = loss_value - self.activation(margin_tensor) / num_choices
        return loss_value
@enum.unique
class LossDirectionEnum(str, enum.Enum):
    """An enum for specification of the direction of a matching loss.

    Mixes in ``str`` so members compare equal to their literal string values,
    e.g. when read from configuration.
    """

    #: Loss is matching entities from a left graph to a right one
    left_to_right = 'left_to_right'

    #: Loss is matching entities from a right graph to a left one
    right_to_left = 'right_to_left'

    #: Loss is averaging loss of matching entities from a left to a right graph and from the right to the left one
    symmetrical = 'symmetrical'
# pylint: disable=abstract-method
class MatchingLoss(nn.Module):
    """An API for graph matching losses.

    Subclasses implement :meth:`_one_side_matching_loss`; :meth:`forward`
    averages it over the directions selected by ``loss_direction``.
    """

    #: The similarity
    similarity: Similarity

    #: The direction in which to compute the loss
    loss_direction: LossDirectionEnum

    def __init__(
        self,
        similarity: Similarity,
        loss_direction: LossDirectionEnum = LossDirectionEnum.symmetrical,
    ):
        """
        Initialize the loss.

        :param similarity:
            The similarity to use for comparing node representations.
        :param loss_direction:
            Defines a direction of matching, which loss is optimized during training
        """
        super().__init__()
        self.similarity = similarity
        self.loss_direction = loss_direction

    # pylint: disable=arguments-differ
    def forward(
        self,
        alignment: IDAlignment,
        representations: Mapping[MatchSideEnum, torch.FloatTensor],
        negatives: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        """
        Compute the loss.

        :param alignment: shape: (2, num_aligned)
            The aligned nodes in form of node ID pairs.
        :param representations:
            side -> repr, where repr is a tensor of shape (num_nodes_side, dim)
        :param negatives: shape: (2, num_aligned, num_negatives)
            Negative samples. negatives[0] has to be combined with alignment[1] for a valid pair.
        :return:
            The mean of the partial losses over the active directions.
        """
        partial_losses = []
        # left-to-right loss: left anchors matched against right candidates
        # (negatives[1] pairs with alignment[0] anchors, per the docstring above).
        if self.loss_direction in {LossDirectionEnum.left_to_right, LossDirectionEnum.symmetrical}:
            source_side, target_side = SIDES
            partial_losses.append(
                self._one_side_matching_loss(
                    source=representations[source_side],
                    target=representations[target_side],
                    alignment=alignment,
                    negatives=None if negatives is None else negatives[1],
                )
            )
        # right-to-left loss: sides swapped, so the alignment rows are flipped
        # and negatives[0] is the matching negative set.
        if self.loss_direction in {LossDirectionEnum.right_to_left, LossDirectionEnum.symmetrical}:
            target_side, source_side = SIDES
            partial_losses.append(
                self._one_side_matching_loss(
                    source=representations[source_side],
                    target=representations[target_side],
                    alignment=alignment.flip(0),
                    negatives=None if negatives is None else negatives[0],
                )
            )
        # At least one direction must be active; guards against an invalid enum value.
        assert len(partial_losses) > 0
        return sum(partial_losses) / len(partial_losses)

    def _one_side_matching_loss(
        self,
        source: torch.FloatTensor,
        target: torch.FloatTensor,
        alignment: IDAlignment,
        negatives: Optional[NodeIDs]
    ) -> torch.FloatTensor:
        """
        Compute the loss from selected nodes in source graph to the other graph.

        :param source: shape: (num_source, dim)
            Source node representations.
        :param target: shape: (num_target, dim)
            Target node representations.
        :param alignment: shape: (2, num_aligned)
            The alignment.
        :param negatives: shape: (num_aligned, num_negatives)
            The negative examples from target side.
        """
        raise NotImplementedError
class SampledMatchingLoss(MatchingLoss):
    """Apply a base loss to a similarity matrix where negative samples are used to reduce memory footprint."""

    #: The base loss
    base_loss: BaseLoss

    #: The number of negative samples
    num_negatives: int

    #: Whether to use self-adversarial weighting
    self_adversarial_weighting: bool

    def __init__(
        self,
        similarity: Similarity,
        base_loss: BaseLoss,
        loss_direction: LossDirectionEnum = LossDirectionEnum.symmetrical,
        num_negatives: int = 1,
        self_adversarial_weighting: bool = False,
    ):
        """
        Initialize the loss.

        :param similarity:
            The similarity to use for computing the similarity matrix.
        :param base_loss:
            The base loss to apply to the similarity matrix.
        :param loss_direction:
            The matching direction(s) over which the loss is averaged.
        :param num_negatives:
            The number of negative samples for each positive pair.
        :param self_adversarial_weighting:
            Whether to apply self-adversarial weighting.
        """
        super().__init__(
            similarity=similarity,
            loss_direction=loss_direction
        )
        self.base_loss = base_loss
        self.num_negatives = num_negatives
        self.self_adversarial_weighting = self_adversarial_weighting

    def _one_side_matching_loss(
        self,
        source: torch.FloatTensor,
        target: torch.FloatTensor,
        alignment: IDAlignment,
        negatives: Optional[NodeIDs],
    ) -> torch.FloatTensor:  # noqa: D102
        # Split mapping
        source_ind, target_ind_pos = alignment
        # Extract representations, shape: (batch_size, dim)
        anchor = source[source_ind]
        # Positive scores
        pos_scores = self.similarity.one_to_one(left=anchor, right=target[target_ind_pos])
        # Negative samples in target graph, shape: (batch_size, num_negatives)
        # If none are given, sample uniformly at random from all target nodes.
        if negatives is None:
            negatives = torch.randint(
                target.shape[0],
                size=(target_ind_pos.shape[0], self.num_negatives),
                device=target.device,
            )
        # Negative scores, shape: (batch_size, num_negatives, dim)
        neg_scores = self.similarity.one_to_one(left=anchor.unsqueeze(1), right=target[negatives])
        # self-adversarial weighting as described in RotatE paper: https://arxiv.org/abs/1902.10197
        # The softmax weights are detached so only the raw scores receive gradients.
        if self.self_adversarial_weighting:
            neg_scores = functional.softmax(neg_scores, dim=1).detach() * neg_scores
        # Evaluate base loss: column 0 holds the positive score, hence the
        # all-zero true_indices.
        return self.base_loss(
            similarities=torch.cat([pos_scores.unsqueeze(dim=-1), neg_scores], dim=-1),
            true_indices=torch.zeros_like(target_ind_pos),
        ).mean()
def matching_loss_name_normalizer(name: str) -> str:
    """Normalize a ``MatchingLoss`` subclass name for lookup (lowercase, suffix stripped)."""
    lowered = name.lower()
    return lowered.replace('matchingloss', '')
def base_loss_name_normalizer(name: str) -> str:
    """Normalize a ``BaseLoss`` subclass name for lookup (lowercase, suffix stripped)."""
    lowered = name.lower()
    return lowered.replace('loss', '')
def get_pairwise_loss(name: str, **kwargs: Any) -> BaseLoss:
    """
    Get a pairwise loss by class name.
    :param name:
        The name of the class.
    :param kwargs:
        Additional key-word based constructor arguments.
    :return:
        The base loss instance.
    """
    loss_cls = get_subclass_by_name(base_class=BaseLoss, name=name, normalizer=base_loss_name_normalizer)
    return loss_cls(**kwargs)
def get_matching_loss(name: str, similarity: Similarity, **kwargs) -> MatchingLoss:
    """
    Get a matching loss by class name.
    :param name:
        The name of the class.
    :param similarity:
        The similarity to use.
    :param kwargs:
        Additional key-word based constructor arguments.
    :return:
        The matching loss instance.
    """
    loss_cls = get_subclass_by_name(base_class=MatchingLoss, name=name, normalizer=matching_loss_name_normalizer)
    return loss_cls(similarity=similarity, **kwargs)
| 11,575 | 33.97281 | 149 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/modules/sampler.py | """Sampling methods for negative samples."""
from abc import abstractmethod
from typing import Optional, Tuple
import torch
from kgm.utils.types import NodeIDs
class NegativeSampler:
    """Abstract base class encapsulating a strategy for drawing negative examples."""
    @abstractmethod
    def sample(
        self,
        size: Tuple[int, ...],
        device: torch.device,
        max_id: Optional[int] = None,
        candidates: Optional[NodeIDs] = None,
    ) -> NodeIDs:
        """Draw negative samples.
        If ``candidates`` is provided, the samples are chosen from it; otherwise, IDs
        are drawn from ``[0, max_id - 1]``.
        :param size:
            Expected shape of the output tensor of indices.
        :param device:
            Device of the output tensor.
        :param max_id: >0
            The maximum ID (exclusive); only used when no candidates are given.
        :param candidates: shape: (num_of_candidates,)
            Tensor containing candidates for negative examples to choose from.
        """
        raise NotImplementedError
class UniformRandomSampler(NegativeSampler):
    """NegativeSampler implementation drawing negative examples uniformly at random."""
    def sample(
        self,
        size: Tuple[int, ...],
        device: torch.device,
        max_id: Optional[int] = None,
        candidates: Optional[NodeIDs] = None,
    ) -> NodeIDs:  # noqa: D102
        if candidates is not None:
            # Draw random positions into the candidate tensor on its own device, then
            # move the gathered IDs to the requested output device.
            # Fix: previously the result stayed on candidates.device, violating the
            # documented contract that ``device`` is the device of the output tensor.
            positions = torch.randint(candidates.shape[0], size=size, device=candidates.device)
            return candidates[positions].to(device=device)
        # Sample IDs uniformly from [0, max_id).
        return torch.randint(max_id, size=size, dtype=torch.long, device=device)
| 1,654 | 31.45098 | 117 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/modules/graph.py | # coding=utf-8
"""
Module for message passing modules.
The message passing is split into three phases:
1) Message Creation
Calculate messages. Potentially takes the source and target node representations, as well as the relation-type of
the considered edge into account, i.e. for a triple (e_i, r, e_j): m_{i->j} = f(x_i, x_j, r)
2) Message Passing
The message are exchanged, i.e. m_{i->j} moves from i to j. This is done in parallel for all messages.
3) Message Aggregation
All incoming messages are aggregated into a single vector, i.e. a_j = agg({m_{i->j} for all i})
4) Node Update
The new node representations are calculated given the aggregated messages, as well as the old node representation,
i.e. x_j := update(x_j, a_j)
"""
import logging
from typing import Optional
import torch
from torch import nn
from ..utils.torch_utils import _guess_num_nodes
from ..utils.types import NodeIDs, RelationIDs
logger = logging.getLogger(name=__name__)
# Public names of this module ('MessagePassingBlock' was erroneously listed twice).
__all__ = [
    'AliGAT',
    'AliGate',
    'BasesLinearRelationSpecificMessageCreator',
    'BlockLinearRelationSpecificMessageCreator',
    'GAT',
    'GCNBlock',
    'IdentityMessageCreator',
    'LinearMessageCreator',
    'MeanAggregator',
    'MessagePassingBlock',
    'OnlyUpdate',
    'SumAggregator',
]
class MissingEdgeTypesException(Exception):
    """Raised when a component requires edge type information, but none was passed.
    Subclasses ``Exception`` (not ``BaseException``): only interpreter-level signals
    such as ``KeyboardInterrupt`` should derive directly from ``BaseException``, and
    generic ``except Exception`` handlers are expected to catch this error.
    """
    def __init__(self, cls):
        """
        Initialize the exception.
        :param cls:
            The class (type object) requiring edge types; its name is used in the message.
        """
        super().__init__(f'{cls.__name__} requires passing edge types.')
# pylint: disable=abstract-method
class MessageCreator(nn.Module):
    """Abstract base class for strategies producing one message per edge."""
    def reset_parameters(self) -> None:
        """Reset the module's parameters (no-op by default)."""
        # TODO: Subclass from ExtendedModule
    # pylint: disable=arguments-differ
    def forward(
        self,
        x: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
    ) -> torch.FloatTensor:
        """
        Create one message per edge.
        :param x: shape: (num_nodes, node_embedding_dim)
            The node representations.
        :param source: (num_edges,)
            The source indices for each edge.
        :param target: shape: (num_edges,)
            The target indices for each edge.
        :param edge_type: shape: (num_edges,)
            The edge type for each edge.
        :return: shape: (num_edges, message_dim)
            The messages source -> target.
        """
        raise NotImplementedError
class IdentityMessageCreator(MessageCreator):
    """Forward the source node embeddings unchanged as messages."""
    def forward(
        self,
        x: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
    ) -> torch.FloatTensor:  # noqa: D102
        # One message per edge: the embedding of the edge's source node.
        return x[source]
class LinearMessageCreator(MessageCreator):
    """Produce messages by applying a learned linear map to the source embeddings."""
    def __init__(
        self,
        input_dim: int,
        message_dim: int,
        use_bias: bool = False,
    ):
        """
        Initialize the message creator.
        :param input_dim: >0
            The number of input features, i.e. the dimension of the embedding vector.
        :param message_dim: > 0
            The number of output features, i.e. the dimension of the message vector.
        :param use_bias:
            Whether to use a bias after the linear transformation.
        """
        super().__init__()
        self.linear = nn.Linear(in_features=input_dim, out_features=message_dim, bias=use_bias)
    def reset_parameters(self) -> None:  # noqa: D102
        # TODO: Subclass from ExtendedModule
        self.linear.reset_parameters()
    def forward(
        self,
        x: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
    ) -> torch.FloatTensor:  # noqa: D102
        # Transform all node embeddings once, then gather one row per edge.
        transformed = self.linear(x)
        return transformed[source]
# pylint: disable=abstract-method
class MessageAggregator(nn.Module):
    """
    Abstract base class for aggregating all incoming messages of a node.
    Implementations must be permutation-invariant and able to reduce an arbitrary
    number of messages into a single fixed-size vector.
    """
    def reset_parameters(self) -> None:
        """Reset parameters (no-op by default)."""
        # TODO: Subclass from ExtendedModule
        pass
    # pylint: disable=arguments-differ
    def forward(
        self,
        msg: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
        num_nodes: Optional[int] = None,
    ) -> torch.FloatTensor:
        """
        Aggregate messages per node.
        :param msg: shape: (num_edges, message_dim)
            The messages source -> target.
        :param source: (num_edges,)
            The source indices for each edge.
        :param target: shape: (num_edges,)
            The target indices for each edge.
        :param edge_type: shape: (num_edges,)
            The edge type for each edge.
        :param num_nodes: >0
            The number of nodes; when None, guessed as max(source.max(), target.max()) + 1.
        :return: shape: (num_nodes, update_dim)
            The node updates.
        """
        raise NotImplementedError
class SumAggregator(MessageAggregator):
    """Aggregate incoming messages by summation."""
    def forward(
        self,
        msg: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
        num_nodes: Optional[int] = None,
    ) -> torch.FloatTensor:  # noqa: D102
        num_nodes = _guess_num_nodes(num_nodes=num_nodes, source=source, target=target)
        # Scatter-add each message onto its target node; nodes without messages stay zero.
        out = torch.zeros(num_nodes, msg.shape[1], dtype=msg.dtype, device=msg.device)
        return out.index_add_(dim=0, index=target, source=msg)
class MeanAggregator(MessageAggregator):
    """Aggregate incoming messages by averaging; nodes without messages stay zero."""
    def forward(
        self,
        msg: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
        num_nodes: Optional[int] = None,
    ) -> torch.FloatTensor:  # noqa: D102
        num_nodes = _guess_num_nodes(num_nodes=num_nodes, source=source, target=target)
        # Sum all messages per target node.
        totals = torch.zeros(num_nodes, msg.shape[1], dtype=msg.dtype, device=msg.device)
        totals = totals.index_add_(dim=0, index=target, source=msg)
        # In-degree of every node; clamping to >= 1 avoids division by zero and
        # leaves message-less nodes at zero.
        in_degree = torch.bincount(target, minlength=num_nodes)
        inv_degree = in_degree.clamp_min(min=1).float().reciprocal().unsqueeze(dim=-1)
        return totals * inv_degree
# pylint: disable=abstract-method
class NodeUpdater(nn.Module):
    """Abstract base class computing new node states from old states and aggregated messages."""
    def reset_parameters(self) -> None:
        """Reset parameters (no-op by default)."""
        # TODO: Merge with AbstractKGMatchingModel's reset_parameters
        pass
    # pylint: disable=arguments-differ
    def forward(
        self,
        x: torch.FloatTensor,
        delta: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """
        Update node representations.
        :param x: shape: (num_nodes, node_embedding_dim)
            The node representations.
        :param delta: (num_nodes, update_dim)
            The node updates.
        :return: shape: (num_nodes, new_node_embedding_dim)
            The new node representations.
        """
        raise NotImplementedError
class OnlyUpdate(NodeUpdater):
    """Ignore the previous node states and take the aggregated messages as new representation."""
    def forward(
        self,
        x: torch.FloatTensor,
        delta: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        # The old representation x is intentionally discarded.
        return delta
# pylint: disable=abstract-method
class MessagePassingBlock(nn.Module):
    """One message passing step: create messages, aggregate them, and update the nodes."""
    def __init__(
        self,
        message_creator: MessageCreator,
        message_aggregator: MessageAggregator,
        node_updater: NodeUpdater,
    ):
        """
        Initialize the block.
        :param message_creator:
            Produces one message per edge, potentially based on the source and target
            node representations, as well as the edge type.
        :param message_aggregator:
            Combines all incoming messages of a node into a fixed-size vector.
        :param node_updater:
            Computes the new node representation from the old representation and the
            aggregated incoming messages.
        """
        super().__init__()
        # Bind sub-modules
        self.message_creator = message_creator
        self.message_aggregator = message_aggregator
        self.node_updater = node_updater
    def reset_parameters(self) -> None:
        """Reset parameters by delegating to all sub-modules."""
        for module in (self.message_creator, self.message_aggregator, self.node_updater):
            module.reset_parameters()
    # pylint: disable=arguments-differ
    def forward(
        self,
        x: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
        edge_weights: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        """
        Calculate new node representations by message passing.
        :param x: shape: (num_nodes, node_embedding_dim)
            The node representations.
        :param source: (num_edges,)
            The source indices for each edge.
        :param target: shape: (num_edges,)
            The target indices for each edge.
        :param edge_type: shape: (num_edges,)
            The edge type for each edge.
        :param edge_weights: shape (num_edges,)
            The edge weights.
        :return: shape: (num_nodes, new_node_embedding_dim)
            The new node representations.
        """
        msg = self.message_creator(x=x, source=source, target=target, edge_type=edge_type)
        # Scale messages by per-edge weights, if provided.
        if edge_weights is not None:
            msg = msg * edge_weights.unsqueeze(dim=-1)
        agg = self.message_aggregator(msg=msg, source=source, target=target, edge_type=edge_type, num_nodes=x.shape[0])
        # Free the per-edge messages before computing the node update.
        del msg
        return self.node_updater(x=x, delta=agg)
class GCNBlock(MessagePassingBlock):
    """
    GCN model roughly following https://arxiv.org/abs/1609.02907.
    Notice that this module does only the message passing part, and does **not**
    apply a non-linearity.
    """
    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        use_bias: bool,
    ):
        """
        Initialize the block.
        :param input_dim: >0
            The number of input features, i.e. the dimension of the embedding vector.
        :param output_dim: > 0
            The number of output features.
        :param use_bias:
            Whether to use a bias after the linear transformation.
        """
        # Linear message creation + sum aggregation + replace-update = one GCN layer.
        creator = LinearMessageCreator(
            input_dim=input_dim,
            message_dim=output_dim,
            use_bias=use_bias,
        )
        super().__init__(
            message_creator=creator,
            message_aggregator=SumAggregator(),
            node_updater=OnlyUpdate(),
        )
| 11,533 | 30.172973 | 169 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/modules/similarity.py | # coding=utf-8
"""Modules for computing similarities between vectors."""
import enum
from abc import abstractmethod
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import functional
from ..utils.common import get_subclass_by_name, value_to_enum
# pylint: disable=abstract-method
class DistanceToSimilarity(nn.Module):
    """Abstract transformation turning distance values into similarity values."""
    # pylint: disable=arguments-differ
    @abstractmethod
    def forward(self, distances: torch.FloatTensor) -> torch.FloatTensor:
        """
        Transform distances to similarities.
        :param distances: The distances.
        :return: The similarities.
        """
        raise NotImplementedError
class BoundInverseTransformation(DistanceToSimilarity):
    r"""
    Map distances to similarities in the range (0, 1].
    .. math::
        sim = \frac{1}{1 + dist}
    """
    def forward(self, distances: torch.FloatTensor) -> torch.FloatTensor:  # noqa: D102
        shifted = distances + 1
        return shifted.reciprocal()
class NegativeTransformation(DistanceToSimilarity):
    r"""
    Use the negated distance as similarity.
    .. math::
        sim = -dist
    """
    def forward(self, distances: torch.FloatTensor) -> torch.FloatTensor:  # noqa: D102
        return distances.neg()
class SimilarityEnum(str, enum.Enum):
    """Enumeration of the supported node/relation similarity measures."""
    #: dot-product similarity
    dot = 'dot'
    #: similarity derived from the L2 distance
    l2 = 'l2'
    #: similarity derived from the L1 distance
    l1 = 'l1'
    #: cosine similarity
    cos = 'cos'
# pylint: disable=abstract-method
class Similarity(nn.Module):
    """Base class for similarity functions."""
    # pylint: disable=arguments-differ
    def forward(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """
        Compute pairwise similarity scores; delegates to :meth:`all_to_all`.
        :param left: shape: (n, d)
            The left vectors.
        :param right: shape: (m, d)
            The right vectors.
        :return: shape: (n, m)
            The similarity matrix.
        """
        return self.all_to_all(left=left, right=right)
    @abstractmethod
    def all_to_all(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """
        Compute pairwise similarity scores.
        .. math::
            out[i, j] = sim(left[i], right[j])
        :param left: shape: (n, d)
            The left vectors.
        :param right: shape: (m, d)
            The right vectors.
        :return: shape: (n, m)
            sim_ij = sim(left_i, right_j)
        """
        # NOTE: docstrings previously claimed a return shape of (m, n); the
        # implementations (e.g. left @ right.t()) return (n, m).
        raise NotImplementedError
    @abstractmethod
    def one_to_one(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Compute element-wise similarity scores.
        .. math::
            out[i] = sim(left[i], right[i])
        :param left: shape: (n, d)
        :param right: shape: (n, d)
        :return: shape: (n,)
        """
        raise NotImplementedError
class DotProductSimilarity(Similarity):
    """Similarity given by the (unnormalized) dot product."""
    def all_to_all(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        return left.matmul(right.t())
    def one_to_one(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        return (left * right).sum(dim=-1)
class LpSimilarity(Similarity):
    """Similarity based on Lp distance."""
    def __init__(
        self,
        p: int = 2,
        transformation: Optional[DistanceToSimilarity] = None,
    ):
        """
        Initialize the similarity.
        :param p:
            The p to use for the L_p distance.
        :param transformation:
            The distance to similarity transformation to use. If None, use 1 / (1 + dist).
        """
        super().__init__()
        if transformation is None:
            transformation = BoundInverseTransformation()
        self.p = p
        self.transformation = transformation
    def all_to_all(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        if self.p == 1:
            # work-around to avoid memory issue
            # (l1c is a custom autograd function defined below in this module)
            distances = l1c(left, right)
        elif self.p == 2:
            # work-around to avoid memory issue in backward pass, cf. https://github.com/pytorch/pytorch/issues/31599
            # || x - y ||**2 = <x-y, x-y> = <x,x> + <y,y> - 2<x,y>
            # relu() clamps tiny negative values caused by floating point cancellation before sqrt
            distances = ((left ** 2).sum(dim=-1).unsqueeze(dim=1) + (right ** 2).sum(dim=-1).unsqueeze(dim=0) - 2. * left @ right.t()).relu().sqrt()
        else:
            # generic case: materializes the full difference tensor internally
            distances = torch.cdist(left, right, p=self.p)
        return self.transformation(distances)
    def one_to_one(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        return self.transformation(torch.norm(left - right, dim=-1, p=self.p))
    def extra_repr(self) -> str:  # noqa: D102
        return f'p={self.p}, transformation={self.transformation}'
class CosineSimilarity(Similarity):
    """Cosine similarity, i.e. the dot product of L2-normalized vectors."""
    def all_to_all(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        # Normalize both sides to unit L2 norm, then take pairwise dot products.
        unit_left = functional.normalize(left, p=2, dim=-1)
        unit_right = functional.normalize(right, p=2, dim=-1)
        return unit_left @ unit_right.t()
    def one_to_one(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        unit_left = functional.normalize(left, p=2, dim=-1)
        unit_right = functional.normalize(right, p=2, dim=-1)
        return (unit_left * unit_right).sum(dim=-1)
def transformation_normalizer(name: str) -> str:
    """Normalize a ``DistanceToSimilarity`` subclass name for lookup."""
    lowered = name.lower().replace('_', '')
    return lowered.replace('transformation', '')
def get_similarity(
    similarity: Union[SimilarityEnum, str],
    transformation: Optional[Union[DistanceToSimilarity, str]] = None,
) -> Similarity:
    """
    Instantiate a similarity instance.
    :param similarity:
        The chosen similarity, either as enum member or as its string value.
    :param transformation:
        The transformation used to convert distances to similarities; may be given
        by name. Only relevant for the distance-based similarities (l1, l2).
    :return:
        The similarity function.
    :raises KeyError:
        For an unknown similarity.
    """
    if not isinstance(similarity, SimilarityEnum):
        similarity = value_to_enum(enum_cls=SimilarityEnum, value=similarity)
    if isinstance(transformation, str):
        transformation = get_subclass_by_name(base_class=DistanceToSimilarity, name=transformation, normalizer=transformation_normalizer)()
    if similarity == SimilarityEnum.dot:
        return DotProductSimilarity()
    if similarity == SimilarityEnum.cos:
        return CosineSimilarity()
    if similarity in (SimilarityEnum.l1, SimilarityEnum.l2):
        return LpSimilarity(p=1 if similarity == SimilarityEnum.l1 else 2, transformation=transformation)
    raise KeyError(f'Unknown similarity: {similarity}')
# Inherit from Function
class L1CDist(torch.autograd.Function):
    """
    Compute L1 distance between all pairs of vectors.
    .. note ::
        This is a workaround for torch.cdist, until the memory problem is fixed: https://github.com/pytorch/pytorch/issues/24345
    """
    # pylint: disable=arguments-differ
    @staticmethod
    def forward(ctx, x1, x2):  # noqa: D102
        # Keep the inputs for the hand-written backward pass.
        ctx.save_for_backward(x1, x2)
        # cdist.forward does not have the memory problem
        return torch.cdist(x1, x2, p=1)
    # pylint: disable=arguments-differ
    @staticmethod
    def backward(ctx, grad_dist):  # noqa: D102
        # This is a pattern that is very convenient - at the top of backward
        # unpack saved_tensors and initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        grad_x1 = grad_x2 = None
        # Retrieve saved values
        x1, x2 = ctx.saved_tensors
        dims = x1.shape[1]
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        if ctx.needs_input_grad[0]:
            grad_x1 = torch.empty_like(x1)
        if ctx.needs_input_grad[1]:
            grad_x2 = torch.empty_like(x2)
        if any(ctx.needs_input_grad):
            # d|a - b| / da = sign(a - b) (torch.sign yields 0 at ties).
            # Looping over feature dimensions keeps peak memory at O(n1 * n2)
            # instead of the O(n1 * n2 * d) of a fully vectorized backward.
            for i in range(dims):
                #: sign: shape: (n1, n2)
                sign = torch.sign(x1[:, None, i] - x2[None, :, i])
                if ctx.needs_input_grad[0]:
                    grad_x1[:, i] = torch.sum(grad_dist * sign, dim=1)
                if ctx.needs_input_grad[1]:
                    grad_x2[:, i] = -torch.sum(grad_dist * sign, dim=0)
        return grad_x1, grad_x2
# Convenience alias: l1c(x1, x2) applies the custom autograd function.
l1c = L1CDist.apply
| 9,200 | 27.933962 | 148 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/modules/embeddings/base.py | """Basic node embedding modules."""
import enum
import math
import pathlib
from typing import Any, Mapping, Optional, Type, Union
import torch
from torch import nn
from .init.base import ConstantNodeEmbeddingInitializer, NodeEmbeddingInitializer, RandomNodeEmbeddingInitializer
from .norm import EmbeddingNormalizationMethod, EmbeddingNormalizer, NoneEmbeddingNormalizer, get_normalizer_by_name
from ...data import KnowledgeGraph, KnowledgeGraphAlignmentDataset, MatchSideEnum
from ...utils.common import reduce_kwargs_for_method
from ...utils.torch_utils import ExtendedModule
from ...utils.types import NodeIDs
class EmbeddingNormalizationMode(str, enum.Enum):
    """When embedding normalization is applied."""
    #: Never normalize.
    none = "none"
    #: Normalize once, directly after initialization.
    initial = "initial"
    #: Normalize in every forward pass.
    every_forward = "every_forward"
    #: Normalize after every parameter update (non-differentiable).
    after_update = "after_update"
# pylint: disable=abstract-method
class Embedding(ExtendedModule):
    """An embedding with additional initialization and normalization logic."""
    #: The actual data
    _embedding: nn.Embedding
    #: The initializer (kept so the embedding can be re-initialized)
    initializer: NodeEmbeddingInitializer
    #: The normalizer
    normalizer: EmbeddingNormalizer
    #: additionally associated KnowledgeGraph
    # NOTE(review): this attribute is declared but never assigned in this class —
    # presumably set by external code; confirm before relying on it.
    graph: Optional[KnowledgeGraph]
    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: Optional[int] = None,
        initializer: Optional[NodeEmbeddingInitializer] = None,
        trainable: bool = True,
        normalizer: Optional[EmbeddingNormalizer] = None,
        normalization_mode: EmbeddingNormalizationMode = EmbeddingNormalizationMode.none,
        dropout: Optional[float] = None,
        shared: bool = False,
    ):
        """
        Initialize the module.
        :param num_embeddings:
            The number of embeddings.
        :param embedding_dim:
            The embedding dimension. If not provided, the initializer must provide one.
        :param initializer:
            The node embedding initializer. Defaults to random normal initialization.
        :param trainable:
            Whether the embeddings are trainable.
        :param normalizer:
            The node embedding normalizer. Must be None (or a no-op) exactly when
            normalization_mode is none.
        :param normalization_mode:
            The node embedding normalization mode.
        :param dropout:
            A node embedding dropout probability.
        :param shared:
            Whether to use a single shared embedding vector for all nodes.
        :raises ValueError:
            If no embedding dimension can be determined, or normalizer and
            normalization_mode are inconsistent.
        :raises NotImplementedError:
            For normalization_mode == after_update.
        """
        super().__init__()
        # Store embedding initialization method for re-initialization
        if initializer is None:
            initializer = RandomNodeEmbeddingInitializer()
        self.initializer = initializer
        # Fall back to the dimension dictated by the initializer, if any.
        if embedding_dim is None:
            embedding_dim = initializer.embedding_dim
        if embedding_dim is None:
            raise ValueError('Either embedding_dim must be provided, or the initializer must provide a dimension.')
        self.embedding_dim = embedding_dim
        # Consistency check: a (real) normalizer is given iff a normalization mode is set.
        if (normalization_mode == EmbeddingNormalizationMode.none) != (normalizer is None or isinstance(normalizer, NoneEmbeddingNormalizer)):
            raise ValueError("normalization_mode == none if and only if normalize is None.")
        if normalization_mode == EmbeddingNormalizationMode.after_update:
            raise NotImplementedError(normalization_mode)
        # Bind normalizer
        self.normalizer = normalizer
        self.normalization_mode = normalization_mode
        # Node embedding dropout
        if dropout is not None:
            dropout = nn.Dropout(p=dropout)
        self.dropout = dropout
        # Whether to share embeddings
        self.shared = shared
        # Store num nodes
        self.num_embeddings = num_embeddings
        # Allocate embeddings; with sharing, only a single row is stored.
        if self.shared:
            num_embeddings = 1
        self._embedding = nn.Embedding(
            num_embeddings=num_embeddings,
            embedding_dim=embedding_dim,
        )
        # Set trainability
        self._embedding.weight.requires_grad_(trainable)
        # Initialize
        self.reset_parameters()
    @property
    def weight(self) -> nn.Parameter:
        """Return the embedding weights."""
        return self._embedding.weight
    # pylint: disable=arguments-differ
    def forward(
        self,
        indices: Optional[NodeIDs] = None,
    ) -> torch.FloatTensor:
        """
        Forward pass for embeddings.
        Optionally applies dropout and embedding normalization.
        :param indices:
            The indices to lookup. May be None to get all embeddings.
        :return: shape: (batch_size, embedding_dim)
            The embeddings. If indices=None, batch_size=num_embeddings.
        """
        if indices is None:
            if self.shared:
                # Replicate the single shared row for all nodes.
                x = self._embedding.weight.repeat(self.num_embeddings, 1)
            else:
                x = self._embedding.weight
        else:
            if self.shared:
                # All indices map to the single shared row.
                indices = torch.zeros_like(indices)
            x = self._embedding(indices)
        # apply dropout if requested
        if self.dropout is not None:
            x = self.dropout(x)
        # Apply normalization if requested
        if self.normalization_mode == EmbeddingNormalizationMode.every_forward:
            x = self.normalizer.normalize(x=x)
        return x
    @torch.no_grad()
    def reset_parameters(self) -> None:
        """Reset parameters, i.e. re-initialize (and optionally normalize) the weights."""
        self.initializer.init_one_(embedding=self._embedding.weight)
        # after_update is rejected in __init__, so only initial takes effect here.
        if self.normalization_mode in {
            EmbeddingNormalizationMode.initial,
            EmbeddingNormalizationMode.after_update
        }:
            self._embedding.weight.data = self.normalizer.normalize(x=self._embedding.weight.data)
class NodeEmbeddingInitMethod(str, enum.Enum):
    """Enum for selecting how to initialize node embeddings."""
    #: Initialize all entries to one.
    ones = 'ones'
    #: Standard normal distribution (std = 1).
    std_one = 'std_one'
    #: Normal distribution with std = 1 / sqrt(sum_i n_nodes_i).
    sqrt_total = 'sqrt_total'
    #: Normal distribution with std = 1 / sqrt(n_nodes_i).
    sqrt_individual = 'sqrt_individual'
    def __str__(self):  # noqa: D105
        return str(self.name)
def get_embedding_pair(
    init: Union[NodeEmbeddingInitMethod, Type[NodeEmbeddingInitializer], NodeEmbeddingInitializer],
    dataset: KnowledgeGraphAlignmentDataset,
    embedding_dim: Optional[int] = None,
    dropout: Optional[float] = None,
    trainable: bool = True,
    init_config: Optional[Mapping[str, Any]] = None,
    norm: EmbeddingNormalizationMethod = EmbeddingNormalizationMethod.none,
    normalization_mode: EmbeddingNormalizationMode = EmbeddingNormalizationMode.none,
    shared: bool = False,
) -> Mapping[MatchSideEnum, Embedding]:
    """
    Create node embeddings for each graph side.
    :param init:
        The initializer. Can be an enum member, a class, or an already instantiated
        initializer.
    :param dataset:
        The dataset.
    :param embedding_dim:
        The embedding dimension. If not provided, the initializer must provide one.
    :param dropout:
        A node embedding dropout value.
    :param trainable:
        Whether the embedding should be set trainable.
    :param init_config:
        A key-value dictionary used for initializing the node embedding initializer
        (only relevant if not already initialized).
    :param norm:
        The embedding normalization method.
    :param normalization_mode:
        The node embedding normalization mode. none if and only if norm is none.
    :param shared:
        Whether to use one shared embedding for all nodes.
    :return:
        A mapping side -> node embedding (as an ``nn.ModuleDict``).
    """
    # A single normalizer instance is shared by the embeddings of both sides.
    normalizer = get_normalizer_by_name(name=norm)
    embeddings = {}
    for side, num_embeddings in dataset.num_nodes.items():
        side_init = resolve_initializer(
            init=init,
            dataset=dataset,
            side=side,
            init_config=init_config,
        )
        embeddings[side] = Embedding(
            num_embeddings=num_embeddings,
            embedding_dim=embedding_dim,
            initializer=side_init,
            trainable=trainable,
            normalizer=normalizer,
            normalization_mode=normalization_mode,
            dropout=dropout,
            shared=shared,
        )
    return nn.ModuleDict(embeddings)
def init_method_normalizer(name: str):
    """Normalize a ``NodeEmbeddingInitializer`` subclass name for lookup."""
    lowered = name.lower().replace('_', '')
    return lowered.replace('nodeembeddinginitializer', '')
def resolve_initializer(
    init: Union[NodeEmbeddingInitMethod, Type[NodeEmbeddingInitializer], NodeEmbeddingInitializer, Mapping[MatchSideEnum, NodeEmbeddingInitializer]],
    dataset: KnowledgeGraphAlignmentDataset,
    side: MatchSideEnum,
    init_config: Optional[Mapping[str, Any]] = None,
    cache_root: Optional[pathlib.Path] = None,
) -> NodeEmbeddingInitializer:
    """
    Resolve a node embedding initializer from a config.
    :param init:
        The chosen init. Can be
        * enum value
        * class
        * instance
        * mapping from side to instance.
    :param dataset:
        The dataset.
    :param side:
        The side for which the initializer should be created.
    :param init_config:
        Additional configuration for the initializer (only used when init is a class).
    :param cache_root:
        The cache root directory used for storing datasets. Defaults to ~/.kgm
    :return:
        An initializer instance.
    :raises ValueError:
        If init cannot be resolved.
    """
    # A side-specific mapping: pick the initializer for the requested side first.
    if isinstance(init, dict):
        init = init[side]
    # NOTE(review): cache_root is resolved here but never used in this function's
    # visible body — possibly a leftover or intended for pretrained initializers;
    # confirm before removing.
    if cache_root is None:
        cache_root = pathlib.Path("~", ".kgm")
    cache_root = cache_root.expanduser()
    # already instantiated
    if isinstance(init, NodeEmbeddingInitializer):
        return init
    # A class: construct it, forwarding only the kwargs its __init__ accepts.
    if isinstance(init, type) and issubclass(init, NodeEmbeddingInitializer):
        return init(**(reduce_kwargs_for_method(method=init.__init__, kwargs=init_config)))
    # Enum members: random initializers with method-specific std, or constant ones.
    if init == NodeEmbeddingInitMethod.sqrt_total:
        total = sum(dataset.num_nodes.values())
        return RandomNodeEmbeddingInitializer(std=1. / math.sqrt(total))
    elif init == NodeEmbeddingInitMethod.sqrt_individual:
        return RandomNodeEmbeddingInitializer(std=1. / math.sqrt(dataset.num_nodes[side]))
    elif init == NodeEmbeddingInitMethod.std_one:
        return RandomNodeEmbeddingInitializer(std=1.)
    elif init == NodeEmbeddingInitMethod.ones:
        return ConstantNodeEmbeddingInitializer(value=1.0)
    else:
        raise ValueError(init)
| 10,589 | 32.619048 | 149 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/modules/embeddings/norm.py | # coding=utf-8
"""Embedding normalization."""
import enum
from abc import abstractmethod
from typing import Union
import torch
from torch.nn import functional
from ...utils.common import get_subclass_by_name
class EmbeddingNormalizer:
    """Abstract base class for embedding normalization strategies."""
    @abstractmethod
    def normalize(
        self,
        x: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Normalize a batch of embeddings, e.g. during a forward pass.
        :param x: shape: (batch_size, dim)
            The tensor of embeddings.
        :return: shape: (batch_size, dim)
            The normalized embeddings.
        """
        raise NotImplementedError
class LpNormalization(EmbeddingNormalizer):
    """Normalize each embedding to unit L_p norm."""
    def __init__(self, p: int):
        """
        Initialize the normalizer.
        :param p: >0
            The parameter p of the Lp distance.
        """
        self.p = p
    def normalize(
        self,
        x: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        # Divide each row by its L_p norm (torch's eps guards against division by zero).
        return functional.normalize(x, p=self.p, dim=-1)
def norm_method_normalizer(name: str):
    """Normalize an ``EmbeddingNormalizer`` subclass name for lookup."""
    lowered = name.lower().replace('_', '')
    return lowered.replace('embeddingnormalizer', '')
class L2EmbeddingNormalizer(LpNormalization):
    """Normalize embeddings to unit L2 norm."""
    def __init__(self):
        """Initialize the normalizer with p=2."""
        super().__init__(p=2)
class L1EmbeddingNormalizer(LpNormalization):
    """Normalize embeddings to unit L1 norm."""
    def __init__(self):
        """Initialize the normalizer with p=1."""
        super().__init__(p=1)
class NoneEmbeddingNormalizer(EmbeddingNormalizer):
    """A no-op normalizer returning the input unchanged."""
    def normalize(
        self,
        x: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        return x
@enum.unique
class EmbeddingNormalizationMethod(str, enum.Enum):
    """An enum of the available embedding normalization methods."""
    #: no normalization
    none = 'none'
    #: unit L2 norm
    l2 = 'l2'
    #: unit L1 norm
    l1 = 'l1'
def get_normalizer_by_name(name: Union[EmbeddingNormalizationMethod, str]) -> EmbeddingNormalizer:
    """Instantiate an embedding normalizer by (enum or string) name."""
    key = name.value if isinstance(name, EmbeddingNormalizationMethod) else name
    normalizer_cls = get_subclass_by_name(
        base_class=EmbeddingNormalizer,
        name=key,
        normalizer=norm_method_normalizer,
    )
    return normalizer_cls()
| 2,340 | 22.887755 | 98 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/modules/embeddings/init/base.py | # coding=utf-8
"""Node embedding initialization."""
import pathlib
from typing import Any, Optional, Sequence, Union
import torch
from torch import nn
from ....data import KnowledgeGraph, MatchSideEnum
class NodeEmbeddingInitializer:
    """Abstract base class for node embedding initialization strategies."""
    def init_one_(
        self,
        embedding: torch.FloatTensor,
        graph: Optional[KnowledgeGraph] = None,
    ) -> None:
        """
        Initialize embedding in-place.
        :param embedding:
            The embedding tensor, modified in-place.
        :param graph:
            The corresponding knowledge graph. TODO: DEPRECATED.
        """
        raise NotImplementedError
    @property
    def embedding_dim(self) -> Optional[int]:
        """Return the embedding dimension dictated by this initializer, if any."""
        return None
class RandomNodeEmbeddingInitializer(NodeEmbeddingInitializer):
    """Initialize nodes i.i.d. with random vectors drawn from the given distribution."""
    def __init__(
        self,
        random_distribution=nn.init.normal_,
        **kwargs: Any,
    ):
        """
        Initialize the initializer.
        :param random_distribution:
            The in-place random initialization function (default: normal).
        :param kwargs:
            Extra keyword arguments forwarded to the distribution, e.g. ``std``.
        """
        self.random_dist_ = random_distribution
        self.kwargs = kwargs
    def init_one_(
        self,
        embedding: torch.FloatTensor,
        graph: Optional[KnowledgeGraph] = None,
    ) -> None:  # noqa: D102
        # Fill the tensor in-place with samples from the configured distribution.
        self.random_dist_(embedding, **self.kwargs)
class ConstantNodeEmbeddingInitializer(NodeEmbeddingInitializer):
    """Fill embeddings with a single constant value."""

    def __init__(
        self,
        value: float = 1.0,
    ):
        """
        Initialize the initializer.

        :param value:
            The value written into every entry of the embedding.
        """
        self.value = value

    def init_one_(
        self,
        embedding: torch.FloatTensor,
        graph: Optional[KnowledgeGraph] = None,
    ) -> None:  # noqa: D102
        # Fill in-place; no_grad mirrors the behavior of nn.init.constant_.
        with torch.no_grad():
            embedding.fill_(self.value)
class PretrainedNodeEmbeddingInitializer(NodeEmbeddingInitializer):
    """Initialize node embeddings by copying pretrained vectors."""

    def __init__(
        self,
        embeddings: torch.FloatTensor,
    ):
        """
        Initialize the initializer.

        :param embeddings: shape: (n, d)
            The pretrained embeddings.
        """
        super().__init__()
        self.pretrained = embeddings

    @staticmethod
    def from_path(directory: Union[pathlib.Path, str], side: MatchSideEnum) -> 'PretrainedNodeEmbeddingInitializer':
        """Construct initializer from pretrained embeddings stored under a path."""
        # TODO: Watch out for ID mismatch!
        path = PretrainedNodeEmbeddingInitializer.output_file_path(directory=directory, side=side)
        return PretrainedNodeEmbeddingInitializer(embeddings=torch.load(path))

    @staticmethod
    def output_file_path(directory: Union[pathlib.Path, str], side: MatchSideEnum) -> pathlib.Path:
        """Return the canonical file path for one side's embeddings."""
        return pathlib.Path(directory) / f'{side.value}.pt'

    def save_to_path(self, directory: Union[pathlib.Path, str], side: MatchSideEnum) -> pathlib.Path:
        """Save the pretrained node embeddings to file and return the path."""
        target = self.output_file_path(directory=directory, side=side)
        torch.save(obj=self.pretrained, f=target)
        return target

    def init_one_(
        self,
        embedding: torch.FloatTensor,
        graph: Optional[KnowledgeGraph] = None,
    ) -> None:  # noqa: D102
        # Overwrite the embedding in-place with the stored pretrained vectors.
        embedding.data.copy_(self.pretrained, non_blocking=True)

    @property
    def embedding_dim(self) -> Optional[int]:  # noqa: D102
        return self.pretrained.shape[-1]
class CombinedInitializer(NodeEmbeddingInitializer):
    """Dispatch each embedding row to one of several base initializers."""

    def __init__(
        self,
        initializer_map: torch.LongTensor,
        initializers: Sequence[NodeEmbeddingInitializer],
    ):
        """
        Initialize the initializer.

        :param initializer_map: shape: (num_embeddings,)
            For each embedding ID, the index of the base initializer to use.
        :param initializers:
            The base initializers.
        """
        self.initializer_map = initializer_map
        self.base_initializers = initializers

    def init_one_(
        self,
        embedding: torch.FloatTensor,
        graph: Optional[KnowledgeGraph] = None,
    ) -> None:  # noqa: D102
        for idx, base in enumerate(self.base_initializers):
            # Rows assigned to this base initializer.
            selection = self.initializer_map == idx
            # Initialize into a scratch buffer, then scatter back in-place.
            buffer = torch.empty_like(embedding[selection])
            base.init_one_(buffer)
            embedding.data[selection] = buffer
| 4,886 | 28.439759 | 116 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/training/base.py | """Common training loop parts."""
import logging
from typing import Any, Generic, Iterable, Mapping, Optional, Tuple, Type, TypeVar
import torch
from torch import nn
from torch.optim import Optimizer
from kgm.utils.common import NonFiniteLossError, kwargs_or_empty, last
from kgm.utils.torch_utils import construct_optimizer_from_config, get_device
logger = logging.getLogger(name=__name__)
BatchType = TypeVar('BatchType')
class BaseTrainer(Generic[BatchType]):
    """A base class for training loops."""

    #: The model
    model: nn.Module

    #: The optimizer instance
    optimizer: Optimizer

    def __init__(
        self,
        model: nn.Module,
        train_batch_size: Optional[int] = None,
        optimizer_cls: Optional[Type[Optimizer]] = None,
        optimizer_kwargs: Optional[Mapping[str, Any]] = None,
        clip_grad_norm: Optional[float] = None,
        accumulate_gradients: int = 1,
        device: Optional[torch.device] = None,
    ):
        """
        Initialize a new training loop.

        :param model:
            The model to train.
        :param train_batch_size:
            The batch size to use for training.
        :param optimizer_cls:
            The optimizer class; defaults to Adam if not given.
        :param optimizer_kwargs:
            Keyword-based arguments for the optimizer.
        :param clip_grad_norm:
            Whether to apply gradient clipping (norm-based).
        :param accumulate_gradients:
            Accumulate gradients over batches. This can be used to simulate a larger batch size, while keeping the
            memory footprint small.
        :param device:
            The device on which to train.
        """
        device = get_device(device=device)

        # Bind parameters
        self.train_batch_size = train_batch_size
        self.model = model.to(device=device)
        self.epoch = 0
        self.accumulate_gradients = accumulate_gradients
        self.device = device
        self.clip_grad_norm = clip_grad_norm

        # create optimizer; fall back to Adam if no class is provided
        if optimizer_cls is None:
            optimizer_cls = 'adam'
        optimizer_config = dict(cls=optimizer_cls)
        optimizer_config.update(kwargs_or_empty(optimizer_kwargs))
        self.optimizer_config = optimizer_config
        self.reset_optimizer()

    def reset_optimizer(self) -> None:
        """Reset the optimizer, re-creating it from the stored configuration."""
        self.optimizer = construct_optimizer_from_config(
            model=self.model,
            optimizer_config=self.optimizer_config,
        )

    def _train_one_epoch(self) -> Mapping[str, Any]:
        """
        Train the model for one epoch on the given device.

        :return:
            A dictionary of training results. Contains at least `loss` with the epoch loss value.
        """
        epoch_loss, counter = 0., 0

        # Iterate over batches
        i = -1
        for i, batch in enumerate(self._iter_batches()):
            # Compute batch loss
            batch_loss, real_batch_size = self._train_one_batch(batch=batch)

            # Break on non-finite loss values
            if not torch.isfinite(batch_loss).item():
                raise NonFiniteLossError

            # Update epoch loss; weight by the actual batch size for a correct mean
            epoch_loss += batch_loss.item() * real_batch_size
            counter += real_batch_size

            # compute gradients
            batch_loss.backward()

            # Apply gradient updates every `accumulate_gradients` batches
            if i % self.accumulate_gradients == 0:
                self._parameter_update()

        # For the last batch, we definitely do an update
        if self.accumulate_gradients > 1 and (i % self.accumulate_gradients) != 0:
            self._parameter_update()

        return dict(
            loss=epoch_loss / counter
        )

    def _parameter_update(self):
        """Update the parameters using the optimizer."""
        # Gradient clipping
        if self.clip_grad_norm is not None:
            nn.utils.clip_grad_norm_(
                parameters=(p for p in self.model.parameters() if p.requires_grad),
                max_norm=self.clip_grad_norm,
            )

        # update parameters
        self.optimizer.step()

        # clear gradients afterwards
        self.optimizer.zero_grad()

    def _iter_batches(self) -> Iterable[BatchType]:
        """Iterate over batches."""
        raise NotImplementedError

    def _train_one_batch(self, batch: BatchType) -> Tuple[torch.Tensor, int]:
        """
        Train on a single batch.

        :param batch: shape: (batch_size,)
            The sample IDs.

        :return:
            A tuple (batch_loss, real_batch_size) of the batch loss (a scalar tensor), and the actual batch size.
        """
        raise NotImplementedError

    def train_iter(
        self,
        num_epochs: int = 1,
    ) -> Iterable[Mapping[str, Any]]:
        """
        Train the model, and return intermediate results.

        :param num_epochs:
            The number of epochs.

        :return:
            One result dictionary per epoch.
        """
        epoch_result = dict()
        for _ in range(self.epoch, self.epoch + num_epochs):
            self.model.train()

            # training step
            self.epoch += 1
            epoch_result = dict(
                epoch=self.epoch,
                train=self._train_one_epoch(),
            )

            yield epoch_result
        # generator return value: the last epoch's result
        return epoch_result

    def train(
        self,
        num_epochs: int = 1,
        final_eval: bool = True,
    ) -> Mapping[str, Any]:
        """
        Train the model, and return the final result.

        :param num_epochs:
            The number of epochs.
        :param final_eval:
            Whether to perform an evaluation after the last training epoch.

        :return:
            A dictionary containing the result.
        """
        return last(self.train_iter(num_epochs=num_epochs))
| 6,203 | 30.175879 | 114 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/training/matching.py | # coding=utf-8
"""Training loops for KG matching models."""
import logging
from abc import abstractmethod
from typing import Any, Iterable, List, Mapping, Optional, Tuple, Type
import torch
from torch.optim import Optimizer
from torch.utils import data
from .base import BaseTrainer
from ..data import KnowledgeGraphAlignmentDataset, MatchSideEnum, SIDES
from ..models import KGMatchingModel
from ..modules import MatchingLoss, Similarity
from ..utils.types import IDAlignment, NodeIDs
logger = logging.getLogger(name=__name__)
class NodeSampler:
    """Abstract base class for negative node samplers."""

    @abstractmethod
    def sample(
        self,
        positive_batch: IDAlignment,
    ) -> NodeIDs:
        """
        Sample negative node indices for each side.

        positive pair:
            (positive_batch[0, i], positive_batch[1, i])
        negative_pair:
            (positive_batch[0, i], negative_batch[0, i, j])

        :param positive_batch: shape: (2, pos_batch_size)
            The batch of aligned nodes.

        :return: shape: (2, pos_batch_size, num_negatives)
            The negative node IDs. result[0] has to be combined with positive_batch[1] for a valid pair.
        """
        raise NotImplementedError
class RandomNodeSampler(NodeSampler):
    """Sample negative nodes uniformly at random (with replacement)."""

    def __init__(
        self,
        num_nodes: Mapping[MatchSideEnum, int],
        num_negatives: int,
    ):
        """
        Initialize the sampler.

        :param num_nodes:
            The number of nodes on each side.
        :param num_negatives: >=0
            The absolute number of negatives samples for each positive one.
        """
        self.num_nodes = num_nodes
        self.num_negatives = num_negatives

    def sample(
        self,
        positive_batch: IDAlignment,
    ) -> NodeIDs:  # noqa: D102
        # Draw uniform node IDs per side; each draw has shape (pos_batch_size, num_negatives).
        pos_batch_size = positive_batch.shape[1]
        per_side = [
            torch.randint(self.num_nodes[side], size=(pos_batch_size, self.num_negatives))
            for side in SIDES
        ]
        return torch.stack(per_side, dim=0)
#: A 3-tuple:
# * indices (global)
# * positives (local)
# * negatives (local)
AlignmentBatch = Tuple[Optional[Mapping[MatchSideEnum, NodeIDs]], IDAlignment, Optional[NodeIDs]]
class AlignmentBatchCollator:
    """A custom collator for adding negative nodes to a batch of positives."""

    def __init__(
        self,
        node_sampler: Optional[NodeSampler] = None,
    ):
        """
        Initialize the collator.

        :param node_sampler:
            The node sampler. If None, collate() passes global positives through unchanged.
        """
        self.sampler = node_sampler

    def collate(
        self,
        positives: List[Tuple[IDAlignment]],
    ) -> AlignmentBatch:
        """
        Collate a batch.

        :param positives:
            A list of 1-tuples, each wrapping one alignment column of shape (2,).

        :return:
            A tuple of batch node indices per side and the number of positives in the batch.
        """
        # Stack the single alignment columns into a (2, pos_batch_size) tensor.
        global_positives: IDAlignment = torch.stack([p[0] for p in positives], dim=-1)

        # no sampling: return global IDs directly, without negatives
        if self.sampler is None:
            return None, global_positives, None

        # shape: (2, pos_batch_size, num_negatives)
        global_negatives = self.sampler.sample(positive_batch=global_positives)

        # Translate to batch local indices
        indices = dict()
        local_positives = []
        local_negatives = []
        for side, pos_on_side, neg_on_side in zip(SIDES, global_positives, global_negatives):
            # There are positive indices P and negative indices N
            # There may be duplicates
            # * in P, due to 1-n alignments
            # * in N, due to random sampling with replacement
            # * between P and N due to not filtering in N
            # We do not want to re-compute representations; thus we only keep the unique indices.
            indices_on_side = torch.cat([pos_on_side.unsqueeze(dim=-1), neg_on_side], dim=-1)
            indices[side], inverse = indices_on_side.unique(sorted=False, return_inverse=True)
            # Column 0 of the inverse mapping is the positive; the remaining columns are negatives.
            local_positives.append(inverse[:, 0])
            local_negatives.append(inverse[:, 1:])
        return (
            indices,
            torch.stack(local_positives, dim=0),
            torch.stack(local_negatives, dim=0),
        )
def prepare_alignment_batch_data_loader(
    dataset: KnowledgeGraphAlignmentDataset,
    positive_batch_size: Optional[int] = None,
    negative_sampler: Optional[NodeSampler] = None,
    num_workers: int = 0,
) -> data.DataLoader:
    """
    Prepare a PyTorch data loader over the training alignment pairs.

    :param dataset:
        The knowledge graph alignment dataset.
    :param positive_batch_size:
        The batch size for alignment pairs; None means full-batch.
    :param negative_sampler:
        The sampler for additional nodes from the graphs.
    :param num_workers:
        The number of worker processes.

    .. seealso ::
        torch.utils.data.DataLoader

    :return:
        The data loader.
    """
    # Full-batch training when no batch size was given.
    if positive_batch_size is None:
        positive_batch_size = dataset.alignment.num_train
    # One sample per alignment column, i.e. per matched node pair.
    pair_dataset = data.TensorDataset(dataset.alignment.train.t())
    return data.DataLoader(
        dataset=pair_dataset,
        batch_size=positive_batch_size,
        shuffle=True,
        num_workers=num_workers,
        collate_fn=AlignmentBatchCollator(node_sampler=negative_sampler).collate,
        pin_memory=True,
    )
class AlignmentModelTrainer(BaseTrainer[AlignmentBatch]):
    """A wrapper around a model encapsulating training and evaluation."""

    #: The model instance
    model: KGMatchingModel

    #: The similarity instance
    similarity: Similarity

    #: The loss instance
    loss: MatchingLoss

    def __init__(
        self,
        model: KGMatchingModel,
        similarity: Similarity,
        dataset: KnowledgeGraphAlignmentDataset,
        loss: MatchingLoss,
        batch_size: Optional[int] = None,
        optimizer_cls: Type[Optimizer] = None,
        optimizer_kwargs: Optional[Mapping[str, Any]] = None,
        clip_grad_norm: Optional[float] = None,
        accumulate_gradients: int = 1,
        device: Optional[torch.device] = None,
        negative_sampler: Optional[NodeSampler] = None,
        num_workers: int = 0,
    ):
        """
        Initialize a new training loop.

        :param model:
            The model.
        :param similarity:
            The similarity.
        :param dataset:
            The dataset.
        :param loss:
            The loss instance.
        :param batch_size:
            The batch size, or None for full-batch training.
        :param optimizer_cls:
            The optimizer class.
        :param optimizer_kwargs:
            Keyword-based arguments for the optimizer.
        :param clip_grad_norm:
            Whether to apply gradient clipping (norm-based).
        :param accumulate_gradients:
            Accumulate gradients over batches. This can be used to simulate a larger batch size, while keeping the
            memory footprint small.
        :param device:
            The device on which to train.
        :param negative_sampler:
            An optional sampler of additional (negative) nodes per positive pair.
        :param num_workers:
            The number of workers to use for preparing batches.
        """
        super().__init__(
            model=model,
            train_batch_size=batch_size,
            optimizer_cls=optimizer_cls,
            optimizer_kwargs=optimizer_kwargs,
            clip_grad_norm=clip_grad_norm,
            accumulate_gradients=accumulate_gradients,
            device=device,
        )
        self.similarity = similarity
        self.loss = loss
        self.dataset = dataset
        self.alignment = dataset.alignment
        self.num_workers = num_workers
        self.negative_sampler = negative_sampler

    def _iter_batches(self) -> Iterable[AlignmentBatch]:  # noqa: D102
        # A fresh data loader per epoch re-shuffles the training alignment.
        return prepare_alignment_batch_data_loader(
            dataset=self.dataset,
            positive_batch_size=self.train_batch_size,
            negative_sampler=self.negative_sampler,
            num_workers=self.num_workers,
        )

    def _train_one_batch(self, batch: AlignmentBatch) -> Tuple[torch.Tensor, int]:
        """Compute the loss for one alignment batch; returns (loss, num_positives)."""
        # Unpack
        batch_node_indices, batch_alignment, negatives = batch

        # Calculate node representations
        node_repr = self.model(indices=batch_node_indices)

        # return batch loss
        return self.loss(
            alignment=batch_alignment,
            representations=node_repr,
            negatives=negatives,
        ), batch_alignment.shape[1]
| 8,475 | 30.509294 | 114 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/eval/matching.py | # coding=utf-8
"""Entity Alignment evaluation methods."""
from typing import Collection, Dict, Mapping, Optional, Tuple, TypeVar, Union
import torch
from .common import aggregate_ranks, get_rank
from ..data import MatchSideEnum, SIDES
from ..models import KGMatchingModel
from ..modules import Similarity
from ..utils.torch_utils import maximize_memory_utilization
from ..utils.types import IDAlignment
__all__ = [
'AlignmentEvaluator',
'evaluate_matching_model',
'evaluate_alignment',
]
T = TypeVar('T')
def evaluate_matching_model(
    model: KGMatchingModel,
    alignments: Mapping[T, IDAlignment],
    similarity: Similarity,
    eval_batch_size: Optional[int] = None,
    ks: Collection[int] = (1, 10, 50, 100),
) -> Tuple[Mapping[T, Mapping[str, float]], int]:
    """Evaluate a model on multiple alignments.

    :param model:
        The KG matching model to evaluate.
    :param alignments:
        A mapping of key -> alignment, where alignment is a LongTensor of shape (2, num_alignments).
    :param similarity:
        The similarity.
    :param eval_batch_size:
        The evaluation batch size.
    :param ks:
        The values for which to evaluate hits@k.

    :return:
        A tuple of (1) a mapping key -> subresult, where subresult maps metric names to values, and
        (2) the smallest evaluation batch size that succeeded across all alignments.
    """
    # Evaluation
    with torch.no_grad():
        # Set model in evaluation mode
        model.eval()
        result = {}
        # Tracks the smallest batch size which worked over all alignments.
        safe_eval_batch_size = None
        for key, alignment in alignments.items():
            # Only compute representations for nodes actually occurring in this alignment.
            alignment, indices = _reduce_alignment(alignment=alignment)
            partial_repr = model.get_node_representations(indices=indices)
            partial_result, this_eval_batch_size = evaluate_alignment(
                similarity=similarity,
                alignment=alignment,
                representations=partial_repr,
                eval_batch_size=eval_batch_size,
                ks=ks,
            )
            result[key] = partial_result
            # this_eval_batch_size is None for empty alignments.
            if this_eval_batch_size is not None:
                if safe_eval_batch_size is None:
                    safe_eval_batch_size = this_eval_batch_size
                else:
                    safe_eval_batch_size = min(safe_eval_batch_size, this_eval_batch_size)
        # NOTE(review): fails when no alignment yielded a batch size (e.g. all were empty).
        assert safe_eval_batch_size is not None
    return result, safe_eval_batch_size
def evaluate_alignment(
    similarity: Similarity,
    alignment: IDAlignment,
    representations: Mapping[MatchSideEnum, torch.FloatTensor],
    eval_batch_size: Optional[int] = None,
    ks: Collection[int] = (1, 10, 50, 100),
) -> Tuple[Dict[str, float], int]:
    """
    Evaluate an alignment.

    :param similarity:
        The similarity.
    :param alignment: shape: (2, num_alignments)
        The alignment.
    :param representations: side -> repr
        The node representations, a tensor of shape (num_nodes[side], d).
    :param eval_batch_size: int (positive, optional)
        The batch size to use for evaluation.
    :param ks:
        The values for which to compute hits@k.

    :return: A tuple with
        1) dictionary with keys 'mr, 'mrr', 'hits_at_k' for all k in ks.
        2) The maximum evaluation batch size.
    """
    num_alignments = alignment.shape[1]
    if num_alignments <= 0:
        # Nothing to evaluate; no batch size information available.
        return dict(), None
    # Matching is performed against *all* nodes of the other side, so the full
    # representation matrices are used directly; the alignment only indexes into them.
    # (A previously-present gather of aligned representations was dead code and has been removed.)
    left, right = [representations[side] for side in SIDES]
    # Ensure data is on correct device
    right, alignment = [t.to(device=left.device) for t in (right, alignment)]
    if eval_batch_size is None:
        eval_batch_size = num_alignments
    # Automatically shrink the batch size on out-of-memory errors.
    return maximize_memory_utilization(
        _evaluate_alignment,
        parameter_name='eval_batch_size',
        parameter_max_value=eval_batch_size,
        alignment=alignment,
        similarity=similarity,
        left=left,
        right=right,
        ks=ks,
    )
def _summarize_ranks(
    ranks: torch.LongTensor,
    n: Union[int, Tuple[int, int]],
    ks: Collection[int],
) -> Dict[str, float]:
    """Aggregate ranks overall and per matching direction."""
    # Normalize n to a pair (num_left, num_right).
    sizes = (n, n) if isinstance(n, int) else n
    # Metrics over both directions combined.
    summary = dict(aggregate_ranks(
        ranks=ranks,
        emr=(sum(sizes) / 2 + 1) / 2,
        ks=ks,
    ))
    # Metrics per direction.
    for idx, side in enumerate(SIDES):
        summary[side.value] = aggregate_ranks(
            ranks=ranks[idx],
            emr=(sizes[idx] + 1) / 2,
            ks=ks,
        )
    return summary
def _evaluate_alignment(
    eval_batch_size: int,
    alignment: IDAlignment,
    similarity: Similarity,
    left: torch.FloatTensor,
    right: torch.FloatTensor,
    ks: Collection[int],
) -> Dict[str, float]:
    """Evaluate an entity alignment.

    :param eval_batch_size:
        The evaluation batch size.
    :param alignment: shape: (2, num_alignments)
        The alignment.
    :param similarity:
        The similarity.
    :param left: shape: (num_left, dim)
        The left aligned representations.
    :param right: shape: (num_right, dim)
        The right aligned representations.
    :param ks:
        The values for which to calculate Hits@k.

    :return:
        The evaluation results as dictionary.
    """
    num_alignments = alignment.shape[1]
    # ranks[0]: rank of the true left node; ranks[1]: rank of the true right node.
    ranks = left.new_empty(2, num_alignments)
    for i in range(0, num_alignments, eval_batch_size):
        batch = alignment[:, i:i + eval_batch_size]

        # match a batch of right nodes to all left nodes
        sim_right_to_all_left = similarity.all_to_all(left, right[batch[1]]).t()
        ranks[0, i:i + eval_batch_size] = get_rank(sim=sim_right_to_all_left, true=batch[0])

        # match a batch of left nodes to all right nodes
        sim_left_to_all_right = similarity.all_to_all(left[batch[0]], right)
        ranks[1, i:i + eval_batch_size] = get_rank(sim=sim_left_to_all_right, true=batch[1])
    num_nodes = [n.shape[0] for n in (left, right)]
    # Aggregate into overall and per-side metrics.
    return _summarize_ranks(ranks=ranks, n=num_nodes, ks=ks)
def _reduce_alignment(alignment: IDAlignment) -> Tuple[IDAlignment, Mapping[MatchSideEnum, torch.LongTensor]]:
    """Map a global alignment to per-side unique node IDs plus a local alignment into them."""
    indices = {}
    local_columns = []
    for side, global_ids in zip(SIDES, alignment):
        # unique_ids: the distinct global IDs; local_ids: position of each original ID therein.
        unique_ids, local_ids = torch.unique(global_ids, sorted=False, return_inverse=True)
        indices[side] = unique_ids
        local_columns.append(local_ids)
    return torch.stack(local_columns, dim=0), indices
| 6,525 | 31.63 | 110 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/eval/common.py | """Common utility methods for evaluation."""
import logging
from typing import Collection, Mapping, Optional
import torch
logger = logging.getLogger(name=__name__)
# Small constant for floating point comparison
EPSILON = 1.0e-08
def get_rank(sim: torch.FloatTensor, true: torch.LongTensor) -> torch.FloatTensor:
    """Compute the rank, exploiting that there is only one true hit."""
    # Similarity of the true match per row, kept as a column for broadcasting.
    row_idx = torch.arange(true.shape[0])
    true_sim = sim[row_idx, true].unsqueeze(1)
    # Optimistic rank: only strictly better competitors count (one-based).
    optimistic = torch.sum(sim > true_sim, dim=1, dtype=torch.long).float() + 1
    # Pessimistic rank: every tie counts against the true match (one-based).
    pessimistic = torch.sum(sim >= true_sim, dim=1, dtype=torch.long).float()
    # Averaging resolves ties to their expected rank.
    return 0.5 * (optimistic + pessimistic)
def compute_ranks(
    scores: torch.FloatTensor,
    true_indices: torch.LongTensor,
    smaller_is_better: bool = True,
    mask: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
    """Compute the rank of the true hit.

    :param scores: shape: (k, n)
        The scores for each sample.
    :param true_indices: shape: (k,)
        Values between 0 (incl.) and n (excl.)
    :param smaller_is_better:
        Whether smaller of larger values are better.
    :param mask: shape: (m, 2), optional
        Optional mask for filtered setting

    :return: shape: (k,)
        The ranks, a number between 1 and n.
    """
    # Normalize so that larger scores are always better.
    if smaller_is_better:
        scores = -scores
    num_rows = scores.shape[0]
    # Score of the true choice per row, as a column vector for broadcasting.
    true_score = scores[torch.arange(0, num_rows), true_indices.flatten()].view(-1, 1)
    # Optimistic rank: only strictly better options precede the true one (one-based).
    best_rank = (scores > true_score).sum(dim=1) + 1
    # Pessimistic rank: every tie precedes the true one (one-based, hence no +1 after -1).
    worst_rank = (scores >= true_score).sum(dim=1)
    # Work with twice the average rank to stay in exact integer arithmetic.
    double_avg_rank = best_rank + worst_rank
    # Filtered setting: do not punish ranking *other* true options above the current one.
    if mask is not None:
        row_ids, col_ids = mask.t()
        correction = -2 * (scores[row_ids, col_ids] > true_score[row_ids, 0]).long()
        double_avg_rank.index_add_(dim=0, index=row_ids, source=correction)
    return 0.5 * double_avg_rank.float()
def aggregate_ranks(
    ranks: torch.FloatTensor,
    emr: float,
    ks: Collection[int] = (1, 10, 50, 100),
) -> Mapping[str, float]:
    """
    Compute rank aggregation metrics.

    :param ranks:
        The individual ranks.
    :param emr:
        The expected mean rank.
    :param ks:
        The values for which to compute Hits@k.

    :return:
        A dictionary with keys num_rank, mean_rank, median_rank, std_rank,
        adjusted_mean_rank, adjusted_mean_rank_index, mean_reciprocal_rank,
        and hits_at_k for every k in ks.
    """
    mean_rank = ranks.mean().item()
    # AMRI is only well-defined for emr > 1; the degenerate case maps to 0.
    amri = 1 - (mean_rank - 1) / (emr - 1) if emr > 1.0 else 0.0
    metrics = dict(
        num_rank=ranks.numel(),
        mean_rank=mean_rank,
        median_rank=ranks.median().item(),
        std_rank=ranks.std(unbiased=True).item(),
        adjusted_mean_rank=mean_rank / emr,
        adjusted_mean_rank_index=amri,
        mean_reciprocal_rank=ranks.reciprocal().mean().item(),
    )
    # Hits@k; EPSILON guards against floating point noise on the boundary.
    for k in ks:
        metrics[f'hits_at_{k}'] = (ranks <= (k + EPSILON)).float().mean().item()
    return metrics
| 4,478 | 36.957627 | 118 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/models/matching/base.py | # coding=utf-8
"""API for models for knowledge graph matching."""
import logging
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Any, Callable, Mapping, Optional, Type
import torch
from frozendict import frozendict
from torch import nn
from ...data import KnowledgeGraphAlignmentDataset, MatchSideEnum, SIDES
from ...data.reduction import KnowledgeGraphToGraphReduction
from ...modules.embeddings import get_embedding_pair
from ...modules.embeddings.base import EmbeddingNormalizationMode, NodeEmbeddingInitMethod
from ...modules.embeddings.norm import EmbeddingNormalizationMethod
from ...utils.common import get_subclass_by_name, kwargs_or_empty
from ...utils.torch_utils import ExtendedModule, maximize_memory_utilization
from ...utils.types import EntityIDs
logger = logging.getLogger(name=__name__)
__all__ = [
'GraphBasedKGMatchingModel',
'IndependentSideMixin',
'KGMatchingModel',
'PureEmbeddingModel',
'get_matching_model_by_name',
]
class KGMatchingModel(ExtendedModule):
    """
    Generic class for (knowledge) graph matching models of a specific form.

    The models produce vector representation for each node, and the matching is done by comparing these representations
    by some similarity measure.
    """

    #: The number of nodes on each side.
    num_nodes: Mapping[MatchSideEnum, int]

    def __init__(
        self,
        num_nodes: Mapping[MatchSideEnum, int],
    ):
        """
        Initialize the model.

        :param num_nodes:
            The number of nodes on each side.
        """
        super().__init__()
        # freeze against accidental modification
        self.num_nodes = frozendict(num_nodes)
        # initial guess for the representation batch size; shrunk on OOM by
        # maximize_memory_utilization in get_node_representations
        self.batch_size = sum(num_nodes.values())

    # pylint: disable=arguments-differ
    @abstractmethod
    def forward(
        self,
        indices: Optional[Mapping[MatchSideEnum, EntityIDs]] = None,
    ) -> Mapping[MatchSideEnum, torch.FloatTensor]:
        """Return embeddings for nodes on both sides.

        :param indices:
            If provided only return representations for these indices.

        :return: a mapping side -> representations
            where
                representations: shape: (num_nodes_on_side, embedding_dim)
        """
        raise NotImplementedError

    def _get_node_representations(
        self,
        indices: Mapping[MatchSideEnum, EntityIDs],
        batch_size: int,
    ) -> Mapping[MatchSideEnum, torch.FloatTensor]:
        """
        Batched calculation of node representations.

        Both sides are treated as one concatenated index range of length
        sum(len(indices[side])), and each batch slice is mapped back to
        per-side sub-ranges.

        :param indices:
            The indices for each side.
        :param batch_size:
            The batch size.

        :return:
            A mapping from side to node representations on side.
        """
        result = defaultdict(list)
        total_num_nodes = sum(v.shape[0] for v in indices.values())
        num_first_side = indices[SIDES[0]].shape[0]
        for start in range(0, total_num_nodes, batch_size):
            # construct per-side index slices for this batch
            batch_indices = dict()
            for i_side, side in enumerate(SIDES):
                # offset of this side within the concatenated range
                start_side = max(start - i_side * num_first_side, 0)
                # NOTE(review): the upper bound is clamped by num_nodes[side] rather than
                # indices[side].shape[0]; Python slicing tolerates over-long bounds, but
                # confirm this is intended for restricted index sets.
                end_side = min(max(start + batch_size - i_side * num_first_side, 0), self.num_nodes[side])
                if end_side - start_side > 0:
                    batch_indices[side] = indices[side][start_side:end_side].to(self.device)
            # update result with this batch's partial representations
            for side, partial_node_repr in self(indices=batch_indices).items():
                result[side].append(partial_node_repr)
        # concatenate the partial results per side
        return {
            side: torch.cat(partial_node_repr)
            for side, partial_node_repr in result.items()
        }

    def get_node_representations(
        self,
        indices: Optional[Mapping[MatchSideEnum, EntityIDs]] = None,
    ) -> Mapping[MatchSideEnum, torch.FloatTensor]:
        """
        Calculate node representations for all nodes using batching.

        The batch size is automatically reduced on out-of-memory errors, and the
        successful value is cached on the instance for subsequent calls.

        :param indices:
            Optional restriction to some indices.

        :return:
            The node representations.
        """
        if indices is None:
            # default: all node IDs on every side
            indices = {
                side: torch.arange(num, device=self.device)
                for side, num in self.num_nodes.items()
            }
        result, self.batch_size = maximize_memory_utilization(
            self._get_node_representations,
            parameter_name='batch_size',
            parameter_max_value=self.batch_size,
            indices=indices,
        )
        return result
class IndependentSideMixin(KGMatchingModel):
    """Mix-in for models which compute independent representations on each side."""

    def forward(
        self,
        indices: Optional[Mapping[MatchSideEnum, EntityIDs]] = None,
    ) -> Mapping[MatchSideEnum, torch.FloatTensor]:  # noqa: D102
        # No restriction given: request all representations on every side.
        if indices is None:
            indices = {side: None for side in SIDES}
        result = {}
        for side, side_indices in indices.items():
            result[side] = self._forward_side(side=side, indices=side_indices)
        return result

    @abstractmethod
    def _forward_side(
        self,
        side: MatchSideEnum,
        indices: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        """
        Compute node representations on one side.

        :param side:
            The side.
        :param indices:
            The indices. None means to compute all representations.

        :return: shape: (num_indices, embedding_dim)
            The node representations.
        """
        raise NotImplementedError
# pylint: disable=abstract-method
class GraphBasedKGMatchingModel(KGMatchingModel, ABC):
    """A knowledge graph matching model explicitly using the graph structure."""

    #: The reductions to adjacency matrices.
    reductions: Mapping[MatchSideEnum, KnowledgeGraphToGraphReduction]

    def __init__(
        self,
        dataset: KnowledgeGraphAlignmentDataset,
        reduction_cls: Type[KnowledgeGraphToGraphReduction],
        reduction_kwargs: Optional[Mapping[str, Any]] = None,
    ):
        """
        Initialize the model.

        :param dataset:
            The dataset.
        :param reduction_cls:
            The reduction strategy to obtain a (weighted) adjacency matrix from a knowledge graph.
        :param reduction_kwargs:
            Optional key-word based arguments to pass to the reduction.
        """
        super().__init__(num_nodes=dataset.num_nodes)
        reduction_kwargs = kwargs_or_empty(reduction_kwargs)
        # One reduction per side, registered as sub-modules so parameters are tracked.
        # NOTE(review): nn.ModuleDict requires string keys; this relies on MatchSideEnum
        # being a str-valued enum — confirm.
        self.reductions = nn.ModuleDict({
            side: reduction_cls(knowledge_graph=graph, **reduction_kwargs)
            for side, graph in dataset.graphs.items()
        })
def get_matching_model_by_name(
    name: str,
    normalizer: Optional[Callable[[str], str]] = None,
) -> Type[KGMatchingModel]:
    """
    Get a matching model class by name.

    :param name:
        The name.
    :param normalizer:
        An optional custom name normalization method; defaults to lower-casing.

    :return:
        The matching class.
    """
    norm = str.lower if normalizer is None else normalizer
    # The abstract graph-based intermediate class must not be resolvable by name.
    return get_subclass_by_name(
        base_class=KGMatchingModel,
        name=name,
        normalizer=norm,
        exclude={GraphBasedKGMatchingModel},
    )
class PureEmbeddingModel(IndependentSideMixin, KGMatchingModel):
    """A knowledge graph matching model with learned node representations without interaction between the nodes."""

    def __init__(
        self,
        dataset: KnowledgeGraphAlignmentDataset,
        embedding_dim: int = 3,
        node_embedding_init_method: NodeEmbeddingInitMethod = NodeEmbeddingInitMethod.sqrt_individual,
        node_embedding_init_config: Optional[Mapping[str, Any]] = None,
        node_embedding_normalization_method: EmbeddingNormalizationMethod = EmbeddingNormalizationMethod.none,
        node_embedding_normalization_mode: EmbeddingNormalizationMode = EmbeddingNormalizationMode.none,
        dropout: Optional[float] = None,
    ):
        """
        Initialize the model.

        :param dataset:
            The knowledge graph alignment dataset providing the node counts per side.
        :param embedding_dim: > 0
            The dimensionality of the embedding.
        :param node_embedding_init_method:
            The embedding initialization method used for the node embeddings.
        :param node_embedding_init_config:
            Additional keyword based arguments for the initializer.
        :param node_embedding_normalization_method:
            The node embedding normalization method.
        :param node_embedding_normalization_mode:
            The node embedding normalization mode.
        :param dropout:
            If present, apply dropout to the node embeddings.
        """
        super().__init__(num_nodes=dataset.num_nodes)
        # One trainable embedding per side; all representation logic lives here.
        self.embeddings = get_embedding_pair(
            init=node_embedding_init_method,
            dataset=dataset,
            embedding_dim=embedding_dim,
            dropout=dropout,
            trainable=True,
            init_config=node_embedding_init_config,
            norm=node_embedding_normalization_method,
            normalization_mode=node_embedding_normalization_mode,
        )
        self.reset_parameters()

    def _forward_side(
        self,
        side: MatchSideEnum,
        indices: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:  # noqa: D102
        # Simple lookup into the side's embedding; no cross-node interaction.
        return self.embeddings[side](indices=indices)
| 9,382 | 32.996377 | 130 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/models/matching/gcn_align.py | # coding=utf-8
"""
Implementation of GCN-Align.
The paper introducing the model can be found at https://www.aclweb.org/anthology/D18-1032.pdf.
The authors' implementation can be found at https://github.com/1049451037/GCN-Align and they also refer to
https://github.com/1049451037/HIN-Align for an improved implementation.
"""
import logging
from typing import Any, Mapping, Optional
import torch
from torch import nn
from .base import GraphBasedKGMatchingModel, IndependentSideMixin
from ...data import KnowledgeGraphAlignmentDataset, MatchSideEnum, SIDES
from ...data.reduction import DropRelationInformationKnowledgeGraphToGraphReduction, KnowledgeGraphToGraphReduction, target_normalization
from ...modules.embeddings.base import Embedding, EmbeddingNormalizationMode, NodeEmbeddingInitMethod, get_embedding_pair
from ...modules.embeddings.norm import EmbeddingNormalizationMethod
from ...modules.graph import GCNBlock, IdentityMessageCreator, MessagePassingBlock, OnlyUpdate, SumAggregator
logger = logging.getLogger(name=__name__)
class GCNAlign(IndependentSideMixin, GraphBasedKGMatchingModel):
    """GCN-Align model implementation."""
    #: The node embeddings, one per side.
    node_embeddings: Mapping[MatchSideEnum, Embedding]
    def __init__(
        self,
        dataset: KnowledgeGraphAlignmentDataset,
        reduction_cls: Optional[KnowledgeGraphToGraphReduction] = None,
        reduction_kwargs: Optional[Mapping[str, Any]] = None,
        embedding_dim: int = 200,
        activation_cls: nn.Module = nn.ReLU,
        n_layers: int = 2,
        use_conv_weights: bool = False,
        node_embedding_init_method: NodeEmbeddingInitMethod = NodeEmbeddingInitMethod.sqrt_total,  # 'total', # 'individual'
        vertical_sharing: bool = True,
        node_embedding_dropout: Optional[float] = None,
        node_embedding_init_config: Optional[Mapping[str, Any]] = None,
    ):
        """
        Initialize the model.
        :param dataset:
            The dataset.
        :param reduction_cls:
            The reduction strategy to obtain a (weighted) adjacency matrix from a knowledge graph.
            Defaults to dropping relation information with target-normalized edge weights.
        :param reduction_kwargs:
            Additional keyword-based arguments for the reduction strategy.
        :param embedding_dim:
            The dimension of the node embedding.
        :param activation_cls:
            The non-linear activation to use between the message passing steps.
        :param n_layers:
            The number of layers.
        :param use_conv_weights:
            Whether to use convolution weights.
        :param node_embedding_init_method:
            The method used to initialize the node embeddings.
        :param vertical_sharing:
            Whether to use "vertical weight sharing", i.e. apply the same convolution weights for all layers.
        :param node_embedding_dropout:
            An optional dropout to use on the node embeddings.
        :param node_embedding_init_config:
            Additional keyword-based arguments for the node embedding initializer.
        """
        if reduction_cls is None:
            # Default reduction: ignore relation types, normalize message weights by target.
            reduction_cls = DropRelationInformationKnowledgeGraphToGraphReduction
            reduction_kwargs = dict(
                normalization=target_normalization,
            )
        super().__init__(dataset=dataset, reduction_cls=reduction_cls, reduction_kwargs=reduction_kwargs)
        # node embeddings; L2-normalized on every forward pass
        self.node_embeddings = get_embedding_pair(
            init=node_embedding_init_method,
            dataset=dataset,
            embedding_dim=embedding_dim,
            dropout=node_embedding_dropout,
            trainable=True,
            init_config=node_embedding_init_config,
            norm=EmbeddingNormalizationMethod.l2,
            normalization_mode=EmbeddingNormalizationMode.every_forward,
        )
        # GCN layers
        self.n_layers = n_layers
        self.use_conv_weights = use_conv_weights
        self.vertical_sharing = vertical_sharing
        blocks = []
        if use_conv_weights:
            if self.vertical_sharing:
                # a single GCN block (and activation) instance is re-used in every layer
                gcn_block = GCNBlock(input_dim=embedding_dim, output_dim=embedding_dim, use_bias=True)
                activation = activation_cls()
                for _ in range(n_layers):
                    blocks.append(gcn_block)
                    blocks.append(activation)
            else:
                # each layer gets its own GCN block instance
                for _ in range(n_layers):
                    gcn_block = GCNBlock(input_dim=embedding_dim, output_dim=embedding_dim, use_bias=True)
                    activation = activation_cls()
                    blocks.append(gcn_block)
                    blocks.append(activation)
        else:
            # parameter-free message passing: sum-aggregate the neighbour representations
            message_block = MessagePassingBlock(
                message_creator=IdentityMessageCreator(),
                message_aggregator=SumAggregator(),
                node_updater=OnlyUpdate(),
            )
            for _ in range(n_layers):
                blocks.append(message_block)
                activation = activation_cls()
                blocks.append(activation)
        # NOTE: both sides wrap the very same block instances, i.e. layer weights are shared between the two graphs.
        side_to_modules = {
            side: nn.ModuleList(blocks)
            for side in SIDES
        }
        self.layers = nn.ModuleDict(modules=side_to_modules)
        # Initialize parameters
        self.reset_parameters()
    def _forward_side(
        self,
        side: MatchSideEnum,
        indices: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:  # noqa: D102
        # always compute representations for all nodes; restriction to indices happens at the end
        x = self.node_embeddings[side](indices=None)
        # Prepare message passing keyword arguments
        adjacency = self.reductions[side]()
        message_passing_kwargs = {
            'source': adjacency.source,
            'target': adjacency.target,
            'edge_weights': adjacency.values,
        }
        # forward pass through all layers
        # (fixed: removed the redundant duplicated membership check inside the branch)
        if side in self.layers.keys():
            layers = self.layers[side]
        else:
            logger.warning('No layers for side %s', side)
            layers = []
        for layer in layers:
            if isinstance(layer, MessagePassingBlock):
                x = layer(x, **message_passing_kwargs)
            else:
                x = layer(x)
        # Select indices if requested
        if indices is not None:
            x = x[indices]
        return x
| 6,089 | 37.789809 | 137 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/utils/types.py | """Type annotation aliases."""
import torch
#: A (num_triples, 3) LongTensor of ID triples; columns are (head, relation, tail).
Triples = torch.LongTensor
#: A (n,) LongTensor of entity IDs.
EntityIDs = torch.LongTensor
#: A (n,) LongTensor of relation IDs.
RelationIDs = torch.LongTensor
#: A (n,) LongTensor of node IDs.
NodeIDs = torch.LongTensor
#: A (2, n) LongTensor of IDs; one aligned pair per column.
IDAlignment = torch.LongTensor
#: A (2, n) LongTensor of IDs; one (source, target) edge per column.
EdgeTensor = torch.LongTensor
| 381 | 17.190476 | 30 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/utils/torch_utils.py | """Utility methods using pytorch."""
import itertools
import logging
from abc import ABC
from collections import defaultdict
from operator import itemgetter
from typing import Any, Callable, MutableMapping, Optional, Sequence, Tuple, Type, TypeVar, Union
import numpy
import torch
from torch import nn, optim
from .common import get_subclass_by_name, integer_portion, reduce_kwargs_for_method
from .types import EdgeTensor, NodeIDs
logger = logging.getLogger(name=__name__)
def send_messages(
    edge_tensor: EdgeTensor,
    source_data: torch.FloatTensor,
    edge_weights: Optional[torch.FloatTensor] = None,
    accumulator: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
    """
    Perform one round of (optionally weighted) message passing along the given edges.
    :param edge_tensor: shape: (2, num_edges)
        The edges as (source, target) tuples.
    :param source_data: shape: (num_nodes, dim)
        The node features.
    :param edge_weights: shape: (num_edges,)
        Edge weights (optional).
    :param accumulator: shape: (num_nodes, dim)
        The accumulator. Not modified in-place; a new tensor is returned.
    :return: shape: (num_nodes, dim)
        The updated node representations.
    """
    source, target = edge_tensor
    # Gather the representation of each edge's source node.
    messages = source_data.index_select(dim=0, index=source)
    if edge_weights is not None:
        # Broadcast scalar edge weights over the feature dimension.
        if edge_weights.ndimension() < messages.ndimension():
            edge_weights = edge_weights.unsqueeze(dim=-1)
        messages = messages * edge_weights
    if accumulator is None:
        accumulator = torch.zeros_like(source_data)
    # Sum the messages at their target nodes (out-of-place index_add).
    return accumulator.index_add(dim=0, index=target, source=messages)
def get_device(
    device: Union[None, str, torch.device],
) -> torch.device:
    """Resolve the device, either specified as name, or device."""
    # None defaults to CUDA; strings are converted to proper device objects.
    if device is None:
        device = 'cuda'
    if isinstance(device, str):
        device = torch.device(device=device)
    assert isinstance(device, torch.device)
    # Fall back to CPU if CUDA was requested but is not available.
    if device.type == 'cuda' and not torch.cuda.is_available():
        logger.warning('Requested device %s, but CUDA is unavailable. Falling back to cpu.', device)
        device = torch.device('cpu')
    return device
def split_tensor(
    tensor: torch.Tensor,
    ratios: Union[float, Sequence[float]],
    shuffle: Optional[bool] = True,
    dim: Optional[int] = 0,
    seed: Optional[int] = 42,
) -> Sequence[torch.Tensor]:
    """
    Split tensor into multiple partitions along a dimension.
    The splits are performed consecutively: each ratio determines the relative size of
    the next partition, taken from what remains after the previous splits.
    :param tensor:
        The tensor to split.
    :param ratios:
        A sequence of floats between [0, 1] specifying the ratio of the first partition of each split.
    :param shuffle:
        Whether to randomize order of data.
    :param dim:
        The dimension to split along.
    :param seed:
        The random seed to use for shuffling.
    :return:
        A sequence of disjoint subsets of the input tensor.
    """
    if isinstance(ratios, float):
        ratios = [ratios]
    total = tensor.shape[dim]
    if shuffle:
        # Use a seeded generator for reproducible permutations.
        generator = torch.manual_seed(seed=seed) if seed is not None else torch.random.default_generator
        order = torch.randperm(n=total, generator=generator, device=tensor.device)
    else:
        order = torch.arange(0, total, device=tensor.device)
    partitions = []
    remaining = order
    for ratio in ratios:
        cut = integer_portion(number=remaining.shape[0], ratio=ratio)
        selected, remaining = remaining[:cut], remaining[cut:]
        partitions.append(tensor.index_select(dim=dim, index=selected))
    # Whatever is left after all splits forms the final partition.
    partitions.append(tensor.index_select(dim=dim, index=remaining))
    return partitions
def _guess_num_nodes(
num_nodes: Optional[int],
source: Optional[NodeIDs] = None,
target: Optional[NodeIDs] = None,
) -> int:
"""Try to guess the number of nodes."""
if num_nodes is not None:
return num_nodes
if source is None and target is None:
raise ValueError('If no num_nodes are given, either source, or target must be given!')
return max(x.max().item() for x in (source, target) if x is not None)
def get_optimizer_class_by_name(name: str) -> Type[optim.Optimizer]:
    """Return an optimizer class given its name (case-insensitive)."""
    return get_subclass_by_name(
        base_class=optim.Optimizer,
        name=name,
        normalizer=str.lower,
    )
def _is_oom_error(error: RuntimeError) -> bool:
"""Check whether a runtime error was caused by insufficient memory."""
message = error.args[0]
# CUDA out of memory
if 'CUDA out of memory.' in message:
return True
# CPU out of memory
if "[enforce fail at CPUAllocator.cpp:64] . DefaultCPUAllocator: can't allocate memory:" in message:
return True
return False
R = TypeVar('R')
def maximize_memory_utilization(
func: Callable[..., R],
parameter_name: str,
parameter_max_value: int,
*args,
**kwargs
) -> Tuple[R, int]: # noqa: D401
"""
Iteratively reduce parameter value until no RuntimeError is generated by CUDA.
:param func:
The callable.
:param parameter_name:
The name of the parameter to maximise.
:param parameter_max_value:
The maximum value to start with.
:param args:
Additional positional arguments for func. Does _not_ include parameter_name!
:param kwargs:
Additional keyword-based arguments for func. Does _not_ include parameter_name!
:return:
The result, as well as the maximum value which led to successful execution.
"""
result = None
direct_success = True
if not all((not torch.is_tensor(obj) or obj.device.type == 'cuda') for obj in itertools.chain(args, kwargs.values())):
logger.warning('Using maximize_memory_utilization on non-CUDA tensors. This may lead to undocumented crashes due to CPU OOM killer.')
while parameter_max_value > 0:
p_kwargs = {parameter_name: parameter_max_value}
try:
result = func(*args, **p_kwargs, **kwargs)
if not direct_success:
logger.info('Execution succeeded with %s=%d', parameter_name, parameter_max_value)
break
except RuntimeError as runtime_error:
# Failed at least once
direct_success = False
# clear cache
torch.cuda.empty_cache()
# check whether the error is an out-of-memory error
if not _is_oom_error(error=runtime_error):
raise runtime_error
logger.info('Execution failed with %s=%d', parameter_name, parameter_max_value)
parameter_max_value //= 2
if parameter_max_value == 0:
raise MemoryError(f'Execution did not even succeed with {parameter_name}=1.')
return result, parameter_max_value
def construct_optimizer_from_config(model: nn.Module, optimizer_config: MutableMapping[str, Any]) -> optim.Optimizer:
    """
    Create a pytorch optimizer for a model, given a config.
    :param model:
        The model whose trainable parameters shall be optimized.
    :param optimizer_config:
        The config: dict(
            cls=<OPTIMIZER_CLASS_NAME>,
            **kwargs,
        )
        where kwargs are passed down to the optimizer's constructor, and stripped before from unused arguments.
        Note: the 'cls' key is popped, i.e. the mapping is modified.
    :return:
        The optimizer instance.
    """
    cls_name = optimizer_config.pop('cls')
    optimizer_cls = get_optimizer_class_by_name(name=cls_name)
    # Strip keyword arguments which the optimizer's constructor does not accept.
    optimizer_config = reduce_kwargs_for_method(optimizer_cls.__init__, kwargs=optimizer_config, raise_on_missing=False)
    # Only parameters which require gradients are handed to the optimizer.
    trainable_parameters = (p for p in model.parameters() if p.requires_grad)
    return optimizer_cls(params=trainable_parameters, **optimizer_config)
# pylint: disable=abstract-method
class ExtendedModule(nn.Module):
    """Extends nn.Module by a few utility methods."""
    @property
    def device(self) -> torch.device:
        """Return the model's device. Raises ValueError if there is none, or more than one."""
        # Collect the devices of all parameters and buffers.
        devices = {
            tensor.data.device
            for tensor in itertools.chain(self.parameters(), self.buffers())
        }
        if len(devices) == 0:
            raise ValueError('Could not infer device, since there are neither parameters nor buffers.')
        elif len(devices) > 1:
            # Attach the full parameter/buffer listing to ease debugging.
            device_info = dict(
                parameters=dict(self.named_parameters()),
                buffers=dict(self.named_buffers()),
            )
            raise ValueError(f'Ambiguous device! Found: {devices}\n\n{device_info}')
        return next(iter(devices))
    def reset_parameters(self):
        """Reset the model's parameters."""
        # Make sure that all modules with parameters do have a reset_parameters method.
        # Track parameter ids which have not (yet) been re-initialised.
        uninitialized_parameters = set(map(id, self.parameters()))
        # Maps parameter id -> modules containing it; only used for the warning below.
        parents = defaultdict(list)
        # Recursively visit all sub-modules
        task_list = []
        for name, module in self.named_modules():
            # skip self
            if module is self:
                continue
            # Track parents for blaming
            for p in module.parameters():
                parents[id(p)].append(module)
            # call reset_parameters if possible
            if hasattr(module, 'reset_parameters'):
                # The number of dots in the module name equals its nesting depth.
                task_list.append((name.count('.'), module))
        # initialize from bottom to top
        # This ensures that specialized initializations will take priority over the default ones of its components.
        for module in map(itemgetter(1), sorted(task_list, reverse=True, key=itemgetter(0))):
            module.reset_parameters()
            uninitialized_parameters.difference_update(map(id, module.parameters()))
        # emit warning if there where parameters which were not initialised by reset_parameters.
        if len(uninitialized_parameters) > 0:
            logger.warning('reset_parameters() not found for all modules containing parameters. %d parameters where likely not initialised.', len(uninitialized_parameters))
            # Additional debug information
            for i, p_id in enumerate(uninitialized_parameters, start=1):
                logger.debug('[%3d] Parents to blame: %s', i, parents.get(p_id))
class SparseMatrix(ExtendedModule, ABC):
    """An abstract (sparse) matrix supporting multiplication with dense tensors."""
    #: The shape (n_rows, n_cols)
    shape: Tuple[int, int]
    def __init__(self, shape: Tuple[int, int]):
        """
        Initialize matrix.
        :param shape:
            The shape, (n_rows, n_cols).
        """
        super().__init__()
        self.shape = shape
    def __matmul__(self, other: torch.Tensor) -> torch.Tensor:
        """
        Matrix-matrix multiplication, i.e. self @ other.
        :param other: shape: (n_cols, d)
            The right-hand side matrix.
        :return: shape: (n_rows, d)
            The matrix product.
        """
        # Guard: the inner dimensions have to agree.
        if other.shape[0] != self.shape[1]:
            raise ValueError(f'Shape mismatch: self.shape={self.shape}, other.shape={other.shape}. {self.shape[1]} != {other.shape[0]}.')
        return self._real_matmul(other=other)
    # pylint: disable=arguments-differ
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return self @ x."""
        return self @ x
    def _real_matmul(self, other: torch.Tensor) -> torch.Tensor:
        """Perform the matrix-matrix multiplication."""
        raise NotImplementedError
    def t(self) -> 'SparseMatrix':
        """Matrix transposition."""
        raise NotImplementedError
    def detach(self) -> 'SparseMatrix':
        """Detaches the values, i.e. breaks the gradient flow."""
        raise NotImplementedError
    def dense(self) -> torch.Tensor:
        """Return a dense version of the matrix."""
        raise NotImplementedError
class SparseCOOMatrix(SparseMatrix):
    """A sparse matrix in COO format."""
    #: The underlying sparse tensor holding indices and values of the non-zero elements.
    sparse_matrix: torch.sparse.Tensor
    def __init__(
        self,
        matrix: torch.sparse.Tensor
    ):
        """
        Initialize the matrix.
        :param matrix:
            The matrix, a two-dimensional sparse tensor.
        """
        super().__init__(shape=matrix.shape)
        assert len(matrix.shape) == 2
        # Registered as buffer so that it moves with the module (.to(device), state_dict).
        self.register_buffer(name='sparse_matrix', tensor=matrix.coalesce())
    @staticmethod
    def from_indices_values_pair(
        indices: torch.LongTensor,
        values: Optional[torch.Tensor] = None,
        size: Optional[Union[int, Tuple[int, int]]] = None,
    ) -> 'SparseCOOMatrix':
        """
        Instantiate the matrix using a pair of indices and optional values.
        :param indices: shape: (2, nnz)
            The indices.
        :param values: shape: (nnz,)
            The values. If None, unit weights are used.
        :param size:
            The size. If None, infer from indices. An integer denotes a square matrix.
        :return:
            The matrix.
        """
        if size is None:
            # infer the size from the largest index per dimension
            size = tuple((indices.max(dim=1).values + 1).tolist())
        if isinstance(size, int):
            size = (size, size)
        # sanity check: every index has to lie within the requested size
        for dim, (index_dim, size_dim) in enumerate(zip(indices, size)):
            max_id_on_dim = index_dim.max().item()
            if max_id_on_dim >= size_dim:
                raise ValueError(f'Index out of range for dim={dim}: {max_id_on_dim} vs. {size_dim}')
        if values is None:
            # default: unit weight for every entry
            values = indices.new_ones(indices.shape[1], dtype=torch.float32)
        return SparseCOOMatrix(matrix=torch.sparse_coo_tensor(indices=indices, values=values, size=size))
    @staticmethod
    def from_edge_tensor(
        edge_tensor: torch.LongTensor,
        edge_weights: Optional[torch.Tensor] = None,
        size: Optional[Union[int, Tuple[int, int]]] = None,
    ) -> 'SparseCOOMatrix':
        """
        Construct a sparse adjacency matrix for a given edge_tensor.
        :param edge_tensor: shape: (2, num_edges)
            The edge tensor, elements: (source, target)
        :param edge_weights: shape: (num_edges,)
            Edge weights.
        :param size: >0
            The size, format num_nodes or (num_targets, num_sources).
        :return:
            The adjacency matrix.
        """
        # flip (source, target) -> (target, source): rows correspond to targets, columns to sources
        return SparseCOOMatrix.from_indices_values_pair(
            indices=edge_tensor.flip(0),
            values=edge_weights,
            size=size,
        )
    @staticmethod
    def from_dense(
        dense: torch.Tensor,
    ) -> 'SparseCOOMatrix':
        """
        Construct a sparse matrix from a given dense version.
        :param dense: shape: (m, n)
            The dense matrix. Should have some/many zero elements.
        :return:
            The sparse matrix containing only the non-zero elements.
        """
        # convert to sparse matrix
        indices = dense.nonzero(as_tuple=True)
        values = dense[indices]
        return SparseCOOMatrix.from_indices_values_pair(
            indices=torch.stack(indices, dim=0),
            values=values,
            size=dense.shape,
        )
    @staticmethod
    def eye(n: int, device: Union[torch.device, str, None] = None) -> 'SparseCOOMatrix':
        """
        Construct a sparse identity matrix.
        :param n:
            The dimension.
        :param device:
            The device.
        :return:
            The identity matrix.
        """
        # indices (i, i) for i = 0, ..., n - 1; values default to 1
        return SparseCOOMatrix.from_indices_values_pair(
            indices=torch.arange(n, device=device).unsqueeze(dim=0).repeat(2, 1),
            size=n,
        )
    @property
    def indices(self) -> torch.LongTensor:
        """Return the indices, shape: (2, nnz)."""
        return self.sparse_matrix.indices()
    @property
    def values(self) -> torch.FloatTensor:
        """Return the values, shape: (nnz,)."""
        return self.sparse_matrix.values()
    def sum(self, dim: int) -> torch.Tensor:
        """
        Compute the sum along a dimension.
        :param dim:
            The dimension to reduce. From {0, 1}.
        :return: shape: (shape[1 - dim],)
            The marginal sums as a dense tensor, indexed by the *other* dimension.
            Rows/columns without any entry contribute a sum of zero.
        """
        return torch.sparse.sum(input=self.sparse_matrix, dim=dim).to_dense()
    def normalize(
        self,
        dim: int = 1,
        target_sum: Optional[float] = None,
    ) -> 'SparseCOOMatrix':
        """
        Normalize the matrix row-wise / column-wise.
        :param dim:
            The dimension.
        :param target_sum:
            An optional target value for the row/column sum. Defaults to 1.
        :return:
            The normalized matrix.
        """
        # NOTE: rows/columns without any entry have sum zero, i.e. an infinite reciprocal here.
        weights = self.sum(dim=dim).reciprocal()
        if target_sum is not None:
            weights = weights * target_sum
        # broadcast the per-row/column factor to the individual entries, and combine with the existing values
        weights = self.scatter(x=weights, dim=1 - dim) * self.values
        return self.with_weights(weights=weights)
    @property
    def source(self) -> torch.LongTensor:
        """Return the source indices for message passing (= column indices)."""
        return self.indices[1]
    @property
    def target(self) -> torch.LongTensor:
        """Return the target indices for message passing (= row indices)."""
        return self.indices[0]
    def with_weights(self, weights: torch.Tensor) -> 'SparseCOOMatrix':
        """Return a matrix of the same structure, with adjusted weights."""
        return SparseCOOMatrix.from_indices_values_pair(
            indices=self.indices,
            values=weights,
            size=self.shape,
        )
    def without_weights(self) -> 'SparseCOOMatrix':
        """Return the matrix without weights, i.e. all values replaced by ones."""
        # coalesce first, so that duplicate entries are merged before their weights are dropped
        self.coalesce_()
        return SparseCOOMatrix(
            matrix=torch.sparse_coo_tensor(
                indices=self.sparse_matrix.indices(),
                values=torch.ones_like(self.sparse_matrix.values()),
                size=self.shape,
            ),
        )
    def scatter(self, x: torch.Tensor, dim: int = 1) -> torch.Tensor:
        """
        Scatter elements of x to the edges.
        :param x: shape: (self.shape[dim], d1, ..., dk)
            The values for each node.
        :param dim: The dimension, from {0, 1}.
            dim=0 -> from target
            dim=1 -> from source
        :return: shape: (nnz, d1, ..., dk)
            The values broadcasted to each edge.
        """
        if x.shape[0] != self.shape[dim]:
            raise ValueError(x.shape, self.shape[dim])
        return x.index_select(dim=0, index=self.indices[dim])
    def gather(self, m: torch.Tensor, dim: int = 0) -> torch.Tensor:
        """
        Gather elements of m from edges to nodes.
        :param m: shape: (num_edges, d1, ..., dk)
            The values for each edge.
        :param dim: The dimension, from {0, 1}.
            dim=0 -> to source
            dim=1 -> to target
        :return: shape: (num_nodes, d1, ..., dk)
            The values accumulated (summed) at each node.
        """
        if m.shape[0] != self.indices.shape[1]:
            raise ValueError(m.shape, self.indices.shape[1])
        return m.new_zeros(self.shape[dim], *m.shape[1:]).index_add(dim=0, index=self.indices[dim], source=m)
    def t(self) -> 'SparseCOOMatrix':
        """Transposed matrix."""
        return SparseCOOMatrix(matrix=self.sparse_matrix.t())
    def _real_matmul(self, other: torch.Tensor) -> torch.Tensor:  # noqa: D102
        # torch.sparse.mm requires float values
        if self.values.is_floating_point() and other.is_floating_point():
            return torch.sparse.mm(mat1=self.sparse_matrix, mat2=other)
        # fallback for non-float operands: manual scatter-multiply-gather
        msg = self.scatter(x=other)
        if self.values is not None:
            msg = msg * self.values.view(msg.shape[0], 1)
        return self.gather(m=msg)
    def coalesce_(self) -> 'SparseCOOMatrix':
        """In-place index de-duplication."""
        self.sparse_matrix = self.sparse_matrix.coalesce()
        return self
    def coalesce(self) -> 'SparseCOOMatrix':
        """
        Collapses duplicate entries for (row, col) in indices.
        Since COO format permits duplicates (row, col), and some operations require unique indices, this operation
        collapses them, by adding the elements. This operation is quite costly.
        """
        return SparseCOOMatrix(matrix=self.sparse_matrix.coalesce())
    def __add__(self, other: 'SparseCOOMatrix') -> 'SparseCOOMatrix':  # noqa: D105
        if not isinstance(other, SparseCOOMatrix):
            raise NotImplementedError
        return SparseCOOMatrix(matrix=self.sparse_matrix + other.sparse_matrix)
    def detach(self) -> 'SparseCOOMatrix':  # noqa: D102
        return SparseCOOMatrix(matrix=self.sparse_matrix.detach())
    def dense(self) -> torch.Tensor:  # noqa: D102
        assert len(self.shape) == 2
        # coalesce so that duplicate indices do not silently overwrite each other below
        self.coalesce_()
        result = self.values.new_zeros(size=self.shape)
        result[self.indices[0], self.indices[1]] = self.values
        return result
    @property
    def edge_tensor(self) -> torch.LongTensor:
        """Return the edge_tensor view of the adjacency matrix, shape: (2, num_edges)."""
        # inverse of from_edge_tensor: rows are (source, target)
        return torch.stack([
            self.source,
            self.target,
        ], dim=0)
    @property
    def edge_weights(self) -> torch.FloatTensor:
        """Return the edge_weights view of the adjacency matrix."""
        return self.values
    @property
    def nnz(self) -> int:
        """Return the number of occupied indices."""
        return self.indices.shape[1]
    def extra_repr(self) -> str:
        """Return a string with some basic information."""
        return f'size={self.shape}, nnz={self.nnz}, sparsity={1. - (self.nnz / numpy.prod(self.shape)):.2%}'
| 21,537 | 32.86478 | 172 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/data/reduction.py | """Reduction strategies from Knowledge Graph to (weighted) uni-relational graphs."""
import enum
import logging
from typing import Callable, Optional
import torch
from .knowledge_graph import KnowledgeGraph
from ..utils.torch_utils import ExtendedModule, SparseCOOMatrix
logger = logging.getLogger(name=__name__)
# pylint: disable=abstract-method
class KnowledgeGraphToGraphReduction(ExtendedModule):
    r"""
    Base class for methods reducing the full KG tensor to a single adjacency matrix.
    A knowledge graph (KG) comprises a set of triples :math:`\mathcal{T} = \{(h, r, t)\}`, where
    :math:`h, t \in \mathcal{E}` are entities, and :math:`r \in \mathcal{R}` are relations.
    The KG can also be represenated by a three-dimensional binary tensor
    :math:`\mathbf{T} \in \{0, 1\}^{E \times R \times E}`, where :math:`E := |\mathcal{E}|`, and :math:`R := |\mathcal{R}|`.
    Often GCN-based models are only defined for uni-relational graphs. Thus, the KG adjacency tensor :math:`\mathbf{T}`
    needs to be reduced to a (weighted) adjacency matrix :math:`\mathbf{A} \in \mathbb{R}^{E \times E}`.
    """
    # pylint: disable=arguments-differ
    def forward(self) -> SparseCOOMatrix:
        """Get the (weighted) uni-relational adjacency matrix."""
        # NOTE(review): `get_adjacency` is not defined on this class or its visible bases;
        # subclasses appear to either implement it, or override `forward` directly
        # (cf. StaticKnowledgeGraphToGraphReduction) — confirm against other subclasses.
        return self.get_adjacency()
def _get_raw_edge_tensor(knowledge_graph: KnowledgeGraph) -> torch.LongTensor:
    """Get the raw edge_tensor, i.e. {{(h,t) | (h,r,t) in T}}."""
    # Keep only head and tail columns, transposed to shape (2, num_triples).
    head_tail = knowledge_graph.triples[:, [0, 2]]
    return head_tail.t()
class StaticKnowledgeGraphToGraphReduction(KnowledgeGraphToGraphReduction):
    """A base class for parameter-free reduction."""
    def __init__(
        self,
        knowledge_graph: KnowledgeGraph,
        normalization: Optional[Callable[[SparseCOOMatrix], SparseCOOMatrix]] = None,
    ):
        """
        Initialize the reduction strategy.
        :param knowledge_graph:
            The knowledge graph to reduce.
        :param normalization:
            An optional normalization of the resulting adjacency matrix.
        """
        super().__init__()
        # The reduction has no parameters, hence the adjacency can be computed once upfront.
        matrix = self.get_static_adjacency(knowledge_graph=knowledge_graph)
        if normalization is not None:
            matrix = normalization(matrix)
        self.adjacency = matrix
    def get_static_adjacency(self, knowledge_graph: KnowledgeGraph) -> SparseCOOMatrix:
        """Compute the adjacency matrix in advance."""
        raise NotImplementedError
    def forward(self) -> SparseCOOMatrix:  # noqa: D102
        # Simply return the pre-computed adjacency matrix.
        return self.adjacency
# pylint: disable=abstract-method
class DropRelationInformationKnowledgeGraphToGraphReduction(StaticKnowledgeGraphToGraphReduction):
    """Drop the relation information, i.e. there is an edge if there is at least one triple."""
    def __init__(
        self,
        knowledge_graph: KnowledgeGraph,
        normalization: Optional[Callable[[SparseCOOMatrix], SparseCOOMatrix]] = None,
        unique: bool = True,
        add_self_loops: bool = False,
        add_inverse: bool = False,
    ):
        """
        Initialize the reduction strategy.
        :param knowledge_graph:
            The knowledge graph to reduce.
        :param normalization:
            An optional normalization of the resulting adjacency matrix.
        :param unique:
            Whether to drop duplicate edges.
        :param add_self_loops:
            Whether to add self-loops.
        :param add_inverse:
            Whether to add inverse edges, i.e. make the adjacency symmetric.
        """
        # Store the options first: the base class constructor calls get_static_adjacency,
        # which reads them.
        self.unique = unique
        self.add_self_loops = add_self_loops
        self.add_inverse = add_inverse
        super().__init__(knowledge_graph=knowledge_graph, normalization=normalization)
    def get_static_adjacency(self, knowledge_graph: KnowledgeGraph) -> SparseCOOMatrix:  # noqa: D102
        edges = _get_raw_edge_tensor(knowledge_graph)
        if self.add_inverse:
            # make the graph symmetric by appending reversed edges
            edges = torch.cat([edges, edges.flip(0)], dim=1)
        if self.add_self_loops:
            loops = torch.arange(knowledge_graph.num_entities, device=edges.device).view(1, -1).repeat(2, 1)
            edges = torch.cat([edges, loops], dim=-1)
        # Drop duplicates
        if self.unique:
            before = edges.shape[1]
            edges = torch.unique(edges, dim=1)
            after = edges.shape[1]
            if after < before:
                logger.info('Dropped %d/%d edges.', before - after, before)
        return SparseCOOMatrix.from_edge_tensor(
            edge_tensor=edges,
            edge_weights=None,
            size=knowledge_graph.num_entities,
        )
def _scale_edge_weights(
    adjacency: SparseCOOMatrix,
    edge_factor: torch.FloatTensor,
) -> SparseCOOMatrix:
    """
    Multiply the edge weights by an edge-specific factor.
    Handles the special case of an unweighted input matrix, for which the factors
    directly become the new weights.
    :param adjacency:
        The adjacency.
    :param edge_factor: shape: (num_edges,)
        The edge-wise factor.
    :return:
        The scaled adjacency matrix.
    """
    scaled = edge_factor if adjacency.values is None else adjacency.values * edge_factor
    return adjacency.with_weights(weights=scaled)
def target_normalization(adjacency: SparseCOOMatrix) -> SparseCOOMatrix:
    r"""
    Normalize an adjacency matrix row-wise.
    .. math ::
        \hat{A}_{ij} = A_{ij} / \sum_{k} A_{ik}
    :param adjacency:
        The adjacency matrix.
    :return:
        The normalized adjacency matrix.
    """
    # Row-normalization: the incoming edge weights of each target node sum to one.
    return adjacency.normalize(dim=1)
def source_normalization(adjacency: SparseCOOMatrix) -> SparseCOOMatrix:
    r"""
    Normalize an adjacency matrix column-wise.
    .. math ::
        \hat{A}_{ij} = A_{ij} / \sum_{k} A_{kj}
    :param adjacency:
        The adjacency matrix.
    :return:
        The normalized adjacency matrix.
    """
    # Column-normalization: the outgoing edge weights of each source node sum to one.
    return adjacency.normalize(dim=0)
def symmetric_normalization(adjacency: SparseCOOMatrix) -> SparseCOOMatrix:
    r"""
    Normalize an adjacency matrix symmetrically.
    .. math ::
        \hat{A}_{ij} = A_{ij} / \sqrt{\left(\sum_{k} A_{ik} \right) \cdot \left(\sum_{k} A_{kj} \right)}
    :param adjacency:
        The adjacency matrix.
    :return:
        The normalized adjacency matrix.
    """
    # Broadcast the row sums and column sums to the individual edges, ...
    row_sums_per_edge = adjacency.scatter(adjacency.sum(dim=1), dim=0)
    col_sums_per_edge = adjacency.scatter(adjacency.sum(dim=0), dim=1)
    # ... and scale each edge by the inverse square root of their product.
    edge_factor = (row_sums_per_edge * col_sums_per_edge).sqrt().reciprocal()
    return _scale_edge_weights(
        adjacency=adjacency,
        edge_factor=edge_factor,
    )
class EdgeWeightsEnum(str, enum.Enum):
    """Which edge weights to use."""
    #: No normalization; keep the adjacency matrix as-is.
    none = 'none'
    #: Inverse in-degree -> sum of weights for incoming messages = 1
    inverse_in_degree = 'inverse_in_degree'
    #: Inverse out-degree -> sum of weights for outgoing messages = 1
    inverse_out_degree = 'inverse_out_degree'
    #: 1 / sqrt(in-degree * out-degree)
    symmetric = 'symmetric'
def normalize_adjacency(
    adjacency: SparseCOOMatrix,
    mode: EdgeWeightsEnum,
) -> SparseCOOMatrix:
    """
    Normalize adjacency according to normalization mode.
    :param adjacency:
        The adjacency matrix.
    :param mode:
        The mode; unknown modes (including `none`) leave the matrix unchanged.
    :return:
        The normalized adjacency.
    """
    # Dispatch table from mode to the corresponding normalization function.
    normalizers = {
        EdgeWeightsEnum.inverse_in_degree: target_normalization,
        EdgeWeightsEnum.inverse_out_degree: source_normalization,
        EdgeWeightsEnum.symmetric: symmetric_normalization,
    }
    normalizer = normalizers.get(mode)
    if normalizer is None:
        return adjacency
    return normalizer(adjacency=adjacency)
| 7,812 | 32.246809 | 155 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/data/loaders.py | """Data loading for Entity Alignment datasets."""
import abc
import io
import json
import logging
import lzma
import pathlib
import tarfile
import zipfile
from typing import Collection, Generic, Mapping, Optional, Tuple, Type, TypeVar, Union
import pandas
import requests
import torch
from .knowledge_graph import EntityAlignment, KnowledgeGraph, KnowledgeGraphAlignmentDataset, MatchSideEnum, SIDES
from ..utils.common import get_all_subclasses, get_subclass_by_name, multi_hash
from ..utils.data_utils import check_hashsums, resolve_cache_root, resolve_google_drive_file_url, save_response_content
from ..utils.torch_utils import split_tensor
from ..utils.types import IDAlignment, Triples
A = TypeVar('A', zipfile.ZipFile, tarfile.TarFile)
logger = logging.getLogger(name=__name__)
class Archive(Generic[A]):
    """A generic context-manager base class for reading from archives."""
    #: The archive file
    archive_file: A
    #: The default file extension:
    default_file_extension: str
    def __init__(self, archive_path: pathlib.Path):
        """
        Initialize the archive.
        :param archive_path:
            The archive path.
        """
        self.path = archive_path
    def __enter__(self):  # noqa: D105
        # The archive is opened lazily on context entry.
        self.archive_file = self._open_archive(path=self.path)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):  # noqa: D105
        self.archive_file.close()
    def _open_archive(self, path: pathlib.Path) -> A:
        """Open the archive in read mode."""
        raise NotImplementedError
    # pylint: disable=unused-argument
    def open_file(
        self,
        relative_path: Union[pathlib.Path, str],
        encoding: Optional[str] = None,
    ) -> io.TextIOBase:
        """Open a file from the archive in read mode."""
        return self.archive_file.open(name=str(relative_path))
class ZipArchive(Archive[zipfile.ZipFile]):
    """Archive backed by a ZIP file."""

    default_file_extension = 'zip'

    def _open_archive(
        self,
        path: pathlib.Path,
    ) -> zipfile.ZipFile:  # noqa: D102
        # zipfile.ZipFile defaults to read mode
        return zipfile.ZipFile(file=path)
class TarArchive(Archive[tarfile.TarFile]):
    """Archive backed by a (compressed) TAR file."""

    default_file_extension = 'tar.gz'

    def _open_archive(
        self,
        path: pathlib.Path,
    ) -> tarfile.TarFile:  # noqa: D102
        # tarfile.open auto-detects the compression from the file
        return tarfile.open(name=path)

    def open_file(
        self,
        relative_path: Union[pathlib.Path, str],
        encoding: Optional[str] = None,
    ) -> io.TextIOBase:  # noqa: D102
        # tarfile yields binary streams; wrap them for text-mode access
        member = self.archive_file.extractfile(member=str(relative_path))
        return io.TextIOWrapper(member, encoding=encoding)
def apply_compaction(
    triples: Triples,
    compaction: Mapping[int, int],
    columns: Union[int, Collection[int]],
    dim: int = 0,
) -> Triples:
    """
    Translate the IDs in selected columns of a tensor according to a compaction mapping.

    :param triples:
        The ID tensor, e.g. triples or alignment pairs.
    :param compaction:
        Maps each old ID to its new, compact ID. If None, the input is returned unchanged.
    :param columns:
        The column(s) whose entries are translated; all other columns are kept as-is.
    :param dim:
        The dimension along which "columns" are counted; must be 0 or 1.
    :return:
        A tensor of the same shape with translated IDs.
    """
    if compaction is None:
        return triples
    if isinstance(columns, int):
        columns = [columns]
    if dim not in {0, 1}:
        raise KeyError(dim)
    original_shape = triples.shape
    # normalize to column-wise access; for dim=1 the "columns" are rows
    work = triples.t() if dim == 1 else triples
    translated = []
    for index in range(work.shape[1]):
        column = work[:, index]
        if index in columns:
            column = torch.tensor([compaction[int(value)] for value in column])  # pylint: disable=not-callable
        translated.append(column)
    result = torch.stack(translated, dim=1 - dim)
    assert result.shape == original_shape
    return result
def compact_columns(
    triples: Triples,
    label_to_id_mapping: Mapping[str, int],
    columns: Union[int, Collection[int]],
) -> Tuple[Triples, Optional[Mapping[str, int]], Optional[Mapping[int, int]]]:
    """
    Re-map the IDs in the given columns to the consecutive range {0, ..., num_labels - 1}.

    :param triples: shape: (num_triples, 3)
        The original triples.
    :param label_to_id_mapping:
        The old label-to-ID mapping.
    :param columns:
        The columns on which to calculate the compaction.
    :return:
        A 3-tuple (new_triples, new_mapping, compaction) where

        * new_triples: shape: (num_triples, 3)
            The compacted triples.
        * new_mapping:
            The updated label-to-ID mapping.
        * compaction:
            A mapping from old ID to new ID, or None if the IDs were already compact.
    """
    ids = label_to_id_mapping.values()
    num_ids = len(ids)
    # the mapping must be injective, otherwise compaction is ill-defined
    assert len(set(ids)) == num_ids
    max_id = max(ids)
    if num_ids >= max_id + 1:
        # IDs already form {0, ..., num_ids - 1}; nothing to do
        logger.debug('No compaction necessary.')
        return triples, label_to_id_mapping, None
    # monotonous re-numbering: sorting keeps the relative ID order stable
    compaction = {old: new for new, old in enumerate(sorted(ids))}
    assert set(compaction.keys()) == set(label_to_id_mapping.values())
    assert set(compaction.values()) == set(range(num_ids))
    new_triples = apply_compaction(triples, compaction, columns, dim=0)
    new_mapping = {label: compaction[old_id] for label, old_id in label_to_id_mapping.items()}
    logger.info('Compacted: %d -> %d', max_id, num_ids - 1)
    return new_triples, new_mapping, compaction
def compact_graph(
    graph: KnowledgeGraph,
    no_duplicates: bool = True,
) -> Tuple[KnowledgeGraph, Optional[Mapping[int, int]], Optional[Mapping[int, int]]]:
    """
    Compact a KG so that entity and relation IDs are consecutive and start at 0.

    :param graph:
        The KG.
    :param no_duplicates:
        Whether to drop duplicate triples.
    :return:
        The updated KG, plus the entity and relation old-ID-to-new-ID mappings
        (each None if that ID range was already compact).
    """
    if graph.inverse_triples:
        raise NotImplementedError
    # entity IDs occur in columns 0 (head) and 2 (tail)
    triples, entity_label_to_id, entity_compaction = compact_columns(
        triples=graph.triples,
        label_to_id_mapping=graph.entity_label_to_id,
        columns=(0, 2),
    )
    # relation IDs occur in column 1
    triples, relation_label_to_id, relation_compaction = compact_columns(
        triples=triples,
        label_to_id_mapping=graph.relation_label_to_id,
        columns=(1,),
    )
    if no_duplicates:
        before = triples.shape[0]
        triples = torch.unique(triples, dim=0)
        after = triples.shape[0]
        if after < before:
            logger.info('Aggregated edges: %d -> %d.', before, after)
    # re-assemble a graph with the translated IDs but unchanged metadata
    new_graph = KnowledgeGraph(
        triples=triples,
        entity_label_to_id=entity_label_to_id,
        relation_label_to_id=relation_label_to_id,
        lang_code=graph.lang_code,
        dataset_name=graph.dataset_name,
        subset_name=graph.subset_name
    )
    return new_graph, entity_compaction, relation_compaction
def compact_single_alignment(
    single_alignment: IDAlignment,
    left_compaction: Mapping[int, int],
    right_compaction: Mapping[int, int],
) -> IDAlignment:
    """
    Apply ID compaction to a single alignment.

    :param single_alignment: shape: (2, num_alignments), dtype: long
        The alignment; row 0 holds left-graph IDs, row 1 right-graph IDs.
    :param left_compaction:
        Mapping old ID -> new ID for the left graph.
    :param right_compaction:
        Mapping old ID -> new ID for the right graph.
    :return: shape: (2, num_alignments)
        The updated alignment.
    """
    result = single_alignment
    # translate each row with its side's compaction (dim=1: rows act as columns)
    result = apply_compaction(triples=result, compaction=left_compaction, columns=0, dim=1)
    result = apply_compaction(triples=result, compaction=right_compaction, columns=1, dim=1)
    return result
def compact_knowledge_graph_alignment(
    alignment: EntityAlignment,
    left_entity_compaction: Mapping[int, int],
    right_entity_compaction: Mapping[int, int],
) -> EntityAlignment:
    """
    Apply ID compaction to all parts of an entity alignment.

    :param alignment:
        The entity alignment.
    :param left_entity_compaction:
        Mapping old ID -> new ID for the left graph.
    :param right_entity_compaction:
        Mapping old ID -> new ID for the right graph.
    :return:
        The updated entity alignment.
    """
    def _compact(part: IDAlignment) -> IDAlignment:
        # translate both rows of one alignment part
        return compact_single_alignment(
            single_alignment=part,
            left_compaction=left_entity_compaction,
            right_compaction=right_entity_compaction,
        )

    # the validation part is optional
    validation = _compact(alignment.validation) if alignment.num_validation > 0 else None
    return EntityAlignment(
        train=_compact(alignment.train),
        test=_compact(alignment.test),
        _validation=validation,
    )
def compact_knowledge_graph_alignment_dataset(
    left_graph: KnowledgeGraph,
    right_graph: KnowledgeGraph,
    alignment: EntityAlignment,
    no_duplicates: bool = True,
) -> Tuple[KnowledgeGraph, KnowledgeGraph, EntityAlignment]:
    """
    Compact a knowledge graph alignment dataset.

    When loading a KG with pre-defined label-to-ID mappings, the ID range may neither be
    consecutive nor start at 0. This maps the IDs monotonously to {0, ..., num_labels - 1}
    for both graphs and translates the alignment accordingly.

    :param left_graph:
        The left KG.
    :param right_graph:
        The right KG.
    :param alignment:
        The entity alignment.
    :param no_duplicates:
        Whether to discard duplicate triples.
    :return:
        The updated left/right graph and alignment.
    """
    # relation compactions are irrelevant for the alignment, hence discarded
    new_left, left_entity_compaction, _ = compact_graph(graph=left_graph, no_duplicates=no_duplicates)
    new_right, right_entity_compaction, _ = compact_graph(graph=right_graph, no_duplicates=no_duplicates)
    new_alignment = compact_knowledge_graph_alignment(
        alignment=alignment,
        left_entity_compaction=left_entity_compaction,
        right_entity_compaction=right_entity_compaction,
    )
    return new_left, new_right, new_alignment
def load_triples(
    triples_file: io.TextIOBase,
    delimiter: str = '\t',
    encoding: str = 'utf8',
    engine: str = 'c',
) -> Tuple[Triples, Mapping[str, int], Mapping[str, int]]:
    """
    Load (head, relation, tail) triples from a file-like object.

    :param triples_file:
        The opened file-like object.
    :param delimiter:
        The column delimiter.
    :param encoding:
        The text encoding.
    :param engine:
        The pandas parser engine.
    :return:
        A tuple (triples, entity_label_to_id, relation_label_to_id) where

        * triples: shape: (num_triples, 3), dtype: long
        * entity_label_to_id / relation_label_to_id: mappings from labels to IDs.
    """
    df = pandas.read_csv(
        filepath_or_buffer=triples_file,
        sep=delimiter,
        encoding=encoding,
        header=None,
        names=['h', 'r', 't'],
        engine=engine,
        dtype=str,
    )
    df = df.applymap(str)
    # sort labels so that assigned IDs do not depend on the triple order in the file
    entity_labels = sorted(set(df['h'].unique()).union(set(df['t'].unique())))
    entity_label_to_id = {label: index for index, label in enumerate(entity_labels)}
    relation_label_to_id = {label: index for index, label in enumerate(sorted(df['r'].unique()))}
    # translate labels to IDs column by column
    for column, mapping in (('h', entity_label_to_id), ('r', relation_label_to_id), ('t', entity_label_to_id)):
        df[column] = df[column].apply(mapping.__getitem__)
    triples = torch.as_tensor(data=df.values, dtype=torch.long).unique(dim=0)
    logger.info(
        'Loaded %d unique triples, with %d unique entities and %d unique relations.',
        triples.shape[0],
        len(entity_label_to_id),
        len(relation_label_to_id)
    )
    return triples, entity_label_to_id, relation_label_to_id
def _load_label_to_id(
    archive: Archive,
    relative_path: pathlib.Path,
) -> Mapping[str, int]:
    """
    Load a label-to-ID file (two TSV columns: id, label) from an opened archive.

    :param archive:
        The opened archive file.
    :param relative_path:
        The relative path within the archive.
    :return:
        A mapping from labels to IDs.
    """
    with archive.open_file(relative_path=relative_path) as text_file:
        df = pandas.read_csv(filepath_or_buffer=text_file, names=['id', 'label'], header=None, sep='\t', encoding='utf8', engine='c')
    labels = df['label'].values.tolist()
    ids = df['id'].values.tolist()
    return dict(zip(labels, ids))
def _load_entity_alignment(
    archive: Archive,
    relative_path: pathlib.Path,
    left_graph: KnowledgeGraph,
    right_graph: KnowledgeGraph,
    sep: str = '\t',
) -> IDAlignment:
    """
    Load a label-based entity alignment from an opened archive and translate it to IDs.

    :param archive:
        The opened archive.
    :param relative_path:
        The relative path within the archive.
    :param left_graph:
        The left KG.
    :param right_graph:
        The right KG.
    :param sep:
        The column separator.
    :return: shape: (2, num_alignments)
        The entity alignment.
    """
    # multi-character separators require pandas' slower python engine
    parser_engine = 'c' if len(sep) == 1 else 'python'
    with archive.open_file(relative_path=relative_path) as text_file:
        label_alignment = pandas.read_csv(
            filepath_or_buffer=text_file,
            names=['L', 'R'],
            header=None,
            sep=sep,
            encoding='utf8',
            engine=parser_engine,
            dtype=str,
        )
    return translate_alignment(labelled_entity_alignment=label_alignment, left_graph=left_graph, right_graph=right_graph)
def translate_alignment(
    labelled_entity_alignment: pandas.DataFrame,
    left_graph: KnowledgeGraph,
    right_graph: KnowledgeGraph,
) -> IDAlignment:
    """
    Convert an alignment of labels to an alignment of IDs.

    :param labelled_entity_alignment: columns: ['L', 'R']
        The entity alignment, label-based.
    :param left_graph:
        The left KG.
    :param right_graph:
        The right KG.
    :return: shape: (2, num_alignments)
        The ID-based alignment; rows with labels unknown to either graph are dropped.
    """
    rows = []
    for column_name, graph in zip('LR', [left_graph, right_graph]):
        # labels missing from the graph map to -1 and are dropped below
        ids = labelled_entity_alignment[column_name].apply(graph.entity_label_to_id.get, args=(-1,))
        rows.append(torch.as_tensor(data=ids, dtype=torch.long))
    alignment = torch.stack(rows, dim=0)
    invalid_mask = (alignment < 0).any(dim=0)
    num_invalid = invalid_mask.sum()
    if num_invalid > 0:
        logger.warning('Dropping %d invalid rows.', num_invalid)
        alignment = alignment[:, ~invalid_mask]
    # deduplicate alignment pairs
    alignment = alignment.unique(dim=1)
    logger.info('Loaded alignment of size %d.', alignment.shape[1])
    return alignment
def _load_tensor_from_csv(
    archive: Archive,
    relative_path: pathlib.Path,
) -> torch.LongTensor:
    """
    Load an integer tensor from a TSV file in an opened archive.

    :param archive:
        The opened archive.
    :param relative_path:
        The relative path within the archive.
    :return: dtype: long
        The tensor.
    """
    with archive.open_file(relative_path=relative_path) as text_file:
        df = pandas.read_csv(filepath_or_buffer=text_file, header=None, sep='\t', encoding='utf8', engine='c')
    return torch.tensor(data=df.values, dtype=torch.long)  # pylint: disable=not-callable
class OnlineKnowledgeGraphAlignmentDatasetLoader:
    """Contains a lazy reference to a knowledge graph alignment data set.

    The loader downloads (and checksums) an archive, extracts both knowledge graphs and
    the entity alignment, compacts their IDs, and caches the preprocessed artifacts on
    disk so that subsequent loads skip the expensive extraction.
    """

    #: The URL where the data can be downloaded from
    url: str

    #: The subsets
    subsets: Collection[str] = frozenset()

    #: The pre-defined train-test splits
    predefined_splits: Collection[float] = frozenset()

    #: The archive file type
    archive_type: Type[Archive] = TarArchive

    #: The file name for the archive
    archive_file_name: str

    #: The directory where the datasets will be extracted to
    cache_root: pathlib.Path

    def __init__(
        self,
        subset: Optional[str] = None,
        train_test_split: Optional[float] = None,
        cache_root: Optional[Union[pathlib.Path, str]] = None,
        compact: bool = True,
        train_validation_split: Optional[float] = 0.8,
        with_inverse_triples: bool = False,
        with_self_loops: bool = False,
        random_seed: int = 42,
    ) -> None:
        """
        Initialize the data loader.

        :param subset:
            The name of the subset to use. Check subsets() for available subsets. If None, use the alphabetically
            first one. This should *not* happen within a production environment.
        :param train_test_split:
            The train-test split ratio.
        :param cache_root:
            The cache root to use for caching downloaded files.
        :param compact:
            Whether to compact the label-to-ID mappings, i.e. ensure that the IDs are consecutive from
            {0, ..., num_labels-1}
        :param train_validation_split:
            The train-validation split ratio.
        :param with_inverse_triples:
            Whether to add inverse triples.
        :param with_self_loops:
            Whether to add self-loops.
        :param random_seed:
            The random seed to use for splitting.
        """
        self.cache_root = resolve_cache_root(cache_root, self.cache_sub_directory_name)
        logger.info('Using cache_root=%s', self.cache_root)

        if subset is None:
            subset = sorted(self.subsets)[0]
            logger.warning('No subset specified. This should not happen in production. Using "%s".', subset)
        if subset not in self.subsets:
            raise ValueError(f'Invalid subset={subset}. Allowed subsets: {self.subsets} (check '
                             f'{self.__class__.__name__}.subsets() for this list).')
        self.subset = subset

        if train_test_split is None:
            train_test_split = 0.3
            logger.warning('No train_test_split was given. Defaulting to 0.3.')
        if train_test_split <= 0.0 or train_test_split >= 1.0:
            raise ValueError(f'Split must be a float with 0 < train_test_split < 1, but train_test_split={train_test_split},')
        if train_test_split not in self.predefined_splits:
            logger.warning('Using a custom train_test_split=%f, and none of the pre-defined: %s.', train_test_split, self.predefined_splits)
        self.train_test_split = train_test_split

        self.compact = compact
        self.train_validation_split = train_validation_split
        self.with_inverse_triples = with_inverse_triples
        self.with_self_loops = with_self_loops
        self.random_seed = random_seed

    @property
    def cache_sub_directory_name(self) -> str:
        """Return the name of the sub-directory within the cache root."""
        return self.__class__.__name__.lower()

    def _get_split_name(self) -> str:
        """Get a unique split name."""
        # NOTE(review): built-in hash() on a tuple of numbers is deterministic within
        # one Python version, but not guaranteed to be stable across versions.
        return str(hash((self.train_validation_split, self.train_test_split, self.random_seed)))

    def load(
        self,
        force_download: bool = False,
    ) -> KnowledgeGraphAlignmentDataset:
        """
        Load the dataset.

        :param force_download:
            Whether to force downloading the file, even if is already exists.

        :return:
            The dataset.
        """
        # Ensure directory exists
        self.cache_root.mkdir(parents=True, exist_ok=True)

        # Check if files already exist
        archive_path = self.cache_root / f'{self.archive_file_name}.{self.archive_type.default_file_extension}'  # pylint: disable=no-member
        if archive_path.is_file() and not force_download:
            logger.info('Checking hash sums for existing file %s.', str(archive_path))
            check_sums_match = check_hashsums(destination=archive_path, **self.hash_digests())
            if not check_sums_match:
                logger.warning('Checksums do not match. Forcing download.')
            force_download = not check_sums_match
        else:
            force_download = True

        if force_download:
            # create session
            session = requests.Session()
            if 'drive.google.com' in self.url:
                # Google Drive requires resolving a confirmation token for large files
                _id = self.url.split('?id=')[1]
                response = resolve_google_drive_file_url(id_=_id, session=session)
            else:
                logger.info('Requesting dataset from %s', self.url)
                response = session.get(url=self.url, stream=True)

            # Real download
            save_response_content(response=response, destination=archive_path)
            check_sums_match = check_hashsums(destination=archive_path, **self.hash_digests())
            if not check_sums_match:
                raise ValueError('Checksums do not match!')
        else:
            logger.info('Skipping to download from %s due to existing files in %s.', self.url, self.cache_root)

        # Try to load from artifact
        artifact_root = self.cache_root / 'preprocessed' / self.__class__.__name__.lower() / self.subset

        # graphs
        graphs = dict()
        compactions = dict()
        for side in SIDES:
            # sentinel marking "not found in the preprocessed artifact cache"
            graph = compaction = "load-from-archive"
            # try to load from artifact
            graph_path = artifact_root / f"{side.value}_graph"
            compaction_path = graph_path / "compaction.json.xz"
            if graph_path.is_dir():
                try:
                    graph = KnowledgeGraph.load(directory=graph_path)
                    logger.info(f"Loaded preprocessed graph from {graph_path}")
                except FileNotFoundError as error:
                    logger.error(f"Error occurred by loading graph from {graph_path}: {error}")
            if compaction_path.is_file():
                with lzma.open(compaction_path, "rt") as json_file:
                    compaction = json.load(json_file)
                # BUG FIX: JSON serializes integer dictionary keys as strings. Restore
                # int keys so that later lookups `compaction[int(e)]` in
                # apply_compaction do not raise KeyError when the compaction is loaded
                # from the cache. A stored `null` (no compaction needed) stays None.
                if compaction is not None:
                    compaction = {int(old_id): new_id for old_id, new_id in compaction.items()}

            # load from archive only if necessary
            if graph == "load-from-archive" or compaction == "load-from-archive":
                with self.archive_type(archive_path=archive_path) as archive:
                    graph = self._load_graph(archive=archive, side=side)
                # compact
                graph, compaction = compact_graph(graph=graph, no_duplicates=True)[:2]
                # save
                graph.save(directory=graph_path)
                with lzma.open(compaction_path, "wt") as json_file:
                    json.dump(
                        compaction,
                        fp=json_file,
                        sort_keys=True,
                        indent=2,
                    )
                logger.info(f"Saved preprocessed graph to {graph_path}")
            assert graph is not None
            graphs[side], compactions[side] = graph, compaction
        left_graph, right_graph = [graphs[side] for side in SIDES]

        # alignment
        # key0 = .
        all_alignment_path = artifact_root / "alignment.pt"
        # key1 = (train_test_split, random_seed)
        train_test_key = multi_hash(self.train_test_split, self.random_seed, hash_function="md5")
        test_indices_path = artifact_root / "splits" / f"test_{train_test_key}.pt"
        test_indices_path.parent.mkdir(parents=True, exist_ok=True)
        # key2 = (train_test_split, train_validation_split, random_seed)
        train_test_validation_key = multi_hash(self.train_test_split, self.random_seed, self.train_validation_split, hash_function="md5")
        train_indices_path = artifact_root / "splits" / f"train_{train_test_validation_key}.pt"
        validation_indices_path = artifact_root / "splits" / f"validation_{train_test_validation_key}.pt"
        if all_alignment_path.is_file():
            all_alignment = torch.load(all_alignment_path)
            num_alignments = all_alignment.shape[1]
            logger.info(f"Loaded {num_alignments} preprocessed alignments from {all_alignment_path}")
            train_validation_indices = None
            if test_indices_path.is_file():
                test_indices = torch.load(test_indices_path)
                logger.info(f"Loaded {test_indices.numel()} preprocessed test indices from {test_indices_path}")
            else:
                # train-test split
                train_validation_indices, test_indices = split_tensor(tensor=torch.randperm(num_alignments), ratios=self.train_test_split, seed=self.random_seed)
                torch.save(test_indices, test_indices_path)
                logger.info(f"Saved {test_indices.numel()} preprocessed test indices to {test_indices_path}")
            validation_indices = None
            if train_indices_path.is_file():
                train_indices = torch.load(train_indices_path)
                logger.info(f"Loaded {train_indices.numel()} preprocessed train indices from {train_indices_path}")
                if self.train_validation_split is not None:
                    validation_indices = torch.load(validation_indices_path)
                    logger.info(f"Loaded {validation_indices.numel()} preprocessed validation indices from {validation_indices_path}")
            else:
                if train_validation_indices is None:
                    # reconstruct the non-test indices from the stored test split
                    train_validation_indices = torch.as_tensor(data=sorted(set(range(num_alignments)).difference(test_indices.tolist())))
                if self.train_validation_split is not None:
                    # NOTE(review): unlike the train-test split above, no seed is passed
                    # here although the cache key includes random_seed -- confirm whether
                    # split_tensor has a deterministic default seed.
                    train_indices, validation_indices = split_tensor(tensor=train_validation_indices, ratios=self.train_validation_split, )
                    torch.save(validation_indices, validation_indices_path)
                    logger.info(f"Saved {validation_indices.numel()} preprocessed validation indices to {validation_indices_path}")
                else:
                    train_indices = train_validation_indices
                torch.save(train_indices, train_indices_path)
                logger.info(f"Saved {train_indices.numel()} preprocessed train indices to {train_indices_path}")
            # Compose alignment
            alignment = EntityAlignment(**{
                part: all_alignment[:, indices]
                for part, indices in dict(
                    train=train_indices,
                    test=test_indices,
                    _validation=validation_indices,
                ).items()
            })
        else:
            # load from archive only if necessary
            with self.archive_type(archive_path=archive_path) as archive:
                alignment = self._load_alignment(archive=archive, left_graph=left_graph, right_graph=right_graph)
            # compact
            alignment = compact_knowledge_graph_alignment(
                alignment=alignment,
                left_entity_compaction=compactions[MatchSideEnum.left],
                right_entity_compaction=compactions[MatchSideEnum.right],
            )
            # (re-)split if necessary
            if self.train_validation_split is not None:
                if round(self.train_validation_split * (alignment.num_train + alignment.num_validation)) == alignment.num_train:
                    logger.debug('Data was already split')
                else:
                    if alignment.num_validation > 0:
                        logger.warning('Re-splitting data.')
                    alignment = alignment.validation_split(train_ratio=self.train_validation_split, seed=self.random_seed)
                    logger.info('Train-Validation-Split')
            # better format for saving
            a = torch.cat([alignment.train, alignment.test, alignment.validation], dim=1)
            # lexicographic sort
            i1 = a[1].argsort()
            i2 = a[0, i1].argsort()
            i = i1[i2]
            i: torch.Tensor
            a = a[:, i]
            torch.save(a, all_alignment_path)
            logger.info(f"Store preprocessed alignments to {all_alignment_path}")
            # inverse permutation recovers the train/test/validation positions
            i = i.argsort()
            i_train, i_test, i_validation = i.split(
                split_size=[
                    alignment.num_train,
                    alignment.num_test,
                    alignment.num_validation,
                ])
            for path, indices in (
                (test_indices_path, i_test),
                (train_indices_path, i_train),
                (validation_indices_path, i_validation),
            ):
                torch.save(indices, path)
                logger.info(f"Store preprocessed split to {path}")
        dataset = KnowledgeGraphAlignmentDataset(
            left_graph=left_graph,
            right_graph=right_graph,
            alignment=alignment,
        )
        if self.with_inverse_triples:
            dataset = dataset.with_inverse_triples()
            logger.info('Created inverse triples')
        if self.with_self_loops:
            dataset = dataset.with_self_loops()
            logger.info('Created self-loops')
        return dataset

    def hash_digests(self) -> Mapping[str, str]:
        """Return the hash digests for file integrity check."""
        return dict()

    def _load_graph(
        self,
        archive: Archive,
        side: MatchSideEnum,
    ) -> KnowledgeGraph:
        """
        Load one graph from an archive.

        :param archive:
            The opened archive.
        :param side:
            The side.

        :return:
            The knowledge graph for this side.
        """
        raise NotImplementedError

    def _load_alignment(
        self,
        archive: Archive,
        left_graph: KnowledgeGraph,
        right_graph: KnowledgeGraph,
    ) -> EntityAlignment:
        """
        Load the entity alignment from an opened archive.

        :param archive:
            The opened archive.
        :param left_graph:
            The left graph.
        :param right_graph:
            The right graph.

        :return:
            The alignment.
        """
        raise NotImplementedError
class _DBP15k(OnlineKnowledgeGraphAlignmentDatasetLoader, abc.ABC):
    """
    Superclass for DBP15k variants.

    The datasets were first described in https://iswc2017.semanticweb.org/wp-content/uploads/papers/MainProceedings/188.pdf

    > We selected DBpedia (2016-04) to build three cross-lingual datasets. DBpedia isa large-scale multi-lingual KB
    > including inter-language links (ILLs) from entities of English version to those in other languages. In our
    > experiments, we extracted 15 thousand ILLs with popular entities from English to Chinese, Japanese and French
    > respectively, and considered them as our reference alignment (i.e., gold standards). Our strategy to extract
    > datasets is that we randomly selected an ILL pair s.t. the involved entities have at least 4
    > relationship triples and then extracted relationship and attribute infobox triples for selected entities.

    This implementation only considers the relationship triples, and NOT the attributes triples.
    """

    #: Available cross-lingual subsets: Chinese-, Japanese-, and French-English.
    subsets = frozenset({'zh_en', 'ja_en', 'fr_en', })
class DBP15kJAPE(_DBP15k):
    """Smaller variant of DBP15k from JAPE repository."""

    url = 'https://github.com/nju-websoft/JAPE/raw/master/data/dbp15k.tar.gz'
    predefined_splits = frozenset({0.1, 0.2, 0.3, 0.4, 0.5})
    archive_file_name = 'dbp15k_jape'

    @property
    def root(self) -> pathlib.Path:
        """Return the relative path within the archive."""
        # directory names encode the train fraction, e.g. "0_3" for a 30% split
        split_digit = str(int(100 * self.train_test_split))[0]
        return pathlib.Path('dbp15k', self.subset, f'0_{split_digit}')

    def hash_digests(self) -> Mapping[str, str]:  # noqa: D102
        return dict(
            sha512='a3bcee42dd0ecfd7188be36c57b9ec6d57b2995d0cf6a17e8fd6f302b4e70d2fc354282f7f7130040bcdcc6c7a55eab7a3af4c361fb1fd98c376bda1490e3f9d',
        )

    def _load_graph(
        self,
        archive: Archive,
        side: MatchSideEnum,
    ) -> KnowledgeGraph:  # noqa: D102
        # files are numbered 1 (left language) and 2 (right language)
        is_left = side == MatchSideEnum.left
        lang_codes = self.subset.split('_')
        lang_code = lang_codes[0] if is_left else lang_codes[1]
        num = 1 if is_left else 2
        return KnowledgeGraph(
            triples=_load_tensor_from_csv(archive=archive, relative_path=self.root / f'triples_{num}'),
            entity_label_to_id=_load_label_to_id(archive=archive, relative_path=self.root / f'ent_ids_{num}'),
            relation_label_to_id=_load_label_to_id(archive=archive, relative_path=self.root / f'rel_ids_{num}'),
            lang_code=lang_code,
            dataset_name='dbp15kjape',
            subset_name=self.subset
        )

    def _load_alignment(
        self,
        archive: Archive,
        left_graph: KnowledgeGraph,
        right_graph: KnowledgeGraph,
    ) -> EntityAlignment:  # noqa: D102
        # "sup" = supervised (train) pairs, "ref" = reference (test) pairs
        train = _load_tensor_from_csv(archive=archive, relative_path=self.root / 'sup_ent_ids').t()
        test = _load_tensor_from_csv(archive=archive, relative_path=self.root / 'ref_ent_ids').t()
        return EntityAlignment(train=train, test=test)
def dataset_name_normalization(name: str) -> str:
    """Normalize a dataset name: lowercase, underscores removed."""
    lowered = name.lower()
    return lowered.replace('_', '')
def available_datasets() -> Mapping[str, Collection[str]]:
    """List available datasets with their subsets."""
    result = {}
    for cls in get_all_subclasses(base_class=OnlineKnowledgeGraphAlignmentDatasetLoader):
        # names starting with an underscore are abstract helper classes
        if cls.__name__.startswith('_'):
            continue
        result[dataset_name_normalization(cls.__name__)] = cls.subsets
    return result
def get_dataset_by_name(
    dataset_name: str,
    subset_name: Optional[str] = None,
    train_test_split: Optional[float] = None,
    cache_root: Optional[Union[pathlib.Path, str]] = None,
    compact: bool = True,
    train_validation_split: Optional[float] = 0.8,
    inverse_triples: bool = False,
    self_loops: bool = False,
    random_seed: int = 42,
    force_download: bool = False,
) -> KnowledgeGraphAlignmentDataset:
    """Load a dataset specified by name and subset name.

    :param dataset_name:
        The case-insensitive dataset name. One of ("DBP15k", )
    :param subset_name:
        An optional subset name
    :param train_test_split: 0 < x < 1
        A specification of the train-test split to use.
    :param cache_root:
        An optional cache directory for extracted downloads. If None is given, use /tmp/{dataset_name}
    :param compact:
        Whether to apply compaction, i.e. ensure consecutive relation and entity IDs.
    :param train_validation_split: 0 < x < 1
        An optional train-validation split ratio.
    :param inverse_triples:
        Whether to generate inverse triples (o, p_inv, s) for every triple (s, p, o).
    :param self_loops:
        Whether to generate self-loops (e, self_loop, e) for each entity e.
    :param random_seed:
        The seed to use for random splitting.
    :param force_download:
        Force downloading the files even if they already exist.
    :return:
        A dataset, a collection of two KG, and an entity alignment.
    """
    # delegate loader construction; this function only triggers the actual load
    loader = get_dataset_loader_by_name(
        dataset_name=dataset_name,
        subset_name=subset_name,
        train_test_split=train_test_split,
        cache_root=cache_root,
        compact=compact,
        train_validation_split=train_validation_split,
        inverse_triples=inverse_triples,
        self_loops=self_loops,
        random_seed=random_seed,
    )
    dataset = loader.load(force_download=force_download)
    logger.info('Created dataset: %s', dataset)
    return dataset
def get_dataset_loader_by_name(
    dataset_name: str,
    subset_name: Optional[str] = None,
    train_test_split: Optional[float] = None,
    cache_root: Optional[Union[pathlib.Path, str]] = None,
    compact: bool = True,
    train_validation_split: Optional[float] = 0.8,
    inverse_triples: bool = False,
    self_loops: bool = False,
    random_seed: int = 42,
):
    """Create a dataset loader for a dataset specified by name and subset name.

    :param dataset_name:
        The case-insensitive dataset name. One of ("DBP15k", )
    :param subset_name:
        An optional subset name
    :param train_test_split: 0 < x < 1
        A specification of the train-test split to use. A string is interpreted as a
        percentage, e.g. "30" -> 0.3; None defaults to 0.3.
    :param cache_root:
        An optional cache directory for extracted downloads. If None is given, use /tmp/{dataset_name}
    :param compact:
        Whether to apply compaction, i.e. ensure consecutive relation and entity IDs.
    :param train_validation_split: 0 < x < 1
        An optional train-validation split ratio.
    :param inverse_triples:
        Whether to generate inverse triples (o, p_inv, s) for every triple (s, p, o).
    :param self_loops:
        Whether to generate self-loops (e, self_loop, e) for each entity e.
    :param random_seed:
        The seed to use for random splitting.
    :return:
        A dataset loader.
    :raises TypeError:
        If train_test_split cannot be normalized to a float.
    """
    # Normalize train-test-split
    if train_test_split is None:
        train_test_split = 0.3
    if isinstance(train_test_split, str):
        # interpret a string as a percentage, e.g. "30" -> 0.3
        train_test_split = int(train_test_split) / 100.
    if not isinstance(train_test_split, float):
        # raise instead of assert: assertions are stripped under `python -O`
        raise TypeError(f'train_test_split must be a float, but is of type {type(train_test_split)}.')

    # Resolve data set loader class
    dataset_loader_cls = get_subclass_by_name(
        base_class=OnlineKnowledgeGraphAlignmentDatasetLoader,
        name=dataset_name,
        normalizer=dataset_name_normalization,
    )

    # Instantiate dataset loader
    return dataset_loader_cls(
        subset=subset_name,
        train_test_split=train_test_split,
        cache_root=cache_root,
        compact=compact,
        train_validation_split=train_validation_split,
        with_inverse_triples=inverse_triples,
        with_self_loops=self_loops,
        random_seed=random_seed,
    )
| 38,530 | 35.942474 | 184 | py |
rank-based-evaluation | rank-based-evaluation-main/src/kgm/data/knowledge_graph.py | # coding=utf-8
"""Various knowledge graph related data structures."""
import enum
import json
import logging
import lzma
import pathlib
from dataclasses import dataclass
from typing import Mapping, Optional, Tuple, Union
import torch
from ..utils.torch_utils import split_tensor
from ..utils.types import EntityIDs, IDAlignment, Triples
logger = logging.getLogger(__name__)
__all__ = [
'EntityAlignment',
'KnowledgeGraph',
'KnowledgeGraphAlignmentDataset',
'MatchSideEnum',
'SIDES',
'exact_self_alignment',
'get_erdos_renyi',
'get_other_side',
'get_synthetic_math_graph',
'sub_graph_alignment',
'validation_split',
]
@enum.unique
class MatchSideEnum(str, enum.Enum):
    """Which of the two graphs of an entity alignment dataset is selected."""

    #: The left-hand graph
    left = 'left'

    #: The right-hand graph
    right = 'right'


#: The canonical order of match sides
SIDES = (MatchSideEnum.left, MatchSideEnum.right)


def get_other_side(side: MatchSideEnum) -> MatchSideEnum:
    """Return the opposite side of the given one."""
    if side == MatchSideEnum.right:
        return MatchSideEnum.left
    return MatchSideEnum.right
def add_self_loops(
    triples: Triples,
    entity_label_to_id: Mapping[str, int],
    relation_label_to_id: Mapping[str, int],
    self_loop_relation_name: Optional[str] = None,
) -> Tuple[Triples, Mapping[str, int]]:
    """Append one self-loop triple (e, self_loop, e) per entity, using a fresh relation.

    :param triples: shape: (n, 3)
        The triples.
    :param entity_label_to_id:
        The mapping from entity labels to ids.
    :param relation_label_to_id:
        The mapping from relation labels to ids.
    :param self_loop_relation_name:
        The name of the self-loop relation. Must not exist yet; defaults to 'self_loop'.
    :return:
        cat(triples, self_loop_triples), and the updated relation mapping.
    """
    if self_loop_relation_name is None:
        self_loop_relation_name = 'self_loop'

    relations = triples[:, 1]
    # the new relation label must be fresh to avoid clashing with a real relation
    if self_loop_relation_name in relation_label_to_id.keys():
        raise AssertionError(f'There exists a relation "{self_loop_relation_name}".')

    # the self-loop relation gets the next free relation ID
    max_relation_id = max(relation_label_to_id.values())
    self_loop_relation_id = max_relation_id + 1
    updated_relation_label_to_id = {r_label: r_id for r_label, r_id in relation_label_to_id.items()}
    updated_relation_label_to_id[self_loop_relation_name] = self_loop_relation_id
    assert len(updated_relation_label_to_id) == len(relation_label_to_id) + 1

    # sanity check: all existing relation IDs lie below the new one
    assert (relations <= max_relation_id).all()
    entities = torch.tensor(sorted(entity_label_to_id.values()), dtype=torch.long)  # pylint: disable=not-callable
    loop_relations = torch.ones_like(entities) * self_loop_relation_id
    self_loop_triples = torch.stack([entities, loop_relations, entities], dim=1)
    return torch.cat([triples, self_loop_triples], dim=0), updated_relation_label_to_id
def add_inverse_triples(
    triples: Triples,
    relation_label_to_id: Mapping[str, int],
    inverse_relation_postfix: Optional[str] = None,
) -> Tuple[Triples, Mapping[str, int]]:
    """Create and append inverse triples.

    For each triple (s, p, o), an inverse triple (o, p_inv, s) is added, with
    p_inv = p + num_relations and label ``label + inverse_relation_postfix``.

    :param triples: shape: (n, 3)
        The triples.
    :param relation_label_to_id:
        The mapping from relation labels to ids.
    :param inverse_relation_postfix:
        A postfix to use for creating labels for the inverse relations.
        Defaults to '_inv'.

    :return:
        cat(triples, inverse_triples), and the relation mapping extended by the
        inverse relations.
    """
    if inverse_relation_postfix is None:
        inverse_relation_postfix = '_inv'
    assert len(inverse_relation_postfix) > 0
    s, p, o = triples[:, 0], triples[:, 1], triples[:, 2]
    # check if name clashes might occur
    # bug fix: check against the actual postfix instead of the hard-coded '_inv',
    # so a custom postfix is also protected against collisions
    suspicious_relations = sorted(k for k in relation_label_to_id.keys() if k.endswith(inverse_relation_postfix))
    if len(suspicious_relations) > 0:
        raise AssertionError(
            f'Some of the inverse relations did already exist! Suspicious relations: {suspicious_relations}')
    # Append inverse relations to translation table
    num_relations = len(relation_label_to_id)
    updated_relation_label_to_id = {r_label: r_id for r_label, r_id in relation_label_to_id.items()}
    updated_relation_label_to_id.update({r_label + inverse_relation_postfix: r_id + num_relations for r_label, r_id in relation_label_to_id.items()})
    assert len(updated_relation_label_to_id) == 2 * num_relations
    # create inverse triples: swap subject and object, shift the relation ID
    assert (p < num_relations).all()
    p_inv = p + num_relations
    inverse_triples = torch.stack([o, p_inv, s], dim=1)
    all_triples = torch.cat([triples, inverse_triples], dim=0)
    return all_triples, updated_relation_label_to_id
@dataclass
class KnowledgeGraph:
    """A knowledge graph, a multi-relational graph, represented by triples."""
    #: The triples, shape: (n, 3)
    triples: Triples
    #: The mapping from entity labels to IDs
    entity_label_to_id: Optional[Mapping[str, int]]
    #: The mapping from relations labels to IDs
    relation_label_to_id: Optional[Mapping[str, int]]
    #: Language code of the knowledge graph (e.g. zh, en, ...)
    lang_code: Optional[str] = None
    #: Dataset name
    dataset_name: Optional[str] = None
    #: Dataset subset name
    subset_name: Optional[str] = None
    #: Whether inverse triples have been added
    inverse_triples: bool = False
    #: Whether self-loops have been added.
    self_loops: bool = False
    @property
    def num_triples(self) -> int:
        """Return the number of triples."""
        return self.triples.shape[0]
    @property
    def num_entities(self) -> int:
        """Return the number of entities."""
        # count distinct IDs so that multiple labels mapping to one ID are not double-counted
        return len(set(self.entity_label_to_id.values()))
    @property
    def num_relations(self) -> int:
        """Return the number of relations."""
        return len(set(self.relation_label_to_id.values()))
    def with_inverse_triples(
        self,
        inverse_relation_postfix: Optional[str] = None,
    ) -> 'KnowledgeGraph':
        """Return a KG with added inverse triples, if not already contained. Otherwise return reference to self."""
        # inverse triples must be added *before* self-loops; otherwise the
        # self-loop triples would receive redundant inverse counterparts
        assert not self.self_loops
        if self.inverse_triples:
            return self
        else:
            enriched_triples, enriched_relation_label_to_id = add_inverse_triples(
                triples=self.triples,
                relation_label_to_id=self.relation_label_to_id,
                inverse_relation_postfix=inverse_relation_postfix,
            )
            return KnowledgeGraph(
                triples=enriched_triples,
                entity_label_to_id=self.entity_label_to_id,
                relation_label_to_id=enriched_relation_label_to_id,
                inverse_triples=True,
                self_loops=False,
                lang_code=self.lang_code,
                dataset_name=self.dataset_name,
                subset_name=self.subset_name
            )
    def with_self_loops(
        self,
        self_loop_relation_name: Optional[str] = None,
    ) -> 'KnowledgeGraph':
        """Return a KG with added self-loops, if not already contained. Otherwise return reference to self."""
        if self.self_loops:
            return self
        else:
            enriched_triples, enriched_relation_label_to_id = add_self_loops(
                triples=self.triples,
                entity_label_to_id=self.entity_label_to_id,
                relation_label_to_id=self.relation_label_to_id,
                self_loop_relation_name=self_loop_relation_name,
            )
            return KnowledgeGraph(
                triples=enriched_triples,
                entity_label_to_id=self.entity_label_to_id,
                relation_label_to_id=enriched_relation_label_to_id,
                inverse_triples=self.inverse_triples,
                self_loops=True,
                lang_code=self.lang_code,
                dataset_name=self.dataset_name,
                subset_name=self.subset_name
            )
    def __str__(self):  # noqa: D105
        return f'{self.__class__.__name__}(num_triples={self.num_triples}, num_entities={self.num_entities}, num_relations={self.num_relations}, inverse_triples={self.inverse_triples}, self_loops={self.self_loops})'
    def get_relation_label_by_id(self, relation_id: int) -> Optional[str]:
        """Lookup a relation label for a given ID.

        :return: The unique label mapping to ``relation_id``, or None if no label does.
        :raises ValueError: If more than one label maps to ``relation_id``.
        """
        matches = [label for (label, id_) in self.relation_label_to_id.items() if id_ == relation_id]
        if len(matches) == 0:
            return None
        if len(matches) > 1:
            raise ValueError(f'More than one relation with ID {relation_id}')
        return matches[0]
    def save(self, directory: pathlib.Path) -> None:
        """Save the KG to a directory.

        Writes ``triples.pth`` (torch serialization) and ``metadata.json.xz``
        (xz-compressed JSON with the label mappings and dataset info). Only
        graphs without inverse triples / self-loops are supported.
        """
        # ensure the directory exists
        directory.mkdir(parents=True, exist_ok=True)
        # save triples
        torch.save(self.triples, directory / 'triples.pth')
        # NOTE(review): these asserts run after the triples were already written;
        # a violation leaves a partial save behind
        assert not self.inverse_triples
        assert not self.self_loops
        # save label-to-id
        with lzma.open(directory / 'metadata.json.xz', 'wt') as json_file:
            json.dump(
                obj=dict(
                    entity_label_to_id=self.entity_label_to_id,
                    relation_label_to_id=self.relation_label_to_id,
                    lang_code=self.lang_code,
                    dataset_name=self.dataset_name,
                    subset_name=self.subset_name,
                ),
                fp=json_file,
                sort_keys=True,
                indent=2,
            )
    @staticmethod
    def load(directory: pathlib.Path) -> 'KnowledgeGraph':
        """Load the KG from a directory written by :meth:`save`.

        The inverse_triples / self_loops flags keep their defaults (False),
        matching the asserts in :meth:`save`.
        """
        triples = torch.load(directory / 'triples.pth')
        # 'r' opens the xz file in binary mode; json.load accepts bytes input
        with lzma.open(directory / 'metadata.json.xz', 'r') as json_file:
            meta = json.load(json_file)
        return KnowledgeGraph(
            triples=triples,
            entity_label_to_id=meta['entity_label_to_id'],
            relation_label_to_id=meta['relation_label_to_id'],
            lang_code=meta['lang_code'],
            dataset_name=meta['dataset_name'],
            subset_name=meta['subset_name'],
        )
@dataclass
class EntityAlignment:
    """An entity alignment between two knowledge graphs."""
    #: The entity alignment used for training, shape: (2, num_train_alignments)
    train: IDAlignment
    #: The entity alignment used for testing, shape: (2, num_test_alignments)
    test: IDAlignment
    #: The entity alignment used for validation, shape: (2, num_validation_alignments)
    _validation: Optional[IDAlignment] = None
    @property
    def validation(self) -> IDAlignment:
        """
        Return the validation alignment.
        :return: shape: (2, num_val_alignments), dtype=long
            The validation alignment.
        """
        # an unset validation part is exposed as an empty (2, 0) tensor so that
        # concatenation and shape[1] accesses work uniformly downstream
        if self._validation is None:
            return torch.empty(2, 0, dtype=torch.long, device=self.train.device)
        return self._validation
    @property
    def num_train(self) -> int:
        """Return the number of training alignment pairs."""
        return self.train.shape[1]
    @property
    def num_validation(self) -> int:
        """Return the number of validation alignment pairs."""
        return self.validation.shape[1]
    @property
    def num_test(self) -> int:
        """Return the number of test alignment pairs."""
        return self.test.shape[1]
    @property
    def all(self) -> IDAlignment:
        """
        Return the concatenation of all alignments parts.
        :return: shape: (2, num_total_alignments), dtype=long
            All alignments (train, validation, test)
        """
        return torch.cat([self.train, self.validation, self.test], dim=1)
    def to_dict(self) -> Mapping[str, IDAlignment]:
        """Convert the alignment to a dictionary with keys {'train', 'test'}, and optionally 'validation'."""
        # parts without any pairs (numel() == 0) are omitted from the result
        return {
            key: value
            for key, value in zip(
                ('train', 'test', 'validation'),
                (self.train, self.test, self.validation)
            )
            if value.numel() > 0
        }
    def validation_split(self, train_ratio: float, seed: Optional[int] = None) -> 'EntityAlignment':
        """Return a new alignment object where the training alignments have been split to train, and validation.

        :raises ValueError: If ``train_ratio`` is not strictly between 0 and 1.
        """
        if train_ratio <= 0. or train_ratio >= 1.:
            raise ValueError(f'ratio must be in (0, 1), but is {train_ratio}')
        return validation_split(alignment=self, train_ratio=train_ratio, seed=seed)
    def __str__(self):  # noqa: D105
        return f'{self.__class__.__name__}(num_train={self.num_train}, num_test={self.num_test}, num_val={self.num_validation})'
    @staticmethod
    def from_full_alignment(
        alignment: IDAlignment,
        train_test_split: Optional[float],
        train_validation_split: Optional[float],
        seed: Optional[int] = 42,
    ) -> 'EntityAlignment':
        """
        Create an entity alignment by splitting a given alignment tensor.
        If requested the alignment is first split into a train and test part. Afterwards, if requested, the train part
        is split to train and validation.
        :param alignment: shape: (2, total_num_alignments)
            The ID-based alignment.
        :param train_test_split:
            The train-test split ratio. None is treated as 1.0 (everything to train).
        :param train_validation_split:
            The train-validation split ratio. None is treated as 1.0 (no validation).
        :param seed:
            The seed to be used for splitting.
        :return:
            An entity alignment.
        """
        if train_test_split is None:
            train_test_split = 1.
        if train_validation_split is None:
            train_validation_split = 1.
        # the first split ratio is the *test* fraction, hence the complement
        test_train_split = 1. - train_test_split
        # pylint: disable=unbalanced-tuple-unpacking
        # presumably split_tensor takes the test fraction first, then splits the
        # remainder by train_validation_split into train / validation -- TODO confirm
        test, train, validation = split_tensor(alignment, ratios=[test_train_split, train_validation_split], shuffle=True, dim=1, seed=seed)
        return EntityAlignment(
            train=train,
            test=test,
            _validation=validation,
        )
    def __getitem__(self, item: str) -> IDAlignment:  # noqa: D105
        # dict-style access for the three part names; anything else is a KeyError
        if item == 'train':
            return self.train
        elif item == 'test':
            return self.test
        elif item == 'validation':
            return self.validation
        else:
            raise KeyError(item)
class KnowledgeGraphAlignmentDataset:
    """A knowledge graph alignment data set, comprising a pair of graphs, and a (partial) alignment of their entities."""
    #: The first knowledge graph
    left_graph: KnowledgeGraph
    #: The second knowledge graph
    right_graph: KnowledgeGraph
    #: The alignment
    alignment: EntityAlignment
    def __init__(
        self,
        left_graph: KnowledgeGraph,
        right_graph: KnowledgeGraph,
        alignment: EntityAlignment,
    ):
        """
        Initialize the alignment dataset.
        :param left_graph:
            The left graph.
        :param right_graph:
            The right graph.
        :param alignment:
            The alignment between the graphs.
        """
        self.left_graph = left_graph
        self.right_graph = right_graph
        self.alignment = alignment
    def validation_split(self, train_ratio: float, seed: Optional[int] = None) -> 'KnowledgeGraphAlignmentDataset':
        """Return the dataset, where the training alignment part has been split into train and validation part."""
        return KnowledgeGraphAlignmentDataset(
            left_graph=self.left_graph,
            right_graph=self.right_graph,
            alignment=self.alignment.validation_split(train_ratio=train_ratio, seed=seed),
        )
    @property
    def triples(self) -> Mapping[MatchSideEnum, Triples]:
        """Return a dictionary of the side to the corresponding triples on this side."""
        return {
            MatchSideEnum.left: self.left_graph.triples,
            MatchSideEnum.right: self.right_graph.triples,
        }
    @property
    def graphs(self) -> Mapping[MatchSideEnum, KnowledgeGraph]:
        """Return a dictionary of the side to KG on this side."""
        return {
            MatchSideEnum.left: self.left_graph,
            MatchSideEnum.right: self.right_graph,
        }
    @property
    def num_nodes(self) -> Mapping[MatchSideEnum, int]:
        """Return a dictionary of side to number of entities."""
        return {
            MatchSideEnum.left: self.left_graph.num_entities,
            MatchSideEnum.right: self.right_graph.num_entities,
        }
    @property
    def num_exclusives(self) -> Mapping[MatchSideEnum, int]:
        """Return a dictionary of side to number of exclusive nodes."""
        # alignment.all has shape (2, num_alignments); its rows follow SIDES order
        return {
            side: self.num_nodes[side] - len(set(aligned_on_side.tolist()))
            for side, aligned_on_side in zip(SIDES, self.alignment.all)
        }
    @property
    def exclusives(self) -> Mapping[MatchSideEnum, EntityIDs]:
        """Return a dictionary of side to ID of exclusive entities."""
        # consistency fix: use the canonical SIDES constant (as num_exclusives does)
        # instead of a re-spelled [left, right] list
        return {
            side: torch.as_tensor(
                data=sorted(set(range(self.graphs[side].num_entities)).difference(aligned_on_side.tolist())),
                dtype=torch.long,
            )
            for side, aligned_on_side in zip(SIDES, self.alignment.all)
        }
    @property
    def dataset_name(self) -> str:
        """Return the name of the dataset."""
        # assumes both graphs stem from the same dataset; left is representative
        return self.left_graph.dataset_name
    @property
    def subset_name(self) -> str:
        """Return the name of the subset."""
        return self.left_graph.subset_name
    def __str__(self):  # noqa: D105
        return f'{self.__class__.__name__}(left={self.left_graph}, right={self.right_graph}, align={self.alignment})'
    def to_dict(self) -> Mapping[str, Union[KnowledgeGraph, EntityAlignment]]:
        """Return a dictionary view of the dataset."""
        return dict(
            left_graph=self.left_graph,
            right_graph=self.right_graph,
            alignment=self.alignment,
        )
    def with_inverse_triples(self) -> 'KnowledgeGraphAlignmentDataset':
        """Return the dataset where both sides have been extended by inverse triples."""
        return KnowledgeGraphAlignmentDataset(
            left_graph=self.left_graph.with_inverse_triples(),
            right_graph=self.right_graph.with_inverse_triples(),
            alignment=self.alignment,
        )
    def with_self_loops(self) -> 'KnowledgeGraphAlignmentDataset':
        """Return the dataset where both sides have been extended by self-loops."""
        return KnowledgeGraphAlignmentDataset(
            left_graph=self.left_graph.with_self_loops(),
            right_graph=self.right_graph.with_self_loops(),
            alignment=self.alignment,
        )
def validation_split(
    alignment: EntityAlignment,
    train_ratio: float = 0.8,
    seed: int = 42,
) -> EntityAlignment:
    """
    Split the train part of an entity alignment into train and validation.

    If the alignment already has a validation part, it is merged back into the
    pool first, so repeated splitting does not shrink the train part.

    :param alignment:
        The alignment.
    :param train_ratio: 0 < x < 1
        The ratio of alignments to use for the train part.
    :param seed:
        The seed to use for randomisation.
    :return:
        An entity alignment with the updated train and validation part.
    :raises ValueError:
        If ``train_ratio`` is not strictly between 0 and 1.
    """
    # Check input
    if not (0. < train_ratio < 1.):
        raise ValueError(f'train_ratio must be between 0 and 1, but is {train_ratio}')
    # re-combine train and validation, if already split
    # (dead-code fix: the original also maintained an unused `num_total` counter)
    pool = alignment.train
    if alignment.num_validation > 0:
        pool = torch.cat([pool, alignment.validation], dim=1)
    # Delegate to tensor-based split.
    # pylint: disable=unbalanced-tuple-unpacking
    train_alignments, validation_alignments = split_tensor(tensor=pool, ratios=train_ratio, dim=1, seed=seed)
    # Construct new alignment object.
    return EntityAlignment(
        train=train_alignments,
        _validation=validation_alignments,
        test=alignment.test,
    )
def exact_self_alignment(
    graph: KnowledgeGraph,
    train_percentage: float = 0.5,
) -> KnowledgeGraphAlignmentDataset:
    """
    Create an alignment between a graph and a randomly permuted copy of it.

    :param graph: The graph.
    :param train_percentage: The percentage of training alignments.
    :return: A knowledge graph alignment dataset.
    """
    num = graph.num_entities
    # pair every entity with its image under a random permutation
    full_alignment = torch.stack([
        torch.arange(num, dtype=torch.long),
        torch.randperm(num)
    ], dim=0)
    # shuffle the alignment columns
    full_alignment = full_alignment[:, torch.randperm(num)]
    # translation table: original entity ID -> permuted entity ID
    old_to_new = {int(a): int(b) for a, b in full_alignment.t()}
    def _translate(column):
        """Map a column of entity IDs into the permuted ID space."""
        return torch.tensor([old_to_new[int(e)] for e in column], dtype=torch.long)  # pylint: disable=not-callable
    heads, rels, tails = graph.triples.t()
    new_triples = torch.stack([_translate(heads), rels.detach().clone(), _translate(tails)], dim=-1)
    # compose second KG: translated labels, identical relations, same enrichment flags
    second_graph = KnowledgeGraph(
        triples=new_triples,
        entity_label_to_id={label: old_to_new[old] for label, old in graph.entity_label_to_id.items()},
        relation_label_to_id=graph.relation_label_to_id.copy(),
        inverse_triples=graph.inverse_triples,
        self_loops=graph.self_loops,
    )
    # split the alignment columns into train / test
    boundary = int(train_percentage * num)
    alignment = EntityAlignment(
        train=full_alignment[:, :boundary],
        test=full_alignment[:, boundary:],
    )
    return KnowledgeGraphAlignmentDataset(
        left_graph=graph,
        right_graph=second_graph,
        alignment=alignment,
    )
def sub_graph_alignment(
    graph: KnowledgeGraph,
    overlap: float = 0.5,
    ratio: float = 0.7,
    train_test_split: float = 0.5,
    train_validation_split: Optional[float] = 0.8,
) -> KnowledgeGraphAlignmentDataset:
    """
    Create a synthetic entity alignment dataset, where both sides are random subgraphs from a larger one.
    :param graph:
        The source KG.
    :param overlap:
        The percentage of overlapping entities.
    :param ratio:
        The ratio of entities between the two KG.
    :param train_test_split:
        The ratio for train-test splitting the aligned entities.
    :param train_validation_split:
        The ratio for train-validation splitting the training alignments; None disables the validation split.
    :return:
        A entity alignment dataset.
    """
    # split entities
    entities = torch.arange(graph.num_entities)
    # pylint: disable=unbalanced-tuple-unpacking
    common, left, right = split_tensor(tensor=entities, ratios=[overlap, ratio])
    # the shared entities are prepended to both sides
    left = torch.cat([common, left])
    right = torch.cat([common, right])
    # create alignment
    # the alignment pairs index i with index i, relying on the common entities
    # occupying the first common.shape[0] positions on both sides
    alignment = EntityAlignment.from_full_alignment(
        alignment=torch.arange(common.shape[0]).unsqueeze(dim=0).repeat(2, 1),
        train_test_split=train_test_split,
        train_validation_split=train_validation_split,
    )
    # induced subgraph
    # NOTE(review): `enumerate(set(...))` does not guarantee that a common entity
    # receives the same new ID on both sides, which the identity alignment above
    # seems to assume -- verify against split_tensor / downstream usage
    graphs = []
    for ent in [left, right]:
        ent = set(ent.tolist())
        # densely relabel entities; keep only triples fully inside the subgraph
        entity_label_to_id = {
            str(old_id): new_id
            for new_id, old_id in enumerate(ent)
        }
        triples = torch.as_tensor(data=[
            (entity_label_to_id[str(h)], r, entity_label_to_id[str(t)])
            for h, r, t in graph.triples.tolist()
            if (h in ent and t in ent)
        ], dtype=torch.long)
        graphs.append(KnowledgeGraph(
            triples=triples,
            entity_label_to_id=entity_label_to_id,
            relation_label_to_id=graph.relation_label_to_id,
        ))
    return KnowledgeGraphAlignmentDataset(
        left_graph=graphs[0],
        right_graph=graphs[1],
        alignment=alignment,
    )
def get_erdos_renyi(
    num_entities: int,
    num_relations: int,
    num_triples: int,
) -> KnowledgeGraph:
    """
    Generate a synthetic KG with uniformly random triples and random edge typing.

    :param num_entities: >0
        The number of entities.
    :param num_relations: >0
        The number of relations.
    :param num_triples:
        The number of triples to sample.
    :return:
        A KG.
    """
    # sample head / relation / tail IDs i.i.d. uniformly at random
    triples = torch.stack([
        torch.randint(max_id, size=(num_triples,))
        for max_id in (num_entities, num_relations, num_entities)
    ], dim=-1)
    return KnowledgeGraph(
        triples=triples,
        entity_label_to_id={str(i): i for i in range(num_entities)},
        relation_label_to_id={str(i): i for i in range(num_relations)},
        dataset_name='erdos_renyi',
        # bug fix: the original referenced an undefined name `p` here, raising a
        # NameError at runtime; the subset is now identified by its actual parameters
        subset_name=f'{num_entities}-{num_relations}-{num_triples}',
    )
def get_synthetic_math_graph(
    num_entities: int,
) -> KnowledgeGraph:
    """
    Generate a synthetic KG of positive integers, linked by modulo relations.

    Entity e is connected to (e + r) % num_entities via relation r.

    :param num_entities:
        The number of entities.
    :return:
        A KG.
    """
    ids = range(num_entities)
    # one triple per (relation, entity) combination
    triple_list = []
    for rel in ids:
        for ent in ids:
            triple_list.append((ent, rel, (ent + rel) % num_entities))
    return KnowledgeGraph(
        triples=torch.as_tensor(triple_list, dtype=torch.long),
        entity_label_to_id={str(ent): ent for ent in ids},
        relation_label_to_id={'+' + str(rel): rel for rel in ids},
    )
| 25,646 | 33.287433 | 215 | py |
rank-based-evaluation | rank-based-evaluation-main/executables/adjusted_ranking_experiments.py | # coding=utf-8
"""Evaluation of different training and test sizes."""
import argparse
import logging
import random
import mlflow
import numpy
import torch
import tqdm
from kgm.data import get_dataset_by_name
from kgm.eval.matching import evaluate_matching_model
from kgm.models import GCNAlign
from kgm.modules import MarginLoss, SampledMatchingLoss, get_similarity
from kgm.training.matching import AlignmentModelTrainer
from kgm.utils.mlflow_utils import log_metrics_to_mlflow, log_params_to_mlflow
def main():
    """Run the adjusted-ranking experiment grid and log every run to MLFlow."""
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='dbp15k_jape')
    parser.add_argument('--subset', type=str, default='zh_en')
    parser.add_argument('--num_epochs', type=int, default=2_000)
    parser.add_argument('--iterations', type=int, default=5)
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--tracking_uri', type=str, default='http://localhost:5000')
    args = parser.parse_args()
    # Mlflow settings
    logging.info(f'Logging to MLFlow @ {args.tracking_uri}')
    mlflow.set_tracking_uri(uri=args.tracking_uri)
    mlflow.set_experiment('adjusted_ranking_experiments')
    # Determine device
    # NOTE(review): args.device is parsed but never used -- the device is picked automatically here
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    logging.info(f"Using device={device}")
    # load dataset
    dataset = get_dataset_by_name(
        dataset_name=args.dataset,
        subset_name=args.subset,
        inverse_triples=True,  # GCNAlign default
        self_loops=True,  # GCNAlign default
    )
    # grid over training-alignment sizes; 0 evaluates a completely untrained model
    for num_train in [
        0,
        10,
        20,
        50,
        100,
        200,
        500,
        1000,
        2000,
        3000,
        5000,
        7500,
    ]:
        ea_full = dataset.alignment.all
        i_all = ea_full.shape[1]
        i_train = num_train
        # store optimal evaluation batch size for different sizes
        for iteration in tqdm.trange(args.iterations, unit='run', unit_scale=True):
            # fix random seed (torch, numpy, and stdlib) per repetition
            torch.manual_seed(iteration)
            numpy.random.seed(iteration)
            random.seed(iteration)
            # train-test split: shuffle columns, then take the first i_train pairs for training
            assert ea_full.shape[0] == 2
            ea_full = ea_full[:, torch.randperm(i_all)]
            ea_train, ea_test = ea_full[:, :i_train], ea_full[:, i_train:]
            # instantiate model
            model = GCNAlign(
                dataset=dataset,
                embedding_dim=200,
                n_layers=2,
                use_conv_weights=False,
            ).to(device=device)
            # instantiate similarity
            similarity = get_similarity(
                similarity="l1",
                transformation="negative",
            )
            # only train when there are training alignments available
            if i_train > 0:
                # instantiate loss
                loss = SampledMatchingLoss(
                    similarity=similarity,
                    base_loss=MarginLoss(margin=3.),
                    num_negatives=50,
                )
                # instantiate trainer
                trainer = AlignmentModelTrainer(
                    model=model,
                    similarity=similarity,
                    dataset=dataset,
                    loss=loss,
                    optimizer_cls="adam",
                    optimizer_kwargs=dict(
                        lr=1.0,
                    ),
                )
                # train
                trainer.train(num_epochs=args.num_epochs)
            # evaluate with different test set sizes
            total_num_test_alignments = ea_test.shape[1]
            # NOTE(review): range() excludes its stop value, so the complete test set
            # itself is never among the evaluated sizes -- confirm this is intended
            test_sizes = list(range(1_000, total_num_test_alignments, 1_000))
            results = dict(evaluate_matching_model(
                model=model,
                alignments={
                    k: ea_test[:, :k]
                    for k in test_sizes
                },
                similarity=similarity,
            )[0])
            # store results
            for size, result in results.items():
                # start experiment: one MLFlow run per (train size, test size, seed)
                with mlflow.start_run():
                    log_params_to_mlflow(config=dict(
                        dataset=args.dataset,
                        subset=args.subset,
                        num_epochs=args.num_epochs,
                        num_train_alignments=i_train,
                        num_test_alignments=ea_test[:, :size].shape[1],
                        seed=iteration,
                    ))
                    log_metrics_to_mlflow(metrics=result)
if __name__ == '__main__':
    main()
| 4,618 | 30.855172 | 87 | py |
rank-based-evaluation | rank-based-evaluation-main/executables/degree_investigation.py | """Script to generate the evaluation for degree inductive bias."""
import numpy
import torch
from matplotlib import pyplot as plt
from scipy.stats import pearsonr, spearmanr
from kgm.data import SIDES, get_dataset_by_name
from kgm.models import GCNAlign, PureEmbeddingModel
def degree_vs_norm(
    dataset
):
    """Print the Spearman correlation between node degree and embedding norm.

    Compares a randomly initialised plain embedding model ('pure') against an
    untrained 2-layer GCNAlign model ('gcn') on the same dataset.
    """
    # calculate degree
    degrees = dict()
    # NOTE(review): the loop index i is unused
    for i, side in enumerate(SIDES):
        graph = dataset.graphs[side]
        # start counts at one to account for the implicit self-loop
        degree = torch.ones(graph.num_entities, dtype=torch.long)  # self-loops
        for col in [0, 2]:
            # add occurrences as head (col 0) and as tail (col 2)
            idx, cnt = torch.unique(graph.triples[:, col], return_counts=True)
            degree[idx] += cnt
        degrees[side] = degree
    # just random vectors
    pure_model = PureEmbeddingModel(
        dataset=dataset,
        embedding_dim=200,
    )
    # untrained gcn model on random vectors
    gcn_model = GCNAlign(
        dataset=dataset,
        embedding_dim=200,
        n_layers=2,
        use_conv_weights=False,
    )
    for label, model in dict(
        gcn=gcn_model,
        pure=pure_model,
    ).items():
        # per-side norm of each node embedding
        norm = {
            side: vectors.norm(dim=-1).detach().numpy()
            for side, vectors in model().items()
        }
        x, y = [], []
        for side, deg in degrees.items():
            x.append(deg)
            y.append(norm[side])
        # pool both sides before computing the rank correlation
        x = numpy.concatenate(x)
        y = numpy.concatenate(y)
        print(label, spearmanr(y, x))
def degree_correlation(dataset):
    """Quantify and plot the degree correlation of aligned node pairs.

    Computes Pearson and Spearman correlation between the degrees of matched
    entities on both sides, writes a log-log scatter plot to
    'degree_correlation.pdf', and returns the degree tensor (same shape as
    ``dataset.alignment.all``).
    """
    # compute degree for all aligned nodes
    degree = torch.empty_like(dataset.alignment.all)
    for i, side in enumerate(SIDES):
        graph = dataset.graphs[side]
        # start counts at one to account for the implicit self-loop
        deg = torch.ones(graph.num_entities, dtype=torch.long)  # self-loops
        for col in [0, 2]:
            idx, cnt = torch.unique(graph.triples[:, col], return_counts=True)
            deg[idx] += cnt
        # select the degrees of the aligned entities on this side
        degree[i] = deg[dataset.alignment.all[i]]
    # compute correlation
    rho_p, p_p = pearsonr(*degree.numpy())
    rho_s, p_s = spearmanr(*degree.numpy())
    # plot
    plt.clf()
    plt.figure(figsize=(6, 6))
    plt.scatter(*degree.numpy(), marker=".", color="black")
    plt.yscale("log")
    plt.xscale("log")
    plt.axis("equal")
    plt.xlabel("degree " + dataset.graphs[SIDES[0]].lang_code + " [log]")
    plt.ylabel("degree " + dataset.graphs[SIDES[1]].lang_code + " [log]")
    plt.title(rf"Pearson $\rho$={rho_p:2.2%} (p={p_p}); Spearman $\rho$={rho_s:2.2%} (p={p_s})")
    plt.tight_layout()
    plt.savefig("degree_correlation.pdf")
    return degree
def main():
    """Run both degree analyses on the DBP15k (JAPE) zh-en subset."""
    # get dataset
    dataset = get_dataset_by_name(
        dataset_name='dbp15k_jape',
        subset_name='zh_en',
    )
    # degree correlation of aligned nodes
    degree_correlation(dataset=dataset)
    # degree vs. embedding norm
    degree_vs_norm(dataset=dataset)
if __name__ == '__main__':
    main()
| 2,854 | 27.55 | 96 | py |
CIF-HieraDist | CIF-HieraDist-main/setup.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from setuptools import setup, find_packages, Extension
from setuptools import Extension, find_packages, setup
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for fairseq.")
def write_version_py():
    """Read fairseq/version.txt, append the current git SHA, and write fairseq/version.py.

    Returns the resulting version string (e.g. "1.0.0+abc1234").
    """
    with open(os.path.join("fairseq", "version.txt")) as f:
        version = f.read().strip()
    # append latest commit hash to version string
    try:
        sha = (
            subprocess.check_output(["git", "rev-parse", "HEAD"])
            .decode("ascii")
            .strip()
        )
        version += "+" + sha[:7]
    except Exception:
        # e.g. not a git checkout (building from an sdist) -- keep the plain version
        pass
    # write version info to fairseq/version.py
    with open(os.path.join("fairseq", "version.py"), "w") as f:
        f.write('__version__ = "{}"\n'.format(version))
    return version
version = write_version_py()
with open("README.md") as f:
    readme = f.read()
# platform-specific C++ flags: macOS needs libc++, elsewhere request C++11 explicitly
if sys.platform == "darwin":
    extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
    extra_compile_args = ["-std=c++11", "-O3"]
class NumpyExtension(Extension):
    """Source: https://stackoverflow.com/a/54128391"""
    # Defers `import numpy` (needed for numpy.get_include()) until the property
    # is read at build time, so setup.py can be imported before numpy is installed.
    def __init__(self, *args, **kwargs):
        self.__include_dirs = []
        super().__init__(*args, **kwargs)
    @property
    def include_dirs(self):
        import numpy
        return self.__include_dirs + [numpy.get_include()]
    @include_dirs.setter
    def include_dirs(self, dirs):
        # store user-provided dirs; the numpy include dir is appended on read
        self.__include_dirs = dirs
# Always-built extension modules: the BLEU C++ helper plus the Cython data utilities.
extensions = [
    Extension(
        "fairseq.libbleu",
        sources=[
            "fairseq/clib/libbleu/libbleu.cpp",
            "fairseq/clib/libbleu/module.cpp",
        ],
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        "fairseq.data.data_utils_fast",
        sources=["fairseq/data/data_utils_fast.pyx"],
        language="c++",
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        "fairseq.data.token_block_utils_fast",
        sources=["fairseq/data/token_block_utils_fast.pyx"],
        language="c++",
        extra_compile_args=extra_compile_args,
    ),
]
cmdclass = {}
try:
    # torch is not available when generating docs
    from torch.utils import cpp_extension
    extensions.extend(
        [
            cpp_extension.CppExtension(
                "fairseq.libbase",
                sources=[
                    "fairseq/clib/libbase/balanced_assignment.cpp",
                ],
            )
        ]
    )
    extensions.extend(
        [
            cpp_extension.CppExtension(
                "fairseq.libnat",
                sources=[
                    "fairseq/clib/libnat/edit_dist.cpp",
                ],
            ),
            cpp_extension.CppExtension(
                "alignment_train_cpu_binding",
                sources=[
                    "examples/operators/alignment_train_cpu.cpp",
                ],
            ),
        ]
    )
    # CUDA extensions are only built when a CUDA toolkit is available
    if "CUDA_HOME" in os.environ:
        extensions.extend(
            [
                cpp_extension.CppExtension(
                    "fairseq.libnat_cuda",
                    sources=[
                        "fairseq/clib/libnat_cuda/edit_dist.cu",
                        "fairseq/clib/libnat_cuda/binding.cpp",
                    ],
                ),
                cpp_extension.CppExtension(
                    "fairseq.ngram_repeat_block_cuda",
                    sources=[
                        "fairseq/clib/cuda/ngram_repeat_block_cuda.cpp",
                        "fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu",
                    ],
                ),
                cpp_extension.CppExtension(
                    "alignment_train_cuda_binding",
                    sources=[
                        "examples/operators/alignment_train_kernel.cu",
                        "examples/operators/alignment_train_cuda.cpp",
                    ],
                ),
            ]
        )
    cmdclass["build_ext"] = cpp_extension.BuildExtension
except ImportError:
    pass
if "READTHEDOCS" in os.environ:
    # don't build extensions when generating docs
    extensions = []
    if "build_ext" in cmdclass:
        del cmdclass["build_ext"]
    # use CPU build of PyTorch
    dependency_links = [
        "https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
    ]
else:
    dependency_links = []
if "clean" in sys.argv[1:]:
    # Source: https://bit.ly/2NLVsgE
    print("deleting Cython files...")
    # NOTE(review): this local import is redundant -- subprocess is already imported at module level
    import subprocess
    subprocess.run(
        ["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
        shell=True,
    )
extra_packages = []
# ship the Megatron model-parallel subpackage only if the submodule is checked out
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
    extra_packages.append("fairseq.model_parallel.megatron.mpu")
def do_setup(package_data):
    """Invoke setuptools.setup with the shared fairseq metadata and the given *package_data*."""
    setup(
        name="fairseq",
        version=version,
        description="Facebook AI Research Sequence-to-Sequence Toolkit",
        url="https://github.com/pytorch/fairseq",
        classifiers=[
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: MIT License",
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Topic :: Scientific/Engineering :: Artificial Intelligence",
        ],
        long_description=readme,
        long_description_content_type="text/markdown",
        setup_requires=[
            "cython",
            'numpy<1.20.0; python_version<"3.7"',
            'numpy; python_version>="3.7"',
            "setuptools>=18.0",
        ],
        install_requires=[
            "cffi",
            "cython",
            'dataclasses; python_version<"3.7"',
            "hydra-core>=1.0.7,<1.1",
            "omegaconf<2.1",
            'numpy<1.20.0; python_version<"3.7"',
            'numpy; python_version>="3.7"',
            "regex",
            "sacrebleu>=1.4.12",
            "torch",
            "tqdm",
            "bitarray",
            # "torchaudio>=0.8.0",
        ],
        dependency_links=dependency_links,
        packages=find_packages(
            exclude=[
                "examples",
                "examples.*",
                "scripts",
                "scripts.*",
                "tests",
                "tests.*",
            ]
        )
        + extra_packages,
        package_data=package_data,
        ext_modules=extensions,
        test_suite="tests",
        entry_points={
            "console_scripts": [
                "fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
                "fairseq-generate = fairseq_cli.generate:cli_main",
                "fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
                "fairseq-interactive = fairseq_cli.interactive:cli_main",
                "fairseq-preprocess = fairseq_cli.preprocess:cli_main",
                "fairseq-score = fairseq_cli.score:cli_main",
                "fairseq-train = fairseq_cli.train:cli_main",
                "fairseq-validate = fairseq_cli.validate:cli_main",
            ],
        },
        cmdclass=cmdclass,
        zip_safe=False,
    )
def get_files(path, relative_to="fairseq"):
    """Recursively collect all non-.pyc files below *path*, relative to *relative_to*."""
    collected = []
    for walk_root, _dirs, filenames in os.walk(path, followlinks=True):
        rel_root = os.path.relpath(walk_root, relative_to)
        collected.extend(
            os.path.join(rel_root, name)
            for name in filenames
            if not name.endswith(".pyc")
        )
    return collected
if __name__ == "__main__":
    try:
        # symlink examples into fairseq package so package_data accepts them
        fairseq_examples = os.path.join("fairseq", "examples")
        if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
            os.symlink(os.path.join("..", "examples"), fairseq_examples)
        package_data = {
            "fairseq": (
                get_files(fairseq_examples)
                + get_files(os.path.join("fairseq", "config"))
            )
        }
        do_setup(package_data)
    finally:
        # always remove the temporary symlink, even if setup() failed
        if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples):
            os.unlink(fairseq_examples)
| 8,435 | 28.291667 | 92 | py |
CIF-HieraDist | CIF-HieraDist-main/hubconf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import functools
import importlib
#: Packages torch.hub requires before loading fairseq models
#: (``dependencies`` is part of the torch.hub hubconf protocol).
dependencies = [
    "dataclasses",
    "hydra",
    "numpy",
    "omegaconf",
    "regex",
    "requests",
    "torch",
]
# Check for required dependencies and raise a RuntimeError if any are missing.
missing_deps = []
for dep in dependencies:
    try:
        importlib.import_module(dep)
    except ImportError:
        # Hack: the hydra package is provided under the "hydra-core" name in
        # pypi. We don't want the user mistakenly calling `pip install hydra`
        # since that will install an unrelated package.
        if dep == "hydra":
            dep = "hydra-core"
        missing_deps.append(dep)
if len(missing_deps) > 0:
    raise RuntimeError("Missing dependencies: {}".format(", ".join(missing_deps)))
# only do fairseq imports after checking for dependencies
from fairseq.hub_utils import ( # noqa; noqa
BPEHubInterface as bpe,
TokenizerHubInterface as tokenizer,
)
from fairseq.models import MODEL_REGISTRY # noqa
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
    import fairseq.data.token_block_utils_fast  # noqa
except ImportError:
    try:
        import cython  # noqa
        import os
        from setuptools import sandbox
        # run `setup.py build_ext --inplace` in-process to compile the missing
        # Cython extensions next to their sources
        sandbox.run_setup(
            os.path.join(os.path.dirname(__file__), "setup.py"),
            ["build_ext", "--inplace"],
        )
    except ImportError:
        print(
            "Unable to build Cython components. Please make sure Cython is "
            "installed if the torch.hub model you are loading depends on it."
        )
# automatically expose models defined in FairseqModel::hub_models
for _model_type, _cls in MODEL_REGISTRY.items():
    for model_name in _cls.hub_models().keys():
        # functools.partial binds model_name eagerly, so there is no
        # late-binding closure issue across loop iterations.
        globals()[model_name] = functools.partial(
            _cls.from_pretrained,
            model_name,
        )
| 2,099 | 27.378378 | 82 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_to_text/prep_covost_data.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from typing import Optional, Tuple
import pandas as pd
import torchaudio
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
)
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import download_url, extract_archive
from tqdm import tqdm
log = logging.getLogger(__name__)
# Column headers for the generated data-manifest TSV files.
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
class CoVoST(Dataset):
    """Create a Dataset for CoVoST (https://github.com/facebookresearch/covost).
    Args:
        root (str): root path to the dataset and generated manifests/features
        source_language (str): source (audio) language
        target_language (str, optional): target (text) language,
        None for no translation (default: None)
        version (int, optional): CoVoST version. (default: 2)
        download (bool, optional): (documented upstream but unused in this
        implementation; the CoVoST TSV is always downloaded when missing)
    """
    COVOST_URL_TEMPLATE = (
        "https://dl.fbaipublicfiles.com/covost/"
        "covost_v2.{src_lang}_{tgt_lang}.tsv.tar.gz"
    )
    VERSIONS = {2}
    SPLITS = ["train", "dev", "test"]
    # Supported XX -> En translation source languages per CoVoST version.
    XX_EN_LANGUAGES = {
        1: ["fr", "de", "nl", "ru", "es", "it", "tr", "fa", "sv-SE", "mn", "zh-CN"],
        2: [
            "fr",
            "de",
            "es",
            "ca",
            "it",
            "ru",
            "zh-CN",
            "pt",
            "fa",
            "et",
            "mn",
            "nl",
            "tr",
            "ar",
            "sv-SE",
            "lv",
            "sl",
            "ta",
            "ja",
            "id",
            "cy",
        ],
    }
    # Supported En -> XX translation target languages per CoVoST version.
    EN_XX_LANGUAGES = {
        1: [],
        2: [
            "de",
            "tr",
            "fa",
            "sv-SE",
            "mn",
            "zh-CN",
            "cy",
            "ca",
            "sl",
            "et",
            "id",
            "ar",
            "ta",
            "lv",
            "ja",
        ],
    }
    def __init__(
        self,
        root: str,
        split: str,
        source_language: str,
        target_language: Optional[str] = None,
        version: int = 2,
    ) -> None:
        """Validate the language pair, download/extract the CoVoST split TSV
        if needed, and join it with Common Voice's ``validated.tsv`` to build
        the per-utterance sample list."""
        assert version in self.VERSIONS and split in self.SPLITS
        assert source_language is not None
        self.no_translation = target_language is None
        if not self.no_translation:
            assert "en" in {source_language, target_language}
            if source_language == "en":
                assert target_language in self.EN_XX_LANGUAGES[version]
            else:
                assert source_language in self.XX_EN_LANGUAGES[version]
        else:
            # Hack here so that we can get "split" column from CoVoST TSV.
            # Note that we use CoVoST train split for ASR which is an extension
            # to Common Voice train split.
            target_language = "de" if source_language == "en" else "en"
        self.root: Path = Path(root)
        cv_tsv_path = self.root / "validated.tsv"
        assert cv_tsv_path.is_file()
        covost_url = self.COVOST_URL_TEMPLATE.format(
            src_lang=source_language, tgt_lang=target_language
        )
        covost_archive = self.root / Path(covost_url).name
        if not covost_archive.is_file():
            download_url(covost_url, self.root.as_posix(), hash_value=None)
        extract_archive(covost_archive.as_posix())
        cv_tsv = load_df_from_tsv(cv_tsv_path)
        covost_tsv = load_df_from_tsv(
            self.root / Path(covost_url).name.replace(".tar.gz", "")
        )
        # Inner-join on the clip path so only utterances present in both the
        # Common Voice table and the CoVoST split table are kept.
        df = pd.merge(
            left=cv_tsv[["path", "sentence", "client_id"]],
            right=covost_tsv[["path", "translation", "split"]],
            how="inner",
            on="path",
        )
        if split == "train":
            # CoVoST's train split extends Common Voice's with "train_covost".
            df = df[(df["split"] == split) | (df["split"] == f"{split}_covost")]
        else:
            df = df[df["split"] == split]
        data = df.to_dict(orient="index").items()
        data = [v for k, v in sorted(data, key=lambda x: x[0])]
        self.data = []
        # Keep only samples whose audio file exists and is readable.
        for e in data:
            try:
                path = self.root / "clips" / e["path"]
                _ = torchaudio.info(path.as_posix())
                self.data.append(e)
            except RuntimeError:
                pass
    def __getitem__(
        self, n: int
    ) -> Tuple[Tensor, int, str, str, Optional[str], str, str]:
        """Load the n-th sample from the dataset.
        Args:
            n (int): The index of the sample to be loaded
        Returns:
            tuple: ``(waveform, sample_rate, sentence, translation, speaker_id,
            sample_id)``; ``translation`` is None in ASR (no-translation) mode.
        """
        data = self.data[n]
        path = self.root / "clips" / data["path"]
        waveform, sample_rate = torchaudio.load(path)
        sentence = data["sentence"]
        translation = None if self.no_translation else data["translation"]
        speaker_id = data["client_id"]
        _id = data["path"].replace(".mp3", "")
        return waveform, sample_rate, sentence, translation, speaker_id, _id
    def __len__(self) -> int:
        """Number of usable (audio-readable) samples in this split."""
        return len(self.data)
def process(args):
    """Run the CoVoST preprocessing pipeline for one language (pair):
    extract fbank features, zip them, write per-split TSV manifests, train a
    SentencePiece vocabulary on the train text and emit a data-config YAML.
    The temporary per-utterance .npy feature directory is removed at the end.
    """
    root = Path(args.data_root).absolute() / args.src_lang
    if not root.is_dir():
        raise NotADirectoryError(f"{root} does not exist")
    # Extract features
    feature_root = root / "fbank80"
    feature_root.mkdir(exist_ok=True)
    for split in CoVoST.SPLITS:
        print(f"Fetching split {split}...")
        dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
        print("Extracting log mel filter bank features...")
        for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset):
            extract_fbank_features(
                waveform, sample_rate, feature_root / f"{utt_id}.npy"
            )
    # Pack features into ZIP
    zip_path = root / "fbank80.zip"
    print("ZIPing features...")
    create_zip(feature_root, zip_path)
    print("Fetching ZIP manifest...")
    audio_paths, audio_lengths = get_zip_manifest(zip_path)
    # Generate TSV manifest
    print("Generating manifest...")
    train_text = []
    # Task name selects between ASR (transcripts) and ST (translations).
    task = f"asr_{args.src_lang}"
    if args.tgt_lang is not None:
        task = f"st_{args.src_lang}_{args.tgt_lang}"
    for split in CoVoST.SPLITS:
        manifest = {c: [] for c in MANIFEST_COLUMNS}
        dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
        for _, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
            manifest["id"].append(utt_id)
            manifest["audio"].append(audio_paths[utt_id])
            manifest["n_frames"].append(audio_lengths[utt_id])
            manifest["tgt_text"].append(src_utt if args.tgt_lang is None else tgt_utt)
            manifest["speaker"].append(speaker_id)
        is_train_split = split.startswith("train")
        if is_train_split:
            train_text.extend(manifest["tgt_text"])
        df = pd.DataFrame.from_dict(manifest)
        df = filter_manifest_df(df, is_train_split=is_train_split)
        save_df_to_tsv(df, root / f"{split}_{task}.tsv")
    # Generate vocab
    vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
    spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{task}"
    with NamedTemporaryFile(mode="w") as f:
        for t in train_text:
            f.write(t + "\n")
        # NOTE(review): writes may still be buffered when gen_vocab reads
        # f.name; an explicit f.flush() here would be safer — confirm.
        gen_vocab(
            Path(f.name), root / spm_filename_prefix, args.vocab_type, args.vocab_size
        )
    # Generate config YAML
    gen_config_yaml(
        root,
        spm_filename=spm_filename_prefix + ".model",
        yaml_filename=f"config_{task}.yaml",
        specaugment_policy="lb",
    )
    # Clean up
    shutil.rmtree(feature_root)
def main():
    """Command-line entry point: parse arguments and run the CoVoST pipeline."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--data-root",
        "-d",
        required=True,
        type=str,
        help="data root with sub-folders for each language <root>/<src_lang>",
    )
    arg_parser.add_argument(
        "--vocab-type",
        default="unigram",
        required=True,
        type=str,
        choices=["bpe", "unigram", "char"],
    )
    arg_parser.add_argument("--vocab-size", default=1000, type=int)
    arg_parser.add_argument("--src-lang", "-s", required=True, type=str)
    arg_parser.add_argument("--tgt-lang", "-t", type=str)
    process(arg_parser.parse_args())
if __name__ == "__main__":
    main()
| 8,898 | 30.782143 | 86 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_to_text/prep_mtedx_data.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
from pathlib import Path
import shutil
from itertools import groupby
from tempfile import NamedTemporaryFile
from typing import Tuple
import pandas as pd
import soundfile as sf
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
)
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from fairseq.data.audio.audio_utils import get_waveform, convert_waveform
log = logging.getLogger(__name__)
# Manifest TSV headers; tgt_lang is included to support multilingual training.
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker", "tgt_lang"]
class mTEDx(Dataset):
    """
    Create a Dataset for Multilingual TEDx.
    Each item is a tuple of the form: waveform, sample_rate, source utterance,
    target utterance, speaker_id, target language, utterance_id
    """
    SPLITS = ["train", "valid", "test"]
    # Supported "<src>-<tgt>" pairs; same-language pairs (e.g. "es-es") are ASR.
    LANGPAIRS = [
        "es-es",
        "fr-fr",
        "pt-pt",
        "it-it",
        "ru-ru",
        "el-el",
        "ar-ar",
        "de-de",
        "es-en",
        "es-fr",
        "es-pt",
        "es-it",
        "fr-en",
        "fr-es",
        "fr-pt",
        "pt-en",
        "pt-es",
        "it-en",
        "it-es",
        "ru-en",
        "el-en",
    ]
    def __init__(self, root: str, lang: str, split: str) -> None:
        """Parse the split's YAML segmentation and parallel text files under
        ``<root>/<lang>/data/<split>`` and build the per-segment sample list."""
        assert split in self.SPLITS and lang in self.LANGPAIRS
        _root = Path(root) / f"{lang}" / "data" / split
        wav_root, txt_root = _root / "wav", _root / "txt"
        assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir()
        # Load audio segments
        try:
            import yaml
        except ImportError:
            print("Please install PyYAML to load the Multilingual TEDx YAML files")
        with open(txt_root / f"{split}.yaml") as f:
            segments = yaml.load(f, Loader=yaml.BaseLoader)
        # Load source and target utterances
        src, tgt = lang.split("-")
        for _lang in [src, tgt]:
            with open(txt_root / f"{split}.{_lang}") as f:
                utterances = [r.strip() for r in f]
            assert len(segments) == len(utterances)
            for i, u in enumerate(utterances):
                segments[i][_lang] = u
        # Gather info
        self.data = []
        # Segments are grouped by talk (wav file); offsets/durations in the
        # YAML are in seconds and converted here to sample counts.
        for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
            wav_filename = wav_filename.replace(".wav", ".flac")
            wav_path = wav_root / wav_filename
            sample_rate = sf.info(wav_path.as_posix()).samplerate
            seg_group = sorted(_seg_group, key=lambda x: float(x["offset"]))
            for i, segment in enumerate(seg_group):
                offset = int(float(segment["offset"]) * sample_rate)
                n_frames = int(float(segment["duration"]) * sample_rate)
                _id = f"{wav_path.stem}_{i}"
                self.data.append(
                    (
                        wav_path.as_posix(),
                        offset,
                        n_frames,
                        sample_rate,
                        segment[src],
                        segment[tgt],
                        segment["speaker_id"],
                        tgt,
                        _id,
                    )
                )
    def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str, str, str, str, str]:
        """Load the n-th segment; returns a 7-tuple
        ``(waveform, sr, src_utt, tgt_utt, spk_id, tgt_lang, utt_id)``."""
        (
            wav_path,
            offset,
            n_frames,
            sr,
            src_utt,
            tgt_utt,
            spk_id,
            tgt_lang,
            utt_id,
        ) = self.data[n]
        waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset)
        waveform = torch.from_numpy(waveform)
        return waveform, sr, src_utt, tgt_utt, spk_id, tgt_lang, utt_id
    def __len__(self) -> int:
        """Number of segments in this split."""
        return len(self.data)
def process(args):
    """Run the mTEDx preprocessing pipeline for every available language pair.

    For each pair under ``args.data_root`` this extracts either 16 kHz FLAC
    audio (``--use-audio-input``) or 80-dim fbank features, zips them, writes
    per-split TSV manifests, trains a SentencePiece vocabulary on the train
    text and emits a data-config YAML. Missing pair directories are skipped.
    The temporary per-utterance audio/feature directory is removed at the end.
    """
    root = Path(args.data_root).absolute()
    for lang in mTEDx.LANGPAIRS:
        cur_root = root / f"{lang}"
        if not cur_root.is_dir():
            print(f"{cur_root.as_posix()} does not exist. Skipped.")
            continue
        # Extract features
        audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80")
        audio_root.mkdir(exist_ok=True)
        for split in mTEDx.SPLITS:
            print(f"Fetching split {split}...")
            dataset = mTEDx(root.as_posix(), lang, split)
            if args.use_audio_input:
                print("Converting audios...")
                # BUG FIX: mTEDx items are 7-tuples (waveform, sr, src_utt,
                # tgt_utt, spk_id, tgt_lang, utt_id); the previous 6-name
                # unpacking raised ValueError on the first item.
                for waveform, sample_rate, _, _, _, _, utt_id in tqdm(dataset):
                    tgt_sample_rate = 16_000
                    _wavform, _ = convert_waveform(
                        waveform,
                        sample_rate,
                        to_mono=True,
                        to_sample_rate=tgt_sample_rate,
                    )
                    sf.write(
                        (audio_root / f"{utt_id}.flac").as_posix(),
                        _wavform.numpy(),
                        tgt_sample_rate,
                    )
            else:
                print("Extracting log mel filter bank features...")
                for waveform, sample_rate, _, _, _, _, utt_id in tqdm(dataset):
                    extract_fbank_features(
                        waveform, sample_rate, audio_root / f"{utt_id}.npy"
                    )
        # Pack features into ZIP
        zip_path = cur_root / f"{audio_root.name}.zip"
        print("ZIPing audios/features...")
        create_zip(audio_root, zip_path)
        print("Fetching ZIP manifest...")
        audio_paths, audio_lengths = get_zip_manifest(zip_path)
        # Generate TSV manifest
        print("Generating manifest...")
        train_text = []
        for split in mTEDx.SPLITS:
            is_train_split = split.startswith("train")
            manifest = {c: [] for c in MANIFEST_COLUMNS}
            ds = mTEDx(args.data_root, lang, split)
            for _, _, src_utt, tgt_utt, spk_id, tgt_lang, utt_id in tqdm(ds):
                manifest["id"].append(utt_id)
                manifest["audio"].append(audio_paths[utt_id])
                manifest["n_frames"].append(audio_lengths[utt_id])
                manifest["tgt_text"].append(src_utt if args.task == "asr" else tgt_utt)
                manifest["speaker"].append(spk_id)
                manifest["tgt_lang"].append(tgt_lang)
            if is_train_split:
                train_text.extend(manifest["tgt_text"])
            df = pd.DataFrame.from_dict(manifest)
            df = filter_manifest_df(df, is_train_split=is_train_split)
            save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv")
        # Generate vocab
        v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
        spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}"
        with NamedTemporaryFile(mode="w") as f:
            for t in train_text:
                f.write(t + "\n")
            gen_vocab(
                Path(f.name),
                cur_root / spm_filename_prefix,
                args.vocab_type,
                args.vocab_size,
            )
        # Generate config YAML
        if args.use_audio_input:
            gen_config_yaml(
                cur_root,
                spm_filename=spm_filename_prefix + ".model",
                yaml_filename=f"config_{args.task}.yaml",
                specaugment_policy=None,
                extra={"use_audio_input": True},
            )
        else:
            gen_config_yaml(
                cur_root,
                spm_filename=spm_filename_prefix + ".model",
                yaml_filename=f"config_{args.task}.yaml",
                specaugment_policy="lb",
            )
        # Clean up
        shutil.rmtree(audio_root)
def process_joint(args):
    """Build a single joint vocabulary/config across ALL language pairs.

    Requires every pair directory (and its per-pair train manifest from
    ``process``) to exist. Trains one SentencePiece model on the concatenated
    train text, optionally adding <lang:xx> tags, writes one shared config
    YAML, and symlinks each per-pair manifest into the root for joint training.
    """
    cur_root = Path(args.data_root)
    assert all(
        (cur_root / f"{lang}").is_dir() for lang in mTEDx.LANGPAIRS
    ), "do not have downloaded data available for all languages"
    # Generate vocab
    vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
    spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}"
    with NamedTemporaryFile(mode="w") as f:
        for lang in mTEDx.LANGPAIRS:
            tsv_path = cur_root / f"{lang}" / f"train_{args.task}.tsv"
            df = load_df_from_tsv(tsv_path)
            for t in df["tgt_text"]:
                f.write(t + "\n")
        special_symbols = None
        if args.joint:
            # Add tgt_lang tags to dict
            special_symbols = list(
                {f'<lang:{lang.split("-")[1]}>' for lang in mTEDx.LANGPAIRS}
            )
        gen_vocab(
            Path(f.name),
            cur_root / spm_filename_prefix,
            args.vocab_type,
            args.vocab_size,
            special_symbols=special_symbols,
        )
    # Generate config YAML
    gen_config_yaml(
        cur_root,
        spm_filename=spm_filename_prefix + ".model",
        yaml_filename=f"config_{args.task}.yaml",
        specaugment_policy="ld",
        prepend_tgt_lang_tag=(args.joint),
    )
    # Make symbolic links to manifests
    for lang in mTEDx.LANGPAIRS:
        for split in mTEDx.SPLITS:
            src_path = cur_root / f"{lang}" / f"{split}_{args.task}.tsv"
            desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv"
            if not desc_path.is_symlink():
                os.symlink(src_path, desc_path)
def main():
    """Command-line entry point for the mTEDx preprocessing pipeline."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--data-root", "-d", required=True, type=str)
    arg_parser.add_argument(
        "--vocab-type",
        default="unigram",
        required=True,
        type=str,
        choices=["bpe", "unigram", "char"],
    )
    arg_parser.add_argument("--vocab-size", default=8000, type=int)
    arg_parser.add_argument("--task", type=str, choices=["asr", "st"])
    arg_parser.add_argument("--joint", action="store_true", help="")
    arg_parser.add_argument("--use-audio-input", action="store_true")
    parsed_args = arg_parser.parse_args()
    # Joint mode builds one shared vocab/config; otherwise process per pair.
    if parsed_args.joint:
        process_joint(parsed_args)
    else:
        process(parsed_args)
if __name__ == "__main__":
    main()
| 10,404 | 34.03367 | 87 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_to_text/prep_aishell2_data.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from torch.utils.data import Dataset
from typing import Tuple, Union
import torchaudio
import pandas as pd
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
save_df_to_tsv,
)
from tqdm import tqdm
log = logging.getLogger(__name__)
# Define data splits
# NOTE: only "train-error" is currently active; the standard splits are
# commented out below.
SPLITS = [
    "train-error"
    # "train"
    # "test-ios", "test-android",
    # "test-mic", "dev-ios",
    # "dev-android", "dev-mic"
]
# Define the headers of columns
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
# Define special tokens
BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0
PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1
EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2
UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3
def load_aishell2_item(file_id, id2audios, id2trans, id2spk):
    """Load one AISHELL-2 utterance: waveform plus transcript and speaker.

    Args:
        file_id: utterance id used as key into the lookup dicts.
        id2audios: dict mapping utterance id -> audio file path.
        id2trans: dict mapping utterance id -> raw transcript text.
        id2spk: dict mapping utterance id -> speaker id.

    Returns:
        ``(waveform, sample_rate, transcript, speaker_id, file_id)``;
        ``transcript`` is None (with a warning printed) when no transcription
        exists for the utterance.
    """
    speaker_id = id2spk[file_id]
    file_audio = id2audios[file_id]
    waveform, sample_rate = torchaudio.load(file_audio)
    assert sample_rate == 16000, "sample rate is not correct."
    if file_id in id2trans.keys():
        # Normalize: strip whitespace and remove in-sentence spaces.
        transcript = id2trans[file_id]
        transcript = transcript.strip().replace(" ", "")
    else:
        # BUG FIX: this branch previously referenced the undefined name
        # `fileid` (parameter is `file_id`), raising NameError instead of
        # printing the warning.
        print("Translation not found for " + file_id)
        transcript = None
    return (
        waveform,
        sample_rate,
        transcript,
        str(speaker_id),
        str(file_id),
    )
class AISHELL2(Dataset):
    """Create a Dataset for AISHELL2."""
    # Per-split file names inside the resolved data directory.
    txt_filename = "trans.txt"
    audio_scp_filename = "wav.scp"
    speaker_filename = "spk_info.txt"
    def __init__(self, root, split):
        """Resolve the on-disk directory for ``split`` under ``root`` and
        build utterance-id -> (transcript, audio path, speaker) lookups from
        the Kaldi-style trans.txt / wav.scp files."""
        assert split in [
            "train",
            "test-ios",
            "test-android",
            "test-mic",
            "dev-ios",
            "dev-android",
            "dev-mic",
            "train-error",
        ], "data split is invalid."
        root = os.fspath(root)
        # NOTE(review): stray debug print; consider removing.
        print(root)
        # Map the split name onto its recording-device subdirectory.
        if split == "train":
            data_root_dir = os.path.join(root, "iOS", "data")
        elif split == "train-error":
            data_root_dir = os.path.join(root, "iOS", "data_error")
        elif "dev" in split or "test" in split:
            if "dev" in split:
                if "ios" in split:
                    data_root_dir = os.path.join(
                        root, "AISHELL-DEV-TEST-SET", "iOS", "dev"
                    )
                elif "android" in split:
                    data_root_dir = os.path.join(
                        root, "AISHELL-DEV-TEST-SET", "Android", "dev"
                    )
                elif "mic" in split:
                    data_root_dir = os.path.join(
                        root, "AISHELL-DEV-TEST-SET", "Mic", "dev"
                    )
                else:
                    raise ValueError("Invalid options %s" % split)
            else:
                if "ios" in split:
                    data_root_dir = os.path.join(
                        root, "AISHELL-DEV-TEST-SET", "iOS", "test"
                    )
                elif "android" in split:
                    data_root_dir = os.path.join(
                        root, "AISHELL-DEV-TEST-SET", "Android", "test"
                    )
                elif "mic" in split:
                    data_root_dir = os.path.join(
                        root, "AISHELL-DEV-TEST-SET", "Mic", "test"
                    )
                else:
                    raise ValueError("Invalid options %s" % split)
        else:
            raise ValueError("Invalid options %s" % split)
        self.trans_filename = os.path.join(data_root_dir, self.txt_filename)
        self.wav_scp_filename = os.path.join(data_root_dir, self.audio_scp_filename)
        # trans.txt: "<utt_id>\t<transcript>" per line.
        self.id2txt_dict = dict()
        with open(self.trans_filename, "r") as f_trans:
            for line in f_trans:
                uttid, text = line.strip().split("\t", 1)
                self.id2txt_dict[uttid] = text
        # wav.scp: "<utt_id>\t<relative audio path>" per line; the speaker id
        # is taken from the second path component.
        self.id2audios_dict = dict()
        self.id2spk_dict = dict()
        with open(self.wav_scp_filename, "r") as f_audios:
            for line in f_audios:
                uttid, audio_path = line.strip().split("\t", 1)
                spk_id = audio_path.split("/")[1]
                abs_audio_path = os.path.join(data_root_dir, audio_path)
                self.id2audios_dict[uttid] = abs_audio_path
                self.id2spk_dict[uttid] = spk_id
        # Deterministic iteration order over transcribed utterances.
        self._walker = list(self.id2txt_dict.keys())
        self._walker.sort()
    def __getitem__(self, n: int):
        """Load the n-th sample from the dataset.
        Args:
            n (int): The index of the sample to be loaded
        Returns:
            (Tensor, int, str, str, str):
            ``(waveform, sample_rate, transcript, speaker_id, utterance_id)``
        """
        fileid = self._walker[n]
        return load_aishell2_item(
            fileid, self.id2audios_dict, self.id2txt_dict, self.id2spk_dict
        )
    def __len__(self) -> int:
        """Number of transcribed utterances in this split."""
        return len(self._walker)
def process(args):
    """Run the AISHELL-2 preprocessing pipeline: extract fbank features, zip
    them, write per-split TSV manifests (character-tokenized lowercase text),
    build a frequency-sorted character vocabulary from the train text, and
    emit a data-config YAML. The temporary feature dir is removed at the end.
    """
    print("Begin process...")
    input_root = Path(args.input_root).absolute()
    out_root = Path(args.output_root).absolute()
    out_root.mkdir(exist_ok=True)
    # Extract features
    feature_root = out_root / "fbank80"
    feature_root.mkdir(exist_ok=True)
    for split in SPLITS:
        print(f"Fetching split {split}...")
        dataset = AISHELL2(input_root.as_posix(), split=split)
        print("Extracting log mel filter bank features...")
        for wav, sample_rate, _, spk_id, utt_id in tqdm(dataset):
            sample_id = utt_id
            try:
                extract_fbank_features(
                    wav, sample_rate, feature_root / f"{sample_id}.npy"
                )
            except Exception as e:
                # Best effort: log and skip utterances that fail extraction.
                print(e)
                print("Encounter error for %s" % utt_id)
            else:
                # NOTE(review): this `else: continue` is a no-op.
                continue
    # Pack features into ZIP
    zip_path = out_root / "fbank80.zip"
    print("ZIPing features...")
    create_zip(feature_root, zip_path)
    print("Fetching ZIP manifest...")
    audio_paths, audio_lengths = get_zip_manifest(zip_path)
    # Generate TSV manifest
    print("Generating manifest...")
    train_text = []
    for split in SPLITS: # conduct for each data split
        manifest = {c: [] for c in MANIFEST_COLUMNS}
        dataset = AISHELL2(input_root.as_posix(), split=split)
        for _, _, trans, spk_id, utt_id in tqdm(dataset):
            # Skip utterances without transcripts or extracted features.
            if trans is not None and utt_id.strip() in audio_paths.keys():
                # Add items one-by-one
                sample_id = utt_id
                manifest["id"].append(sample_id)
                manifest["audio"].append(audio_paths[sample_id])
                manifest["n_frames"].append(audio_lengths[sample_id])
                manifest["tgt_text"].append(" ".join(list(trans.lower())))
                manifest["speaker"].append(spk_id)
        save_df_to_tsv(pd.DataFrame.from_dict(manifest), out_root / f"{split}.tsv")
        if split.startswith("train"):
            train_text.extend(manifest["tgt_text"])
    # Generate vocab
    vocab_file_path = os.path.join(str(out_root), "vocab.txt")
    if len(train_text) != 0:
        # Count token (character) frequencies over the train text.
        vocab_dict = dict()
        for line in train_text:
            tokens_list = line.strip().split(" ")
            for tok in tokens_list:
                if tok not in vocab_dict:
                    vocab_dict[tok] = 1
                else:
                    vocab_dict[tok] += 1
        # Sort by (frequency, token) descending for a stable vocab order.
        sorted_vocab_dict = {
            sort_k: sort_v
            for sort_k, sort_v in sorted(
                vocab_dict.items(), key=lambda kv: (kv[1], kv[0]), reverse=True
            )
        }
        # Special tokens come first with a dummy frequency of 0.
        f_vocab = open(vocab_file_path, "w")
        f_vocab.write("\t".join([BOS_TOKEN, str(0)]) + "\n")
        f_vocab.write("\t".join([PAD_TOKEN, str(0)]) + "\n")
        f_vocab.write("\t".join([EOS_TOKEN, str(0)]) + "\n")
        f_vocab.write("\t".join([UNK_TOKEN, str(0)]) + "\n")
        for idx, (tok, freq) in enumerate(sorted_vocab_dict.items()):
            f_vocab.write("\t".join([tok, str(freq)]) + "\n")
        f_vocab.close()
    # Generate config YAML
    gen_config_yaml(out_root, vocab_name=vocab_file_path, specaugment_policy="ld")
    # Clean up
    shutil.rmtree(feature_root)
def main():
    """Command-line entry point; defaults target the original author's paths."""
    arg_parser = argparse.ArgumentParser()
    # Input data root directory (where the raw AISHELL-2 corpus lives).
    arg_parser.add_argument(
        "--input-root",
        "-i",
        default="/data/LibriSpeech/mlhan_extra_files/AISHELL2",
        required=False,
        type=str,
    )
    # Output root directory for features, manifests, vocab and config.
    arg_parser.add_argument(
        "--output-root",
        "-o",
        default="/workspace/fairseq-uni/examples/speech_to_text/egs/aishell2/data/train_error",
        required=False,
        type=str,
    )
    # Vocabulary type (only "char" is actually used by process()).
    arg_parser.add_argument(
        "--vocab-type",
        default="char",
        required=False,
        type=str,
        choices=["bpe", "unigram", "char"],
    )
    process(arg_parser.parse_args())
if __name__ == "__main__":
    main()
| 9,428 | 31.513793 | 95 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_to_text/prep_librispeech_data.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
import pandas as pd
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
save_df_to_tsv,
)
from torchaudio.datasets import LIBRISPEECH
from tqdm import tqdm
log = logging.getLogger(__name__)
# Define data splits
SPLITS = [
    "train-clean-100",
    "train-clean-360",
    "train-other-500",
    "dev-clean",
    "dev-other",
    "test-clean",
    "test-other",
]
# Define the headers of columns
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
def process(args):
    """Run the LibriSpeech preprocessing pipeline: extract fbank features,
    zip them, write per-split TSV manifests, train a SentencePiece vocabulary
    on the train text and emit a data-config YAML. Expects the corpus to
    already be present under ``args.output_root`` (download=False).
    """
    out_root = Path(args.output_root).absolute()
    out_root.mkdir(exist_ok=True)
    # Extract features
    feature_root = out_root / "fbank80"
    feature_root.mkdir(exist_ok=True)
    for split in SPLITS:
        print(f"Fetching split {split}...")
        dataset = LIBRISPEECH(out_root.as_posix(), url=split, download=False)
        print("Extracting log mel filter bank features...")
        for wav, sample_rate, _, spk_id, chapter_no, utt_no in tqdm(dataset):
            sample_id = f"{spk_id}-{chapter_no}-{utt_no}"
            extract_fbank_features(wav, sample_rate, feature_root / f"{sample_id}.npy")
    # Pack features into ZIP
    zip_path = out_root / "fbank80.zip"
    print("ZIPing features...")
    create_zip(feature_root, zip_path)
    print("Fetching ZIP manifest...")
    audio_paths, audio_lengths = get_zip_manifest(zip_path)
    # Generate TSV manifest
    print("Generating manifest...")
    train_text = []
    for split in SPLITS: # conduct for each data split
        manifest = {c: [] for c in MANIFEST_COLUMNS}
        dataset = LIBRISPEECH(out_root.as_posix(), url=split)
        for _, _, utt, spk_id, chapter_no, utt_no in tqdm(dataset):
            # Add items one-by-one
            sample_id = f"{spk_id}-{chapter_no}-{utt_no}"
            manifest["id"].append(sample_id)
            manifest["audio"].append(audio_paths[sample_id])
            manifest["n_frames"].append(audio_lengths[sample_id])
            manifest["tgt_text"].append(utt.lower())
            manifest["speaker"].append(spk_id)
        save_df_to_tsv(pd.DataFrame.from_dict(manifest), out_root / f"{split}.tsv")
        if split.startswith("train"):
            train_text.extend(manifest["tgt_text"])
    # Generate vocab
    vocab_size = "" if args.vocab_type == "char" else str(args.vocab_size)
    spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size}"
    with NamedTemporaryFile(mode="w") as f:
        for t in train_text:
            f.write(t + "\n")
        # NOTE(review): writes may still be buffered when gen_vocab reads
        # f.name; an explicit f.flush() here would be safer — confirm.
        gen_vocab(
            Path(f.name),
            out_root / spm_filename_prefix,
            args.vocab_type,
            args.vocab_size,
        )
    # Generate config YAML
    gen_config_yaml(
        out_root, spm_filename=spm_filename_prefix + ".model", specaugment_policy="ld"
    )
    # Clean up
    shutil.rmtree(feature_root)
def main():
    """Command-line entry point for the LibriSpeech preprocessing pipeline."""
    arg_parser = argparse.ArgumentParser()
    # Root directory where the corpus lives and outputs are written.
    arg_parser.add_argument("--output-root", "-o", required=True, type=str)
    # Subword model type for the target-text vocabulary.
    arg_parser.add_argument(
        "--vocab-type",
        default="unigram",
        required=True,
        type=str,
        choices=["bpe", "unigram", "char"],
    )
    # Vocabulary size (ignored for char vocabularies).
    arg_parser.add_argument("--vocab-size", default=10000, type=int)
    process(arg_parser.parse_args())
if __name__ == "__main__":
    main()
| 3,811 | 29.253968 | 87 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_to_text/data_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
from pathlib import Path
import zipfile
from functools import reduce
from multiprocessing import cpu_count
from typing import Any, Dict, List, Optional, Union
import io
import numpy as np
import pandas as pd
import sentencepiece as sp
from fairseq.data.audio.audio_utils import (
convert_waveform,
_get_kaldi_fbank,
_get_torchaudio_fbank,
is_npy_data,
is_sf_audio_data,
)
import torch
import soundfile as sf
from tqdm import tqdm
# Special tokens with their fixed ids; these match the SentencePiece trainer
# flags used in gen_vocab() (bos=0, pad=1, eos=2, unk=3).
UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3
BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0
EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2
PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1
def gen_vocab(
    input_path: Path,
    output_path_prefix: Path,
    model_type="bpe",
    vocab_size=1000,
    special_symbols: Optional[List[str]] = None,
):
    """Train a SentencePiece model on ``input_path`` (one sentence per line)
    and export a fairseq-style dictionary.

    Writes ``<prefix>.model``/``<prefix>.vocab`` via SentencePiece plus a
    ``<prefix>.txt`` fairseq dict listing every non-special piece with a
    dummy frequency of 1, in piece-id order.
    """
    # Train SentencePiece Model
    arguments = [
        f"--input={input_path.as_posix()}",
        f"--model_prefix={output_path_prefix.as_posix()}",
        f"--model_type={model_type}",
        f"--vocab_size={vocab_size}",
        "--character_coverage=1.0",
        f"--num_threads={cpu_count()}",
        f"--unk_id={UNK_TOKEN_ID}",
        f"--bos_id={BOS_TOKEN_ID}",
        f"--eos_id={EOS_TOKEN_ID}",
        f"--pad_id={PAD_TOKEN_ID}",
    ]
    if special_symbols is not None:
        _special_symbols = ",".join(special_symbols)
        arguments.append(f"--user_defined_symbols={_special_symbols}")
    sp.SentencePieceTrainer.Train(" ".join(arguments))
    # Export fairseq dictionary
    spm = sp.SentencePieceProcessor()
    spm.Load(output_path_prefix.as_posix() + ".model")
    vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())}
    # Sanity-check the special-token ids match our fixed layout.
    assert (
        vocab.get(UNK_TOKEN_ID) == UNK_TOKEN
        and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN
        and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN
        and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN
    )
    # Fairseq adds special tokens itself, so exclude them from the dict file.
    vocab = {
        i: s
        for i, s in vocab.items()
        if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN}
    }
    with open(output_path_prefix.as_posix() + ".txt", "w") as f_out:
        for _, s in sorted(vocab.items(), key=lambda x: x[0]):
            f_out.write(f"{s} 1\n")
def extract_fbank_features(
    waveform: torch.FloatTensor,
    sample_rate: int,
    output_path: Optional[Path] = None,
    n_mel_bins: int = 80,
    overwrite: bool = False,
):
    """Compute log-mel filterbank features for one utterance.

    Args:
        waveform: float waveform tensor of shape (channels, n_samples).
        sample_rate: sampling rate of ``waveform`` in Hz.
        output_path: optional .npy path; when it exists and ``overwrite`` is
            False, extraction is skipped (and None is returned).
        n_mel_bins: number of mel bins (default 80).
        overwrite: recompute even if ``output_path`` already exists.

    Returns:
        (n_frames, n_mel_bins) feature array, or None when skipped.
    """
    if output_path is not None and output_path.is_file() and not overwrite:
        return
    # BUG FIX: convert_waveform returns a (waveform, sample_rate) tuple (see
    # its other call sites); previously the whole tuple was assigned, so the
    # 2**15 scaling below operated on a tuple instead of the mono waveform.
    _waveform, _ = convert_waveform(waveform, sample_rate, to_mono=True)
    # Kaldi compliance: 16-bit signed integers
    _waveform = _waveform * (2**15)
    _waveform = _waveform[0].numpy()
    # Prefer pyKaldi's fbank; fall back to torchaudio's implementation.
    features = _get_kaldi_fbank(_waveform, sample_rate, n_mel_bins)
    if features is None:
        features = _get_torchaudio_fbank(_waveform, sample_rate, n_mel_bins)
    if features is None:
        raise ImportError(
            "Please install pyKaldi or torchaudio to enable fbank feature extraction"
        )
    if output_path is not None:
        np.save(output_path.as_posix(), features)
    return features
def create_zip(data_root: Path, zip_path: Path):
    """Pack every .npy feature file under ``data_root`` into one uncompressed
    (ZIP_STORED) archive at ``zip_path``, flattening to base filenames."""
    npy_files = list(data_root.glob("*.npy"))
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_STORED) as zip_file:
        for npy_file in tqdm(npy_files):
            zip_file.write(npy_file, arcname=npy_file.name)
def get_zip_manifest(zip_path: Path, zip_root: Optional[Path] = None, is_audio=False):
    """Index a feature/audio ZIP: return ``{utt_id: "zip:offset:size"}`` byte
    ranges and ``{utt_id: length}`` (frames for audio, rows for .npy),
    validating each entry's payload along the way."""
    _zip_path = Path.joinpath(zip_root or Path(""), zip_path)
    with zipfile.ZipFile(_zip_path, mode="r") as f:
        info = f.infolist()
    paths, lengths = {}, {}
    for i in tqdm(info):
        utt_id = Path(i.filename).stem
        # 30 bytes is the fixed-size ZIP local file header.
        # NOTE(review): this assumes entries have no "extra field" in the
        # local header (true for archives written by create_zip) — confirm
        # before indexing ZIPs produced elsewhere.
        offset, file_size = i.header_offset + 30 + len(i.filename), i.file_size
        paths[utt_id] = f"{zip_path.as_posix()}:{offset}:{file_size}"
        with open(_zip_path, "rb") as f:
            f.seek(offset)
            byte_data = f.read(file_size)
            assert len(byte_data) > 1
            # Sanity-check the payload magic bytes match the expected format.
            if is_audio:
                assert is_sf_audio_data(byte_data), i
            else:
                assert is_npy_data(byte_data), i
            byte_data_fp = io.BytesIO(byte_data)
            if is_audio:
                lengths[utt_id] = sf.info(byte_data_fp).frames
            else:
                lengths[utt_id] = np.load(byte_data_fp).shape[0]
    return paths, lengths
def gen_config_yaml(
    manifest_root: Path,
    spm_filename: Optional[str] = None,
    vocab_name: Optional[str] = None,
    yaml_filename: str = "config.yaml",
    specaugment_policy: Optional[str] = "lb",
    prepend_tgt_lang_tag: bool = False,
    sampling_alpha: Optional[float] = None,
    input_channels: Optional[int] = 1,
    input_feat_per_channel: Optional[int] = 80,
    audio_root: str = "",
    cmvn_type: str = "utterance",
    gcmvn_path: Optional[Path] = None,
    extra=None,
):
    """Write a speech-to-text data-config YAML under ``manifest_root``.

    Exactly one of ``spm_filename`` (SentencePiece model; its dict name is
    derived by replacing ``.model`` with ``.txt``) or ``vocab_name`` must be
    given. ``specaugment_policy`` selects one of the preset policies
    ("lb"/"ld"/"sm"/"ss") or None to disable; ``cmvn_type`` is "utterance" or
    "global" (the latter requires ``gcmvn_path``).
    """
    manifest_root = manifest_root.absolute()
    writer = S2TDataConfigWriter(manifest_root / yaml_filename)
    assert spm_filename is not None or vocab_name is not None
    vocab_name = (
        spm_filename.replace(".model", ".txt") if vocab_name is None else vocab_name
    )
    writer.set_vocab_filename(vocab_name)
    if input_channels is not None:
        writer.set_input_channels(input_channels)
    if input_feat_per_channel is not None:
        writer.set_input_feat_per_channel(input_feat_per_channel)
    # Map policy name -> writer method; unknown/None policies are ignored.
    specaugment_setters = {
        "lb": writer.set_specaugment_lb_policy,
        "ld": writer.set_specaugment_ld_policy,
        "sm": writer.set_specaugment_sm_policy,
        "ss": writer.set_specaugment_ss_policy,
    }
    specaugment_setter = specaugment_setters.get(specaugment_policy, None)
    if specaugment_setter is not None:
        specaugment_setter()
    if spm_filename is not None:
        writer.set_bpe_tokenizer(
            {
                "bpe": "sentencepiece",
                "sentencepiece_model": (manifest_root / spm_filename).as_posix(),
            }
        )
    if prepend_tgt_lang_tag:
        writer.set_prepend_tgt_lang_tag(True)
    if sampling_alpha is not None:
        writer.set_sampling_alpha(sampling_alpha)
    if cmvn_type not in ["global", "utterance"]:
        raise NotImplementedError
    # SpecAugment only applies to the train subset; CMVN applies everywhere.
    if specaugment_policy is not None:
        writer.set_feature_transforms("_train", [f"{cmvn_type}_cmvn", "specaugment"])
    writer.set_feature_transforms("*", [f"{cmvn_type}_cmvn"])
    if cmvn_type == "global":
        if gcmvn_path is None:
            raise ValueError("Please provide path of global cmvn file.")
        else:
            writer.set_global_cmvn(gcmvn_path.as_posix())
    if len(audio_root) > 0:
        writer.set_audio_root(audio_root)
    if extra is not None:
        writer.set_extra(extra)
    writer.flush()
def load_df_from_tsv(path: Union[str, Path]) -> pd.DataFrame:
    """Read a tab-separated manifest into a DataFrame.

    Quoting is disabled so the tab is the only delimiter; backslash escapes
    literal tabs/newlines, and NA detection is off so empty fields stay
    empty strings instead of becoming NaN.
    """
    if not isinstance(path, str):
        path = path.as_posix()
    return pd.read_csv(
        path,
        sep="\t",
        header=0,
        encoding="utf-8",
        escapechar="\\",
        quoting=csv.QUOTE_NONE,
        na_filter=False,
    )
def save_df_to_tsv(dataframe, path: Union[str, Path]):
    """Write a DataFrame as a headered, index-free, unquoted TSV.

    Mirror of ``load_df_from_tsv``: tab-separated, backslash-escaped,
    quoting disabled.
    """
    if not isinstance(path, str):
        path = path.as_posix()
    dataframe.to_csv(
        path,
        sep="\t",
        header=True,
        index=False,
        encoding="utf-8",
        escapechar="\\",
        quoting=csv.QUOTE_NONE,
    )
def load_tsv_to_dicts(path: Union[str, Path]) -> List[dict]:
    """Read a headered TSV into a list of per-row dicts (quoting disabled)."""
    with open(path, "r") as fin:
        parsed = csv.DictReader(
            fin,
            delimiter="\t",
            quotechar=None,
            doublequote=False,
            lineterminator="\n",
            quoting=csv.QUOTE_NONE,
        )
        return [dict(row) for row in parsed]
def filter_manifest_df(
    df, is_train_split=False, extra_filters=None, min_n_frames=5, max_n_frames=3000
):
    """Drop degenerate manifest rows and report per-criterion counts.

    Rows are removed when the audio path or target text is empty, or the
    clip is shorter than ``min_n_frames``; train splits additionally drop
    clips longer than ``max_n_frames``. ``extra_filters`` maps a label to
    a boolean Series of rows to remove.
    """
    criteria = {
        "no speech": df["audio"] == "",
        f"short speech (<{min_n_frames} frames)": df["n_frames"] < min_n_frames,
        "empty sentence": df["tgt_text"] == "",
    }
    if is_train_split:
        criteria[f"long speech (>{max_n_frames} frames)"] = df["n_frames"] > max_n_frames
    if extra_filters is not None:
        criteria.update(extra_filters)

    # OR all removal masks together.
    invalid = None
    for mask in criteria.values():
        invalid = mask if invalid is None else (invalid | mask)
    valid = ~invalid

    counts = ", ".join(f"{name}: {mask.sum()}" for name, mask in criteria.items())
    print("| " + counts + f", total {invalid.sum()} filtered, {valid.sum()} remained.")
    return df[valid]
def cal_gcmvn_stats(features_list):
    """Compute global CMVN statistics over a list of feature matrices.

    Uses the E[x^2] - E[x]^2 identity per feature dimension; the variance
    is floored at 1e-8 so the std never collapses to zero.

    Args:
        features_list: list of (frames, dims) arrays, concatenated in time.

    Returns:
        dict with float32 "mean" and "std" vectors of length dims.
    """
    features = np.concatenate(features_list)
    square_sums = (features**2).sum(axis=0)
    mean = features.mean(axis=0)
    # (The original also subtracted the mean from `features` here, but the
    # result was never used afterwards — dead code, removed.)
    var = square_sums / features.shape[0] - mean**2
    std = np.sqrt(np.maximum(var, 1e-8))
    return {"mean": mean.astype("float32"), "std": std.astype("float32")}
class S2TDataConfigWriter(object):
    """Builder for speech-to-text data-config YAML files.

    Each ``set_*`` method stores one entry in ``self.config``; ``flush``
    serializes the accumulated dict to ``yaml_path``.
    """

    DEFAULT_VOCAB_FILENAME = "dict.txt"
    DEFAULT_INPUT_FEAT_PER_CHANNEL = 80
    DEFAULT_INPUT_CHANNELS = 1

    def __init__(self, yaml_path: Path):
        try:
            import yaml
        except ImportError:
            print("Please install PyYAML for S2T data config YAML files")
        self.yaml = yaml
        self.yaml_path = yaml_path
        self.config = {}

    def flush(self):
        """Write the accumulated config out as YAML."""
        with open(self.yaml_path, "w") as fout:
            self.yaml.dump(self.config, fout)

    def set_audio_root(self, audio_root=""):
        self.config["audio_root"] = audio_root

    def set_vocab_filename(self, vocab_filename: str = "dict.txt"):
        self.config["vocab_filename"] = vocab_filename

    def set_specaugment(
        self,
        time_wrap_w: int,
        freq_mask_n: int,
        freq_mask_f: int,
        time_mask_n: int,
        time_mask_t: int,
        time_mask_p: float,
    ):
        # Config keys follow the SpecAugment paper's capitalized names.
        keys = (
            "time_wrap_W",
            "freq_mask_N",
            "freq_mask_F",
            "time_mask_N",
            "time_mask_T",
            "time_mask_p",
        )
        values = (
            time_wrap_w,
            freq_mask_n,
            freq_mask_f,
            time_mask_n,
            time_mask_t,
            time_mask_p,
        )
        self.config["specaugment"] = dict(zip(keys, values))

    def set_specaugment_lb_policy(self):
        """LibriSpeech-basic (LB) SpecAugment policy."""
        self.set_specaugment(
            time_wrap_w=0, freq_mask_n=1, freq_mask_f=27,
            time_mask_n=1, time_mask_t=100, time_mask_p=1.0,
        )

    def set_specaugment_ld_policy(self):
        """LibriSpeech-double (LD) SpecAugment policy."""
        self.set_specaugment(
            time_wrap_w=0, freq_mask_n=2, freq_mask_f=27,
            time_mask_n=2, time_mask_t=100, time_mask_p=1.0,
        )

    def set_specaugment_sm_policy(self):
        """Switchboard-mild (SM) SpecAugment policy."""
        self.set_specaugment(
            time_wrap_w=0, freq_mask_n=2, freq_mask_f=15,
            time_mask_n=2, time_mask_t=70, time_mask_p=0.2,
        )

    def set_specaugment_ss_policy(self):
        """Switchboard-strong (SS) SpecAugment policy."""
        self.set_specaugment(
            time_wrap_w=0, freq_mask_n=2, freq_mask_f=27,
            time_mask_n=2, time_mask_t=70, time_mask_p=0.2,
        )

    def set_input_channels(self, input_channels: int = 1):
        self.config["input_channels"] = input_channels

    def set_input_feat_per_channel(self, input_feat_per_channel: int = 80):
        self.config["input_feat_per_channel"] = input_feat_per_channel

    def set_bpe_tokenizer(self, bpe_tokenizer: Dict[str, Any]):
        self.config["bpe_tokenizer"] = bpe_tokenizer

    def set_global_cmvn(self, stats_npz_path: str):
        self.config["global_cmvn"] = {"stats_npz_path": stats_npz_path}

    def set_feature_transforms(self, split: str, transforms: List[str]):
        self.config.setdefault("transforms", {})[split] = transforms

    def set_prepend_tgt_lang_tag(self, flag: bool = True):
        self.config["prepend_tgt_lang_tag"] = flag

    def set_sampling_alpha(self, sampling_alpha: float = 1.0):
        self.config["sampling_alpha"] = sampling_alpha

    def set_extra(self, data):
        # Merge arbitrary extra key/value pairs into the config.
        self.config.update(data)
| 12,224 | 30.670984 | 88 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_to_text/prep_mustc_data.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
from pathlib import Path
import shutil
from itertools import groupby
from tempfile import NamedTemporaryFile
from typing import Tuple
import numpy as np
import pandas as pd
import soundfile as sf
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
cal_gcmvn_stats,
)
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from fairseq.data.audio.audio_utils import get_waveform, convert_waveform
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
class MUSTC(Dataset):
    """
    Create a Dataset for MuST-C. Each item is a tuple of the form:
    waveform, sample_rate, source utterance, target utterance, speaker_id,
    utterance_id
    """
    # Standard MuST-C split names and the eight v1.0 target languages.
    SPLITS = ["train", "dev", "tst-COMMON", "tst-HE"]
    LANGUAGES = ["de", "es", "fr", "it", "nl", "pt", "ro", "ru"]
    def __init__(self, root: str, lang: str, split: str) -> None:
        assert split in self.SPLITS and lang in self.LANGUAGES
        # Expected layout: <root>/en-<lang>/data/<split>/{wav,txt}
        _root = Path(root) / f"en-{lang}" / "data" / split
        wav_root, txt_root = _root / "wav", _root / "txt"
        assert _root.is_dir() and wav_root.is_dir() and txt_root.is_dir()
        # Load audio segments
        try:
            import yaml
        except ImportError:
            print("Please install PyYAML to load the MuST-C YAML files")
        # BaseLoader keeps all scalar values (offset/duration) as strings.
        with open(txt_root / f"{split}.yaml") as f:
            segments = yaml.load(f, Loader=yaml.BaseLoader)
        # Load source and target utterances
        # Source ("en") and target (lang) text files are line-aligned with
        # the YAML segment list; attach each line to its segment dict.
        for _lang in ["en", lang]:
            with open(txt_root / f"{split}.{_lang}") as f:
                utterances = [r.strip() for r in f]
            assert len(segments) == len(utterances)
            for i, u in enumerate(utterances):
                segments[i][_lang] = u
        # Gather info
        # Per-segment tuples: (wav path, start sample, num samples, rate,
        # source text, target text, speaker id, utterance id).
        self.data = []
        for wav_filename, _seg_group in groupby(segments, lambda x: x["wav"]):
            wav_path = wav_root / wav_filename
            sample_rate = sf.info(wav_path.as_posix()).samplerate
            # NOTE(review): with BaseLoader "offset" is a *string*, so this
            # sort is lexicographic, not numeric — confirm intended.
            seg_group = sorted(_seg_group, key=lambda x: x["offset"])
            for i, segment in enumerate(seg_group):
                # YAML offsets/durations are in seconds; convert to samples.
                offset = int(float(segment["offset"]) * sample_rate)
                n_frames = int(float(segment["duration"]) * sample_rate)
                _id = f"{wav_path.stem}_{i}"
                self.data.append(
                    (
                        wav_path.as_posix(),
                        offset,
                        n_frames,
                        sample_rate,
                        segment["en"],
                        segment[lang],
                        segment["speaker_id"],
                        _id,
                    )
                )
    def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str, str, str, str]:
        # Decode only the [offset, offset + n_frames) slice of the wav file.
        wav_path, offset, n_frames, sr, src_utt, tgt_utt, spk_id, utt_id = self.data[n]
        waveform, _ = get_waveform(wav_path, frames=n_frames, start=offset)
        waveform = torch.from_numpy(waveform)
        return waveform, sr, src_utt, tgt_utt, spk_id, utt_id
    def __len__(self) -> int:
        return len(self.data)
def process(args):
    """Per-language MuST-C preprocessing.

    For each en-<lang> pair found under ``args.data_root``: extract fbank
    features (or convert raw audio to 16 kHz mono FLAC), pack them into a
    ZIP, write per-split TSV manifests, train a SentencePiece vocabulary on
    the train-split text, and emit the data-config YAML.
    """
    root = Path(args.data_root).absolute()
    for lang in MUSTC.LANGUAGES:
        cur_root = root / f"en-{lang}"
        if not cur_root.is_dir():
            print(f"{cur_root.as_posix()} does not exist. Skipped.")
            continue
        # Extract features
        audio_root = cur_root / ("flac" if args.use_audio_input else "fbank80")
        audio_root.mkdir(exist_ok=True)
        for split in MUSTC.SPLITS:
            print(f"Fetching split {split}...")
            dataset = MUSTC(root.as_posix(), lang, split)
            if args.use_audio_input:
                # Raw-audio mode: store mono 16 kHz FLAC instead of fbank.
                print("Converting audios...")
                for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset):
                    tgt_sample_rate = 16_000
                    _wavform, _ = convert_waveform(
                        waveform,
                        sample_rate,
                        to_mono=True,
                        to_sample_rate=tgt_sample_rate,
                    )
                    sf.write(
                        (audio_root / f"{utt_id}.flac").as_posix(),
                        _wavform.numpy(),
                        tgt_sample_rate,
                    )
            else:
                print("Extracting log mel filter bank features...")
                gcmvn_feature_list = []
                if split == "train" and args.cmvn_type == "global":
                    print("And estimating cepstral mean and variance stats...")
                for waveform, sample_rate, _, _, _, utt_id in tqdm(dataset):
                    features = extract_fbank_features(
                        waveform, sample_rate, audio_root / f"{utt_id}.npy"
                    )
                    if split == "train" and args.cmvn_type == "global":
                        # Cap the utterances used for the global estimate.
                        if len(gcmvn_feature_list) < args.gcmvn_max_num:
                            gcmvn_feature_list.append(features)
                if split == "train" and args.cmvn_type == "global":
                    # Estimate and save cmv
                    stats = cal_gcmvn_stats(gcmvn_feature_list)
                    with open(cur_root / "gcmvn.npz", "wb") as f:
                        np.savez(f, mean=stats["mean"], std=stats["std"])
        # Pack features into ZIP
        zip_path = cur_root / f"{audio_root.name}.zip"
        print("ZIPing audios/features...")
        create_zip(audio_root, zip_path)
        print("Fetching ZIP manifest...")
        audio_paths, audio_lengths = get_zip_manifest(zip_path)
        # Generate TSV manifest
        print("Generating manifest...")
        train_text = []
        for split in MUSTC.SPLITS:
            is_train_split = split.startswith("train")
            manifest = {c: [] for c in MANIFEST_COLUMNS}
            dataset = MUSTC(args.data_root, lang, split)
            for _, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
                manifest["id"].append(utt_id)
                manifest["audio"].append(audio_paths[utt_id])
                manifest["n_frames"].append(audio_lengths[utt_id])
                # ASR targets are the English source; ST targets are <lang>.
                manifest["tgt_text"].append(src_utt if args.task == "asr" else tgt_utt)
                manifest["speaker"].append(speaker_id)
            if is_train_split:
                train_text.extend(manifest["tgt_text"])
            df = pd.DataFrame.from_dict(manifest)
            df = filter_manifest_df(df, is_train_split=is_train_split)
            save_df_to_tsv(df, cur_root / f"{split}_{args.task}.tsv")
        # Generate vocab
        v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
        spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}"
        with NamedTemporaryFile(mode="w") as f:
            for t in train_text:
                f.write(t + "\n")
            # BUGFIX: flush buffered text before SentencePiece re-opens
            # f.name, otherwise the tail of the training data can be
            # silently lost.
            f.flush()
            gen_vocab(
                Path(f.name),
                cur_root / spm_filename_prefix,
                args.vocab_type,
                args.vocab_size,
            )
        # Generate config YAML
        if args.use_audio_input:
            gen_config_yaml(
                cur_root,
                spm_filename=spm_filename_prefix + ".model",
                yaml_filename=f"config_{args.task}.yaml",
                specaugment_policy=None,
                extra={"use_audio_input": True},
            )
        else:
            gen_config_yaml(
                cur_root,
                spm_filename=spm_filename_prefix + ".model",
                yaml_filename=f"config_{args.task}.yaml",
                specaugment_policy="lb",
                cmvn_type=args.cmvn_type,
                gcmvn_path=(
                    cur_root / "gcmvn.npz" if args.cmvn_type == "global" else None
                ),
            )
        # Clean up
        shutil.rmtree(audio_root)
def process_joint(args):
    """Build a joint (all-eight-languages) vocabulary, config YAML, and
    manifest symlinks.

    Requires the per-language manifests produced by ``process`` to exist
    under ``<data_root>/en-<lang>/``.
    """
    cur_root = Path(args.data_root)
    assert all(
        (cur_root / f"en-{lang}").is_dir() for lang in MUSTC.LANGUAGES
    ), "do not have downloaded data available for all 8 languages"
    # Generate vocab
    vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
    spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{args.task}"
    with NamedTemporaryFile(mode="w") as f:
        for lang in MUSTC.LANGUAGES:
            tsv_path = cur_root / f"en-{lang}" / f"train_{args.task}.tsv"
            df = load_df_from_tsv(tsv_path)
            for t in df["tgt_text"]:
                f.write(t + "\n")
        # BUGFIX: flush buffered text before SentencePiece re-opens f.name,
        # otherwise the tail of the training data can be silently lost.
        f.flush()
        special_symbols = None
        if args.task == "st":
            # One target-language tag per language for multilingual ST.
            special_symbols = [f"<lang:{lang}>" for lang in MUSTC.LANGUAGES]
        gen_vocab(
            Path(f.name),
            cur_root / spm_filename_prefix,
            args.vocab_type,
            args.vocab_size,
            special_symbols=special_symbols,
        )
    # Generate config YAML
    gen_config_yaml(
        cur_root,
        spm_filename=spm_filename_prefix + ".model",
        yaml_filename=f"config_{args.task}.yaml",
        specaugment_policy="ld",
        prepend_tgt_lang_tag=(args.task == "st"),
    )
    # Make symbolic links to manifests
    for lang in MUSTC.LANGUAGES:
        for split in MUSTC.SPLITS:
            src_path = cur_root / f"en-{lang}" / f"{split}_{args.task}.tsv"
            desc_path = cur_root / f"{split}_{lang}_{args.task}.tsv"
            if not desc_path.is_symlink():
                os.symlink(src_path, desc_path)
def main():
    """CLI entry point: preprocess MuST-C per language, or jointly with
    ``--joint``."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-root", "-d", required=True, type=str)
    parser.add_argument(
        "--vocab-type",
        default="unigram",  # NOTE: redundant while required=True; kept for parity
        required=True,
        type=str,
        choices=["bpe", "unigram", "char"],
    )  # stray trailing comma removed — it wrapped the call result in a no-op tuple
    parser.add_argument("--vocab-size", default=8000, type=int)
    parser.add_argument("--task", type=str, choices=["asr", "st"])
    parser.add_argument("--joint", action="store_true", help="")
    parser.add_argument(
        "--cmvn-type",
        default="utterance",
        choices=["global", "utterance"],
        help="The type of cepstral mean and variance normalization",
    )
    parser.add_argument(
        "--gcmvn-max-num",
        default=150000,
        type=int,
        help="Maximum number of sentences to use to estimate global mean and "
        "variance",
    )
    parser.add_argument("--use-audio-input", action="store_true")
    args = parser.parse_args()
    if args.joint:
        process_joint(args)
    else:
        process(args)
if __name__ == "__main__":
main()
| 11,015 | 36.726027 | 87 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_to_text/prep_aishell1_data.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from torch.utils.data import Dataset
from typing import Tuple, Union
import torchaudio
import pandas as pd
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
save_df_to_tsv,
)
from tqdm import tqdm
log = logging.getLogger(__name__)
# Define data splits
SPLITS = ["dev", "test", "train_sp"]
# SPLITS = ["dev"]
# Define the headers of columns
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
# Define special tokens
BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0
PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1
EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2
UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3
def load_aishell1_item(fileid: str, path: str, ext_audio: str, id2txt_dict):
    """Load one AISHELL-1 sample.

    Returns ``(waveform, sample_rate, transcript, speaker_id, utterance_id)``;
    the transcript is None (with a warning printed) when the utterance id has
    no entry in ``id2txt_dict``.
    """
    # Characters 6..10 of the utterance id encode the speaker id.
    speaker_id = fileid[6:11]
    # Audio lives at <path>/<speaker_id>/<fileid><ext_audio>.
    audio_path = os.path.join(path, speaker_id, fileid + ext_audio)
    waveform, sample_rate = torchaudio.load(audio_path)
    transcript = id2txt_dict.get(fileid)
    if transcript is None:
        print("Translation not found for " + fileid)
    else:
        # Drop inter-character spaces from the raw transcript.
        transcript = transcript.strip().replace(" ", "")
    return (
        waveform,
        sample_rate,
        transcript,
        str(speaker_id),
        str(fileid),
    )
def load_speed_perturbated_aishell1_item(fileid: str, id2txt_dict, id2filedir_dict):
    """Load one (possibly speed-perturbed) AISHELL-1 sample.

    Speed-perturbed ids look like ``sp<factor>-<original_id>``; the speaker
    id is taken from chars 6..10 of the original utterance id either way.
    """
    base_id = fileid.split("-", 1)[-1] if fileid.startswith("sp") else fileid
    speaker_id = base_id[6:11]
    # Audio path was registered at dataset-construction time.
    waveform, sample_rate = torchaudio.load(id2filedir_dict[fileid])
    transcript = id2txt_dict.get(fileid)
    if transcript is None:
        print("Translation not found for " + fileid)
    else:
        # Drop inter-character spaces from the raw transcript.
        transcript = transcript.strip().replace(" ", "")
    return (
        waveform,
        sample_rate,
        transcript,
        str(speaker_id),
        str(fileid),
    )
class AISHELL1(Dataset):
    """Create a Dataset for AISHELL-1.
    Args:
        root (str or Path): Path to the directory containing the
            ``AISHELL1/data_aishell`` tree.
        split (str): One of "train", "dev", "test".
    """
    # Corpus transcript file: one "<uttid> <space-separated text>" per line.
    _txt_file_name = "aishell_transcript_v0.8.txt"
    _ext_audio = ".wav"
    FOLDER_IN_ARCHIVE = "AISHELL1/data_aishell/wav"
    TRANSCROPT_IN_ARCHIVE = "AISHELL1/data_aishell/transcript"
    def __init__(self, root, split):
        # NOTE(review): an invalid split leaves `basename` unassigned and
        # fails later with NameError — consider raising ValueError instead.
        if split in [
            "train",
            "dev",
            "test",
        ]:
            print("Valid data split detected.")
            basename = split
        root = os.fspath(root)
        folder_in_archive = os.path.join(self.FOLDER_IN_ARCHIVE, basename)
        self._path = os.path.join(root, folder_in_archive)  # Obtain target wav path
        # One utterance id per wav under <split>/<speaker>/<uttid>.wav.
        self._walker = sorted(
            str(p.stem) for p in Path(self._path).glob("*/*" + self._ext_audio)
        )  # Traverse all samples
        self._txt_file_path = os.path.join(
            root, self.TRANSCROPT_IN_ARCHIVE, self._txt_file_name
        )
        # uttid -> raw transcript (spaces are stripped later at load time).
        self._id2txt_dict = dict()
        with open(self._txt_file_path, "r") as ft:
            for line in ft:
                uttid, text = line.strip().split(" ", 1)
                self._id2txt_dict[uttid] = text
    def __getitem__(self, n: int):
        """Load the n-th sample from the dataset.
        Args:
            n (int): The index of the sample to be loaded
        Returns:
            (Tensor, int, str, str, str):
            ``(waveform, sample_rate, transcript, speaker_id, utterance_id)``
        """
        fileid = self._walker[n]
        return load_aishell1_item(
            fileid, self._path, self._ext_audio, self._id2txt_dict
        )
    def __len__(self) -> int:
        return len(self._walker)
class SpeedPerturbatedAISHELL1(Dataset):
    """Dataset over a Kaldi/ESPnet-style dump of (optionally speed-perturbed)
    AISHELL-1.

    Utterance paths come from ``wav.scp`` and transcripts from ``text``
    under ``<root>/dump/raw/<split>``.

    Args:
        root (str or Path): directory containing the ``dump/raw`` tree.
        split (str): one of "dev", "test", "train_sp".
    """
    _txt_file_name = "text"
    _scp_file_name = "wav.scp"
    _ext_audio = ".wav"
    FOLDER_IN_ARCHIVE = "dump/raw"
    def __init__(self, root, split):
        if split in ["dev", "test", "train_sp"]:
            print("Valid data split detected.")
        else:
            # BUGFIX: the original silently fell through and later crashed
            # with NameError on `basename`; fail fast instead.
            raise ValueError("Invalid data split: %s" % split)
        basename = split
        root = os.fspath(root)
        folder_in_archive = os.path.join(self.FOLDER_IN_ARCHIVE, basename)
        self._path = os.path.join(root, folder_in_archive)
        # Register utterance-id -> audio-path mapping from wav.scp.
        self._walker = []
        self.id2filedir_dict = dict()
        self._scp_file_path = os.path.join(root, folder_in_archive, self._scp_file_name)
        with open(self._scp_file_path) as fp:
            for line in fp:
                uttid, utt_dir = line.strip().split(" ", 1)
                if uttid.startswith("sp"):
                    # Speed-perturbed entries are stored relative to root.
                    self.id2filedir_dict[uttid] = os.path.join(root, utt_dir.strip())
                else:
                    self.id2filedir_dict[uttid] = utt_dir.strip()
                self._walker.append(uttid.strip())
        self._walker = sorted(self._walker)
        logging.info("Utterance path registration done")
        # Register utterance-id -> transcript mapping from the text file.
        # BUGFIX: the original checked membership of the *previous* line's
        # uttid (the test ran before parsing the current line), so the entry
        # after any unknown id was dropped. Parse first, then filter.
        self._id2txt_dict = dict()
        self._txt_file_path = os.path.join(root, folder_in_archive, self._txt_file_name)
        valid_ids = set(self._walker)  # O(1) membership instead of list scan
        with open(self._txt_file_path, "r") as ft:
            line_cnt = 0
            for line in ft:
                uttid, text = line.strip().split(" ", 1)
                if uttid in valid_ids:
                    self._id2txt_dict[uttid] = text.strip().replace(" ", "")
                line_cnt += 1
                if line_cnt % 10000 == 0:
                    logging.info("have processed %d lines" % line_cnt)
        logging.info("Vocabulary collection done")
        logging.info("Dataset initialization done")
    def __getitem__(self, n: int):
        """Load the n-th sample from the dataset.
        Args:
            n (int): The index of the sample to be loaded
        Returns:
            ``(waveform, sample_rate, transcript, speaker_id, utterance_id)``
        """
        fileid = self._walker[n]
        return load_speed_perturbated_aishell1_item(
            fileid, self._id2txt_dict, self.id2filedir_dict
        )
    def __len__(self) -> int:
        return len(self._walker)
def process(args):
    """AISHELL-1 preprocessing pipeline.

    Extracts fbank features, packs them into a ZIP, writes per-split TSV
    manifests, builds a frequency-sorted character vocabulary from the
    train split, and emits the data-config YAML.
    """
    input_root = Path(args.input_root).absolute()
    out_root = Path(args.output_root).absolute()
    out_root.mkdir(exist_ok=True)
    # Extract features
    feature_root = out_root / "fbank80"
    feature_root.mkdir(exist_ok=True)
    for split in SPLITS:
        print(f"Fetching split {split}...")
        # NOTE(review): feature extraction uses AISHELL1 while the manifest
        # pass below uses SpeedPerturbatedAISHELL1 (see commented swaps) —
        # confirm the intended dataset class for each pass.
        dataset = AISHELL1(input_root.as_posix(), split=split)
        # dataset = SpeedPerturbatedAISHELL1(input_root.as_posix(), split=split)
        print("Extracting log mel filter bank features...")
        for wav, sample_rate, _, spk_id, utt_id in tqdm(dataset):
            sample_id = utt_id
            try:
                extract_fbank_features(
                    wav, sample_rate, feature_root / f"{sample_id}.npy"
                )
            except Exception:
                # BUGFIX: was a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit); the no-op `else: continue`
                # was removed as well.
                print("Encounter error for %s" % utt_id)
    # Pack features into ZIP
    zip_path = out_root / "fbank80.zip"
    print("ZIPing features...")
    create_zip(feature_root, zip_path)
    print("Fetching ZIP manifest...")
    audio_paths, audio_lengths = get_zip_manifest(zip_path)
    # Generate TSV manifest
    print("Generating manifest...")
    train_text = []
    for split in SPLITS:  # conduct for each data split
        manifest = {c: [] for c in MANIFEST_COLUMNS}
        # dataset = AISHELL1(input_root.as_posix(), split=split)
        dataset = SpeedPerturbatedAISHELL1(input_root.as_posix(), split=split)
        for _, _, trans, spk_id, utt_id in tqdm(dataset):
            # Skip utterances without a transcript or extracted features.
            if trans is not None and utt_id.strip() in audio_paths:
                # Add items one-by-one
                sample_id = utt_id
                manifest["id"].append(sample_id)
                manifest["audio"].append(audio_paths[sample_id])
                manifest["n_frames"].append(audio_lengths[sample_id])
                # Target text is space-separated characters.
                manifest["tgt_text"].append(" ".join(list(trans.lower())))
                manifest["speaker"].append(spk_id)
        save_df_to_tsv(pd.DataFrame.from_dict(manifest), out_root / f"{split}.tsv")
        if split.startswith("train"):
            train_text.extend(manifest["tgt_text"])
    # Generate vocab: count character frequencies over the train text.
    vocab_dict = dict()
    for line in train_text:
        tokens_list = line.strip().split(" ")
        for tok in tokens_list:
            if tok not in vocab_dict:
                vocab_dict[tok] = 1
            else:
                vocab_dict[tok] += 1
    sorted_vocab_dict = {
        sort_k: sort_v
        for sort_k, sort_v in sorted(
            vocab_dict.items(), key=lambda kv: (kv[1], kv[0]), reverse=True
        )
    }
    vocab_file_path = os.path.join(str(out_root), "vocab.txt")
    # Special tokens first, then tokens by descending frequency; the file
    # is now closed deterministically via a context manager.
    with open(vocab_file_path, "w") as f_vocab:
        f_vocab.write("\t".join([BOS_TOKEN, str(0)]) + "\n")
        f_vocab.write("\t".join([PAD_TOKEN, str(0)]) + "\n")
        f_vocab.write("\t".join([EOS_TOKEN, str(0)]) + "\n")
        f_vocab.write("\t".join([UNK_TOKEN, str(0)]) + "\n")
        for tok, freq in sorted_vocab_dict.items():
            f_vocab.write("\t".join([tok, str(freq)]) + "\n")
    # Generate config YAML
    gen_config_yaml(out_root, vocab_name=vocab_file_path, specaugment_policy="ld")
    # Clean up
    shutil.rmtree(feature_root)
def main():
    """Parse CLI options and run the AISHELL-1 preprocessing pipeline."""
    arg_parser = argparse.ArgumentParser()
    # Input corpus root (despite the LibriSpeech-looking default path).
    arg_parser.add_argument(
        "--input-root",
        "-i",
        default="/data/LibriSpeech/mlhan_extra_files/",
        required=False,
        type=str,
    )  # assign the data output root directory
    # Destination for features, manifests, vocab and config.
    arg_parser.add_argument(
        "--output-root",
        "-o",
        default="/workspace/fairseq-uni/examples/speech_to_text/egs/aishell1/data/",
        required=False,
        type=str,
    )  # assign the data output root directory
    process(arg_parser.parse_args())
if __name__ == "__main__":
main()
| 11,501 | 31.038997 | 98 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_to_text/gen_librispeech_vocab.py | # @Time : 2022/3/2
# @Author : Minglun Han
# @File : gen_vocab.py
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
import pandas as pd
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
save_df_to_tsv,
)
from torchaudio.datasets import LIBRISPEECH
from tqdm import tqdm
# Define data splits
import sys

# Data splits whose transcripts feed vocabulary training.
SPLITS = [
    "train-clean-100",
    "train-clean-360",
    "train-other-500",
]
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]

# Optional positional CLI overrides, falling back to the historical defaults.
# BUGFIX: the original `<default> or sys.argv[i]` always short-circuited to
# the default (and would have indexed argv starting at the script name).
output_root = (
    sys.argv[1]
    if len(sys.argv) > 1
    else "/workspace/fairseq-uni/examples/speech_to_text/egs/librispeech/data"
)
vocab_size = int(sys.argv[2]) if len(sys.argv) > 2 else 1000
vocab_type = sys.argv[3] if len(sys.argv) > 3 else "unigram"

out_root = Path(output_root).absolute()
out_root.mkdir(exist_ok=True)

# Load text
# Collect the lower-cased transcript of every utterance in the train splits.
train_text = []
for split in SPLITS:
    manifest = {c: [] for c in MANIFEST_COLUMNS}
    dataset = LIBRISPEECH(out_root.as_posix(), url=split)
    for _, _, utt, _, _, _ in tqdm(dataset):
        manifest["tgt_text"].append(utt.lower())
    train_text.extend(manifest["tgt_text"])

# Generate vocabulary
vocab_size = "" if vocab_type == "char" else str(vocab_size)
spm_filename_prefix = f"spm_{vocab_type}{vocab_size}"
with NamedTemporaryFile(mode="w") as f:
    for t in train_text:
        f.write(t + "\n")
    # BUGFIX: flush buffered text before SentencePiece re-opens f.name.
    f.flush()
    gen_vocab(
        Path(f.name),
        out_root / spm_filename_prefix,
        vocab_type,
        vocab_size,
    )
| 1,514 | 24.25 | 88 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_to_text/gen_vocab.py | # @Time : 2022/3/2
# @Author : Minglun Han
# @File : gen_vocab.py
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
import pandas as pd
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
save_df_to_tsv,
)
from torchaudio.datasets import LIBRISPEECH
from tqdm import tqdm
# Define data splits
import sys

# Data splits whose transcripts feed vocabulary training.
SPLITS = [
    "train-clean-100",
    "train-clean-360",
    "train-other-500",
]
# BUGFIX: MANIFEST_COLUMNS was referenced below but never defined here.
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]

# Positional CLI arguments. BUGFIX: `sys` was never imported (immediate
# NameError), and indexing started at argv[0] — the script name itself.
output_root = sys.argv[1]
vocab_size = sys.argv[2]
vocab_type = sys.argv[3]

out_root = Path(output_root).absolute()
out_root.mkdir(exist_ok=True)

# Load text
train_text = []
for split in SPLITS:  # conduct for each data split
    manifest = {c: [] for c in MANIFEST_COLUMNS}
    dataset = LIBRISPEECH(out_root.as_posix(), url=split)
    for _, _, utt, spk_id, chapter_no, utt_no in tqdm(dataset):
        # Add items one-by-one
        sample_id = f"{spk_id}-{chapter_no}-{utt_no}"
        manifest["tgt_text"].append(utt.lower())
    if split.startswith("train"):
        train_text.extend(manifest["tgt_text"])

# Generate vocabulary
vocab_size = "" if vocab_type == "char" else str(vocab_size)
spm_filename_prefix = f"spm_{vocab_type}{vocab_size}"
with NamedTemporaryFile(mode="w") as f:
    for t in train_text:
        f.write(t + "\n")
    # BUGFIX: flush buffered text before SentencePiece re-opens f.name.
    f.flush()
    gen_vocab(
        Path(f.name),
        out_root / spm_filename_prefix,
        vocab_type,
        vocab_size,
    )
| 1,515 | 24.266667 | 63 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_to_text/bert_feat_extract/extract_bert_feats.py | # @Time : 2022/8/15
# @Author : Minglun Han
# @File : extract_bert_feats.py
import os
import sys
import argparse
import random
import string
import json
import numpy as np
import torch
from transformers import BertTokenizer, BertModel
"""
Description:
This program is used to extract the features from pretrained models.
You can specify the pretrained model and its vocabulary. The input should be a file of text_id and text pairs.
The outputs will be npy files with text_id as prefix, and a hash table with text_id to feature.npy mapping.
Outputs:
1. ${utterance_id}.npy files;
2. features hash json;
Chinese Pretraining models:
MODEL_NAME MODEL_KEY
Bert-base-chinese bert-base-chinese
"""
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def parse_args():
    """Build and parse the CLI options for BERT feature extraction."""
    cli = argparse.ArgumentParser(description="generate data tables")
    add = cli.add_argument
    add(
        "--input_text_file_dir",
        type=str,
        default="/data1/student/mlhan/myprojects/fairseq-uni/examples/speech_to_text/egs/aishell2/data/aishell2.map",
        help="directory to texts, format '${utterance_id}\t${text}'",
    )
    add(
        "--split_name", type=str, default="aishell2", help="the split name"
    )
    add(
        "--output_dir",
        type=str,
        default="/data1/student/mlhan/myprojects/fairseq-uni/examples/speech_to_text/egs/aishell2/bert_feats/bert-base-chinese/",
        help="directory used to save outputs",
    )
    add(
        "--pretrained_model",
        type=str,
        default="bert-base-chinese",
        help="determine which pretrained model to be used to extract features",
    )
    add(
        "--pretrained_model_vocab",
        type=str,
        default="bert-base-chinese",
        help="the vocabulary of the pretrained model",
    )
    add(
        "--batch_size",
        type=int,
        default=256,
        help="the batch size for feature extraction",
    )
    add(
        "--lang", "-l", type=str, default="cn", help="the language of text"
    )
    add("--gpu", action="store_true")
    return cli.parse_args()
def split_and_save(
    final_output_dir, utt_id_list, input_ids, attention_mask, last_hidden_states
):
    """Split a batch into per-utterance records, saving each utterance's
    hidden-state matrix as ``<utt_id>.npy`` under ``final_output_dir``.

    Returns a list of dicts with token ids, padding mask, unpadded length,
    and the saved feature path.
    """
    records = []
    batch = zip(utt_id_list, input_ids, attention_mask, last_hidden_states)
    for utt_id, token_ids, mask, feats in batch:
        feat_path = os.path.join(final_output_dir, utt_id + ".npy")
        # Skip re-writing features that already exist on disk.
        if not os.path.exists(feat_path):
            np.save(feat_path, feats.cpu().detach().numpy())
        records.append(
            {
                "utt_id": utt_id,
                "input_ids": token_ids.cpu().detach().numpy().tolist(),
                "padding_mask": mask.cpu().detach().numpy().tolist(),
                # Unpadded length = number of 1s in the attention mask.
                "length": int(mask.sum().cpu().detach().numpy()),
                "feat_path": feat_path,
            }
        )
    return records
def main(args):
    """Extract last-layer BERT features for every line of the input text
    file, saving one .npy per utterance plus a JSON index of all records."""
    input_text_file_dir = args.input_text_file_dir
    split_name = args.split_name
    output_dir = args.output_dir
    pretrained_model = args.pretrained_model
    pretrained_model_vocab = args.pretrained_model_vocab
    lang = args.lang
    # Load tokenizer and model
    # tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    # bert = BertModel.from_pretrained('bert-base-uncased')
    print("1. Load pretrained models and vocabulary")
    tokenizer = BertTokenizer.from_pretrained(pretrained_model_vocab)
    bert = BertModel.from_pretrained(pretrained_model)
    if args.gpu:
        bert = bert.cuda()
    # Prepare output directory
    # Layout: <output_dir>/<split_name>/ holds the .npy files and the index.
    print("2. Create working directory")
    final_output_dir = os.path.join(output_dir, split_name)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    if os.path.exists(final_output_dir):
        print("features are existing in %s" % final_output_dir)
    else:
        os.mkdir(final_output_dir)
    hash_table_dir = os.path.join(final_output_dir, split_name + "_text_feat" + ".json")
    f_hash = open(hash_table_dir, "w")
    # Extract features from pretrained models
    # Input lines are "<utt_id>\t<text>"; batches of args.batch_size lines
    # are tokenized and forwarded together.
    print("3. Extracting features")
    utt_id_list = []
    batch_inputs = []
    data_list = []
    batch_counter = 0
    with open(input_text_file_dir, "r") as f:
        batch_size_counter = 0
        for line in f:
            utt_id, text = line.strip().split("\t", 1)
            if lang == "cn":
                text = text.strip().replace(" ", "")  # For Chinese temporarily
            else:
                text = text.strip()
            batch_inputs.append(text)
            utt_id_list.append(utt_id)
            batch_size_counter += 1
            if batch_size_counter % args.batch_size == 0:
                # Forward pretrained models
                inputs = tokenizer(batch_inputs, return_tensors="pt", padding=True)
                if args.gpu:
                    for k in inputs.keys():
                        inputs[k] = inputs[k].cuda()
                outputs = bert(**inputs)
                # Split and save samples
                input_ids = inputs["input_ids"]  # B x T
                attention_mask = inputs["attention_mask"]  # B x T
                last_hidden_states = outputs.last_hidden_state  # B x T x C
                output_list = split_and_save(
                    final_output_dir,
                    utt_id_list,
                    input_ids,
                    attention_mask,
                    last_hidden_states,
                )
                data_list.extend(output_list)
                # Empty buffers
                batch_counter += 1
                batch_size_counter = 0
                batch_inputs = []
                utt_id_list = []
                print("have processed %d batches. " % batch_counter)
    # Process samples in the last batch
    # NOTE(review): when the sample count is an exact multiple of batch_size
    # this tokenizes an empty list — confirm the tokenizer tolerates that.
    print("4. Process residual batch")
    inputs = tokenizer(batch_inputs, return_tensors="pt", padding=True)
    if args.gpu:
        for k in inputs.keys():
            inputs[k] = inputs[k].cuda()
    outputs = bert(**inputs)
    input_ids = inputs["input_ids"]  # B x T
    attention_mask = inputs["attention_mask"]  # B x T
    last_hidden_states = outputs.last_hidden_state  # B x T x C
    output_list = split_and_save(
        final_output_dir, utt_id_list, input_ids, attention_mask, last_hidden_states
    )
    data_list.extend(output_list)
    # Dump the JSON index mapping every utterance to its saved feature file.
    data_dict = {"data": data_list}
    json_data_dict = json.dumps(data_dict, indent=4)
    f_hash.write(json_data_dict)
    f_hash.close()
    print("Feature extraction from pretrained language model is Done.")
if __name__ == "__main__":
args = parse_args()
main(args)
| 6,843 | 32.54902 | 129 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_to_text/simultaneous_translation/agents/fairseq_simul_st_agent.py | import math
import os
import json
import numpy as np
import torch
import torchaudio.compliance.kaldi as kaldi
import yaml
from fairseq import checkpoint_utils, tasks
from fairseq.file_io import PathManager
try:
from simuleval import READ_ACTION, WRITE_ACTION, DEFAULT_EOS
from simuleval.agents import SpeechAgent
from simuleval.states import ListEntry, SpeechStates
except ImportError:
print("Please install simuleval 'pip install simuleval'")
SHIFT_SIZE = 10
WINDOW_SIZE = 25
SAMPLE_RATE = 16000
FEATURE_DIM = 80
BOW_PREFIX = "\u2581"
class OnlineFeatureExtractor:
    """
    Incremental fbank extractor: buffers raw samples across calls so that
    overlapping analysis windows line up with the fixed frame shift.
    """

    def __init__(self, args):
        self.shift_size = args.shift_size
        self.window_size = args.window_size
        assert self.window_size >= self.shift_size
        self.sample_rate = args.sample_rate
        self.feature_dim = args.feature_dim
        self.global_cmvn = args.global_cmvn
        # ms -> sample-count conversions at the configured rate.
        self.num_samples_per_shift = int(self.shift_size * self.sample_rate / 1000)
        self.num_samples_per_window = int(self.window_size * self.sample_rate / 1000)
        self.len_ms_to_samples = lambda x: x * self.sample_rate / 1000
        self.previous_residual_samples = []

    def clear_cache(self):
        """Drop any buffered residual samples."""
        self.previous_residual_samples = []

    def __call__(self, new_samples):
        samples = self.previous_residual_samples + new_samples
        if len(samples) < self.num_samples_per_window:
            # Not enough for even one analysis window yet: keep buffering.
            self.previous_residual_samples = samples
            return

        window_minus_shift = self.len_ms_to_samples(self.window_size - self.shift_size)
        # Number of frames fully covered by the samples available so far.
        num_frames = math.floor(
            (len(samples) - window_minus_shift) / self.num_samples_per_shift
        )
        # Samples consumed by those frames, including the right-edge window
        # overlap carried into the next call.
        effective_num_samples = int(
            num_frames * self.len_ms_to_samples(self.shift_size) + window_minus_shift
        )
        input_samples = samples[:effective_num_samples]
        self.previous_residual_samples = samples[
            num_frames * self.num_samples_per_shift :
        ]

        torch.manual_seed(1)
        output = kaldi.fbank(
            torch.FloatTensor(input_samples).unsqueeze(0),
            num_mel_bins=self.feature_dim,
            frame_length=self.window_size,
            frame_shift=self.shift_size,
        ).numpy()
        return torch.from_numpy(self.transform(output))

    def transform(self, input):
        """Apply global CMVN ((x - mean) / std) when stats are configured."""
        if self.global_cmvn is None:
            return input
        centered = np.subtract(input, self.global_cmvn["mean"])
        return np.divide(centered, self.global_cmvn["std"])
class TensorListEntry(ListEntry):
    """
    ListEntry variant whose payload is a single tensor built by
    concatenating every appended chunk along dim 0.
    """
    def append(self, value):
        # The first append replaces the (empty-list) placeholder outright;
        # subsequent appends grow the tensor along the time axis.
        if len(self.value) == 0:
            self.value = value
        else:
            self.value = torch.cat([self.value] + [value], dim=0)
    def info(self):
        is_empty = type(self.value) is list
        return {
            "type": str(self.new_value_type),
            "length": self.__len__(),
            "value": "" if is_empty else self.value.size(),
        }
class FairseqSimulSTAgent(SpeechAgent):
    """SimulEval speech agent wrapping a fairseq simultaneous ST model.

    The agent streams raw audio segments in, converts them to fbank features
    on the fly (OnlineFeatureExtractor), and alternates READ (consume more
    audio) and WRITE (emit a subword) actions as decided by the model's
    simultaneous decoding policy.
    """
    speech_segment_size = 40  # in ms, 4 pooling ratio * 10 ms step size
    def __init__(self, args):
        super().__init__(args)
        self.eos = DEFAULT_EOS
        self.gpu = getattr(args, "gpu", False)
        self.args = args
        self.load_model_vocab(args)
        # If the decoder's monotonic attention makes one decision every
        # `pre_decision_ratio` encoder states, enlarge the audio segment
        # proportionally so each segment yields one decision.
        if (
            getattr(
                self.model.decoder.layers[0].encoder_attn, "pre_decision_ratio", None
            )
            is not None
        ):
            self.speech_segment_size *= self.model.decoder.layers[
                0
            ].encoder_attn.pre_decision_ratio
        # Global CMVN stats may come from the data config yaml ...
        args.global_cmvn = None
        if args.config:
            with open(os.path.join(args.data_bin, args.config), "r") as f:
                config = yaml.load(f, Loader=yaml.BaseLoader)
            if "global_cmvn" in config:
                args.global_cmvn = np.load(config["global_cmvn"]["stats_npz_path"])
        # ... or from an explicit json stats file. NOTE(review): this branch
        # sets self.global_cmvn while the yaml branch sets args.global_cmvn
        # (which is what OnlineFeatureExtractor reads) — confirm intended.
        if args.global_stats:
            with PathManager.open(args.global_stats, "r") as f:
                global_cmvn = json.loads(f.read())
                self.global_cmvn = {
                    "mean": global_cmvn["mean"],
                    "std": global_cmvn["stddev"],
                }
        self.feature_extractor = OnlineFeatureExtractor(args)
        self.max_len = args.max_len
        self.force_finish = args.force_finish
        # Pure inference: gradients are never needed.
        torch.set_grad_enabled(False)
    def build_states(self, args, client, sentence_id):
        # Initialize states here, for example add customized entry to states
        # This function will be called at beginning of every new sentence
        states = SpeechStates(args, client, sentence_id, self)
        self.initialize_states(states)
        return states
    def to_device(self, tensor):
        # Move to GPU when --gpu was requested, otherwise keep on CPU.
        if self.gpu:
            return tensor.cuda()
        else:
            return tensor.cpu()
    @staticmethod
    def add_args(parser):
        """Register agent-specific command line arguments on *parser*."""
        # fmt: off
        parser.add_argument('--model-path', type=str, required=True,
                            help='path to your pretrained model.')
        parser.add_argument("--data-bin", type=str, required=True,
                            help="Path of data binary")
        parser.add_argument("--config", type=str, default=None,
                            help="Path to config yaml file")
        parser.add_argument("--global-stats", type=str, default=None,
                            help="Path to json file containing cmvn stats")
        parser.add_argument("--tgt-splitter-type", type=str, default="SentencePiece",
                            help="Subword splitter type for target text")
        parser.add_argument("--tgt-splitter-path", type=str, default=None,
                            help="Subword splitter model path for target text")
        parser.add_argument("--user-dir", type=str, default="examples/simultaneous_translation",
                            help="User directory for simultaneous translation")
        parser.add_argument("--max-len", type=int, default=200,
                            help="Max length of translation")
        parser.add_argument("--force-finish", default=False, action="store_true",
                            help="Force the model to finish the hypothsis if the source is not finished")
        parser.add_argument("--shift-size", type=int, default=SHIFT_SIZE,
                            help="Shift size of feature extraction window.")
        parser.add_argument("--window-size", type=int, default=WINDOW_SIZE,
                            help="Window size of feature extraction window.")
        parser.add_argument("--sample-rate", type=int, default=SAMPLE_RATE,
                            help="Sample rate")
        parser.add_argument("--feature-dim", type=int, default=FEATURE_DIM,
                            help="Acoustic feature dimension.")
        # fmt: on
        return parser
    def load_model_vocab(self, args):
        """Load the checkpoint, rebuild task and model, and set the vocab."""
        filename = args.model_path
        if not os.path.exists(filename):
            raise IOError("Model file not found: {}".format(filename))
        state = checkpoint_utils.load_checkpoint_to_cpu(filename)
        task_args = state["cfg"]["task"]
        task_args.data = args.data_bin
        if args.config is not None:
            task_args.config_yaml = args.config
        task = tasks.setup_task(task_args)
        # build model for ensemble
        # Clear training-time pretrained sub-module paths so only the
        # checkpoint's own weights are loaded.
        state["cfg"]["model"].load_pretrained_encoder_from = None
        state["cfg"]["model"].load_pretrained_decoder_from = None
        self.model = task.build_model(state["cfg"]["model"])
        self.model.load_state_dict(state["model"], strict=True)
        self.model.eval()
        self.model.share_memory()
        if self.gpu:
            self.model.cuda()
        # Set dictionary
        self.dict = {}
        self.dict["tgt"] = task.target_dictionary
    def initialize_states(self, states):
        # Fresh feature buffer and empty source/target unit queues for a
        # new sentence; incremental decoder state starts empty.
        self.feature_extractor.clear_cache()
        states.units.source = TensorListEntry()
        states.units.target = ListEntry()
        states.incremental_states = dict()
    def segment_to_units(self, segment, states):
        # Convert speech samples to features
        features = self.feature_extractor(segment)
        if features is not None:
            return [features]
        else:
            # Not enough audio buffered yet to produce a frame.
            return []
    def units_to_segment(self, units, states):
        # Merge sub word to full word.
        if self.model.decoder.dictionary.eos() == units[0]:
            return DEFAULT_EOS
        segment = []
        if None in units.value:
            units.value.remove(None)
        for index in units:
            if index is None:
                units.pop()
            token = self.model.decoder.dictionary.string([index])
            if token.startswith(BOW_PREFIX):
                if len(segment) == 0:
                    # First sub-word piece of the word being assembled.
                    segment += [token.replace(BOW_PREFIX, "")]
                else:
                    # A new word started: flush the assembled word and pop
                    # its consumed sub-word units from the queue.
                    for j in range(len(segment)):
                        units.pop()
                    string_to_return = ["".join(segment)]
                    if self.model.decoder.dictionary.eos() == units[0]:
                        string_to_return += [DEFAULT_EOS]
                    return string_to_return
            else:
                # Word-internal piece: keep accumulating.
                segment += [token.replace(BOW_PREFIX, "")]
        # Sentence finished (EOS emitted) or length budget exhausted: emit
        # whatever remains plus EOS.
        if (
            len(units) > 0
            and self.model.decoder.dictionary.eos() == units[-1]
            or len(states.units.target) > self.max_len
        ):
            tokens = [self.model.decoder.dictionary.string([unit]) for unit in units]
            return ["".join(tokens).replace(BOW_PREFIX, "")] + [DEFAULT_EOS]
        return None
    def update_model_encoder(self, states):
        """Re-encode all source features received so far."""
        if len(states.units.source) == 0:
            return
        src_indices = self.to_device(states.units.source.value.unsqueeze(0))
        src_lengths = self.to_device(
            torch.LongTensor([states.units.source.value.size(0)])
        )
        states.encoder_states = self.model.encoder(src_indices, src_lengths)
        torch.cuda.empty_cache()
    def update_states_read(self, states):
        # Happens after a read action.
        self.update_model_encoder(states)
    def policy(self, states):
        """Run one decoder step and map its action to READ/WRITE."""
        # No encoder output yet: must read first.
        if not getattr(states, "encoder_states", None):
            return READ_ACTION
        # Decoder input: EOS prefix followed by all emitted target units.
        tgt_indices = self.to_device(
            torch.LongTensor(
                [self.model.decoder.dictionary.eos()]
                + [x for x in states.units.target.value if x is not None]
            ).unsqueeze(0)
        )
        states.incremental_states["steps"] = {
            "src": states.encoder_states["encoder_out"][0].size(0),
            "tgt": 1 + len(states.units.target),
        }
        # Tell the monotonic attention whether more source may still arrive.
        states.incremental_states["online"] = {
            "only": torch.tensor(not states.finish_read())
        }
        x, outputs = self.model.decoder.forward(
            prev_output_tokens=tgt_indices,
            encoder_out=states.encoder_states,
            incremental_state=states.incremental_states,
        )
        states.decoder_out = x
        states.decoder_out_extra = outputs
        torch.cuda.empty_cache()
        # action == 0 means the policy wants more source context.
        if outputs.action == 0:
            return READ_ACTION
        else:
            return WRITE_ACTION
    def predict(self, states):
        """Pick the argmax token from the last decoder step."""
        decoder_states = states.decoder_out
        lprobs = self.model.get_normalized_probs(
            [decoder_states[:, -1:]], log_probs=True
        )
        index = lprobs.argmax(dim=-1)
        index = index[0, 0].item()
        if (
            self.force_finish
            and index == self.model.decoder.dictionary.eos()
            and not states.finish_read()
        ):
            # If we want to force finish the translation
            # (don't stop before finish reading), return a None
            # self.model.decoder.clear_cache(states.incremental_states)
            index = None
        return index
| 12,271 | 32.347826 | 105 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_recognition/ctc_decoder.py | # @Time : 2021/7/26
# @Author : Minglun Han
# @File : ctc_decoder.py
import os
import sys
import torch
import random
import logging
import torch.nn.functional as F
import numpy as np
import itertools as it
# Control print options
torch.set_printoptions(profile="full")
torch.set_printoptions(profile="default")
np.set_printoptions(threshold=sys.maxsize)
class CtcDecoder(object):
    """Greedy (best-path) CTC decoder for fairseq ASR models.

    Only beam size 1 is supported; ``decode`` is bound to
    :meth:`batch_greedy_decode` at construction time.
    """

    def __init__(self, args, tgt_dict):
        """
        Args:
            args: namespace with a ``beam`` attribute (must be 1).
            tgt_dict: fairseq target dictionary.

        Raises:
            NotImplementedError: if ``args.beam != 1``.
        """
        self.tgt_dict = tgt_dict
        self.vocab_size = len(tgt_dict)
        self.beam = args.beam
        # Blank token: a dedicated <ctc_blank> symbol if the dictionary has
        # one, otherwise fall back to the bos index.
        self.blank = (
            tgt_dict.index("<ctc_blank>")
            if "<ctc_blank>" in tgt_dict.indices
            else tgt_dict.bos()
        )
        self.bos = tgt_dict.bos()
        self.eos = tgt_dict.eos()
        self.pad = tgt_dict.pad()
        if self.beam == 1:
            logging.info("employ ctc greedy decoder")
            self.decode = self.batch_greedy_decode
        else:
            raise NotImplementedError("Not supported options!")

    def generate(self, models, sample, **unused):
        """Generate a batch of inferences."""
        # model.forward normally channels prev_output_tokens into the decoder
        # separately, but SequenceGenerator directly calls model.encoder
        model_inputs = {
            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
        }  # remove prev_output_tokens
        # Forward the encoder to obtain per-frame CTC logits.
        ctc_logits, encoder_outputs_padding_mask = models[0].get_ctc_output(
            src_tokens=model_inputs["src_tokens"],
            src_lengths=model_inputs["src_lengths"],
        )
        # Obtain log-probabilities and conduct decoding
        ctc_log_probs = models[0].get_probs_from_logits(ctc_logits, log_probs=True)
        beam_results, beam_scores, out_seqlens = self.decode(
            ctc_log_probs, encoder_outputs_padding_mask
        )
        return self.generate_hypos(
            beam_results=beam_results,
            beam_scores=beam_scores,
            out_seqlens=out_seqlens,
        )

    def generate_hypos(self, beam_results, beam_scores, out_seqlens):
        """Pack padded beam outputs into fairseq-style hypothesis dicts."""
        hypos = []
        for beam_result, scores, lengths in zip(beam_results, beam_scores, out_seqlens):
            # beam_ids: beam x id; score: beam; length: beam
            top = []
            for result, score, length in zip(beam_result, scores, lengths):
                top.append({"tokens": self.get_tokens(result[:length]), "score": score})
            hypos.append(top)
        return hypos

    def get_tokens(self, idxs):
        """
        Normalize tokens by handling CTC blank, ASG replabels, etc.
        """
        # Collapse repeats, then drop blank and eos ids.
        idxs = (g[0] for g in it.groupby(idxs))
        idxs = filter(lambda x: x != self.blank, idxs)
        idxs = filter(lambda x: x != self.eos, idxs)
        return torch.LongTensor(list(idxs))

    def batch_greedy_decode(self, ctc_log_probs, encoder_outputs_padding_mask):
        """Best-path decode a batch of CTC posteriors.

        Args:
            ctc_log_probs: B x T x V tensor of log-probabilities.
            encoder_outputs_padding_mask: B x T boolean mask, True on padding.

        Returns:
            prev_tokens: B x 1 x T' hypotheses, right-padded with the pad id.
            scores: B x 1 greedy-path scores (sum of framewise max log-probs).
            out_seqlens: B x 1 hypothesis lengths.
        """
        # NOTE: a leftover debug ``sys.exit(0)`` used to terminate the process
        # here before any result was returned; it has been removed.
        batch_size = ctc_log_probs.size(0)
        device = ctc_log_probs.device
        input_lengths = (~encoder_outputs_padding_mask).int().sum(-1)

        # Single pass per sample: score, collapse repeats, strip blanks.
        hyps, scores, out_seqlens = [], [], []
        for sample_id in range(batch_size):
            valid_len = input_lengths[sample_id]
            cur_log_probs = ctc_log_probs[sample_id, :valid_len, :]  # T_i x V
            scores.append(cur_log_probs.max(dim=-1)[0].sum().item())
            cur_toks = cur_log_probs.argmax(dim=-1).unique_consecutive()
            cur_toks = cur_toks[cur_toks != self.blank]
            hyps.append(cur_toks)
            out_seqlens.append(cur_toks.size(0))

        scores = torch.tensor(scores)
        out_seqlens = torch.tensor(out_seqlens)

        # Right-pad every hypothesis to the longest one in the batch. The
        # padding tensor follows the input's device (the old code hard-coded
        # .cuda(), which broke CPU decoding).
        max_output_seqlen = int(out_seqlens.max().item())
        prev_tokens = []
        for cur_toks in hyps:
            padding_tensor = torch.full(
                [max_output_seqlen - cur_toks.size(0)],
                self.pad,
                dtype=torch.long,
                device=device,
            )
            prev_tokens.append(
                torch.unsqueeze(torch.cat([cur_toks, padding_tensor], dim=0), dim=0)
            )
        prev_tokens = torch.cat(prev_tokens, dim=0)

        # Reform outputs with a singleton beam dimension.
        prev_tokens = torch.unsqueeze(prev_tokens, dim=1)  # B x 1 x T'
        out_seqlens = torch.unsqueeze(out_seqlens, dim=-1)  # B x 1
        scores = torch.unsqueeze(scores, dim=-1)  # B x 1
        return prev_tokens, scores, out_seqlens
| 5,750 | 35.169811 | 88 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_recognition/infer.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import argparse
import ast
import logging
import math
import os
import sys

import edlib
import editdistance
import numpy as np
import torch

from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from transformers import BertTokenizer, BertModel, BertLayer
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
np.set_printoptions(threshold=10000000)
torch.set_printoptions(profile="full")
def add_asr_eval_argument(parser):
    """Register ASR-evaluation options on *parser* and return it.

    Covers decoder selection (w2l/cif/ctc), LM shallow fusion, flashlight
    decoder weights, emission/feature dumping, and tokenizer settings.
    """
    parser.add_argument("--kspmodel", default=None, help="sentence piece model")
    parser.add_argument(
        "--wfstlm", default=None, help="wfstlm on dictonary output units"
    )
    parser.add_argument(
        "--rnnt_decoding_type",
        default="greedy",
        help="wfstlm on dictonary output units",
    )
    # --lm-weight may already be registered by the base generation parser;
    # ignore only the duplicate-option error instead of a bare except that
    # would also swallow SystemExit/KeyboardInterrupt.
    try:
        parser.add_argument(
            "--lm-weight",
            "--lm_weight",
            type=float,
            default=0.2,
            help="weight for lm while interpolating with neural score",
        )
    except argparse.ArgumentError:
        pass
    parser.add_argument(
        "--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
    )
    parser.add_argument(
        "--w2l-decoder",
        choices=["viterbi", "kenlm", "fairseqlm"],
        help="use a w2l decoder",
    )
    parser.add_argument("--lexicon", help="lexicon for w2l decoder")
    parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm")
    parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
    parser.add_argument("--beam-threshold", type=float, default=25.0)
    parser.add_argument("--beam-size-token", type=float, default=100)
    parser.add_argument("--word-score", type=float, default=1.0)
    parser.add_argument("--unk-weight", type=float, default=-math.inf)
    parser.add_argument("--sil-weight", type=float, default=0.0)
    parser.add_argument(
        "--dump-emissions",
        type=str,
        default=None,
        help="if present, dumps emissions into this file and exits",
    )
    parser.add_argument(
        "--dump-features",
        type=str,
        default=None,
        help="if present, dumps features into this file and exits",
    )
    parser.add_argument(
        "--load-emissions",
        type=str,
        default=None,
        help="if present, loads emissions from this file",
    )
    # Other decoder settings
    parser.add_argument(
        "--cif-decoder",
        choices=["cif"],
        help="use a cif decoder",
    )
    parser.add_argument(
        "--cif-decoder-mode",
        choices=["ar", "nar", "fast_ar"],
        help="the mode of cif decoder",
    )
    parser.add_argument(
        "--tail-handling-firing-threshold",
        type=float,
        default=0.5,
        help="tail handling firing threshold",
    )
    parser.add_argument("--ctc-decoder", choices=["ctc"], help="use a ctc decoder")
    # Shallow fusion settings
    parser.add_argument(
        "--use-nnlm", action="store_true", help="use neural language model"
    )
    parser.add_argument(
        "--fetch-nnlm-from",
        default="",
    )
    parser.add_argument("--specified-dict-path", default="")
    # Multi-modal decoder settings
    parser.add_argument("--use-multimodal-info", action="store_true")
    parser.add_argument("--mask-multimodal-feats", action="store_true")
    # Transformers tokenizer settings
    parser.add_argument("--use-transformers-tokenizer", action="store_true")
    parser.add_argument(
        "--pretrained-model-vocab", type=str, default="bert-base-uncased"
    )
    return parser
def check_args(args):
    """Validate mutually dependent generation CLI options."""
    if args.sampling:
        assert args.nbest == args.beam, "--sampling requires --nbest to be equal to --beam"
    if args.replace_unk is not None:
        assert args.raw_text, "--replace-unk requires a raw text dataset (--raw-text)"
def get_dataset_itr(args, task, models):
    """Build a non-shuffled batch iterator over ``args.gen_subset``.

    ``models`` is accepted for signature compatibility but unused here.
    """
    batch_iterator = task.get_batch_iterator(
        dataset=task.dataset(args.gen_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.batch_size,
        max_positions=(sys.maxsize, sys.maxsize),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=args.required_batch_size_multiple,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
        num_workers=args.num_workers,
        data_buffer_size=args.data_buffer_size,
    )
    return batch_iterator.next_epoch_itr(shuffle=False)
def process_predictions(
    args,
    hypos,
    sp,
    tgt_dict,
    target_tokens,
    res_files,
    speaker,
    id,
    tokenizer=None,
):
    """Post-process the hypotheses of one utterance.

    Writes hypothesis/reference units and words to the result files (when
    given), logs them unless --quiet, and computes word-level edit
    statistics against the reference via edlib alignment.

    Note: the function returns from inside the hypothesis loop, so only the
    top hypothesis is actually processed and scored.

    Returns:
        Tuple of (total word errors, reference length in words,
        substitutions, insertions, deletions) for the top hypothesis.
    """
    for hypo in hypos[: min(len(hypos), args.nbest)]:
        hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
        if "words" in hypo:
            hyp_words = " ".join(hypo["words"])
        else:
            # No word-level output from the decoder: derive words from the
            # sub-word pieces via post_process.
            hyp_words = post_process(
                hyp_pieces, args.post_process, args, huggingface_tokenizer=tokenizer
            )
        if res_files is not None:
            print(
                "{} ({}-{})".format(hyp_pieces, speaker, id),
                file=res_files["hypo.units"],
            )
            print(
                "{} ({}-{})".format(hyp_words, speaker, id),
                file=res_files["hypo.words"],
            )
        tgt_pieces = tgt_dict.string(target_tokens)
        tgt_words = post_process(
            tgt_pieces, args.post_process, args, huggingface_tokenizer=tokenizer
        )
        if res_files is not None:
            print(
                "{} ({}-{})".format(tgt_pieces, speaker, id),
                file=res_files["ref.units"],
            )
            print(
                "{} ({}-{})".format(tgt_words, speaker, id),
                file=res_files["ref.words"],
            )
        if not args.quiet:
            logger.info("HYPO: " + hyp_words)
            logger.info("TARGET: " + tgt_words)
            logger.info("HYPO PIECES: " + hyp_pieces)
            logger.info("TARGET PIECES: " + tgt_pieces)
            logger.info("___________________")
        hyp_words = hyp_words.split()
        tgt_words = tgt_words.split()
        # Get aligned errors
        # edlib returns a CIGAR-like string, e.g. "3=1X2I"; parse it into
        # (operation, run-length) pairs below.
        align_stats = edlib.align(hyp_words, tgt_words, task="path")
        align_info = align_stats["cigar"]
        op_list, len_list = [], []
        tmp_len_str = ""
        align_special_toks = ["=", "D", "I", "X"]
        for char in list(align_info):
            if char in align_special_toks:
                op_list.append(char)
                len_list.append(int(tmp_len_str))
                tmp_len_str = ""
            else:
                # Still accumulating the digits of the current run length.
                tmp_len_str += char
        op_dict = tuple(zip(op_list, len_list))
        # Tally per-category errors: X = substitution, I = insertion,
        # D = deletion ("=" matches are ignored).
        sub_errs, ins_errs, del_errs = 0, 0, 0
        for err_type, num in op_dict:
            if err_type == "X":
                sub_errs += num
            elif err_type == "I":
                ins_errs += num
            elif err_type == "D":
                del_errs += num
            else:
                continue
        return (
            editdistance.eval(hyp_words, tgt_words),
            len(tgt_words),
            sub_errs,
            ins_errs,
            del_errs,
        )
def prepare_result_files(args):
    """Open the four hypothesis/reference output files for writing.

    Returns None when ``args.results_path`` is empty (no results requested),
    otherwise a dict of line-buffered file handles.
    """
    if not args.results_path:
        return None

    def open_result_file(file_prefix):
        # Prepend the shard id so parallel shards never clobber each other.
        if args.num_shards > 1:
            file_prefix = f"{args.shard_id}_{file_prefix}"
        path = os.path.join(
            args.results_path,
            "{}-{}-{}.txt".format(
                file_prefix, os.path.basename(args.path), args.gen_subset
            ),
        )
        return open(path, "w", buffering=1)

    return {
        "hypo.words": open_result_file("hypo.word"),
        "hypo.units": open_result_file("hypo.units"),
        "ref.words": open_result_file("ref.word"),
        "ref.units": open_result_file("ref.units"),
    }
def optimize_models(args, use_cuda, models):
    """Prepare every model in the ensemble for generation, in place.

    Applies fairseq's generation-time optimizations, then optionally casts
    to fp16 and moves the model to GPU.
    """
    beam_size = None if args.no_beamable_mm else args.beam
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=beam_size,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()
def apply_half(t):
    """Cast float32 tensors to float16; return anything else unchanged."""
    return t.to(dtype=torch.half) if t.dtype is torch.float32 else t
class ExistingEmissionsDecoder(object):
    """Decoder wrapper that replays pre-computed emissions.

    Used with ``--load-emissions``: instead of running the acoustic model,
    per-utterance emission matrices are looked up by sample id and handed
    to the wrapped decoder.
    """

    def __init__(self, decoder, emissions):
        """
        Args:
            decoder: object exposing ``decode(emissions)``.
            emissions: indexable collection (typically an object ndarray
                loaded with ``np.load(..., allow_pickle=True)``) of
                per-utterance emission matrices.
        """
        self.decoder = decoder
        self.emissions = emissions

    def generate(self, models, sample, **unused):
        """Decode a batch by stacking its stored emissions.

        ``models`` is ignored; the emissions were dumped beforehand.

        Raises:
            Exception: with message "invalid sizes" (chained to the original
                ValueError) when the per-utterance matrices disagree in shape.
        """
        ids = sample["id"].cpu().numpy()
        try:
            emissions = np.stack(self.emissions[ids])
        except ValueError as err:
            # np.stack raises ValueError on mismatched shapes. Catch only
            # that (the old bare except mislabeled e.g. IndexError too) and
            # chain the cause for debuggability.
            print([x.shape for x in self.emissions[ids]])
            raise Exception("invalid sizes") from err
        emissions = torch.from_numpy(emissions)
        return self.decoder.decode(emissions)
def main(args, task=None, model_state=None):
    """Run ASR inference: load models, decode the eval subset, report WER.

    Depending on flags, either decodes normally, dumps raw emissions
    (--dump-emissions), dumps encoder features (--dump-features), or decodes
    from pre-dumped emissions (--load-emissions).

    Returns:
        (task, wer) — wer is None when dumping emissions/features or when
        the reference length is zero.
    """
    check_args(args)
    use_fp16 = args.fp16
    if args.max_tokens is None and args.batch_size is None:
        args.max_tokens = 4000000
    logger.info(args)
    use_cuda = torch.cuda.is_available() and not args.cpu
    logger.info("| decoding with criterion {}".format(args.criterion))
    task = tasks.setup_task(args)
    # Load ensemble
    if args.load_emissions:
        # No acoustic model needed: emissions are replayed from disk.
        models, criterions = [], []
        task.load_dataset(args.gen_subset)
    else:
        logger.info("| loading model(s) from {}".format(args.path))
        models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
            utils.split_paths(args.path, separator="\\"),
            arg_overrides=ast.literal_eval(args.model_overrides),
            task=task,
            suffix=args.checkpoint_suffix,
            strict=(args.checkpoint_shard_count == 1),
            num_shards=args.checkpoint_shard_count,
            state=model_state,
        )
        optimize_models(args, use_cuda, models)
        task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
    # Set dictionary
    tgt_dict = task.target_dictionary
    logger.info(
        "| {} {} {} examples".format(
            args.data, args.gen_subset, len(task.dataset(args.gen_subset))
        )
    )
    # hack to pass transitions to W2lDecoder
    if args.criterion == "asg_loss":
        raise NotImplementedError("asg_loss is currently not supported")
        # trans = criterions[0].asg.trans.data
        # args.asg_transitions = torch.flatten(trans).tolist()
    # Load dataset (possibly sharded)
    itr = get_dataset_itr(args, task, models)
    # Initialize generator
    gen_timer = StopwatchMeter()
    def build_generator(args):
        """Pick the decoder implementation from the CLI flags."""
        w2l_decoder = getattr(args, "w2l_decoder", None)
        cif_decoder = getattr(args, "cif_decoder", None)
        ctc_decoder = getattr(args, "ctc_decoder", None)
        if w2l_decoder is not None:
            if w2l_decoder == "viterbi":
                from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
                return W2lViterbiDecoder(args, task.target_dictionary)
            elif w2l_decoder == "kenlm":
                from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
                return W2lKenLMDecoder(args, task.target_dictionary)
            elif w2l_decoder == "fairseqlm":
                from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
                return W2lFairseqLMDecoder(args, task.target_dictionary)
            else:
                print(
                    "only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment"
                )
        elif cif_decoder:
            if cif_decoder == "cif":
                from examples.speech_recognition.cif_decoder import CifDecoder
                return CifDecoder(args, task.target_dictionary)
        elif ctc_decoder:
            if ctc_decoder == "ctc":
                from examples.speech_recognition.ctc_decoder import CtcDecoder
                return CtcDecoder(args, task.target_dictionary)
        else:
            raise NotImplementedError("unsupported options.")
    # please do not touch this unless you test both generate.py and infer.py with audio_pretraining task
    generator = build_generator(args)
    if args.load_emissions:
        generator = ExistingEmissionsDecoder(
            generator, np.load(args.load_emissions, allow_pickle=True)
        )
        logger.info("loaded emissions from " + args.load_emissions)
    num_sentences = 0
    if args.results_path is not None and not os.path.exists(args.results_path):
        os.makedirs(args.results_path)
    max_source_pos = (
        utils.resolve_max_positions(
            task.max_positions(), *[model.max_positions() for model in models]
        ),
    )
    # NOTE(review): max_source_pos is unwrapped twice below and never used
    # afterwards; the second `[0] - 1` assumes the resolved value is itself
    # indexable — confirm against utils.resolve_max_positions' return type.
    if max_source_pos is not None:
        max_source_pos = max_source_pos[0]
    if max_source_pos is not None:
        max_source_pos = max_source_pos[0] - 1
    if args.dump_emissions:
        emissions = {}
    if args.dump_features:
        features = {}
        models[0].bert.proj = None
    else:
        res_files = prepare_result_files(args)
    # logger.info("Model Structure: ")
    # logger.info(f"{models[0]}")
    # load hugginface tokenizer
    tokenizer = None
    if args.use_transformers_tokenizer:
        tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_vocab)
    # Running totals across the whole subset.
    errs_t, lengths_t = 0, 0
    sub_errs_t, del_errs_t, ins_errs_t = 0, 0, 0
    with progress_bar.build_progress_bar(args, itr) as t:
        wps_meter = TimeMeter()
        for sample in t:
            sample = utils.move_to_cuda(sample) if use_cuda else sample
            if use_fp16:
                sample = utils.apply_to_sample(apply_half, sample)
            if "net_input" not in sample:
                continue
            prefix_tokens = None
            if args.prefix_size > 0:
                prefix_tokens = sample["target"][:, : args.prefix_size]
            gen_timer.start()
            if args.dump_emissions:
                # Dump normalized emissions per utterance and skip decoding.
                with torch.no_grad():
                    encoder_out = models[0](**sample["net_input"])
                    emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
                    emm = emm.transpose(0, 1).cpu().numpy()
                    for i, id in enumerate(sample["id"]):
                        emissions[id.item()] = emm[i]
                    continue
            elif args.dump_features:
                # Dump raw encoder features (plus padding mask) per utterance.
                with torch.no_grad():
                    encoder_out = models[0](**sample["net_input"])
                    feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
                    for i, id in enumerate(sample["id"]):
                        padding = (
                            encoder_out["encoder_padding_mask"][i].cpu().numpy()
                            if encoder_out["encoder_padding_mask"] is not None
                            else None
                        )
                        features[id.item()] = (feat[i], padding)
                    continue
            hypos = task.inference_step(generator, models, sample, prefix_tokens)
            num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
            gen_timer.stop(num_generated_tokens)
            for i, sample_id in enumerate(sample["id"].tolist()):
                speaker = None
                # id = task.dataset(args.gen_subset).ids[int(sample_id)]
                id = sample_id
                toks = (
                    sample["target"][i, :]
                    if "target_label" not in sample
                    else sample["target_label"][i, :]
                )
                target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
                # Process top predictions
                errs, length, sub_errs, del_errs, ins_errs = process_predictions(
                    args,
                    hypos[i],
                    None,
                    tgt_dict,
                    target_tokens,
                    res_files,
                    speaker,
                    id,
                    tokenizer=tokenizer,
                )
                errs_t += errs
                sub_errs_t += sub_errs
                del_errs_t += del_errs
                ins_errs_t += ins_errs
                lengths_t += length
            wps_meter.update(num_generated_tokens)
            t.log({"wps": round(wps_meter.avg)})
            num_sentences += (
                sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
            )
    wer = None
    if args.dump_emissions:
        # Emissions are saved in sample-id order as one object array.
        emm_arr = []
        for i in range(len(emissions)):
            emm_arr.append(emissions[i])
        np.save(args.dump_emissions, emm_arr)
        logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
    elif args.dump_features:
        feat_arr = []
        for i in range(len(features)):
            feat_arr.append(features[i])
        np.save(args.dump_features, feat_arr)
        logger.info(f"saved {len(features)} emissions to {args.dump_features}")
    else:
        if lengths_t > 0:
            wer = errs_t * 100.0 / lengths_t
            logger.info(f"WER: {wer}, ERRORS: {errs_t}, TOTAL_REF_LEN: {lengths_t}")
        logger.info(
            "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
            "sentences/s, {:.2f} tokens/s)".format(
                num_sentences,
                gen_timer.n,
                gen_timer.sum,
                num_sentences / gen_timer.sum,
                1.0 / gen_timer.avg,
            )
        )
        logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
    return task, wer
def make_parser():
    """Return fairseq's generation parser extended with ASR eval options."""
    return add_asr_eval_argument(options.get_generation_parser())
def cli_main():
    """Console entry point: parse CLI args (with arch) and run inference."""
    main(options.parse_args_and_arch(make_parser()))


if __name__ == "__main__":
    cli_main()
| 18,560 | 32.203936 | 115 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_recognition/w2l_decoder.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Flashlight decoders.
"""
import gc
import itertools as it
import os.path as osp
from typing import List
import warnings
from collections import deque, namedtuple
import numpy as np
import torch
from examples.speech_recognition.data.replabels import unpack_replabels
from fairseq import tasks
from fairseq.utils import apply_to_sample
from omegaconf import open_dict
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
try:
from flashlight.lib.text.dictionary import create_word_dict, load_words
from flashlight.lib.sequence.criterion import CpuViterbiPath, get_data_ptr_as_bytes
from flashlight.lib.text.decoder import (
CriterionType,
LexiconDecoderOptions,
KenLM,
LM,
LMState,
SmearingMode,
Trie,
LexiconDecoder,
)
except:
warnings.warn(
"flashlight python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/flashlight/tree/master/bindings/python"
)
LM = object
LMState = object
class W2lDecoder(object):
    """Base class for flashlight (wav2letter) CTC decoders.

    Handles emission extraction from a fairseq model and the CTC-style token
    post-processing shared by all concrete decoder subclasses.
    """

    def __init__(self, args, tgt_dict):
        self.tgt_dict = tgt_dict
        self.vocab_size = len(tgt_dict)
        self.nbest = args.nbest

        # criterion-specific init
        self.criterion_type = CriterionType.CTC
        # Blank symbol: a dedicated <ctc_blank> if the dictionary has one,
        # otherwise fall back to bos.
        if "<ctc_blank>" in tgt_dict.indices:
            self.blank = tgt_dict.index("<ctc_blank>")
        else:
            self.blank = tgt_dict.bos()
        # Word separator: prefer <sep>, then "|", then eos as a last resort.
        if "<sep>" in tgt_dict.indices:
            self.silence = tgt_dict.index("<sep>")
        elif "|" in tgt_dict.indices:
            self.silence = tgt_dict.index("|")
        else:
            self.silence = tgt_dict.eos()
        self.asg_transitions = None

    def generate(self, models, sample, **unused):
        """Generate a batch of inferences."""
        # model.forward normally channels prev_output_tokens into the decoder
        # separately, but SequenceGenerator directly calls model.encoder
        encoder_input = {
            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
        }
        return self.decode(self.get_emissions(models, encoder_input))

    def get_emissions(self, models, encoder_input):
        """Run encoder and normalize emissions"""
        model = models[0]
        encoder_out = model(**encoder_input)
        if hasattr(model, "get_logits"):
            # no need to normalize raw logits
            emissions = model.get_logits(encoder_out)
        else:
            emissions = model.get_normalized_probs(encoder_out, log_probs=True)
        return emissions.transpose(0, 1).float().cpu().contiguous()

    def get_tokens(self, idxs):
        """Normalize tokens by handling CTC blank, ASG replabels, etc."""
        collapsed = (group[0] for group in it.groupby(idxs))
        return torch.LongTensor([tok for tok in collapsed if tok != self.blank])
class W2lViterbiDecoder(W2lDecoder):
    """Lexicon-free Viterbi (best-path) decoder over raw emissions."""

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)

    def decode(self, emissions):
        """Compute the Viterbi path for every utterance in the batch.

        Args:
            emissions: B x T x N tensor of per-frame token scores.

        Returns:
            A list of B single-hypothesis lists; each hypothesis carries the
            collapsed token ids and a placeholder score of 0.
        """
        B, T, N = emissions.size()
        # (removed an unused `hypos = []` local that was never appended to)
        if self.asg_transitions is None:
            transitions = torch.FloatTensor(N, N).zero_()
        else:
            transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
        viterbi_path = torch.IntTensor(B, T)
        workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
        # flashlight's C++ Viterbi fills viterbi_path in place.
        CpuViterbiPath.compute(
            B,
            T,
            N,
            get_data_ptr_as_bytes(emissions),
            get_data_ptr_as_bytes(transitions),
            get_data_ptr_as_bytes(viterbi_path),
            get_data_ptr_as_bytes(workspace),
        )
        return [
            [{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
            for b in range(B)
        ]
class W2lKenLMDecoder(W2lDecoder):
    """Beam-search decoder with a KenLM language model (flashlight).

    With a lexicon, word spellings are inserted into a trie and a
    LexiconDecoder is used; without one, decoding is lexicon-free and
    requires a unit-level LM.
    """
    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)
        self.unit_lm = getattr(args, "unit_lm", False)
        if args.lexicon:
            self.lexicon = load_words(args.lexicon)
            self.word_dict = create_word_dict(self.lexicon)
            self.unk_word = self.word_dict.get_index("<unk>")
            self.lm = KenLM(args.kenlm_model, self.word_dict)
            self.trie = Trie(self.vocab_size, self.silence)
            # Insert every spelling of every lexicon word into the trie,
            # scored by the LM's unigram score from the start state.
            start_state = self.lm.start(False)
            for i, (word, spellings) in enumerate(self.lexicon.items()):
                word_idx = self.word_dict.get_index(word)
                _, score = self.lm.score(start_state, word_idx)
                for spelling in spellings:
                    spelling_idxs = [tgt_dict.index(token) for token in spelling]
                    assert (
                        tgt_dict.unk() not in spelling_idxs
                    ), f"{spelling} {spelling_idxs}"
                    self.trie.insert(spelling_idxs, word_idx, score)
            self.trie.smear(SmearingMode.MAX)
            self.decoder_opts = LexiconDecoderOptions(
                beam_size=args.beam,
                beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
                beam_threshold=args.beam_threshold,
                lm_weight=args.lm_weight,
                word_score=args.word_score,
                unk_score=args.unk_weight,
                sil_score=args.sil_weight,
                log_add=False,
                criterion_type=self.criterion_type,
            )
            if self.asg_transitions is None:
                N = 768
                # self.asg_transitions = torch.FloatTensor(N, N).zero_()
                self.asg_transitions = []
            self.decoder = LexiconDecoder(
                self.decoder_opts,
                self.trie,
                self.lm,
                self.silence,
                self.blank,
                self.unk_word,
                self.asg_transitions,
                self.unit_lm,
            )
        else:
            # Lexicon-free path: the "words" are the dictionary units
            # themselves, so a unit LM is mandatory.
            assert (
                args.unit_lm
            ), "lexicon free decoding can only be done with a unit language model"
            from flashlight.lib.text.decoder import (
                LexiconFreeDecoder,
                LexiconFreeDecoderOptions,
            )
            d = {w: [[w]] for w in tgt_dict.symbols}
            self.word_dict = create_word_dict(d)
            self.lm = KenLM(args.kenlm_model, self.word_dict)
            self.decoder_opts = LexiconFreeDecoderOptions(
                beam_size=args.beam,
                beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
                beam_threshold=args.beam_threshold,
                lm_weight=args.lm_weight,
                sil_score=args.sil_weight,
                log_add=False,
                criterion_type=self.criterion_type,
            )
            self.decoder = LexiconFreeDecoder(
                self.decoder_opts, self.lm, self.silence, self.blank, []
            )
    def get_timesteps(self, token_idxs: List[int]) -> List[int]:
        """Returns frame numbers corresponding to every non-blank token.
        Parameters
        ----------
        token_idxs : List[int]
            IDs of decoded tokens.
        Returns
        -------
        List[int]
            Frame numbers corresponding to every non-blank token.
        """
        timesteps = []
        for i, token_idx in enumerate(token_idxs):
            if token_idx == self.blank:
                continue
            # Record the frame where a (possibly repeated) token first fires.
            if i == 0 or token_idx != token_idxs[i - 1]:
                timesteps.append(i)
        return timesteps
    def decode(self, emissions):
        """Run the flashlight beam search per utterance and keep the n-best."""
        B, T, N = emissions.size()
        hypos = []
        for b in range(B):
            # Raw float pointer into row b (4 bytes per float32 element).
            emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
            results = self.decoder.decode(emissions_ptr, T, N)
            nbest_results = results[: self.nbest]
            hypos.append(
                [
                    {
                        "tokens": self.get_tokens(result.tokens),
                        "score": result.score,
                        "timesteps": self.get_timesteps(result.tokens),
                        "words": [
                            self.word_dict.get_entry(x) for x in result.words if x >= 0
                        ],
                    }
                    for result in nbest_results
                ]
            )
        return hypos
# Cached per-LMState entry: token-id prefix (numpy array), optional decoder
# incremental state, and next-token log-prob distribution (None once trimmed
# from the cache; recomputed lazily on the next score() call).
FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"])
class FairseqLM(LM):
    """Flashlight-compatible LM wrapper around a fairseq neural language model.

    Each LMState maps to a cached FairseqLMState (prefix, optional incremental
    state, next-token log-probs).  The cache is bounded by ``max_cache``:
    evicted entries keep only their prefix and are recomputed on demand.
    NOTE(review): the model is moved to CUDA unconditionally — a GPU is required.
    """
    def __init__(self, dictionary, model):
        LM.__init__(self)
        self.dictionary = dictionary
        self.model = model
        self.unk = self.dictionary.unk()
        self.save_incremental = False  # this currently does not work properly
        self.max_cache = 20_000
        # Inference-only setup on GPU.
        model.cuda()
        model.eval()
        model.make_generation_fast_()
        self.states = {}
        self.stateq = deque()
    def start(self, start_with_nothing):
        """Create the initial LM state (prefix = <eos>) and cache its next-token probs."""
        state = LMState()
        prefix = torch.LongTensor([[self.dictionary.eos()]])
        incremental_state = {} if self.save_incremental else None
        with torch.no_grad():
            res = self.model(prefix.cuda(), incremental_state=incremental_state)
            probs = self.model.get_normalized_probs(res, log_probs=True, sample=None)
        if incremental_state is not None:
            # Park the incremental state on CPU while cached.
            incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state)
        self.states[state] = FairseqLMState(
            prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
        )
        self.stateq.append(state)
        return state
    def score(self, state: LMState, token_index: int, no_cache: bool = False):
        """
        Evaluate language model based on the current lm state and new word
        Parameters:
        -----------
        state: current lm state
        token_index: index of the word
        (can be lexicon index then you should store inside LM the
        mapping between indices of lexicon and lm, or lm index of a word)
        no_cache: if True, do not store recomputed probs or the child state
        Returns:
        --------
        (LMState, float): pair of (new state, score for the current word)
        """
        curr_state = self.states[state]
        def trim_cache(targ_size):
            # Evict the oldest entries down to targ_size, keeping only their
            # prefixes; probs/incremental state are recomputed when needed.
            while len(self.stateq) > targ_size:
                rem_k = self.stateq.popleft()
                rem_st = self.states[rem_k]
                rem_st = FairseqLMState(rem_st.prefix, None, None)
                self.states[rem_k] = rem_st
        if curr_state.probs is None:
            # Cache miss (entry was trimmed): re-run the model on the full prefix.
            new_incremental_state = (
                curr_state.incremental_state.copy()
                if curr_state.incremental_state is not None
                else None
            )
            with torch.no_grad():
                if new_incremental_state is not None:
                    new_incremental_state = apply_to_sample(
                        lambda x: x.cuda(), new_incremental_state
                    )
                elif self.save_incremental:
                    new_incremental_state = {}
                res = self.model(
                    torch.from_numpy(curr_state.prefix).cuda(),
                    incremental_state=new_incremental_state,
                )
                probs = self.model.get_normalized_probs(
                    res, log_probs=True, sample=None
                )
                if new_incremental_state is not None:
                    new_incremental_state = apply_to_sample(
                        lambda x: x.cpu(), new_incremental_state
                    )
                curr_state = FairseqLMState(
                    curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy()
                )
            if not no_cache:
                self.states[state] = curr_state
                self.stateq.append(state)
        score = curr_state.probs[token_index].item()
        trim_cache(self.max_cache)
        outstate = state.child(token_index)
        if outstate not in self.states and not no_cache:
            # Register the child lazily: probs stay None until it is scored.
            prefix = np.concatenate(
                [curr_state.prefix, torch.LongTensor([[token_index]])], -1
            )
            incr_state = curr_state.incremental_state
            self.states[outstate] = FairseqLMState(prefix, incr_state, None)
        if token_index == self.unk:
            # Never let the beam follow <unk>.
            score = float("-inf")
        return outstate, score
    def finish(self, state: LMState):
        """
        Evaluate eos for language model based on the current lm state
        Returns:
        --------
        (LMState, float): pair of (new state, score for the current word)
        """
        return self.score(state, self.dictionary.eos())
    def empty_cache(self):
        # Drop all cached states and force a GC pass to release GPU/host memory.
        self.states = {}
        self.stateq = deque()
        gc.collect()
class W2lFairseqLMDecoder(W2lDecoder):
    """Beam-search decoder fusing acoustic emissions with a fairseq neural LM.

    NOTE: despite the ``kenlm_model`` argument name, the path must point to a
    fairseq LM checkpoint here.  With a lexicon the search is constrained by a
    trie of word spellings; otherwise a unit language model is required.
    """

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)

        self.unit_lm = getattr(args, "unit_lm", False)

        self.lexicon = load_words(args.lexicon) if args.lexicon else None
        self.idx_to_wrd = {}

        checkpoint = torch.load(args.kenlm_model, map_location="cpu")

        if "cfg" in checkpoint and checkpoint["cfg"] is not None:
            lm_args = checkpoint["cfg"]
        else:
            # Legacy checkpoints store an argparse Namespace under "args".
            lm_args = convert_namespace_to_omegaconf(checkpoint["args"])

        with open_dict(lm_args.task):
            lm_args.task.data = osp.dirname(args.kenlm_model)

        task = tasks.setup_task(lm_args.task)
        model = task.build_model(lm_args.model)
        model.load_state_dict(checkpoint["model"], strict=False)

        self.trie = Trie(self.vocab_size, self.silence)

        self.word_dict = task.dictionary
        self.unk_word = self.word_dict.unk()
        self.lm = FairseqLM(self.word_dict, model)

        if self.lexicon:
            # Build the spelling trie.  With a unit LM, words are scored later
            # by the LM itself, so trie scores are 0 and word ids are local.
            start_state = self.lm.start(False)
            for i, (word, spellings) in enumerate(self.lexicon.items()):
                if self.unit_lm:
                    word_idx = i
                    self.idx_to_wrd[i] = word
                    score = 0
                else:
                    word_idx = self.word_dict.index(word)
                    _, score = self.lm.score(start_state, word_idx, no_cache=True)

                for spelling in spellings:
                    spelling_idxs = [tgt_dict.index(token) for token in spelling]
                    assert (
                        tgt_dict.unk() not in spelling_idxs
                    ), f"{spelling} {spelling_idxs}"
                    self.trie.insert(spelling_idxs, word_idx, score)
            self.trie.smear(SmearingMode.MAX)

            self.decoder_opts = LexiconDecoderOptions(
                beam_size=args.beam,
                beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
                beam_threshold=args.beam_threshold,
                lm_weight=args.lm_weight,
                word_score=args.word_score,
                unk_score=args.unk_weight,
                sil_score=args.sil_weight,
                log_add=False,
                criterion_type=self.criterion_type,
            )
            self.decoder = LexiconDecoder(
                self.decoder_opts,
                self.trie,
                self.lm,
                self.silence,
                self.blank,
                self.unk_word,
                [],
                self.unit_lm,
            )
        else:
            assert (
                args.unit_lm
            ), "lexicon free decoding can only be done with a unit language model"
            from flashlight.lib.text.decoder import (
                LexiconFreeDecoder,
                LexiconFreeDecoderOptions,
            )

            d = {w: [[w]] for w in tgt_dict.symbols}
            self.word_dict = create_word_dict(d)
            self.lm = KenLM(args.kenlm_model, self.word_dict)
            self.decoder_opts = LexiconFreeDecoderOptions(
                beam_size=args.beam,
                beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
                beam_threshold=args.beam_threshold,
                lm_weight=args.lm_weight,
                sil_score=args.sil_weight,
                log_add=False,
                criterion_type=self.criterion_type,
            )
            self.decoder = LexiconFreeDecoder(
                self.decoder_opts, self.lm, self.silence, self.blank, []
            )

    def decode(self, emissions):
        """Run beam search over a batch of emissions; return per-sample n-best lists."""
        B, T, N = emissions.size()
        hypos = []

        def idx_to_word(idx):
            # Unit-LM word ids are local trie indices, not dictionary indices.
            if self.unit_lm:
                return self.idx_to_wrd[idx]
            else:
                return self.word_dict[idx]

        def make_hypo(result):
            hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score}
            if self.lexicon:
                hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0]
            return hypo

        for b in range(B):
            # The flashlight decoder takes a raw byte pointer.  Use
            # element_size() rather than a hard-coded 4 (float32) so that
            # half/double precision emissions are addressed correctly too.
            emissions_ptr = (
                emissions.data_ptr() + emissions.element_size() * b * emissions.stride(0)
            )
            results = self.decoder.decode(emissions_ptr, T, N)

            nbest_results = results[: self.nbest]
            hypos.append([make_hypo(result) for result in nbest_results])
            # The neural-LM state cache grows per utterance; clear it between samples.
            self.lm.empty_cache()

        return hypos
| 17,561 | 34.336016 | 171 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_recognition/cif_decoder.py | # @Time : 2021/7/14
# @Author : Minglun Han
# @File : cif_decoder.py
"""
Update:
By 2022/06/19
1. support LM decoding with language model by shallow fusion;
"""
import os
import sys
import torch
import logging
import numpy as np
import itertools as it
from torch import Tensor
import torch.nn.functional as F
from typing import Dict, Tuple, List, Optional
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
# Debugging aid: print full (untruncated) numpy arrays and torch tensors.
np.set_printoptions(threshold=10000000)
torch.set_printoptions(profile="full")
class CifDecoder(object):
def __init__(self, args, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = args.nbest
self.beam = args.beam
self.tail_handling_firing_threshold = args.tail_handling_firing_threshold
# Obtain ids of special tokens
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
self.bos = tgt_dict.bos()
self.eos = tgt_dict.eos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.cif_decoder_mode = args.cif_decoder_mode
self.use_nnlm = args.use_nnlm
self.fetch_nnlm_from = args.fetch_nnlm_from
self.lm_weight = args.lm_weight
self.specified_dict_path = args.specified_dict_path
# Load language model
self.lm_decoder = None
if self.use_nnlm:
logging.info("load language model from %s" % self.fetch_nnlm_from)
state = checkpoint_utils.load_checkpoint_to_cpu(self.fetch_nnlm_from)
# build task
cfg = None
if "args" in state and state["args"] is not None:
cfg = convert_namespace_to_omegaconf(state["args"])
elif "cfg" in state and state["cfg"] is not None:
cfg = state["cfg"]
assert cfg is not None, "Configuration is None"
cfg.task.data = self.specified_dict_path
task = tasks.setup_task(cfg.task)
if "task_state" in state:
task.load_state_dict(state["task_state"])
# build model & load model parameters
model = task.build_model(cfg.model)
model.load_state_dict(
state["model"],
strict=True,
model_cfg=cfg.model,
)
if args.fp16:
model.half()
model.cuda()
model.eval()
# register language model
self.lm_decoder = model
# # Check: inspect LM loading process and LM model
# logging.info(" Checking language model ...... ")
# dummy_inputs = torch.tensor(
# [[2,38,817,72,220,80,594,168,
# 29,19,17,42,146,518,436]]
# ).cuda() # For validation
# # dummy_inputs = torch.tensor(
# # [[2, 320, 1018, 1090, 553]]
# # ).cuda() # For training
# dummy_lm_logits, _ = self.lm_decoder(src_tokens=dummy_inputs)
# dummy_preds = dummy_lm_logits.max(-1).indices
# dummy_logprobs = utils.log_softmax(
# dummy_lm_logits.float(), dim=-1)
# nonmean_dummy_nll_loss = F.nll_loss(
# dummy_logprobs[0], dummy_inputs[0],
# ignore_index=self.pad, reduction="none")
# dummy_nll_loss = F.nll_loss(
# dummy_logprobs[0], dummy_inputs[0],
# ignore_index=self.pad, reduction="mean")
# logging.info(f"dummy_inputs: {dummy_inputs[0, 1:]}")
# logging.info(f"dummy_preds: {dummy_preds[0]}")
# logging.info(f"dummy_nll_loss: {dummy_nll_loss}")
# logging.info(f"nonmean_dummy_nll_loss: {nonmean_dummy_nll_loss}")
# logging.info(f"Language model inspection is done.")
if self.beam == 1:
if self.cif_decoder_mode == "ar":
logging.info("employ ar greedy decoder")
self.decode = self.ar_batch_greedy_decode
elif self.cif_decoder_mode == "fast_ar":
logging.info("employ ar fast greedy decoder")
self.decode = self.ar_fast_batch_greedy_decode
else:
logging.info("employ nar greedy decoder")
# self.decode = self.nar_batch_greedy_decode
self.decode = self.nar_batch_parallel_greedy_decode
# Parallel Greedy Decoding which is better for NAR decoder
else:
if self.cif_decoder_mode == "ar":
logging.info("employ ar beam decoder")
self.decode = self.ar_batch_beam_decode
elif self.cif_decoder_mode == "fast_ar":
logging.info("employ ar fast beam decoder")
self.decode = self.ar_fast_batch_beam_decode
else:
logging.info("employ nar beam decoder")
self.decode = self.nar_batch_beam_decode
def generate(self, models, sample, **kwargs):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
# Prepare model inputs
model_inputs = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
} # remove prev_output_tokens
# Forward encoder and cif
if self.tail_handling_firing_threshold:
models[
0
].encoder.cif.tail_handling_firing_threshold = (
self.tail_handling_firing_threshold
)
cif_outputs = models[0].get_cif_output(
src_tokens=model_inputs["src_tokens"],
src_lengths=model_inputs["src_lengths"],
target_lengths=sample["target_lengths"],
)
# Decode
beam_results, beam_scores, out_seqlens = self.decode(models[0], cif_outputs)
# Truncate at <eos>
tmp_beam_results = []
bsz, beam_size, max_len = beam_results.size()
beam_results = beam_results.view((bsz * beam_size), -1) # (B * beam_size) x T
for n in range(bsz):
cur_res = beam_results[n] # T
eos_inds = (cur_res == 2).nonzero()
if len(eos_inds) > 0:
cur_max_valid_len = eos_inds[0][0]
else:
cur_max_valid_len = max_len
cur_res = cur_res[:cur_max_valid_len]
pad_len = max_len - cur_max_valid_len
cur_res = torch.cat(
[cur_res, torch.tensor([self.pad for _ in range(pad_len)]).cuda()],
dim=0,
)
tmp_beam_results.append(cur_res.unsqueeze(0))
beam_results = torch.cat(tmp_beam_results, dim=0).view(bsz, beam_size, -1)
return self.generate_hypos(
beam_results=beam_results,
beam_scores=beam_scores,
out_seqlens=out_seqlens,
)
def generate_hypos(self, beam_results, beam_scores, out_seqlens):
hypos = []
for beam_result, scores, lengths in zip(beam_results, beam_scores, out_seqlens):
# beam_ids: beam x id; score: beam; length: beam
top = []
for result, score, length in zip(beam_result, scores, lengths):
top.append({"tokens": self.get_tokens(result[:length]), "score": score})
hypos.append(top)
return hypos
def get_tokens(self, idxs):
"""Normalize tokens by handling CTC blank, ASG replabels, etc."""
# Remove blank id and eos id
# idxs = (g[0] for g in it.groupby(idxs)) # remove repetition
idxs = filter(lambda x: x != self.blank, idxs)
idxs = filter(lambda x: x != self.eos, idxs)
idxs = filter(lambda x: x != self.pad, idxs)
return torch.LongTensor(list(idxs))
    def ar_batch_greedy_decode(self, model, cif_outputs):
        """Step-by-step greedy decoding for the autoregressive decoder.

        At each step t the decoder is re-run on the full prefix (no KV cache;
        see ``ar_fast_batch_greedy_decode`` for the cached variant) and the
        argmax token is appended.  Requires CUDA (tensors are created with
        ``.cuda()``).

        :param model: the model in usage
        :param cif_outputs: the outputs of cif module
        :return: (prev_tokens B x 1 x T, scores B x 1, out_seqlens B x 1)
        """
        # Get Cif outputs
        cif_out = cif_outputs["cif_out"]
        cif_out_padding_mask = cif_outputs["cif_out_padding_mask"]
        raw_encoder_out = cif_outputs["encoder_out"]
        raw_encoder_padding_mask = cif_outputs["encoder_padding_mask"]
        # Get the maximum length of decoding steps
        batch_size, max_decode_length, _ = cif_out.size()
        out_seqlens = cif_out_padding_mask.sum(-1)  # B
        # Initialize previous decoded tokens
        prev_tokens = torch.ones([batch_size, 1]).long().cuda() * self.eos
        # B x 1, use <eos> as the beginning of sentence (<bos>)
        scores = torch.ones([batch_size]).cuda()  # B
        for step_i in range(max_decode_length):
            # Conduct forward of current step t: feed only the first t+1 fired
            # CIF embeddings to the decoder.
            cur_step_cif_outputs = cif_out[:, : (step_i + 1), :]  # B x t x C
            cur_step_cif_out_padding_mask = cif_out_padding_mask[
                :, : (step_i + 1)
            ]  # B x t
            cur_step_cif_out = {
                "cif_out": cur_step_cif_outputs,
                "cif_out_padding_mask": cur_step_cif_out_padding_mask,
                "ctxt_cif_out": None,
                "raw_encoder_out": raw_encoder_out,
                "raw_encoder_padding_mask": raw_encoder_padding_mask,
            }
            # Get decoder outputs of current step
            decoder_output_i, extra_outputs, _ = model.step_forward_decoder(
                prev_decoded_tokens=prev_tokens, cif_outputs=cur_step_cif_out
            )
            # Update previous decoded tokens & scores
            decoder_output_i = model.get_probs_from_logits(
                decoder_output_i[:, -1, :], log_probs=False
            )
            latest_token = torch.argmax(decoder_output_i, dim=-1).unsqueeze(
                dim=-1
            )  # shape = B x 1
            prev_tokens = torch.cat([prev_tokens, latest_token], dim=-1)
            # Sequence score is the running product of per-step max probs.
            max_prob_of_last_step = decoder_output_i.max(-1)[0]  # shape = B
            scores = scores * max_prob_of_last_step
        # Reform outputs: add a singleton beam dim and drop the leading <eos>.
        prev_tokens = torch.unsqueeze(prev_tokens, dim=1)[:, :, 1:]  # B x 1 x T
        out_seqlens = torch.unsqueeze(out_seqlens, dim=1)  # B x 1
        scores = torch.unsqueeze(scores, dim=-1)  # B x 1
        return prev_tokens, scores, out_seqlens
    def ar_fast_batch_greedy_decode(self, model, cif_outputs):
        """Greedy decoding with decoder incremental state (cached history).

        Same search as ``ar_batch_greedy_decode``, but past decoder states are
        buffered in ``incremental_state`` so each step only computes the newest
        position.  Requires CUDA.

        :param model: the model in usage
        :param cif_outputs: the outputs of cif module
        :return: (prev_tokens B x 1 x T, scores B x 1, out_seqlens B x 1)
        """
        # Get Cif outputs
        cif_out = cif_outputs["cif_out"]
        cif_out_padding_mask = cif_outputs["cif_out_padding_mask"]
        raw_encoder_out = cif_outputs["encoder_out"]
        raw_encoder_padding_mask = cif_outputs["encoder_padding_mask"]
        # Get the maximum length of decoding steps
        batch_size, max_decode_length, _ = cif_out.size()
        out_seqlens = cif_out_padding_mask.sum(-1)  # B
        # Initialize incremental states for fast decoding
        incremental_state = torch.jit.annotate(
            Dict[str, Dict[str, Optional[Tensor]]], {}
        )
        # incremental_states is a dictionary of dictionaries of tensors
        # Initialize previous decoded tokens
        prev_tokens = torch.ones([batch_size, 1]).long().cuda() * self.eos
        # B x 1, use <eos> as the beginning of sentence (<bos>)
        scores = torch.ones([batch_size]).cuda()  # B
        for step_i in range(max_decode_length):
            # Forward decoder on the first t+1 fired CIF embeddings
            cur_step_cif_outputs = cif_out[:, : (step_i + 1), :]  # B x t x C
            cur_step_cif_out_padding_mask = cif_out_padding_mask[
                :, : (step_i + 1)
            ]  # B x t
            cur_step_cif_out = {
                "cif_out": cur_step_cif_outputs,
                "cif_out_padding_mask": cur_step_cif_out_padding_mask,
                "ctxt_cif_out": None,
                "raw_encoder_out": raw_encoder_out,
                "raw_encoder_padding_mask": raw_encoder_padding_mask,
            }
            # Get decoder outputs of current step
            decoder_output_i, _, _ = model.step_forward_decoder(
                prev_decoded_tokens=prev_tokens,
                cif_outputs=cur_step_cif_out,
                incremental_state=incremental_state,
            )
            # This is different from normal decoding process,
            # because the historical states are put into buffer
            # Update previous decoded tokens
            decoder_output_i = model.get_probs_from_logits(
                decoder_output_i[:, -1, :], log_probs=False
            )
            latest_token = torch.argmax(decoder_output_i, dim=-1).unsqueeze(
                dim=-1
            )  # B x 1
            prev_tokens = torch.cat([prev_tokens, latest_token], dim=-1)
            # Sequence score is the running product of per-step max probs.
            max_prob_of_last_step = decoder_output_i.max(-1)[0]  # B
            scores = scores * max_prob_of_last_step
        # Reform outputs: add a singleton beam dim and drop the leading <eos>.
        prev_tokens = torch.unsqueeze(prev_tokens, dim=1)[:, :, 1:]  # B x 1 x T
        out_seqlens = torch.unsqueeze(out_seqlens, dim=1)  # B x 1
        scores = torch.unsqueeze(scores, dim=-1)  # B x 1
        return prev_tokens, scores, out_seqlens
def ar_batch_beam_decode(self, model, cif_outputs):
"""
:param model: the model in usage
:param cif_outputs: the outputs of cif module
:return: prev_tokens, out_seqlens, scores
"""
cif_out = cif_outputs["cif_out"] # B x T x C
cif_out_padding_mask = cif_outputs["cif_out_padding_mask"] # B x T
raw_encoder_out = None
raw_encoder_padding_mask = None
# Get the maximum length of decoding steps
batch_size, max_decode_length, cif_out_dim = cif_out.size() # B x T x C
out_seqlens = cif_out_padding_mask.sum(-1) # B
# Initialize all needed variables
cif_out = torch.unsqueeze(cif_out, dim=1).repeat(
1, self.beam, 1, 1
) # B x beam_size x T x C
prev_tokens = (
torch.ones([batch_size, self.beam, 1]).long().cuda() * self.eos
) # B x beam_size x 1
scores = torch.zeros([batch_size, self.beam]).float().cuda() # B x beam_size
cif_out_padding_mask = torch.unsqueeze(cif_out_padding_mask, dim=1).repeat(
[1, self.beam, 1]
)
# B x beam_size x T
cif_out = cif_out.view(
[batch_size * self.beam, max_decode_length, cif_out_dim]
) # (B * beam_size) x T x C
prev_tokens = prev_tokens.view(
[batch_size * self.beam, 1]
) # (B * beam_size) x 1
scores = scores.view([batch_size * self.beam]) # (B * beam_size)
cif_out_padding_mask = cif_out_padding_mask.view(
[batch_size * self.beam, max_decode_length]
) # (B * beam_size) x T
if not model.decoder.no_encoder_attn:
raw_encoder_out = cif_outputs["encoder_out"] # T x B x C
raw_encoder_padding_mask = cif_outputs["encoder_padding_mask"] # B x T
max_raw_out_length, _, raw_out_dim = raw_encoder_out.size()
raw_encoder_out = (
raw_encoder_out.transpose(0, 1)
.unsqueeze(dim=1)
.repeat(1, self.beam, 1, 1)
.view(batch_size * self.beam, max_raw_out_length, raw_out_dim)
.transpose(0, 1)
) # T x (B x beam_size) x C
raw_encoder_padding_mask = (
raw_encoder_padding_mask.unsqueeze(dim=1)
.repeat(1, self.beam, 1)
.view(batch_size * self.beam, max_raw_out_length)
) # (B * beam_size) x T
for step_i in range(1, max_decode_length + 1):
# Get cif outputs of current step
cur_step_cif_outputs = cif_out[:, :step_i, :] # (B * beam_size) x t x C
cur_step_cif_out_padding_mask = cif_out_padding_mask[
:, :step_i
] # (B * beam_size) x t
cur_step_cif_out = {
"cif_out": cur_step_cif_outputs,
"cif_out_padding_mask": cur_step_cif_out_padding_mask,
"ctxt_cif_out": None,
"raw_encoder_out": raw_encoder_out,
"raw_encoder_padding_mask": raw_encoder_padding_mask,
}
# Get decoder outputs at step_i
decoder_output_i, extra_outputs, _ = model.step_forward_decoder(
prev_decoded_tokens=prev_tokens, # (B x beam_size) x t
cif_outputs=cur_step_cif_out,
# cif_out: (B * beam_size) x t x C, cif_out_padding_mask: (B * beam_size) x t
) # decoder_output_i has shape [(B * beam_size), t, V]
cur_decoder_output = model.get_probs_from_logits(
decoder_output_i[:, -1, :], log_probs=True
) # [B * beam_size, V]
tmp_scores = scores # Backup scores, with shape [B * beam_size]
scores = scores.unsqueeze(dim=-1).repeat(
[1, self.vocab_size]
) # [B * beam_size, V]
cur_score = cur_decoder_output
# cur_score, with shape [(B x beam_size) x V]
updated_scores = (scores + cur_score).view(
[batch_size, self.beam * self.vocab_size]
) # converted from shape [B * beam_size, V] to [B, beam_size * V]
# Handle the first timestep with special operation
if step_i == 1:
# For the first step, due to the same input token, only consider one beam.
topk_scores, topk_indices = torch.topk(
updated_scores.view([batch_size, self.beam, self.vocab_size])[
:, 0, :
],
k=self.beam,
dim=-1,
)
beam_indices = (
torch.zeros(batch_size, self.beam).long().cuda()
) # [B, beam_size] with all zero elements
fixed_topk_indices = topk_indices # [B, beam_size]
else:
# For all the other beams, due to their inputs are varying, consider all beams.
topk_scores, topk_indices = torch.topk(
updated_scores, k=self.beam, dim=-1
) # topk_scores shape [B, beam_size], topk_indices shape [B, beam_size]
# beam_indices = \
# torch.div(topk_indices, self.vocab_size, rounding_mode='floor') # [B, beam_size]
beam_indices = topk_indices // vocab_size
fixed_topk_indices = topk_indices % self.vocab_size # [B, beam_size]
# Update previous decoded tokens and scores
prev_tokens = prev_tokens.view(
[batch_size, self.beam, -1]
) # [B, beam_size, t]
tmp_scores = tmp_scores.view(
[batch_size, self.beam]
) # previous scores, with shape [B, beam_size]
prev_token_tmp_list = []
scores_tmp_list = []
for n in range(batch_size): # n ranges from 0 to (batch_size - 1)
# Get the max length of current sample
cur_output_maxlen = out_seqlens[n]
# If some sample's decode length is smaller than current step id, keep its score and decoded results
if step_i > cur_output_maxlen:
cur_scores = tmp_scores[n, :] # beam_size
cur_prev_tokens = prev_tokens[n, :, :] # beam_size x t
else:
cur_scores = topk_scores[n, :] # beam_size
cur_prev_tokens = prev_tokens[n, :, :] # beam_size x t
cur_beam_indices = beam_indices[n, :] # beam_size
# Get reformed previous tokens
cur_prev_tokens = torch.index_select(
cur_prev_tokens, dim=0, index=cur_beam_indices
) # beam_size x t
scores_tmp_list.append(cur_scores.unsqueeze(dim=0))
prev_token_tmp_list.append(cur_prev_tokens.unsqueeze(dim=0))
fixed_prev_tokens = torch.cat(prev_token_tmp_list, dim=0)
fixed_topk_indices = torch.where(
step_i <= out_seqlens.unsqueeze(dim=-1).repeat([1, self.beam]),
fixed_topk_indices, # B x beam_size
torch.ones_like(fixed_topk_indices).cuda() * self.pad,
) # Mask locations that outnumber cif max length using <pad>
fixed_topk_indices = fixed_topk_indices.unsqueeze(
dim=-1
) # [B, beam_size, 1]
prev_tokens = torch.cat(
[fixed_prev_tokens, fixed_topk_indices], dim=-1
).view(
[batch_size * self.beam, -1]
) # [B * beam_size, t + 1]
scores = torch.cat(scores_tmp_list, dim=0).view(
[batch_size * self.beam]
) # [B * beam_size]
scores = scores.view([batch_size, self.beam])[:, : self.nbest] # B x beam_size
prev_tokens = prev_tokens.view([batch_size, self.beam, -1])[
:, : self.nbest, 1:
] # B x beam_size x T
out_seqlens = torch.unsqueeze(out_seqlens, dim=-1).repeat(1, self.beam)[
:, : self.nbest
] # B x beam_size
return prev_tokens, scores, out_seqlens
    def ar_fast_batch_beam_decode(self, model, cif_outputs):
        """Batched beam search with decoder incremental state and optional
        shallow fusion with a neural LM (``self.lm_decoder``).

        Cached decoder/LM states are reordered at each step to follow the
        surviving beams.  Requires CUDA.

        :param model: the model in usage
        :param cif_outputs: the outputs of cif module
        :return: (prev_tokens B x nbest x T, scores B x nbest, out_seqlens B x nbest)
        """
        cif_out = cif_outputs["cif_out"]  # B x T x C
        cif_out_padding_mask = cif_outputs["cif_out_padding_mask"]  # B x T
        raw_encoder_out = None
        raw_encoder_padding_mask = None
        # Get the maximum length of decoding steps
        batch_size, max_decode_length, cif_out_dim = cif_out.size()  # B x T x C
        out_seqlens = cif_out_padding_mask.sum(-1)  # B
        # Initialize all needed variables (tile every beam with the same CIF output)
        cif_out = torch.unsqueeze(cif_out, dim=1).repeat(
            1, self.beam, 1, 1
        )  # B x beam_size x T x C
        prev_tokens = (
            torch.ones([batch_size, self.beam, 1]).long().cuda() * self.eos
        )  # B x beam_size x 1
        scores = torch.zeros([batch_size, self.beam]).float().cuda()  # B x beam_size
        cif_out_padding_mask = torch.unsqueeze(cif_out_padding_mask, dim=1).repeat(
            [1, self.beam, 1]
        )
        # B x beam_size x T
        cif_out = cif_out.view(
            [batch_size * self.beam, max_decode_length, cif_out_dim]
        )  # (B * beam_size) x T x C
        prev_tokens = prev_tokens.view(
            [batch_size * self.beam, 1]
        )  # (B * beam_size) x 1
        scores = scores.view([batch_size * self.beam])  # (B * beam_size)
        cif_out_padding_mask = cif_out_padding_mask.view(
            [batch_size * self.beam, max_decode_length]
        )  # (B * beam_size) x T
        if not model.decoder.no_encoder_attn:
            # Tile the raw encoder states across beams for cross-attention.
            raw_encoder_out = cif_outputs["encoder_out"]  # T x B x C
            raw_encoder_padding_mask = cif_outputs["encoder_padding_mask"]  # B x T
            max_raw_out_length, _, raw_out_dim = raw_encoder_out.size()
            raw_encoder_out = (
                raw_encoder_out.transpose(0, 1)
                .unsqueeze(dim=1)
                .repeat(1, self.beam, 1, 1)
                .view(batch_size * self.beam, max_raw_out_length, raw_out_dim)
                .transpose(0, 1)
            )  # T x (B x beam_size) x C
            raw_encoder_padding_mask = (
                raw_encoder_padding_mask.unsqueeze(dim=1)
                .repeat(1, self.beam, 1)
                .view(batch_size * self.beam, max_raw_out_length)
            )  # (B * beam_size) x T
        # Initialize incremental states for fast decoding
        reorder_state = None
        lm_reorder_state = None
        incremental_state = torch.jit.annotate(
            Dict[str, Dict[str, Optional[Tensor]]], {}
        )
        lm_incremental_state = torch.jit.annotate(
            Dict[str, Dict[str, Optional[Tensor]]], {}
        )
        # incremental_states is a dictionary of dictionaries of tensors
        for step_i in range(1, max_decode_length + 1):
            # Reorder decoder internal states so caches follow surviving beams
            if reorder_state is not None:
                model.decoder.reorder_incremental_state_scripting(
                    incremental_state, reorder_state
                )
            if self.use_nnlm and lm_reorder_state is not None:
                self.lm_decoder.decoder.reorder_incremental_state_scripting(
                    lm_incremental_state, lm_reorder_state
                )
            # Get cif outputs of current step
            cur_step_cif_outputs = cif_out[:, :step_i, :]  # (B * beam_size) x t x C
            cur_step_cif_out_padding_mask = cif_out_padding_mask[
                :, :step_i
            ]  # (B * beam_size) x t
            cur_step_cif_out = {
                "cif_out": cur_step_cif_outputs,
                "cif_out_padding_mask": cur_step_cif_out_padding_mask,
                "ctxt_cif_out": None,
                "raw_encoder_out": raw_encoder_out,
                "raw_encoder_padding_mask": raw_encoder_padding_mask,
            }
            # Get decoder outputs at step_i
            decoder_output_i, extra_outputs, _ = model.step_forward_decoder(
                prev_decoded_tokens=prev_tokens,
                cif_outputs=cur_step_cif_out,
                incremental_state=incremental_state,
            )
            cur_decoder_output = model.get_probs_from_logits(
                decoder_output_i[:, -1, :], log_probs=True
            )  # [B * beam_size, V]
            tmp_scores = scores  # Backup scores, with shape [B * beam_size]
            scores = scores.unsqueeze(dim=-1).repeat(
                [1, self.vocab_size]
            )  # [B * beam_size, V]
            # Forward language model
            cur_lm_decoder_output = None
            if self.use_nnlm and self.lm_decoder is not None:
                lm_decoder_output_i, _ = self.lm_decoder(
                    src_tokens=prev_tokens,
                    incremental_state=lm_incremental_state,
                )
                cur_lm_decoder_output = model.get_probs_from_logits(
                    lm_decoder_output_i[:, -1, :],
                    log_probs=True,
                )  # [B * beam_size, V]
            # Update scores (shallow fusion: AM log-prob + lm_weight * LM log-prob)
            if self.use_nnlm:
                cur_score = cur_decoder_output + self.lm_weight * cur_lm_decoder_output
            else:
                cur_score = cur_decoder_output
            # cur_score, with shape [(B x beam_size) x V]
            updated_scores = (scores + cur_score).view(
                [batch_size, self.beam * self.vocab_size]
            )  # converted from shape [B * beam_size, V] to [B, beam_size * V]
            # Handle the first timestep with special operation
            if step_i == 1:
                # For the first step, due to the same input token, only consider one beam.
                topk_scores, topk_indices = torch.topk(
                    updated_scores.view([batch_size, self.beam, self.vocab_size])[
                        :, 0, :
                    ],
                    k=self.beam,
                    dim=-1,
                )
                beam_indices = (
                    torch.zeros(batch_size, self.beam).long().cuda()
                )  # [B, beam_size] with all zero elements
                fixed_topk_indices = topk_indices  # [B, beam_size]
            else:
                # For all the other steps, due to their inputs are varying, consider all beams.
                topk_scores, topk_indices = torch.topk(
                    updated_scores, k=self.beam, dim=-1
                )  # topk_scores shape [B, beam_size], topk_indices shape [B, beam_size]
                beam_indices = topk_indices // self.vocab_size
                fixed_topk_indices = topk_indices % self.vocab_size  # [B, beam_size]
                # Flat indices into (B * beam_size) rows for cache reordering.
                stage_index = torch.arange(batch_size) * self.beam
                cand_indices = beam_indices + stage_index.unsqueeze(-1).cuda()
                reorder_state = cand_indices.view(batch_size * self.beam)
                lm_reorder_state = reorder_state
            # Update previous decoded tokens and scores
            prev_tokens = prev_tokens.view(
                [batch_size, self.beam, -1]
            )  # [B, beam_size, t]
            tmp_scores = tmp_scores.view(
                [batch_size, self.beam]
            )  # previous scores, with shape [B, beam_size]
            prev_token_tmp_list = []
            scores_tmp_list = []
            for n in range(batch_size):  # n ranges from 0 to (batch_size - 1)
                # Get the max length of current sample
                cur_output_maxlen = out_seqlens[n]
                # If some sample's decode length is smaller than current step id, keep its score and decoded results
                if step_i > cur_output_maxlen:
                    cur_scores = tmp_scores[n, :]  # beam_size
                    cur_prev_tokens = prev_tokens[n, :, :]  # beam_size x t
                else:
                    cur_scores = topk_scores[n, :]  # beam_size
                    cur_prev_tokens = prev_tokens[n, :, :]  # beam_size x t
                    cur_beam_indices = beam_indices[n, :]  # beam_size
                    # Get reformed previous tokens
                    cur_prev_tokens = torch.index_select(
                        cur_prev_tokens, dim=0, index=cur_beam_indices
                    )  # beam_size x t
                scores_tmp_list.append(cur_scores.unsqueeze(dim=0))
                prev_token_tmp_list.append(cur_prev_tokens.unsqueeze(dim=0))
            fixed_prev_tokens = torch.cat(prev_token_tmp_list, dim=0)
            fixed_topk_indices = torch.where(
                step_i <= out_seqlens.unsqueeze(dim=-1).repeat([1, self.beam]),
                fixed_topk_indices,  # B x beam_size
                torch.ones_like(fixed_topk_indices).cuda() * self.pad,
            )  # Mask locations that outnumber cif max length using <pad>
            fixed_topk_indices = fixed_topk_indices.unsqueeze(
                dim=-1
            )  # [B, beam_size, 1]
            prev_tokens = torch.cat(
                [fixed_prev_tokens, fixed_topk_indices], dim=-1
            ).view(
                [batch_size * self.beam, -1]
            )  # [B * beam_size, t + 1]
            scores = torch.cat(scores_tmp_list, dim=0).view(
                [batch_size * self.beam]
            )  # [B * beam_size]
        # Keep the nbest beams and drop the leading <eos> token.
        scores = scores.view([batch_size, self.beam])[:, : self.nbest]  # B x beam_size
        prev_tokens = prev_tokens.view([batch_size, self.beam, -1])[
            :, : self.nbest, 1:
        ]  # B x beam_size x T
        out_seqlens = torch.unsqueeze(out_seqlens, dim=-1).repeat(1, self.beam)[
            :, : self.nbest
        ]  # B x beam_size
        return prev_tokens, scores, out_seqlens
def nar_batch_parallel_greedy_decode(self, model, cif_outputs):
    """Non-autoregressive greedy decoding: all positions decoded in one pass.

    :param model: the model in usage; must provide ``step_forward_decoder``
        and ``get_probs_from_logits``
    :param cif_outputs: the outputs of the cif module, a dict with keys
        ``cif_out`` (B x T x C), ``cif_out_padding_mask`` (B x T),
        ``encoder_out`` and ``encoder_padding_mask``
    :return: tuple ``(prev_tokens, scores, out_seqlens)`` with shapes
        (B x 1 x T), (B x 1), (B x 1); the singleton dim mimics a beam
        dimension so greedy results share the beam-search interface
    """
    # Unpack cif outputs
    cif_out = cif_outputs["cif_out"]
    cif_out_padding_mask = cif_outputs["cif_out_padding_mask"]
    raw_encoder_out = cif_outputs["encoder_out"]
    raw_encoder_padding_mask = cif_outputs["encoder_padding_mask"]
    # Maximum number of decoding steps and the true per-sample lengths
    batch_size, max_decode_length, _ = cif_out.size()
    out_seqlens = cif_out_padding_mask.sum(-1)  # B
    # Dummy previous tokens (the NAR decoder ignores their values).
    # Fix: allocate on the same device as cif_out instead of always CPU,
    # so the decoder never sees mixed-device inputs.
    prev_decoded_tokens = torch.zeros(
        [batch_size, max_decode_length],
        dtype=torch.long,
        device=cif_out.device,
    )  # B x T
    decoder_inputs = {
        "cif_out": cif_out,
        "cif_out_padding_mask": cif_out_padding_mask,
        "raw_encoder_out": raw_encoder_out,
        "raw_encoder_padding_mask": raw_encoder_padding_mask,
    }
    decoder_output, _, _ = model.step_forward_decoder(
        prev_decoded_tokens=prev_decoded_tokens, cif_outputs=decoder_inputs
    )  # B x T x V
    # Per-position probabilities and their greedy argmax picks
    probs = model.get_probs_from_logits(decoder_output, log_probs=False)  # B x T x V
    decoded_tokens = torch.argmax(probs, dim=-1)  # B x T
    # Sequence score = product of per-position maximum probabilities
    scores = torch.prod(probs.max(-1)[0], dim=-1)  # B
    # Add a singleton "beam" dimension for interface parity with beam search
    prev_tokens = torch.unsqueeze(decoded_tokens, dim=1)  # B x 1 x T
    out_seqlens = torch.unsqueeze(out_seqlens, dim=1)  # B x 1
    scores = torch.unsqueeze(scores, dim=-1)  # B x 1
    return prev_tokens, scores, out_seqlens
def nar_batch_beam_decode(self, model, cif_outputs):
    """Step-synchronous beam search over the cif-length-aligned decoder.

    :param model: the model in usage; must provide ``step_forward_decoder``
        and ``get_probs_from_logits``
    :param cif_outputs: dict with ``cif_out`` (B x T x C),
        ``cif_out_padding_mask`` (B x T), ``encoder_out`` and
        ``encoder_padding_mask``
    :return: tuple ``(prev_tokens, scores, out_seqlens)`` with shapes
        (B x nbest x T), (B x nbest), (B x nbest)
    """
    cif_out = cif_outputs["cif_out"]  # B x T x C
    cif_out_padding_mask = cif_outputs["cif_out_padding_mask"]  # B x T
    raw_encoder_out = cif_outputs["encoder_out"]
    raw_encoder_padding_mask = cif_outputs["encoder_padding_mask"]
    # Fix: allocate working tensors on the input's device instead of
    # hard-coded .cuda(), so decoding also works on CPU; identical on GPU.
    device = cif_out.device
    # Get the maximum length of decoding steps and true per-sample lengths
    batch_size, max_decode_length, cif_out_dim = cif_out.size()
    out_seqlens = cif_out_padding_mask.sum(-1)  # B
    # Tile per-sample tensors over the beam dimension
    cif_out = torch.unsqueeze(cif_out, dim=1).repeat(
        1, self.beam, 1, 1
    )  # B x beam_size x T x C
    prev_tokens = torch.full(
        [batch_size, self.beam, 1], self.eos, dtype=torch.long, device=device
    )  # B x beam_size x 1, every beam starts from <eos>
    scores = torch.zeros(
        [batch_size, self.beam], dtype=torch.float, device=device
    )  # B x beam_size
    cif_out_padding_mask = torch.unsqueeze(cif_out_padding_mask, dim=1).repeat(
        [1, self.beam, 1]
    )  # B x beam_size x T
    # Flatten batch and beam dimensions for the decoder
    cif_out = cif_out.view(
        [batch_size * self.beam, max_decode_length, cif_out_dim]
    )  # (B * beam_size) x T x C
    prev_tokens = prev_tokens.view(
        [batch_size * self.beam, 1]
    )  # (B * beam_size) x 1
    scores = scores.view([batch_size * self.beam])  # (B * beam_size)
    cif_out_padding_mask = cif_out_padding_mask.view(
        [batch_size * self.beam, max_decode_length]
    )  # (B * beam_size) x T
    for step_i in range(1, max_decode_length + 1):
        # Feed only the first step_i cif positions to the decoder
        cur_step_cif_outputs = cif_out[:, :step_i, :]  # (B * beam_size) x t x C
        cur_step_cif_out_padding_mask = cif_out_padding_mask[
            :, :step_i
        ]  # (B * beam_size) x t
        cur_step_cif_out = {
            "cif_out": cur_step_cif_outputs,
            "cif_out_padding_mask": cur_step_cif_out_padding_mask,
            "ctxt_cif_out": None,
            "raw_encoder_out": raw_encoder_out,
            "raw_encoder_padding_mask": raw_encoder_padding_mask,
        }
        # Decoder forward for the current prefixes
        decoder_output_i, extra_outputs, _ = model.step_forward_decoder(
            prev_decoded_tokens=prev_tokens,  # (B * beam_size) x t
            cif_outputs=cur_step_cif_out,
        )  # decoder_output_i: (B * beam_size) x t x V
        cur_decoder_output = model.get_probs_from_logits(
            decoder_output_i[:, -1, :], log_probs=True
        )  # log-probs of the newest position, (B * beam_size) x V
        tmp_scores = scores  # backup of pre-expansion scores, (B * beam_size)
        scores = scores.unsqueeze(dim=-1).repeat(
            [1, self.vocab_size]
        )  # (B * beam_size) x V
        cur_score = cur_decoder_output
        updated_scores = (scores + cur_score).view(
            [batch_size, self.beam * self.vocab_size]
        )  # B x (beam_size * V)
        if step_i == 1:
            # All beams share the same <eos> prefix, so expand only beam 0
            topk_scores, topk_indices = torch.topk(
                updated_scores.view([batch_size, self.beam, self.vocab_size])[
                    :, 0, :
                ],
                k=self.beam,
                dim=-1,
            )
            beam_indices = torch.zeros(
                batch_size, self.beam, dtype=torch.long, device=device
            )  # every hypothesis originates from beam 0
            fixed_topk_indices = topk_indices  # B x beam_size
        else:
            # Later steps: search across all beam * vocab continuations
            topk_scores, topk_indices = torch.topk(
                updated_scores, k=self.beam, dim=-1
            )  # both B x beam_size
            beam_indices = torch.div(
                topk_indices, self.vocab_size, rounding_mode="floor"
            )  # source beam of each surviving hypothesis
            fixed_topk_indices = topk_indices % self.vocab_size  # token ids
        # Rebuild prefixes and scores for the surviving beams
        prev_tokens = prev_tokens.view(
            [batch_size, self.beam, -1]
        )  # B x beam_size x t
        tmp_scores = tmp_scores.view(
            [batch_size, self.beam]
        )  # pre-expansion scores, B x beam_size
        prev_token_tmp_list = []
        scores_tmp_list = []
        for n in range(batch_size):
            cur_output_maxlen = out_seqlens[n]
            if step_i > cur_output_maxlen:
                # Sample already finished: freeze its scores and prefixes
                cur_scores = tmp_scores[n, :]  # beam_size
                cur_prev_tokens = prev_tokens[n, :, :]  # beam_size x t
            else:
                cur_scores = topk_scores[n, :]  # beam_size
                cur_prev_tokens = prev_tokens[n, :, :]  # beam_size x t
                cur_beam_indices = beam_indices[n, :]  # beam_size
                # Reorder prefixes to follow the surviving beams
                cur_prev_tokens = torch.index_select(
                    cur_prev_tokens, dim=0, index=cur_beam_indices
                )  # beam_size x t
            scores_tmp_list.append(cur_scores.unsqueeze(dim=0))
            prev_token_tmp_list.append(cur_prev_tokens.unsqueeze(dim=0))
        fixed_prev_tokens = torch.cat(prev_token_tmp_list, dim=0)
        # Positions past a sample's cif length are masked with <pad>
        fixed_topk_indices = torch.where(
            step_i <= out_seqlens.unsqueeze(dim=-1).repeat([1, self.beam]),
            fixed_topk_indices,  # B x beam_size
            torch.full_like(fixed_topk_indices, self.pad),
        )
        fixed_topk_indices = fixed_topk_indices.unsqueeze(
            dim=-1
        )  # B x beam_size x 1
        prev_tokens = torch.cat(
            [fixed_prev_tokens, fixed_topk_indices], dim=-1
        ).view([batch_size * self.beam, -1])  # (B * beam_size) x (t + 1)
        scores = torch.cat(scores_tmp_list, dim=0).view(
            [batch_size * self.beam]
        )  # (B * beam_size)
    # Keep the nbest beams and strip the leading <eos>
    scores = scores.view([batch_size, self.beam])[:, : self.nbest]  # B x nbest
    prev_tokens = prev_tokens.view([batch_size, self.beam, -1])[
        :, : self.nbest, 1:
    ]  # B x nbest x T
    out_seqlens = torch.unsqueeze(out_seqlens, dim=-1).repeat(1, self.beam)[
        :, : self.nbest
    ]  # B x nbest
    return prev_tokens, scores, out_seqlens
| 40,532 | 43.057609 | 116 | py |
CIF-HieraDist | CIF-HieraDist-main/examples/speech_recognition/criterions/cross_entropy_acc.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("cross_entropy_acc")
class CrossEntropyWithAccCriterion(FairseqCriterion):
    """Cross-entropy criterion that additionally logs token-level accuracy."""

    def __init__(self, task, sentence_avg):
        super().__init__(task)
        self.sentence_avg = sentence_avg

    def compute_loss(self, model, net_output, target, reduction, log_probs):
        """Return (flattened log-probs, NLL loss) for one batch."""
        # Flatten targets: N, T -> N * T
        flat_targets = target.view(-1)
        lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
        if not hasattr(lprobs, "batch_first"):
            logging.warning(
                "ERROR: we need to know whether "
                "batch first for the net output; "
                "you need to set batch_first attribute for the return value of "
                "model.get_normalized_probs. Now, we assume this is true, but "
                "in the future, we will raise exception instead. "
            )
        # Bring probabilities to batch-first layout if needed (assumed
        # batch-first when the attribute is absent, matching the warning).
        if not getattr(lprobs, "batch_first", True):
            lprobs = lprobs.transpose(0, 1)
        # N, T, D -> N * T, D
        lprobs = lprobs.view(-1, lprobs.size(-1))
        loss = F.nll_loss(
            lprobs, flat_targets, ignore_index=self.padding_idx, reduction=reduction
        )
        return lprobs, loss

    def get_logging_output(self, sample, target, lprobs, loss):
        """Build the per-batch logging dict and the sample size."""
        flat_targets = target.view(-1)
        # Only non-padding positions count toward accuracy
        mask = flat_targets != self.padding_idx
        correct = torch.sum(
            lprobs.argmax(1).masked_select(mask) == flat_targets.masked_select(mask)
        )
        total = torch.sum(mask)
        # sentence_avg normalizes by sentences, otherwise by target tokens
        if self.sentence_avg:
            sample_size = sample["target"].size(0)
        else:
            sample_size = sample["ntokens"]
        logging_output = {
            "loss": utils.item(loss.data),  # * sample['ntokens'],
            "ntokens": sample["ntokens"],
            "nsentences": sample["target"].size(0),
            "sample_size": sample_size,
            "correct": utils.item(correct.data),
            "total": utils.item(total.data),
            "nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
        }
        return sample_size, logging_output

    def forward(self, model, sample, reduction="sum", log_probs=True):
        """Compute cross-entropy loss and accuracy metrics for one sample.

        Runs the model on ``sample["net_input"]``, computes the NLL loss
        against the model's targets, and assembles logging outputs.

        Returns:
            tuple: ``(loss, sample_size, logging_output)`` where
            ``sample_size`` is the gradient denominator and
            ``logging_output`` is the dict built by ``get_logging_output``.
        """
        net_output = model(**sample["net_input"])
        target = model.get_targets(sample, net_output)
        lprobs, loss = self.compute_loss(
            model, net_output, target, reduction, log_probs
        )
        sample_size, logging_output = self.get_logging_output(
            sample, target, lprobs, loss
        )
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""

        def accum(key):
            # Sum one field across all workers' logging outputs
            return sum(log.get(key, 0) for log in logging_outputs)

        correct_sum = accum("correct")
        total_sum = accum("total")
        loss_sum = accum("loss")
        ntokens = accum("ntokens")
        nsentences = accum("nsentences")
        sample_size = accum("sample_size")
        nframes = accum("nframes")
        agg_output = {
            # Loss per sample-size unit in base 2; with sentence_avg the
            # unit is sentences, otherwise output tokens.
            "loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
            "ntokens": ntokens,
            "nsentences": nsentences,
            "nframes": nframes,
            "sample_size": sample_size,
            "acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
            "correct": correct_sum,
            "total": total_sum,
            # total is the number of validate tokens
        }
        # When normalizing by sentences, also report the per-token loss
        if sample_size != ntokens:
            agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
        return agg_output
| 5,372 | 40.015267 | 85 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.