repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
STTS
|
STTS-main/VideoSwin/tests/test_data/test_pipelines/test_loadings/__init__.py
|
from .base import BaseTestLoading
__all__ = ['BaseTestLoading']
| 65
| 15.5
| 33
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_utils/test_module_hooks.py
|
import copy
import os.path as osp
import mmcv
import numpy as np
import pytest
import torch
from mmaction.models import build_recognizer
from mmaction.utils import register_module_hooks
from mmaction.utils.module_hooks import GPUNormalize
def test_register_module_hooks():
    """Check that ``register_module_hooks`` attaches a GPUNormalize hook at
    the requested position and raises ``ValueError`` for invalid configs."""
    _module_hooks = [
        dict(
            type='GPUNormalize',
            hooked_module='backbone',
            hook_pos='forward_pre',
            input_format='NCHW',
            mean=[123.675, 116.28, 103.53],
            std=[58.395, 57.12, 57.375])
    ]

    repo_dpath = osp.dirname(osp.dirname(osp.dirname(__file__)))
    config_fpath = osp.join(repo_dpath, 'configs/_base_/models/tsm_r50.py')
    config = mmcv.Config.fromfile(config_fpath)
    # Avoid downloading pretrained weights during the test.
    config.model['backbone']['pretrained'] = None

    # case 1: hook registered before the backbone forward pass
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = 'forward_pre'
    recognizer = build_recognizer(config.model)
    handles = register_module_hooks(recognizer, module_hooks)
    assert recognizer.backbone._forward_pre_hooks[
        handles[0].id].__name__ == 'normalize_hook'

    # case 2: hook registered after the backbone forward pass
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = 'forward'
    recognizer = build_recognizer(config.model)
    handles = register_module_hooks(recognizer, module_hooks)
    assert recognizer.backbone._forward_hooks[
        handles[0].id].__name__ == 'normalize_hook'

    # case 3: backward hook on a different module (cls_head)
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hooked_module'] = 'cls_head'
    module_hooks[0]['hook_pos'] = 'backward'
    recognizer = build_recognizer(config.model)
    handles = register_module_hooks(recognizer, module_hooks)
    assert recognizer.cls_head._backward_hooks[
        handles[0].id].__name__ == 'normalize_hook'

    # case 4: unknown hook position is rejected
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hook_pos'] = '_other_pos'
    recognizer = build_recognizer(config.model)
    with pytest.raises(ValueError):
        handles = register_module_hooks(recognizer, module_hooks)

    # case 5: unknown hooked module is rejected
    module_hooks = copy.deepcopy(_module_hooks)
    module_hooks[0]['hooked_module'] = '_other_module'
    recognizer = build_recognizer(config.model)
    with pytest.raises(ValueError):
        handles = register_module_hooks(recognizer, module_hooks)
def test_gpu_normalize():
    """Check GPUNormalize mean/std broadcasting shapes per input format and
    that its hook actually normalizes images."""

    def check_normalize(origin_imgs, result_imgs, norm_cfg):
        """Check if the origin_imgs are normalized correctly into result_imgs
        in a given norm_cfg."""
        from numpy.testing import assert_array_almost_equal
        # Undo the normalization and compare against the original images.
        target_imgs = result_imgs.copy()
        target_imgs *= norm_cfg['std']
        target_imgs += norm_cfg['mean']
        assert_array_almost_equal(origin_imgs, target_imgs, decimal=4)

    _gpu_normalize_cfg = dict(
        input_format='NCTHW',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375])

    # case 1: NCHW — mean broadcast over (N, C, H, W)
    gpu_normalize_cfg = copy.deepcopy(_gpu_normalize_cfg)
    gpu_normalize_cfg['input_format'] = 'NCHW'
    gpu_normalize = GPUNormalize(**gpu_normalize_cfg)
    assert gpu_normalize._mean.shape == (1, 3, 1, 1)
    imgs = np.random.randint(256, size=(2, 240, 320, 3), dtype=np.uint8)
    _input = (torch.tensor(imgs).permute(0, 3, 1, 2), )
    normalize_hook = gpu_normalize.hook_func()
    _input = normalize_hook(torch.nn.Module, _input)
    result_imgs = np.array(_input[0].permute(0, 2, 3, 1))
    check_normalize(imgs, result_imgs, gpu_normalize_cfg)

    # case 2: NCTHW
    gpu_normalize_cfg = copy.deepcopy(_gpu_normalize_cfg)
    gpu_normalize_cfg['input_format'] = 'NCTHW'
    gpu_normalize = GPUNormalize(**gpu_normalize_cfg)
    assert gpu_normalize._mean.shape == (1, 3, 1, 1, 1)

    # case 3: NCHW_Flow
    gpu_normalize_cfg = copy.deepcopy(_gpu_normalize_cfg)
    gpu_normalize_cfg['input_format'] = 'NCHW_Flow'
    gpu_normalize = GPUNormalize(**gpu_normalize_cfg)
    assert gpu_normalize._mean.shape == (1, 3, 1, 1)

    # case 4: NPTCHW
    gpu_normalize_cfg = copy.deepcopy(_gpu_normalize_cfg)
    gpu_normalize_cfg['input_format'] = 'NPTCHW'
    gpu_normalize = GPUNormalize(**gpu_normalize_cfg)
    assert gpu_normalize._mean.shape == (1, 1, 1, 3, 1, 1)

    # case 5: unknown format is rejected
    gpu_normalize_cfg = copy.deepcopy(_gpu_normalize_cfg)
    gpu_normalize_cfg['input_format'] = '_format'
    with pytest.raises(ValueError):
        gpu_normalize = GPUNormalize(**gpu_normalize_cfg)
| 4,387
| 35.264463
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_utils/test_onnx.py
|
import os.path as osp
import tempfile
import torch.nn as nn
from tools.deployment.pytorch2onnx import _convert_batchnorm, pytorch2onnx
class TestModel(nn.Module):
    """Tiny Conv3d + SyncBatchNorm model used to exercise ONNX export."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv3d(1, 2, 1)
        self.bn = nn.SyncBatchNorm(2)

    def forward(self, x):
        return self.bn(self.conv(x))

    def forward_dummy(self, x):
        # ``pytorch2onnx`` traces ``forward_dummy`` and expects a tuple of
        # outputs, hence the single-element tuple.
        out = self.bn(self.conv(x))
        return (out, )
def test_onnx_exporting():
    """Smoke-test ONNX export of ``TestModel`` into a temporary directory."""
    with tempfile.TemporaryDirectory() as tmpdir:
        out_file = osp.join(tmpdir, 'tmp.onnx')
        model = TestModel()
        # SyncBatchNorm is not exportable; convert to plain BatchNorm first.
        model = _convert_batchnorm(model)
        # test exporting
        if hasattr(model, 'forward_dummy'):
            model.forward = model.forward_dummy
        pytorch2onnx(model, (2, 1, 1, 1, 1), output_file=out_file, verify=True)
| 845
| 25.4375
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_utils/test_localization_utils.py
|
import os.path as osp
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mmaction.localization import (generate_bsp_feature,
generate_candidate_proposals, soft_nms,
temporal_iop, temporal_iou)
def test_temporal_iou():
    """IoU of [0,1] and [0.5,1.5] against [0.5,1.0] is 0.5 in both cases."""
    anchors_min = np.array([0.0, 0.5])
    anchors_max = np.array([1.0, 1.5])
    box_min = 0.5
    box_max = 1.0
    iou = temporal_iou(anchors_min, anchors_max, box_min, box_max)
    assert_array_equal(iou, np.array([0.5, 0.5]))
def test_temporal_iop():
    """Intersection-over-proposal of unit-length anchors vs [0.4, 1.1]."""
    anchors_min = np.array([0.0, 0.5])
    anchors_max = np.array([1.0, 1.5])
    box_min = 0.4
    box_max = 1.1
    ioa = temporal_iop(anchors_min, anchors_max, box_min, box_max)
    assert_array_almost_equal(ioa, np.array([0.6, 0.6]))
def test_soft_nms():
    """Soft-NMS keeps the two highest-scoring, non-overlapping proposals."""
    proposals = np.array([[0., 1., 1., 1., 0.5, 0.5],
                          [0., 0.4, 1., 1., 0.4, 0.4],
                          [0., 0.95, 1., 1., 0.6, 0.6]])
    proposal_list = soft_nms(proposals, 0.75, 0.65, 0.9, 1)
    assert_array_equal(proposal_list, [[0., 0.95, 0.6], [0., 0.4, 0.4]])
def test_generate_candidate_proposals():
    """Check generated proposals against precomputed TEM results fixtures."""
    video_list = [0, 1]
    video_infos = [
        dict(
            video_name='v_test1',
            duration_second=100,
            duration_frame=1000,
            annotations=[{
                'segment': [30.0, 60.0],
                'label': 'Rock climbing'
            }],
            feature_frame=900),
        dict(
            video_name='v_test2',
            duration_second=100,
            duration_frame=1000,
            annotations=[{
                'segment': [6.0, 8.0],
                'label': 'Drinking beer'
            }],
            feature_frame=900)
    ]
    tem_results_dir = osp.normpath(
        osp.join(osp.dirname(__file__), '../data/tem_results'))

    # test when tem_result_ext is not valid
    with pytest.raises(NotImplementedError):
        result_dict = generate_candidate_proposals(
            video_list,
            video_infos,
            tem_results_dir,
            5,
            0.5,
            tem_results_ext='unsupport_ext')

    # test without result_dict
    assert_result1 = np.array([
        [0.1, 0.7, 0.58390868, 0.35708317, 0.20850396, 0.55555556, 0.55555556],
        [0.1, 0.5, 0.58390868, 0.32605207, 0.19038463, 0.29411765, 0.41666667],
        [0.1, 0.3, 0.58390868, 0.26221931, 0.15311213, 0., 0.],
        [0.3, 0.7, 0.30626667, 0.35708317, 0.10936267, 0.83333333, 0.83333333],
        [0.3, 0.5, 0.30626667, 0.32605207, 0.09985888, 0.45454545, 0.83333333]
    ])
    assert_result2 = np.array(
        [[0.1, 0.3, 0.78390867, 0.3622193, 0.28394685, 0., 0.],
         [0.1, 0.7, 0.78390867, 0.35708317, 0.27992059, 0., 0.],
         [0.1, 0.5, 0.78390867, 0.32605207, 0.25559504, 0., 0.]])
    result_dict = generate_candidate_proposals(video_list, video_infos,
                                               tem_results_dir, 5, 0.5)
    assert_array_almost_equal(result_dict['v_test1'], assert_result1)
    assert_array_almost_equal(result_dict['v_test2'], assert_result2)

    # test with result_dict (results written in place)
    result_dict = {}
    generate_candidate_proposals(
        video_list,
        video_infos,
        tem_results_dir,
        5,
        0.5,
        result_dict=result_dict)
    assert_array_almost_equal(result_dict['v_test1'], assert_result1)
    assert_array_almost_equal(result_dict['v_test2'], assert_result2)
def test_generate_bsp_feature():
    """Check BSP feature generation against precomputed fixtures."""
    video_list = [0, 1]
    video_infos = [
        dict(
            video_name='v_test1',
            duration_second=100,
            duration_frame=1000,
            annotations=[{
                'segment': [30.0, 60.0],
                'label': 'Rock climbing'
            }],
            feature_frame=900),
        dict(
            video_name='v_test2',
            duration_second=100,
            duration_frame=1000,
            annotations=[{
                'segment': [6.0, 8.0],
                'label': 'Drinking beer'
            }],
            feature_frame=900)
    ]
    tem_results_dir = osp.normpath(
        osp.join(osp.dirname(__file__), '../data/tem_results'))
    pgm_proposals_dir = osp.normpath(
        osp.join(osp.dirname(__file__), '../data/proposals'))

    # test when extension is not valid
    with pytest.raises(NotImplementedError):
        result_dict = generate_bsp_feature(
            video_list,
            video_infos,
            tem_results_dir,
            pgm_proposals_dir,
            tem_results_ext='unsupport_ext')
    with pytest.raises(NotImplementedError):
        result_dict = generate_bsp_feature(
            video_list,
            video_infos,
            tem_results_dir,
            pgm_proposals_dir,
            pgm_proposal_ext='unsupport_ext')

    # test without result_dict
    result_dict = generate_bsp_feature(
        video_list, video_infos, tem_results_dir, pgm_proposals_dir, top_k=2)
    assert_result1 = np.array(
        [[
            0.02633105, 0.02489364, 0.02345622, 0.0220188, 0.02058138,
            0.01914396, 0.01770654, 0.01626912, 0.01541432, 0.01514214,
            0.01486995, 0.01459776, 0.01432558, 0.01405339, 0.01378121,
            0.01350902, 0.03064331, 0.02941124, 0.02817916, 0.02694709,
            0.02571502, 0.02448295, 0.02325087, 0.0220188, 0.01432558,
            0.01409228, 0.01385897, 0.01362567, 0.01339237, 0.01315907,
            0.01292577, 0.01269246
        ],
         [
             0.01350902, 0.01323684, 0.01296465, 0.01269246, 0.01242028,
             0.01214809, 0.01187591, 0.01160372, 0.01154264, 0.01169266,
             0.01184269, 0.01199271, 0.01214273, 0.01229275, 0.01244278,
             0.0125928, 0.01432558, 0.01409228, 0.01385897, 0.01362567,
             0.01339237, 0.01315907, 0.01292577, 0.01269246, 0.01214273,
             0.01227132, 0.01239991, 0.0125285, 0.0126571, 0.01278569,
             0.01291428, 0.01304287
         ]])
    assert_result2 = np.array(
        [[
            0.04133105, 0.03922697, 0.03712288, 0.0350188, 0.03291471,
            0.03081063, 0.02870654, 0.02660246, 0.02541432, 0.02514214,
            0.02486995, 0.02459776, 0.02432558, 0.02405339, 0.02378121,
            0.02350902, 0.04764331, 0.04583981, 0.04403631, 0.04223281,
            0.0404293, 0.0386258, 0.0368223, 0.0350188, 0.02432558, 0.02409228,
            0.02385897, 0.02362567, 0.02339237, 0.02315907, 0.02292577,
            0.02269246
        ],
         [
             0.02350902, 0.02323684, 0.02296465, 0.02269246, 0.02242028,
             0.02214809, 0.02187591, 0.02160372, 0.02120931, 0.02069266,
             0.02017602, 0.01965937, 0.01914273, 0.01862609, 0.01810944,
             0.0175928, 0.02432558, 0.02409228, 0.02385897, 0.02362567,
             0.02339237, 0.02315907, 0.02292577, 0.02269246, 0.01914273,
             0.01869989, 0.01825706, 0.01781422, 0.01737138, 0.01692854,
             0.0164857, 0.01604287
         ]])
    assert_array_almost_equal(result_dict['v_test1'], assert_result1)
    assert_array_almost_equal(result_dict['v_test2'], assert_result2)

    # test with result_dict (results written in place)
    result_dict = {}
    generate_bsp_feature(
        video_list,
        video_infos,
        tem_results_dir,
        pgm_proposals_dir,
        top_k=2,
        result_dict=result_dict)
    assert_array_almost_equal(result_dict['v_test1'], assert_result1)
    assert_array_almost_equal(result_dict['v_test2'], assert_result2)
| 7,536
| 35.946078
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_utils/__init__.py
| 0
| 0
| 0
|
py
|
|
STTS
|
STTS-main/VideoSwin/tests/test_utils/test_bbox.py
|
import os.path as osp
from abc import abstractproperty
import numpy as np
import torch
from mmaction.core.bbox import bbox2result, bbox_target
from mmaction.datasets import AVADataset
from mmaction.utils import import_module_error_func
try:
    from mmdet.core.bbox import build_assigner, build_sampler
except (ImportError, ModuleNotFoundError):
    # mmdet is optional: fall back to stubs that raise a descriptive
    # ImportError when called, so tests can be skipped/reported cleanly.
    @import_module_error_func('mmdet')
    def build_assigner(*args, **kwargs):
        pass

    @import_module_error_func('mmdet')
    def build_sampler(*args, **kwargs):
        pass
def test_assigner_sampler():
    """Exercise MaxIoUAssignerAVA + RandomSampler on the AVA fixture data."""
    data_prefix = osp.normpath(
        osp.join(osp.dirname(__file__), '../data/eval_detection'))
    ann_file = osp.join(data_prefix, 'gt.csv')
    label_file = osp.join(data_prefix, 'action_list.txt')
    proposal_file = osp.join(data_prefix, 'proposal.pkl')
    dataset = AVADataset(
        ann_file=ann_file,
        exclude_file=None,
        pipeline=[],
        label_file=label_file,
        proposal_file=proposal_file,
        num_classes=4)

    assigner = dict(
        type='MaxIoUAssignerAVA',
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        min_pos_iou=0.5)
    assigner = build_assigner(assigner)
    proposal = torch.tensor(dataset[0]['proposals'])
    gt_bboxes = torch.tensor(dataset[0]['gt_bboxes'])
    gt_labels = torch.tensor(dataset[0]['gt_labels'])
    assign_result = assigner.assign(
        bboxes=proposal,
        gt_bboxes=gt_bboxes,
        gt_bboxes_ignore=None,
        gt_labels=gt_labels)
    assert assign_result.num_gts == 4
    assert torch.all(
        assign_result.gt_inds == torch.tensor([0, 0, 3, 3, 0, 0, 0, 1, 0, 0]))
    assert torch.all(
        torch.isclose(
            assign_result.max_overlaps,
            torch.tensor([
                0.40386841, 0.47127257, 0.53544776, 0.58797631, 0.29281288,
                0.40979504, 0.45902917, 0.50093938, 0.21560125, 0.32948171
            ],
                         dtype=torch.float64)))
    assert torch.all(
        torch.isclose(
            assign_result.labels,
            torch.tensor([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 1., 0., 0.],
                          [0., 1., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.],
                          [0., 0., 0., 0.], [0., 0., 0., 1.], [0., 0., 0., 0.],
                          [0., 0., 0., 0.]])))

    sampler = dict(type='RandomSampler', num=32, pos_fraction=1)
    sampler = build_sampler(sampler)
    sampling_result = sampler.sample(assign_result, proposal, gt_bboxes,
                                     gt_labels)
    # pos/neg index and bbox counts must agree
    assert (sampling_result.pos_inds.shape[0] ==
            sampling_result.pos_bboxes.shape[0])
    assert (sampling_result.neg_inds.shape[0] ==
            sampling_result.neg_bboxes.shape[0])
    return sampling_result
def test_bbox2result():
    """bbox2result should group bboxes per class with the class score
    appended (background class 0 is dropped)."""
    bboxes = torch.tensor([[0.072, 0.47, 0.84, 0.898],
                           [0.23, 0.215, 0.781, 0.534],
                           [0.195, 0.128, 0.643, 0.944],
                           [0.236, 0.189, 0.689, 0.74],
                           [0.375, 0.371, 0.726, 0.804],
                           [0.024, 0.398, 0.776, 0.719]])
    labels = torch.tensor([[-1.650, 0.515, 0.798, 1.240],
                           [1.368, -1.128, 0.037, -1.087],
                           [0.481, -1.303, 0.501, -0.463],
                           [-0.356, 0.126, -0.840, 0.438],
                           [0.079, 1.269, -0.263, -0.538],
                           [-0.853, 0.391, 0.103, 0.398]])
    num_classes = 4
    result = bbox2result(bboxes, labels, num_classes)
    assert np.all(
        np.isclose(
            result[0],
            np.array([[0.072, 0.47, 0.84, 0.898, 0.515],
                      [0.236, 0.189, 0.689, 0.74, 0.126],
                      [0.375, 0.371, 0.726, 0.804, 1.269],
                      [0.024, 0.398, 0.776, 0.719, 0.391]])))
    assert np.all(
        np.isclose(
            result[1],
            np.array([[0.072, 0.47, 0.84, 0.898, 0.798],
                      [0.23, 0.215, 0.781, 0.534, 0.037],
                      [0.195, 0.128, 0.643, 0.944, 0.501],
                      [0.024, 0.398, 0.776, 0.719, 0.103]])))
    assert np.all(
        np.isclose(
            result[2],
            np.array([[0.072, 0.47, 0.84, 0.898, 1.24],
                      [0.236, 0.189, 0.689, 0.74, 0.438],
                      [0.024, 0.398, 0.776, 0.719, 0.398]])))
def test_bbox_target():
    """bbox_target should stack pos/neg labels and weight positives by
    ``cfg.pos_weight``."""
    pos_bboxes = torch.tensor([[0.072, 0.47, 0.84, 0.898],
                               [0.23, 0.215, 0.781, 0.534],
                               [0.195, 0.128, 0.643, 0.944],
                               [0.236, 0.189, 0.689, 0.74]])
    neg_bboxes = torch.tensor([[0.375, 0.371, 0.726, 0.804],
                               [0.024, 0.398, 0.776, 0.719]])
    pos_gt_labels = torch.tensor([[0., 0., 1., 0.], [0., 0., 0., 1.],
                                  [0., 1., 0., 0.], [0., 1., 0., 0.]])
    # abstractproperty() is (ab)used here as a cheap attribute container.
    cfg = abstractproperty()
    cfg.pos_weight = 0.8
    labels, label_weights = bbox_target([pos_bboxes], [neg_bboxes],
                                        [pos_gt_labels], cfg)
    assert torch.all(
        torch.isclose(
            labels,
            torch.tensor([[0., 0., 1., 0.], [0., 0., 0., 1.], [0., 1., 0., 0.],
                          [0., 1., 0., 0.], [0., 0., 0., 0.], [0., 0., 0.,
                                                               0.]])))
    assert torch.all(
        torch.isclose(label_weights, torch.tensor([0.8] * 4 + [1.0] * 2)))
| 5,511
| 38.371429
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_utils/test_decorator.py
|
import pytest
from mmaction.utils import import_module_error_class, import_module_error_func
def test_import_module_error_class():
    """Decorated classes must raise ImportError on instantiation."""

    @import_module_error_class('mmdet')
    class ExampleClass:
        pass

    with pytest.raises(ImportError):
        ExampleClass()

    # also with a non-trivial __init__
    @import_module_error_class('mmdet')
    class ExampleClass:

        def __init__(self, a, b=3):
            self.c = a + b

    with pytest.raises(ImportError):
        ExampleClass(4)
def test_import_module_error_func():
    """Decorated functions must raise ImportError when called."""

    @import_module_error_func('_add')
    def ExampleFunc(a, b):
        return a + b

    with pytest.raises(ImportError):
        ExampleFunc(3, 4)
| 654
| 18.848485
| 78
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_metrics/test_accuracy.py
|
import os.path as osp
import random
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mmaction.core import (ActivityNetLocalization,
average_recall_at_avg_proposals, confusion_matrix,
get_weighted_score, mean_average_precision,
mean_class_accuracy, mmit_mean_average_precision,
pairwise_temporal_iou, top_k_accuracy)
from mmaction.core.evaluation.ava_utils import ava_eval
def gt_confusion_matrix(gt_labels, pred_labels, normalize=None):
    """Calculate the ground truth confusion matrix.

    Rows are ground-truth labels, columns are predictions. Label indices
    that occur in neither ``gt_labels`` nor ``pred_labels`` are pruned.
    ``normalize`` may be ``'true'`` (per row), ``'pred'`` (per column),
    ``'all'`` (global sum) or ``None`` (raw counts).
    """
    max_index = max(max(gt_labels), max(pred_labels))
    confusion_mat = np.zeros((max_index + 1, max_index + 1), dtype=np.int64)
    for gt, pred in zip(gt_labels, pred_labels):
        confusion_mat[gt][pred] += 1
    # Collect empty label indices to prune. ``max_index`` itself is never
    # checked, but by construction it occurs in the data, so its row or
    # column is non-zero and it could never be pruned anyway.
    del_index = []
    for i in range(max_index):
        if sum(confusion_mat[i]) == 0 and sum(confusion_mat[:, i]) == 0:
            del_index.append(i)
    confusion_mat = np.delete(confusion_mat, del_index, axis=0)
    confusion_mat = np.delete(confusion_mat, del_index, axis=1)

    if normalize is not None:
        # NOTE: ``np.float`` was removed in NumPy 1.24; use np.float64.
        confusion_mat = np.array(confusion_mat, dtype=np.float64)
        m, n = confusion_mat.shape
        if normalize == 'true':
            for i in range(m):
                s = np.sum(confusion_mat[i], dtype=float)
                if s == 0:
                    continue
                confusion_mat[i, :] = confusion_mat[i, :] / s
        elif normalize == 'pred':
            for i in range(n):
                s = sum(confusion_mat[:, i])
                if s == 0:
                    continue
                confusion_mat[:, i] = confusion_mat[:, i] / s
        elif normalize == 'all':
            s = np.sum(confusion_mat)
            if s != 0:
                confusion_mat /= s
    return confusion_mat
def test_activitynet_localization():
    """Check ActivityNet localization mAP against precomputed fixtures."""
    data_prefix = osp.normpath(
        osp.join(osp.dirname(__file__), '../data/eval_localization'))
    gt_path = osp.join(data_prefix, 'gt.json')
    result_path = osp.join(data_prefix, 'result.json')
    localization = ActivityNetLocalization(gt_path, result_path)
    results = localization.evaluate()

    mAP = np.array([
        0.71428571, 0.71428571, 0.71428571, 0.6875, 0.6875, 0.59722222,
        0.52083333, 0.52083333, 0.52083333, 0.5
    ])
    average_mAP = 0.6177579365079365
    assert_array_almost_equal(results[0], mAP)
    assert_array_almost_equal(results[1], average_mAP)
def test_ava_detection():
    """Check AVA mAP evaluation on the detection fixtures."""
    data_prefix = osp.normpath(
        osp.join(osp.dirname(__file__), '../data/eval_detection'))
    gt_path = osp.join(data_prefix, 'gt.csv')
    result_path = osp.join(data_prefix, 'pred.csv')
    label_map = osp.join(data_prefix, 'action_list.txt')

    # eval bbox
    detection = ava_eval(result_path, 'mAP', label_map, gt_path, None)
    assert_array_almost_equal(detection['mAP@0.5IOU'], 0.09385522)
def test_confusion_matrix():
    """Compare confusion_matrix against the reference implementation and
    check its input validation."""
    # custom confusion_matrix vs. gt_confusion_matrix on random labels
    gt_labels = [np.int64(random.randint(0, 9)) for _ in range(100)]
    pred_labels = np.random.randint(10, size=100, dtype=np.int64)
    for normalize in [None, 'true', 'pred', 'all']:
        cf_mat = confusion_matrix(pred_labels, gt_labels, normalize)
        gt_cf_mat = gt_confusion_matrix(gt_labels, pred_labels, normalize)
        assert_array_equal(cf_mat, gt_cf_mat)

    with pytest.raises(ValueError):
        # normalize must be in ['true', 'pred', 'all', None]
        confusion_matrix([1], [1], 'unsupport')

    with pytest.raises(TypeError):
        # y_pred must be list or np.ndarray
        confusion_matrix(0.5, [1])

    with pytest.raises(TypeError):
        # y_real must be list or np.ndarray
        confusion_matrix([1], 0.5)

    with pytest.raises(TypeError):
        # y_pred dtype must be np.int64
        confusion_matrix([0.5], [1])

    with pytest.raises(TypeError):
        # y_real dtype must be np.int64
        confusion_matrix([1], [0.5])
def test_topk():
    """Check top-k accuracy for accuracies of 0/25/50/75/100 percent."""
    scores = [
        np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
        np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
        np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
        np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
    ]

    # top1 acc
    k = (1, )
    top1_labels_0 = [3, 1, 1, 1]
    top1_labels_25 = [2, 0, 4, 3]
    top1_labels_50 = [2, 2, 3, 1]
    top1_labels_75 = [2, 2, 2, 3]
    top1_labels_100 = [2, 2, 2, 4]
    res = top_k_accuracy(scores, top1_labels_0, k)
    assert res == [0]
    res = top_k_accuracy(scores, top1_labels_25, k)
    assert res == [0.25]
    res = top_k_accuracy(scores, top1_labels_50, k)
    assert res == [0.5]
    res = top_k_accuracy(scores, top1_labels_75, k)
    assert res == [0.75]
    res = top_k_accuracy(scores, top1_labels_100, k)
    assert res == [1.0]

    # top1 acc, top2 acc
    k = (1, 2)
    top2_labels_0_100 = [3, 1, 1, 1]
    top2_labels_25_75 = [3, 1, 2, 3]
    res = top_k_accuracy(scores, top2_labels_0_100, k)
    assert res == [0, 1.0]
    res = top_k_accuracy(scores, top2_labels_25_75, k)
    assert res == [0.25, 0.75]

    # top1 acc, top3 acc, top5 acc
    k = (1, 3, 5)
    top5_labels_0_0_100 = [1, 0, 3, 2]
    top5_labels_0_50_100 = [1, 3, 4, 0]
    top5_labels_25_75_100 = [2, 3, 0, 2]
    res = top_k_accuracy(scores, top5_labels_0_0_100, k)
    assert res == [0, 0, 1.0]
    res = top_k_accuracy(scores, top5_labels_0_50_100, k)
    assert res == [0, 0.5, 1.0]
    res = top_k_accuracy(scores, top5_labels_25_75_100, k)
    assert res == [0.25, 0.75, 1.0]
def test_mean_class_accuracy():
    """Check mean class accuracy at 0, 0.25, 1/3, 0.75 and 1.0."""
    scores = [
        np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
        np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
        np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
        np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
    ]

    # test mean class accuracy in [0, 0.25, 1/3, 0.75, 1.0]
    mean_cls_acc_0 = np.int64([1, 4, 0, 2])
    mean_cls_acc_25 = np.int64([2, 0, 4, 3])
    mean_cls_acc_33 = np.int64([2, 2, 2, 3])
    mean_cls_acc_75 = np.int64([4, 2, 2, 4])
    mean_cls_acc_100 = np.int64([2, 2, 2, 4])
    assert mean_class_accuracy(scores, mean_cls_acc_0) == 0
    assert mean_class_accuracy(scores, mean_cls_acc_25) == 0.25
    assert mean_class_accuracy(scores, mean_cls_acc_33) == 1 / 3
    assert mean_class_accuracy(scores, mean_cls_acc_75) == 0.75
    assert mean_class_accuracy(scores, mean_cls_acc_100) == 1.0
def test_mmit_mean_average_precision():
    """Single-sample check of MMIT-style mean average precision."""
    # One sample
    y_true = [np.array([0, 0, 1, 1])]
    y_scores = [np.array([0.1, 0.4, 0.35, 0.8])]
    # renamed from ``map`` to avoid shadowing the builtin
    mean_ap = mmit_mean_average_precision(y_scores, y_true)

    # expected AP computed directly from the precision/recall points
    precision = [2.0 / 3.0, 0.5, 1., 1.]
    recall = [1., 0.5, 0.5, 0.]
    target = -np.sum(np.diff(recall) * np.array(precision)[:-1])
    assert target == mean_ap
def test_pairwise_temporal_iou():
    """Check pairwise temporal IoU and overlap-self, including 1-d input."""
    target_segments = np.array([])
    candidate_segments = np.array([])
    with pytest.raises(ValueError):
        pairwise_temporal_iou(target_segments, candidate_segments)

    # test temporal iou
    target_segments = np.array([[1, 2], [2, 3]])
    candidate_segments = np.array([[2, 3], [2.5, 3]])
    temporal_iou = pairwise_temporal_iou(candidate_segments, target_segments)
    assert_array_equal(temporal_iou, [[0, 0], [1, 0.5]])

    # test temporal overlap_self
    target_segments = np.array([[1, 2], [2, 3]])
    candidate_segments = np.array([[2, 3], [2.5, 3]])
    temporal_iou, temporal_overlap_self = pairwise_temporal_iou(
        candidate_segments, target_segments, calculate_overlap_self=True)
    assert_array_equal(temporal_overlap_self, [[0, 0], [1, 1]])

    # test temporal overlap_self when candidate_segments is 1d
    target_segments = np.array([[1, 2], [2, 3]])
    candidate_segments = np.array([2.5, 3])
    temporal_iou, temporal_overlap_self = pairwise_temporal_iou(
        candidate_segments, target_segments, calculate_overlap_self=True)
    assert_array_equal(temporal_overlap_self, [0, 1])
def test_average_recall_at_avg_proposals():
    """Check AR@AN recall curves, proposal counts and AUC on toy data."""
    ground_truth1 = {
        'v_test1': np.array([[0, 1], [1, 2]]),
        'v_test2': np.array([[0, 1], [1, 2]])
    }
    ground_truth2 = {'v_test1': np.array([[0, 1]])}
    proposals1 = {
        'v_test1': np.array([[0, 1, 1], [1, 2, 1]]),
        'v_test2': np.array([[0, 1, 1], [1, 2, 1]])
    }
    proposals2 = {
        'v_test1': np.array([[10, 11, 0.6], [11, 12, 0.4]]),
        'v_test2': np.array([[10, 11, 0.6], [11, 12, 0.4]])
    }
    proposals3 = {
        'v_test1': np.array([[i, i + 1, 1 / (i + 1)] for i in range(100)])
    }

    # perfect proposals
    recall, avg_recall, proposals_per_video, auc = (
        average_recall_at_avg_proposals(ground_truth1, proposals1, 4))
    assert_array_equal(recall, [[0.] * 49 + [0.5] * 50 + [1.]] * 10)
    assert_array_equal(avg_recall, [0.] * 49 + [0.5] * 50 + [1.])
    assert_array_almost_equal(
        proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10)
    assert auc == 25.5

    # proposals that never overlap the ground truth
    recall, avg_recall, proposals_per_video, auc = (
        average_recall_at_avg_proposals(ground_truth1, proposals2, 4))
    assert_array_equal(recall, [[0.] * 100] * 10)
    assert_array_equal(avg_recall, [0.] * 100)
    assert_array_almost_equal(
        proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10)
    assert auc == 0

    # many proposals covering a single ground truth
    recall, avg_recall, proposals_per_video, auc = (
        average_recall_at_avg_proposals(ground_truth2, proposals3, 100))
    assert_array_equal(recall, [[1.] * 100] * 10)
    assert_array_equal(avg_recall, ([1.] * 100))
    assert_array_almost_equal(
        proposals_per_video, np.arange(1, 101, 1), decimal=10)
    assert auc == 99.0
def test_get_weighted_score():
    """Weighted score fusion must equal the manual linear combination."""
    score_a = [
        np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
        np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
        np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
        np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
    ]
    score_b = [
        np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
        np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
        np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]),
        np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526])
    ]

    # single input with unit weight is the identity
    weighted_score = get_weighted_score([score_a], [1])
    assert np.all(np.isclose(np.array(score_a), np.array(weighted_score)))

    coeff_a, coeff_b = 2., 1.
    weighted_score = get_weighted_score([score_a, score_b], [coeff_a, coeff_b])
    ground_truth = [
        x * coeff_a + y * coeff_b for x, y in zip(score_a, score_b)
    ]
    assert np.all(np.isclose(np.array(ground_truth), np.array(weighted_score)))
def test_mean_average_precision():
    """Check mean average precision on two label matrices."""

    def content_for_unittest(scores, labels, result):
        gt = mean_average_precision(scores, labels)
        assert gt == result

    scores = [
        np.array([0.1, 0.2, 0.3, 0.4]),
        np.array([0.2, 0.3, 0.4, 0.1]),
        np.array([0.3, 0.4, 0.1, 0.2]),
        np.array([0.4, 0.1, 0.2, 0.3])
    ]

    label1 = np.array([[1, 1, 0, 0], [1, 0, 1, 1], [1, 0, 1, 0], [1, 1, 0, 1]])
    result1 = 2 / 3
    label2 = np.array([[0, 1, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1]])
    result2 = np.mean([0.5, 0.5833333333333333, 0.8055555555555556, 1.0])

    content_for_unittest(scores, label1, result1)
    content_for_unittest(scores, label2, result2)
| 11,392
| 35.751613
| 79
|
py
|
STTS
|
STTS-main/VideoSwin/tests/test_metrics/test_losses.py
|
import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv import ConfigDict
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from torch.autograd import Variable
from mmaction.models import (BCELossWithLogits, BinaryLogisticRegressionLoss,
BMNLoss, CrossEntropyLoss, HVULoss, NLLLoss,
OHEMHingeLoss, SSNLoss)
def test_hvu_loss():
    """Check HVULoss for all combinations of loss_type / with_mask /
    reduction against manually computed BCE losses."""
    pred = torch.tensor([[-1.0525, -0.7085, 0.1819, -0.8011],
                         [0.1555, -1.5550, 0.5586, 1.9746]])
    gt = torch.tensor([[1., 0., 0., 0.], [0., 0., 1., 1.]])
    mask = torch.tensor([[1., 1., 0., 0.], [0., 0., 1., 1.]])
    category_mask = torch.tensor([[1., 0.], [0., 1.]])
    categories = ['action', 'scene']
    category_nums = (2, 2)
    category_loss_weights = (1, 1)

    # loss_type='all', no mask, sum reduction
    loss_all_nomask_sum = HVULoss(
        categories=categories,
        category_nums=category_nums,
        category_loss_weights=category_loss_weights,
        loss_type='all',
        with_mask=False,
        reduction='sum')
    loss = loss_all_nomask_sum(pred, gt, mask, category_mask)
    loss1 = F.binary_cross_entropy_with_logits(pred, gt, reduction='none')
    loss1 = torch.sum(loss1, dim=1)
    assert torch.eq(loss['loss_cls'], torch.mean(loss1))

    # loss_type='all', with mask
    loss_all_mask = HVULoss(
        categories=categories,
        category_nums=category_nums,
        category_loss_weights=category_loss_weights,
        loss_type='all',
        with_mask=True)
    loss = loss_all_mask(pred, gt, mask, category_mask)
    loss1 = F.binary_cross_entropy_with_logits(pred, gt, reduction='none')
    loss1 = torch.sum(loss1 * mask, dim=1) / torch.sum(mask, dim=1)
    loss1 = torch.mean(loss1)
    assert torch.eq(loss['loss_cls'], loss1)

    # loss_type='individual', with mask
    loss_ind_mask = HVULoss(
        categories=categories,
        category_nums=category_nums,
        category_loss_weights=category_loss_weights,
        loss_type='individual',
        with_mask=True)
    loss = loss_ind_mask(pred, gt, mask, category_mask)
    action_loss = F.binary_cross_entropy_with_logits(pred[:1, :2], gt[:1, :2])
    scene_loss = F.binary_cross_entropy_with_logits(pred[1:, 2:], gt[1:, 2:])
    loss1 = (action_loss + scene_loss) / 2
    assert torch.eq(loss['loss_cls'], loss1)

    # loss_type='individual', no mask, sum reduction
    loss_ind_nomask_sum = HVULoss(
        categories=categories,
        category_nums=category_nums,
        category_loss_weights=category_loss_weights,
        loss_type='individual',
        with_mask=False,
        reduction='sum')
    loss = loss_ind_nomask_sum(pred, gt, mask, category_mask)
    action_loss = F.binary_cross_entropy_with_logits(
        pred[:, :2], gt[:, :2], reduction='none')
    action_loss = torch.sum(action_loss, dim=1)
    action_loss = torch.mean(action_loss)
    scene_loss = F.binary_cross_entropy_with_logits(
        pred[:, 2:], gt[:, 2:], reduction='none')
    scene_loss = torch.sum(scene_loss, dim=1)
    scene_loss = torch.mean(scene_loss)
    loss1 = (action_loss + scene_loss) / 2
    assert torch.eq(loss['loss_cls'], loss1)
def test_cross_entropy_loss():
    """Check CrossEntropyLoss on hard/soft labels with and without class
    weights against ``F.cross_entropy``."""
    cls_scores = torch.rand((3, 4))
    hard_gt_labels = torch.LongTensor([0, 1, 2]).squeeze()
    soft_gt_labels = torch.FloatTensor([[1, 0, 0, 0], [0, 1, 0, 0],
                                        [0, 0, 1, 0]]).squeeze()

    # hard label without weight
    cross_entropy_loss = CrossEntropyLoss()
    output_loss = cross_entropy_loss(cls_scores, hard_gt_labels)
    assert torch.equal(output_loss, F.cross_entropy(cls_scores,
                                                    hard_gt_labels))

    # hard label with class weight
    weight = torch.rand(4)
    class_weight = weight.numpy().tolist()
    cross_entropy_loss = CrossEntropyLoss(class_weight=class_weight)
    output_loss = cross_entropy_loss(cls_scores, hard_gt_labels)
    assert torch.equal(
        output_loss,
        F.cross_entropy(cls_scores, hard_gt_labels, weight=weight))

    # soft label without class weight; the one-hot soft labels above are
    # equivalent to the hard labels, hence the comparison.
    cross_entropy_loss = CrossEntropyLoss()
    output_loss = cross_entropy_loss(cls_scores, soft_gt_labels)
    assert_almost_equal(
        output_loss.numpy(),
        F.cross_entropy(cls_scores, hard_gt_labels).numpy(),
        decimal=4)

    # soft label with class weight
    cross_entropy_loss = CrossEntropyLoss(class_weight=class_weight)
    output_loss = cross_entropy_loss(cls_scores, soft_gt_labels)
    assert_almost_equal(
        output_loss.numpy(),
        F.cross_entropy(cls_scores, hard_gt_labels, weight=weight).numpy(),
        decimal=4)
def test_bce_loss_with_logits():
    """Check BCELossWithLogits against the functional equivalent, with and
    without class weights."""
    cls_scores = torch.rand((3, 4))
    gt_labels = torch.rand((3, 4))
    bce_loss_with_logits = BCELossWithLogits()
    output_loss = bce_loss_with_logits(cls_scores, gt_labels)
    assert torch.equal(
        output_loss, F.binary_cross_entropy_with_logits(cls_scores, gt_labels))

    weight = torch.rand(4)
    class_weight = weight.numpy().tolist()
    bce_loss_with_logits = BCELossWithLogits(class_weight=class_weight)
    output_loss = bce_loss_with_logits(cls_scores, gt_labels)
    assert torch.equal(
        output_loss,
        F.binary_cross_entropy_with_logits(
            cls_scores, gt_labels, weight=weight))
def test_nll_loss():
    """Check NLLLoss against ``F.nll_loss`` on log-softmax scores."""
    cls_scores = torch.randn(3, 3)
    gt_labels = torch.tensor([0, 2, 1]).squeeze()
    sm = nn.Softmax(dim=0)
    nll_loss = NLLLoss()
    cls_score_log = torch.log(sm(cls_scores))
    output_loss = nll_loss(cls_score_log, gt_labels)
    assert torch.equal(output_loss, F.nll_loss(cls_score_log, gt_labels))
def test_binary_logistic_loss():
    """Check BinaryLogisticRegressionLoss on perfect and imperfect scores."""
    binary_logistic_regression_loss = BinaryLogisticRegressionLoss()

    # perfect predictions -> zero loss
    reg_score = torch.tensor([0., 1.])
    label = torch.tensor([0., 1.])
    output_loss = binary_logistic_regression_loss(reg_score, label, 0.5)
    assert_array_almost_equal(output_loss.numpy(), np.array([0.]), decimal=4)

    reg_score = torch.tensor([0.3, 0.9])
    label = torch.tensor([0., 1.])
    output_loss = binary_logistic_regression_loss(reg_score, label, 0.5)
    assert_array_almost_equal(
        output_loss.numpy(), np.array([0.231]), decimal=4)
def test_bmn_loss():
    """Check the three components of BMNLoss (temporal evaluation,
    proposal regression, proposal classification) and their weighted sum."""
    bmn_loss = BMNLoss()

    # test tem_loss: sum of binary logistic losses on start/end scores.
    pred_start = torch.tensor([0.9, 0.1])
    pred_end = torch.tensor([0.1, 0.9])
    gt_start = torch.tensor([1., 0.])
    gt_end = torch.tensor([0., 1.])
    output_tem_loss = bmn_loss.tem_loss(pred_start, pred_end, gt_start, gt_end)
    binary_logistic_regression_loss = BinaryLogisticRegressionLoss()
    assert_loss = (
        binary_logistic_regression_loss(pred_start, gt_start) +
        binary_logistic_regression_loss(pred_end, gt_end))
    assert_array_almost_equal(
        output_tem_loss.numpy(), assert_loss.numpy(), decimal=4)

    # test pem_reg_loss
    # All RNG seeds are fixed so the expected value 0.2140 is reproducible
    # (presumably pem_reg_loss samples randomly — confirm in BMNLoss).
    seed = 1
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    pred_bm_reg = torch.tensor([[0.1, 0.99], [0.5, 0.4]])
    gt_iou_map = torch.tensor([[0, 1.], [0, 1.]])
    mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]])
    output_pem_reg_loss = bmn_loss.pem_reg_loss(pred_bm_reg, gt_iou_map, mask)
    assert_array_almost_equal(
        output_pem_reg_loss.numpy(), np.array([0.2140]), decimal=4)

    # test pem_cls_loss
    pred_bm_cls = torch.tensor([[0.1, 0.99], [0.95, 0.2]])
    gt_iou_map = torch.tensor([[0., 1.], [0., 1.]])
    mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]])
    output_pem_cls_loss = bmn_loss.pem_cls_loss(pred_bm_cls, gt_iou_map, mask)
    assert_array_almost_equal(
        output_pem_cls_loss.numpy(), np.array([1.6137]), decimal=4)

    # test bmn_loss: the full forward returns the weighted total plus the
    # three individual components.
    pred_bm = torch.tensor([[[[0.1, 0.99], [0.5, 0.4]],
                             [[0.1, 0.99], [0.95, 0.2]]]])
    pred_start = torch.tensor([[0.9, 0.1]])
    pred_end = torch.tensor([[0.1, 0.9]])
    gt_iou_map = torch.tensor([[[0., 2.5], [0., 10.]]])
    gt_start = torch.tensor([[1., 0.]])
    gt_end = torch.tensor([[0., 1.]])
    mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]])
    output_loss = bmn_loss(pred_bm, pred_start, pred_end, gt_iou_map, gt_start,
                           gt_end, mask)
    # Total = tem + 10 * pem_reg + pem_cls (per the assertion below).
    assert_array_almost_equal(
        output_loss[0].numpy(),
        output_tem_loss + 10 * output_pem_reg_loss + output_pem_cls_loss)
    assert_array_almost_equal(output_loss[1].numpy(), output_tem_loss)
    assert_array_almost_equal(output_loss[2].numpy(), output_pem_reg_loss)
    assert_array_almost_equal(output_loss[3].numpy(), output_pem_cls_loss)
def test_ohem_hinge_loss():
    """OHEMHingeLoss forward/backward for a single video, plus the error
    raised when the number of labels does not match ``num_video``."""
    # test normal case
    pred = torch.tensor([[
        0.5161, 0.5228, 0.7748, 0.0573, 0.1113, 0.8862, 0.1752, 0.9448, 0.0253,
        0.1009, 0.4371, 0.2232, 0.0412, 0.3487, 0.3350, 0.9294, 0.7122, 0.3072,
        0.2942, 0.7679
    ]],
                        requires_grad=True)
    gt = torch.tensor([8])
    num_video = 1
    loss = OHEMHingeLoss.apply(pred, gt, 1, 1.0, num_video)
    assert_array_almost_equal(
        loss.detach().numpy(), np.array([0.0552]), decimal=4)
    # Plain tensors participate in autograd directly since PyTorch 0.4;
    # the deprecated torch.autograd.Variable wrapper is unnecessary.
    loss.backward(torch.ones([1]))
    # Only the selected class column (index 7 for gt=8) receives gradient.
    assert_array_almost_equal(
        np.array(pred.grad),
        np.array([[
            0., 0., 0., 0., 0., 0., 0., -1., 0., 0., 0., 0., 0., 0., 0., 0.,
            0., 0., 0., 0.
        ]]),
        decimal=4)

    # test error case: two labels for a single video must raise.
    with pytest.raises(ValueError):
        gt = torch.tensor([8, 10])
        loss = OHEMHingeLoss.apply(pred, gt, 1, 1.0, num_video)
def test_ssn_loss():
    """Exercise the three sub-losses of SSNLoss (activity, completeness,
    classwise regression) and the combined forward pass."""
    ssn_loss = SSNLoss()

    # test activity_loss: plain cross-entropy on the indexed proposals.
    activity_score = torch.rand((8, 21))
    labels = torch.LongTensor([8] * 8).squeeze()
    activity_indexer = torch.tensor([0, 7])
    output_activity_loss = ssn_loss.activity_loss(activity_score, labels,
                                                  activity_indexer)
    assert torch.equal(
        output_activity_loss,
        F.cross_entropy(activity_score[activity_indexer, :],
                        labels[activity_indexer]))

    # test completeness_loss
    completeness_score = torch.rand((8, 20), requires_grad=True)
    labels = torch.LongTensor([8] * 8).squeeze()
    completeness_indexer = torch.tensor([0, 1, 2, 3, 4, 5, 6])
    positive_per_video = 1
    incomplete_per_video = 6
    output_completeness_loss = ssn_loss.completeness_loss(
        completeness_score, labels, completeness_indexer, positive_per_video,
        incomplete_per_video)
    # Reference computation: OHEM hinge on positives (+1) and incompletes
    # (-1), normalized by the number of kept samples.
    pred = completeness_score[completeness_indexer, :]
    gt = labels[completeness_indexer]
    pred_dim = pred.size(1)
    pred = pred.view(-1, positive_per_video + incomplete_per_video, pred_dim)
    gt = gt.view(-1, positive_per_video + incomplete_per_video)
    # yapf:disable
    positive_pred = pred[:, :positive_per_video, :].contiguous().view(-1, pred_dim)  # noqa:E501
    incomplete_pred = pred[:, positive_per_video:, :].contiguous().view(-1, pred_dim)  # noqa:E501
    # yapf:enable
    ohem_ratio = 0.17
    positive_loss = OHEMHingeLoss.apply(
        positive_pred, gt[:, :positive_per_video].contiguous().view(-1), 1,
        1.0, positive_per_video)
    incomplete_loss = OHEMHingeLoss.apply(
        incomplete_pred, gt[:, positive_per_video:].contiguous().view(-1), -1,
        ohem_ratio, incomplete_per_video)
    num_positives = positive_pred.size(0)
    num_incompletes = int(incomplete_pred.size(0) * ohem_ratio)
    assert_loss = ((positive_loss + incomplete_loss) /
                   float(num_positives + num_incompletes))
    assert torch.equal(output_completeness_loss, assert_loss)

    # test reg_loss
    bbox_pred = torch.rand((8, 20, 2))
    labels = torch.LongTensor([8] * 8).squeeze()
    bbox_targets = torch.rand((8, 2))
    regression_indexer = torch.tensor([0])
    output_reg_loss = ssn_loss.classwise_regression_loss(
        bbox_pred, labels, bbox_targets, regression_indexer)
    # Reference: gather the per-class (label - 1) regression outputs and
    # compare with smooth-L1 against the targets (scaled by 2 below).
    pred = bbox_pred[regression_indexer, :, :]
    gt = labels[regression_indexer]
    reg_target = bbox_targets[regression_indexer, :]
    class_idx = gt.data - 1
    classwise_pred = pred[:, class_idx, :]
    classwise_reg_pred = torch.cat((torch.diag(classwise_pred[:, :, 0]).view(
        -1, 1), torch.diag(classwise_pred[:, :, 1]).view(-1, 1)),
                                   dim=1)
    assert torch.equal(
        output_reg_loss,
        F.smooth_l1_loss(classwise_reg_pred.view(-1), reg_target.view(-1)) * 2)

    # test ssn_loss: combined forward with the configured loss weights
    # (completeness and regression scaled by 0.1).
    proposal_type = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 2]])
    train_cfg = ConfigDict(
        dict(
            ssn=dict(
                sampler=dict(
                    num_per_video=8,
                    positive_ratio=1,
                    background_ratio=1,
                    incomplete_ratio=6,
                    add_gt_as_proposals=True),
                loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1))))
    output_loss = ssn_loss(activity_score, completeness_score, bbox_pred,
                           proposal_type, labels, bbox_targets, train_cfg)
    assert torch.equal(output_loss['loss_activity'], output_activity_loss)
    assert torch.equal(output_loss['loss_completeness'],
                       output_completeness_loss * 0.1)
    assert torch.equal(output_loss['loss_reg'], output_reg_loss * 0.1)
| 13,164
| 38.653614
| 98
|
py
|
STTS
|
STTS-main/VideoSwin/configs/Kinetics/t0_0.625.py
|
# Video Swin-B on Kinetics with STTS token selection in the temporal
# dimension: backbone.time_pruning_loc=[0] applies selection at location 0
# and backbone.time_left_ratio=[0.625] keeps 62.5% of temporal tokens,
# scored by the 'tpool' module (see the STTS backbone for exact semantics).
_base_ = [
    '../_base_/models/swin/swin_base.py', '../_base_/default_runtime.py'
]
model=dict(backbone=dict(patch_size=(2,4,4), drop_path_rate=0.2, time_pruning_loc=[0], time_left_ratio=[0.625], time_score='tpool', pretrained2d=False), test_cfg=dict(max_testing_views=2))

# dataset settings
dataset_type = 'VideoDataset'
# TODO: fill in the local dataset root and annotation split files.
data_root = "path_to_your_dataset"
data_root_val = "path_to_your_dataset"
ann_file_train = "path_to_the_train_split"
ann_file_val = "path_to_the_val_split"
ann_file_test = "path_to_the_test_split"
# ImageNet RGB mean/std (to_bgr=False keeps RGB order).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
# Training: one clip of 32 frames at stride 2, random resized crop to 224.
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
# Validation: single clip, center crop, no flipping.
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
# Testing: 4 temporal clips x ThreeCrop (3 spatial crops) = 12 views/video.
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=4,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 224)),
    dict(type='ThreeCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    val_dataloader=dict(
        videos_per_gpu=1,
        workers_per_gpu=1
    ),
    test_dataloader=dict(
        videos_per_gpu=1,
        workers_per_gpu=1
    ),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
# AdamW with no weight decay on positional tables/norms; 'predictor'
# parameters train with a 1000x smaller learning rate.
optimizer = dict(type='AdamW', lr=3e-4, betas=(0.9, 0.999), weight_decay=0.05,
                 paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
                                                 'relative_position_bias_table': dict(decay_mult=0.),
                                                 'norm': dict(decay_mult=0.),
                                                 'predictor': dict(lr_mult=0.001)}))
# learning policy
# Cosine annealing with a 0.8-epoch linear warmup.
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=0.8
)
total_epochs = 10
# runtime settings
checkpoint_config = dict(interval=1)
work_dir = './work_dirs/k400_swin_base_22k_patch244_window877.py'
find_unused_parameters = False
# do not use mmdet version fp16
fp16 = None
# NOTE(review): update_interval presumably accumulates gradients over 8
# iterations with fp16 in the project's DistOptimizerHook — confirm there.
optimizer_config = dict(
    type="DistOptimizerHook",
    update_interval=8,
    grad_clip=None,
    coalesce=True,
    bucket_size_mb=-1,
    use_fp16=True,
)
| 3,950
| 30.862903
| 188
|
py
|
STTS
|
STTS-main/VideoSwin/configs/Kinetics/t0_0.5625.py
|
# Video Swin-B on Kinetics with STTS token selection in the temporal
# dimension: backbone.time_pruning_loc=[0] applies selection at location 0
# and backbone.time_left_ratio=[0.5625] keeps 56.25% of temporal tokens,
# scored by the 'tpool' module (see the STTS backbone for exact semantics).
_base_ = [
    '../_base_/models/swin/swin_base.py', '../_base_/default_runtime.py'
]
model=dict(backbone=dict(patch_size=(2,4,4), drop_path_rate=0.2, time_pruning_loc=[0], time_left_ratio=[0.5625], time_score='tpool', pretrained2d=False), test_cfg=dict(max_testing_views=2))

# dataset settings
dataset_type = 'VideoDataset'
# TODO: fill in the local dataset root and annotation split files.
data_root = "path_to_your_dataset"
data_root_val = "path_to_your_dataset"
ann_file_train = "path_to_the_train_split"
ann_file_val = "path_to_the_val_split"
ann_file_test = "path_to_the_test_split"
# ImageNet RGB mean/std (to_bgr=False keeps RGB order).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
# Training: one clip of 32 frames at stride 2, random resized crop to 224.
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
# Validation: single clip, center crop, no flipping.
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
# Testing: 4 temporal clips x ThreeCrop (3 spatial crops) = 12 views/video.
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=4,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 224)),
    dict(type='ThreeCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    val_dataloader=dict(
        videos_per_gpu=1,
        workers_per_gpu=1
    ),
    test_dataloader=dict(
        videos_per_gpu=1,
        workers_per_gpu=1
    ),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
# AdamW with no weight decay on positional tables/norms; 'predictor'
# parameters train with a 1000x smaller learning rate.
optimizer = dict(type='AdamW', lr=3e-4, betas=(0.9, 0.999), weight_decay=0.05,
                 paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
                                                 'relative_position_bias_table': dict(decay_mult=0.),
                                                 'norm': dict(decay_mult=0.),
                                                 'predictor': dict(lr_mult=0.001)}))
# learning policy
# Cosine annealing with a 0.8-epoch linear warmup.
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=0.8
)
total_epochs = 10
# runtime settings
checkpoint_config = dict(interval=1)
work_dir = './work_dirs/k400_swin_base_22k_patch244_window877.py'
find_unused_parameters = False
# do not use mmdet version fp16
fp16 = None
# NOTE(review): update_interval presumably accumulates gradients over 8
# iterations with fp16 in the project's DistOptimizerHook — confirm there.
optimizer_config = dict(
    type="DistOptimizerHook",
    update_interval=8,
    grad_clip=None,
    coalesce=True,
    bucket_size_mb=-1,
    use_fp16=True,
)
| 3,951
| 30.870968
| 189
|
py
|
STTS
|
STTS-main/VideoSwin/configs/Kinetics/t0_0.875.py
|
# Video Swin-B on Kinetics with STTS token selection in the temporal
# dimension: backbone.time_pruning_loc=[0] applies selection at location 0
# and backbone.time_left_ratio=[0.875] keeps 87.5% of temporal tokens,
# scored by the 'tpool' module (see the STTS backbone for exact semantics).
_base_ = [
    '../_base_/models/swin/swin_base.py', '../_base_/default_runtime.py'
]
model=dict(backbone=dict(patch_size=(2,4,4), drop_path_rate=0.2, time_pruning_loc=[0], time_left_ratio=[0.875], time_score='tpool', pretrained2d=False), test_cfg=dict(max_testing_views=2))

# dataset settings
dataset_type = 'VideoDataset'
# TODO: fill in the local dataset root and annotation split files.
data_root = "path_to_your_dataset"
data_root_val = "path_to_your_dataset"
ann_file_train = "path_to_the_train_split"
ann_file_val = "path_to_the_val_split"
ann_file_test = "path_to_the_test_split"
# ImageNet RGB mean/std (to_bgr=False keeps RGB order).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
# Training: one clip of 32 frames at stride 2, random resized crop to 224.
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
# Validation: single clip, center crop, no flipping.
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
# Testing: 4 temporal clips x ThreeCrop (3 spatial crops) = 12 views/video.
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=4,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 224)),
    dict(type='ThreeCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    val_dataloader=dict(
        videos_per_gpu=1,
        workers_per_gpu=1
    ),
    test_dataloader=dict(
        videos_per_gpu=1,
        workers_per_gpu=1
    ),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
# AdamW with no weight decay on positional tables/norms; 'predictor'
# parameters train with a 1000x smaller learning rate.
optimizer = dict(type='AdamW', lr=3e-4, betas=(0.9, 0.999), weight_decay=0.05,
                 paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
                                                 'relative_position_bias_table': dict(decay_mult=0.),
                                                 'norm': dict(decay_mult=0.),
                                                 'predictor': dict(lr_mult=0.001)}))
# learning policy
# Cosine annealing with a 0.8-epoch linear warmup.
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=0.8
)
total_epochs = 10
# runtime settings
checkpoint_config = dict(interval=1)
work_dir = './work_dirs/k400_swin_base_22k_patch244_window877.py'
find_unused_parameters = False
# do not use mmdet version fp16
fp16 = None
# NOTE(review): update_interval presumably accumulates gradients over 8
# iterations with fp16 in the project's DistOptimizerHook — confirm there.
optimizer_config = dict(
    type="DistOptimizerHook",
    update_interval=8,
    grad_clip=None,
    coalesce=True,
    bucket_size_mb=-1,
    use_fp16=True,
)
| 3,950
| 30.862903
| 188
|
py
|
STTS
|
STTS-main/VideoSwin/configs/Kinetics/t0_0.75.py
|
# Video Swin-B on Kinetics with STTS token selection in the temporal
# dimension: backbone.time_pruning_loc=[0] applies selection at location 0
# and backbone.time_left_ratio=[0.75] keeps 75% of temporal tokens,
# scored by the 'tpool' module (see the STTS backbone for exact semantics).
_base_ = [
    '../_base_/models/swin/swin_base.py', '../_base_/default_runtime.py'
]
model=dict(backbone=dict(patch_size=(2,4,4), drop_path_rate=0.2, time_pruning_loc=[0], time_left_ratio=[0.75], time_score='tpool', pretrained2d=False), test_cfg=dict(max_testing_views=2))

# dataset settings
dataset_type = 'VideoDataset'
# TODO: fill in the local dataset root and annotation split files.
data_root = "path_to_your_dataset"
data_root_val = "path_to_your_dataset"
ann_file_train = "path_to_the_train_split"
ann_file_val = "path_to_the_val_split"
ann_file_test = "path_to_the_test_split"
# ImageNet RGB mean/std (to_bgr=False keeps RGB order).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
# Training: one clip of 32 frames at stride 2, random resized crop to 224.
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
# Validation: single clip, center crop, no flipping.
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
# Testing: 4 temporal clips x ThreeCrop (3 spatial crops) = 12 views/video.
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=4,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 224)),
    dict(type='ThreeCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    val_dataloader=dict(
        videos_per_gpu=1,
        workers_per_gpu=1
    ),
    test_dataloader=dict(
        videos_per_gpu=1,
        workers_per_gpu=1
    ),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
# AdamW with no weight decay on positional tables/norms; 'predictor'
# parameters train with a 1000x smaller learning rate.
optimizer = dict(type='AdamW', lr=3e-4, betas=(0.9, 0.999), weight_decay=0.05,
                 paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
                                                 'relative_position_bias_table': dict(decay_mult=0.),
                                                 'norm': dict(decay_mult=0.),
                                                 'predictor': dict(lr_mult=0.001)}))
# learning policy
# Cosine annealing with a 0.8-epoch linear warmup.
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=0.8
)
total_epochs = 10
# runtime settings
checkpoint_config = dict(interval=1)
work_dir = './work_dirs/k400_swin_base_22k_patch244_window877.py'
find_unused_parameters = False
# do not use mmdet version fp16
fp16 = None
# NOTE(review): update_interval presumably accumulates gradients over 8
# iterations with fp16 in the project's DistOptimizerHook — confirm there.
optimizer_config = dict(
    type="DistOptimizerHook",
    update_interval=8,
    grad_clip=None,
    coalesce=True,
    bucket_size_mb=-1,
    use_fp16=True,
)
| 3,949
| 30.854839
| 187
|
py
|
STTS
|
STTS-main/VideoSwin/configs/Kinetics/t0_0.375.py
|
# Video Swin-B on Kinetics with STTS token selection in the temporal
# dimension: backbone.time_pruning_loc=[0] applies selection at location 0
# and backbone.time_left_ratio=[0.375] keeps 37.5% of temporal tokens,
# scored by the 'tpool' module (see the STTS backbone for exact semantics).
_base_ = [
    '../_base_/models/swin/swin_base.py', '../_base_/default_runtime.py'
]
model=dict(backbone=dict(patch_size=(2,4,4), drop_path_rate=0.2, time_pruning_loc=[0], time_left_ratio=[0.375], time_score='tpool', pretrained2d=False), test_cfg=dict(max_testing_views=2))

# dataset settings
dataset_type = 'VideoDataset'
# TODO: fill in the local dataset root and annotation split files.
data_root = "path_to_your_dataset"
data_root_val = "path_to_your_dataset"
ann_file_train = "path_to_the_train_split"
ann_file_val = "path_to_the_val_split"
ann_file_test = "path_to_the_test_split"
# ImageNet RGB mean/std (to_bgr=False keeps RGB order).
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
# Training: one clip of 32 frames at stride 2, random resized crop to 224.
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
# Validation: single clip, center crop, no flipping.
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
# Testing: 4 temporal clips x ThreeCrop (3 spatial crops) = 12 views/video.
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=4,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 224)),
    dict(type='ThreeCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    val_dataloader=dict(
        videos_per_gpu=1,
        workers_per_gpu=1
    ),
    test_dataloader=dict(
        videos_per_gpu=1,
        workers_per_gpu=1
    ),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
# optimizer
# AdamW with no weight decay on positional tables/norms; 'predictor'
# parameters train with a 1000x smaller learning rate.
optimizer = dict(type='AdamW', lr=3e-4, betas=(0.9, 0.999), weight_decay=0.05,
                 paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
                                                 'relative_position_bias_table': dict(decay_mult=0.),
                                                 'norm': dict(decay_mult=0.),
                                                 'predictor': dict(lr_mult=0.001)}))
# learning policy
# Cosine annealing with a 0.8-epoch linear warmup.
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=0.8
)
total_epochs = 10
# runtime settings
checkpoint_config = dict(interval=1)
work_dir = './work_dirs/k400_swin_base_22k_patch244_window877.py'
find_unused_parameters = False
# do not use mmdet version fp16
fp16 = None
# NOTE(review): update_interval presumably accumulates gradients over 8
# iterations with fp16 in the project's DistOptimizerHook — confirm there.
optimizer_config = dict(
    type="DistOptimizerHook",
    update_interval=8,
    grad_clip=None,
    coalesce=True,
    bucket_size_mb=-1,
    use_fp16=True,
)
| 3,950
| 30.862903
| 188
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/default_runtime.py
|
# Shared runtime defaults, inherited by the other configs via _base_.
checkpoint_config = dict(interval=1)  # checkpoint every interval (epochs)
log_config = dict(
    interval=20,  # log every 20 iterations
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None  # optional checkpoint to initialize weights from
resume_from = None  # optional checkpoint to resume training from
workflow = [('train', 1)]
| 310
| 21.214286
| 45
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/slowfast_r50.py
|
# model settings
# SlowFast R50: two-pathway 3D ResNet-50 (slow path with lateral
# connections plus a lightweight fast path, base_channels=8).
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=8,  # tau
        speed_ratio=8,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastHead',
        in_channels=2304,  # 2048+256
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='prob'))
| 1,123
| 27.1
| 41
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/tsm_r50.py
|
# model settings
# TSM: 2D ResNet-50 with temporal shift (shift_div=8) and an
# average-consensus TSM head.
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='prob'))
| 563
| 24.636364
| 51
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/tsn_r50.py
|
# model settings
# TSN: 2D ResNet-50 with an average-consensus TSN head (400 classes).
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips=None))
| 513
| 24.7
| 51
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/r2plus1d_r34.py
|
# model settings
# R(2+1)D-34: ResNet2Plus1d with factorized (2+1)D convolutions
# (Conv2plus1d) and SyncBN.
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet2Plus1d',
        depth=34,
        pretrained=None,
        pretrained2d=False,
        norm_eval=False,
        conv_cfg=dict(type='Conv2plus1d'),
        norm_cfg=dict(type='SyncBN', requires_grad=True, eps=1e-3),
        conv1_kernel=(3, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(1, 1, 1, 1),
        spatial_strides=(1, 2, 2, 2),
        temporal_strides=(1, 2, 2, 2),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=512,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='prob'))
| 813
| 27.068966
| 67
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/trn_r50.py
|
# model settings
# TRN: 2D ResNet-50 (partial BN) with a multi-scale temporal relation head.
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        partial_bn=True),
    cls_head=dict(
        type='TRNHead',
        num_classes=400,
        in_channels=2048,
        num_segments=8,
        spatial_type='avg',
        relation_type='TRNMultiScale',
        hidden_dim=256,
        dropout_ratio=0.8,
        init_std=0.001),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='prob'))
| 576
| 24.086957
| 44
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/c3d_sports1m_pretrained.py
|
# model settings
# C3D backbone pretrained on Sports-1M, with a 101-class I3D head
# (presumably targeting UCF-101 — confirm against the dataset config).
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='C3D',
        pretrained=  # noqa: E251
        'https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_pretrain_20201016-dcc47ddc.pth',  # noqa: E501
        style='pytorch',
        conv_cfg=dict(type='Conv3d'),
        norm_cfg=None,
        act_cfg=dict(type='ReLU'),
        dropout_ratio=0.5,
        init_std=0.005),
    cls_head=dict(
        type='I3DHead',
        num_classes=101,
        in_channels=4096,
        spatial_type=None,
        dropout_ratio=0.5,
        init_std=0.01),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='score'))
| 703
| 28.333333
| 124
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/tpn_slowonly_r50.py
|
# model settings
# TPN on SlowOnly R50: temporal pyramid network neck over the last two
# stages (out_indices 2, 3) with an auxiliary head (loss_weight=0.5).
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained='torchvision://resnet50',
        lateral=False,
        out_indices=(2, 3),
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(
            in_channels=(1024, 2048), out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024),
            mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=400, loss_weight=0.5)),
    cls_head=dict(
        type='TPNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.01),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='prob'))
| 1,310
| 30.97561
| 63
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/i3d_r50.py
|
# model settings
# I3D R50: 3D ResNet-50 inflated from 2D ImageNet weights
# (pretrained2d=True) with per-block inflate pattern below.
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv1_kernel=(5, 7, 7),
        conv1_stride_t=2,
        pool1_stride_t=2,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='prob'))
# This setting refers to https://github.com/open-mmlab/mmaction/blob/master/mmaction/models/tenons/backbones/resnet_i3d.py#L329-L332 # noqa: E501
| 870
| 30.107143
| 146
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/audioonly_r50.py
|
# model settings
# Audio-only recognizer: ResNetAudio-50 on single-channel input
# (presumably audio spectrograms — confirm against the data pipeline).
model = dict(
    type='AudioRecognizer',
    backbone=dict(
        type='ResNetAudio',
        depth=50,
        pretrained=None,
        in_channels=1,
        norm_eval=False),
    cls_head=dict(
        type='AudioTSNHead',
        num_classes=400,
        in_channels=1024,
        dropout_ratio=0.5,
        init_std=0.01),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='prob'))
| 451
| 22.789474
| 41
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/tsn_r50_audio.py
|
# model settings
# Audio recognizer: plain 2D ResNet-50 on single-channel input with an
# AudioTSNHead (400 classes).
model = dict(
    type='AudioRecognizer',
    backbone=dict(type='ResNet', depth=50, in_channels=1, norm_eval=False),
    cls_head=dict(
        type='AudioTSNHead',
        num_classes=400,
        in_channels=2048,
        dropout_ratio=0.5,
        init_std=0.01),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='prob'))
| 388
| 26.785714
| 75
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/tin_r50.py
|
# model settings
# TIN: ResNetTIN-50 (shift_div=4); reuses TSMHead with is_shift=False.
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTIN',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=4),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=False),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips=None))
| 562
| 24.590909
| 51
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/bmn_400x100.py
|
# model settings
# BMN boundary-matching network: 400-d features over 100 temporal
# positions, with soft-NMS post-processing thresholds.
model = dict(
    type='BMN',
    temporal_dim=100,
    boundary_ratio=0.5,
    num_samples=32,
    num_samples_per_bin=3,
    feat_dim=400,
    soft_nms_alpha=0.4,
    soft_nms_low_threshold=0.5,
    soft_nms_high_threshold=0.9,
    post_process_top_k=100)
| 275
| 20.230769
| 32
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/tsm_mobilenet_v2.py
|
# model settings
# TSM on MobileNetV2 (8 segments, shift_div=8) with a TSM head.
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='MobileNetV2TSM',
        shift_div=8,
        num_segments=8,
        is_shift=True,
        pretrained='mmcls://mobilenet_v2'),
    cls_head=dict(
        type='TSMHead',
        num_segments=8,
        num_classes=400,
        in_channels=1280,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='prob'))
| 594
| 24.869565
| 51
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/tanet_r50.py
|
# model settings
# TANet: ResNet-50 with temporal adaptive modules (default tam_cfg),
# reusing the TSM consensus head.
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='TANet',
        pretrained='torchvision://resnet50',
        depth=50,
        num_segments=8,
        tam_cfg=dict()),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001),
    # model training and testing settings
    train_cfg=None,
    test_cfg=dict(average_clips='prob'))
| 538
| 24.666667
| 51
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/csn_ig65m_pretrained.py
|
# model settings
model = dict(
type='Recognizer3D',
backbone=dict(
type='ResNet3dCSN',
pretrained2d=False,
pretrained= # noqa: E251
'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth', # noqa: E501
depth=152,
with_pool2=False,
bottleneck_mode='ir',
norm_eval=False,
zero_init_residual=False),
cls_head=dict(
type='I3DHead',
num_classes=400,
in_channels=2048,
spatial_type='avg',
dropout_ratio=0.5,
init_std=0.01),
# model training and testing settings
train_cfg=None,
test_cfg=dict(average_clips='prob'))
| 709
| 28.583333
| 132
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/tpn_tsm_r50.py
|
# model settings
model = dict(
type='Recognizer2D',
backbone=dict(
type='ResNetTSM',
pretrained='torchvision://resnet50',
depth=50,
out_indices=(2, 3),
norm_eval=False,
shift_div=8),
neck=dict(
type='TPN',
in_channels=(1024, 2048),
out_channels=1024,
spatial_modulation_cfg=dict(
in_channels=(1024, 2048), out_channels=2048),
temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
upsample_cfg=dict(scale_factor=(1, 1, 1)),
downsample_cfg=dict(downsample_scale=(1, 1, 1)),
level_fusion_cfg=dict(
in_channels=(1024, 1024),
mid_channels=(1024, 1024),
out_channels=2048,
downsample_scales=((1, 1, 1), (1, 1, 1))),
aux_head_cfg=dict(out_channels=174, loss_weight=0.5)),
cls_head=dict(
type='TPNHead',
num_classes=174,
in_channels=2048,
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1),
dropout_ratio=0.5,
init_std=0.01),
# model training and testing settings
train_cfg=None,
test_cfg=dict(average_clips='prob', fcn_test=True))
| 1,202
| 31.513514
| 63
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/bsn_tem.py
|
# model settings
model = dict(
type='TEM',
temporal_dim=100,
boundary_ratio=0.1,
tem_feat_dim=400,
tem_hidden_dim=512,
tem_match_threshold=0.5)
| 168
| 17.777778
| 28
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/slowonly_r50.py
|
# model settings
model = dict(
type='Recognizer3D',
backbone=dict(
type='ResNet3dSlowOnly',
depth=50,
pretrained='torchvision://resnet50',
lateral=False,
conv1_kernel=(1, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1),
norm_eval=False),
cls_head=dict(
type='I3DHead',
in_channels=2048,
num_classes=400,
spatial_type='avg',
dropout_ratio=0.5),
# model training and testing settings
train_cfg=None,
test_cfg=dict(average_clips='prob'))
| 587
| 24.565217
| 44
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/x3d.py
|
# model settings
model = dict(
type='Recognizer3D',
backbone=dict(type='X3D', gamma_w=1, gamma_b=2.25, gamma_d=2.2),
cls_head=dict(
type='X3DHead',
in_channels=432,
num_classes=400,
spatial_type='avg',
dropout_ratio=0.5,
fc1_bias=False),
# model training and testing settings
train_cfg=None,
test_cfg=dict(average_clips='prob'))
| 401
| 25.8
| 68
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/bsn_pem.py
|
# model settings
model = dict(
type='PEM',
pem_feat_dim=32,
pem_hidden_dim=256,
pem_u_ratio_m=1,
pem_u_ratio_l=2,
pem_high_temporal_iou_threshold=0.6,
pem_low_temporal_iou_threshold=0.2,
soft_nms_alpha=0.75,
soft_nms_low_threshold=0.65,
soft_nms_high_threshold=0.9,
post_process_top_k=100)
| 334
| 22.928571
| 40
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/vip/vip_tiny.py
|
# model settings
model = dict(
type='Recognizer3D',
backbone=dict(
type='VideoParser',
inplanes=64,
num_chs=(64, 128, 256, 512),
patch_sizes=[8, 7, 7, 7],
num_heads=[1, 2, 4, 8],
num_enc_heads=[1, 2, 4, 8],
num_parts=[32, 32, 32, 32],
num_layers=[1, 1, 2, 1],
ffn_exp=3,
has_last_encoder=False,
drop_path=0.1,
local_attn='joint'),
cls_head=dict(
type='I3DHead',
in_channels=512,
num_classes=400,
spatial_type='avg',
dropout_ratio=0.5),
test_cfg = dict(average_clips='prob')
)
| 636
| 23.5
| 41
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/swin/swin_base.py
|
# model settings
_base_ = "swin_tiny.py"
model = dict(backbone=dict(depths=[2, 2, 18, 2],
embed_dim=128,
num_heads=[4, 8, 16, 32]),
cls_head=dict(in_channels=1024))
| 232
| 32.285714
| 53
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/swin/swin_large.py
|
# model settings
_base_ = "swin_tiny.py"
model = dict(backbone=dict(depths=[2, 2, 18, 2],
embed_dim=192,
num_heads=[6, 12, 24, 48]),
cls_head=dict(in_channels=1536))
| 233
| 32.428571
| 54
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/swin/swin_tiny.py
|
# model settings
model = dict(
type='Recognizer3D',
backbone=dict(
type='SwinTransformer3D',
patch_size=(4,4,4),
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=(8,7,7),
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True),
cls_head=dict(
type='I3DHead',
in_channels=768,
num_classes=400,
spatial_type='avg',
dropout_ratio=0.5),
test_cfg = dict(average_clips='prob'))
| 614
| 23.6
| 42
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/models/swin/swin_small.py
|
# model settings
_base_ = "swin_tiny.py"
model = dict(backbone=dict(depths=[2, 2, 18, 2]))
| 91
| 22
| 49
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/schedules/sgd_100e.py
|
# optimizer
optimizer = dict(
type='SGD',
lr=0.01, # this lr is used for 8 gpus
momentum=0.9,
weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
| 282
| 24.727273
| 65
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/schedules/sgd_tsm_mobilenet_v2_50e.py
|
# optimizer
optimizer = dict(
type='SGD',
constructor='TSMOptimizerConstructor',
paramwise_cfg=dict(fc_lr5=True),
lr=0.01, # this lr is used for 8 gpus
momentum=0.9,
weight_decay=0.00002)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
| 362
| 26.923077
| 65
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/schedules/sgd_50e.py
|
# optimizer
optimizer = dict(
type='SGD',
lr=0.01, # this lr is used for 8 gpus
momentum=0.9,
weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
| 281
| 24.636364
| 65
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/schedules/adam_20e.py
|
# optimizer
optimizer = dict(
type='Adam', lr=0.01, weight_decay=0.00001) # this lr is used for 1 gpus
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=10)
total_epochs = 20
| 225
| 27.25
| 77
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/schedules/sgd_tsm_mobilenet_v2_100e.py
|
# optimizer
optimizer = dict(
type='SGD',
constructor='TSMOptimizerConstructor',
paramwise_cfg=dict(fc_lr5=True),
lr=0.01, # this lr is used for 8 gpus
momentum=0.9,
weight_decay=0.00002)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
| 363
| 27
| 65
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/schedules/sgd_150e_warmup.py
|
# optimizer
optimizer = dict(
type='SGD', lr=0.01, momentum=0.9,
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
step=[90, 130],
warmup='linear',
warmup_by_epoch=True,
warmup_iters=10)
total_epochs = 150
| 352
| 24.214286
| 65
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/schedules/sgd_tsm_100e.py
|
# optimizer
optimizer = dict(
type='SGD',
constructor='TSMOptimizerConstructor',
paramwise_cfg=dict(fc_lr5=True),
lr=0.02, # this lr is used for 8 gpus
momentum=0.9,
weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
| 362
| 26.923077
| 65
|
py
|
STTS
|
STTS-main/VideoSwin/configs/_base_/schedules/sgd_tsm_50e.py
|
# optimizer
optimizer = dict(
type='SGD',
constructor='TSMOptimizerConstructor',
paramwise_cfg=dict(fc_lr5=True),
lr=0.01, # this lr is used for 8 gpus
momentum=0.9,
weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
| 361
| 26.846154
| 65
|
py
|
PathomicFusion
|
PathomicFusion-master/data_loaders.py
|
### data_loaders.py
import os
import numpy as np
import pandas as pd
from PIL import Image
from sklearn import preprocessing
import torch
import torch.nn as nn
from torch.utils.data.dataset import Dataset # For custom datasets
from torchvision import datasets, transforms
################
# Dataset Loader
################
class PathgraphomicDatasetLoader(Dataset):
def __init__(self, opt, data, split, mode='omic'):
"""
Args:
X = data
e = overall survival event
t = overall survival in months
"""
self.X_path = data[split]['x_path']
self.X_grph = data[split]['x_grph']
self.X_omic = data[split]['x_omic']
self.e = data[split]['e']
self.t = data[split]['t']
self.g = data[split]['g']
self.mode = mode
self.transforms = transforms.Compose([
transforms.RandomHorizontalFlip(0.5),
transforms.RandomVerticalFlip(0.5),
transforms.RandomCrop(opt.input_size_path),
transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.05, hue=0.01),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def __getitem__(self, index):
single_e = torch.tensor(self.e[index]).type(torch.FloatTensor)
single_t = torch.tensor(self.t[index]).type(torch.FloatTensor)
single_g = torch.tensor(self.g[index]).type(torch.LongTensor)
if self.mode == "path" or self.mode == 'pathpath':
single_X_path = Image.open(self.X_path[index]).convert('RGB')
return (self.transforms(single_X_path), 0, 0, single_e, single_t, single_g)
elif self.mode == "graph" or self.mode == 'graphgraph':
single_X_grph = torch.load(self.X_grph[index])
return (0, single_X_grph, 0, single_e, single_t, single_g)
elif self.mode == "omic" or self.mode == 'omicomic':
single_X_omic = torch.tensor(self.X_omic[index]).type(torch.FloatTensor)
return (0, 0, single_X_omic, single_e, single_t, single_g)
elif self.mode == "pathomic":
single_X_path = Image.open(self.X_path[index]).convert('RGB')
single_X_omic = torch.tensor(self.X_omic[index]).type(torch.FloatTensor)
return (self.transforms(single_X_path), 0, single_X_omic, single_e, single_t, single_g)
elif self.mode == "graphomic":
single_X_grph = torch.load(self.X_grph[index])
single_X_omic = torch.tensor(self.X_omic[index]).type(torch.FloatTensor)
return (0, single_X_grph, single_X_omic, single_e, single_t, single_g)
elif self.mode == "pathgraph":
single_X_path = Image.open(self.X_path[index]).convert('RGB')
single_X_grph = torch.load(self.X_grph[index])
return (self.transforms(single_X_path), single_X_grph, 0, single_e, single_t, single_g)
elif self.mode == "pathgraphomic":
single_X_path = Image.open(self.X_path[index]).convert('RGB')
single_X_grph = torch.load(self.X_grph[index])
single_X_omic = torch.tensor(self.X_omic[index]).type(torch.FloatTensor)
return (self.transforms(single_X_path), single_X_grph, single_X_omic, single_e, single_t, single_g)
def __len__(self):
return len(self.X_path)
class PathgraphomicFastDatasetLoader(Dataset):
def __init__(self, opt, data, split, mode='omic'):
"""
Args:
X = data
e = overall survival event
t = overall survival in months
"""
self.X_path = data[split]['x_path']
self.X_grph = data[split]['x_grph']
self.X_omic = data[split]['x_omic']
self.e = data[split]['e']
self.t = data[split]['t']
self.g = data[split]['g']
self.mode = mode
def __getitem__(self, index):
single_e = torch.tensor(self.e[index]).type(torch.FloatTensor)
single_t = torch.tensor(self.t[index]).type(torch.FloatTensor)
single_g = torch.tensor(self.g[index]).type(torch.LongTensor)
if self.mode == "path" or self.mode == 'pathpath':
single_X_path = torch.tensor(self.X_path[index]).type(torch.FloatTensor).squeeze(0)
return (single_X_path, 0, 0, single_e, single_t, single_g)
elif self.mode == "graph" or self.mode == 'graphgraph':
single_X_grph = torch.load(self.X_grph[index])
return (0, single_X_grph, 0, single_e, single_t, single_g)
elif self.mode == "omic" or self.mode == 'omicomic':
single_X_omic = torch.tensor(self.X_omic[index]).type(torch.FloatTensor)
return (0, 0, single_X_omic, single_e, single_t, single_g)
elif self.mode == "pathomic":
single_X_path = torch.tensor(self.X_path[index]).type(torch.FloatTensor).squeeze(0)
single_X_omic = torch.tensor(self.X_omic[index]).type(torch.FloatTensor)
return (single_X_path, 0, single_X_omic, single_e, single_t, single_g)
elif self.mode == "graphomic":
single_X_grph = torch.load(self.X_grph[index])
single_X_omic = torch.tensor(self.X_omic[index]).type(torch.FloatTensor)
return (0, single_X_grph, single_X_omic, single_e, single_t, single_g)
elif self.mode == "pathgraph":
single_X_path = torch.tensor(self.X_path[index]).type(torch.FloatTensor).squeeze(0)
single_X_grph = torch.load(self.X_grph[index])
return (single_X_path, single_X_grph, 0, single_e, single_t, single_g)
elif self.mode == "pathgraphomic":
single_X_path = torch.tensor(self.X_path[index]).type(torch.FloatTensor).squeeze(0)
single_X_grph = torch.load(self.X_grph[index])
single_X_omic = torch.tensor(self.X_omic[index]).type(torch.FloatTensor)
return (single_X_path, single_X_grph, single_X_omic, single_e, single_t, single_g)
def __len__(self):
return len(self.X_path)
| 6,141
| 46.984375
| 111
|
py
|
PathomicFusion
|
PathomicFusion-master/fusion.py
|
import torch
import torch.nn as nn
from utils import init_max_weights
class BilinearFusion(nn.Module):
def __init__(self, skip=1, use_bilinear=1, gate1=1, gate2=1, dim1=32, dim2=32, scale_dim1=1, scale_dim2=1, mmhid=64, dropout_rate=0.25):
super(BilinearFusion, self).__init__()
self.skip = skip
self.use_bilinear = use_bilinear
self.gate1 = gate1
self.gate2 = gate2
dim1_og, dim2_og, dim1, dim2 = dim1, dim2, dim1//scale_dim1, dim2//scale_dim2
skip_dim = dim1+dim2+2 if skip else 0
self.linear_h1 = nn.Sequential(nn.Linear(dim1_og, dim1), nn.ReLU())
self.linear_z1 = nn.Bilinear(dim1_og, dim2_og, dim1) if use_bilinear else nn.Sequential(nn.Linear(dim1_og+dim2_og, dim1))
self.linear_o1 = nn.Sequential(nn.Linear(dim1, dim1), nn.ReLU(), nn.Dropout(p=dropout_rate))
self.linear_h2 = nn.Sequential(nn.Linear(dim2_og, dim2), nn.ReLU())
self.linear_z2 = nn.Bilinear(dim1_og, dim2_og, dim2) if use_bilinear else nn.Sequential(nn.Linear(dim1_og+dim2_og, dim2))
self.linear_o2 = nn.Sequential(nn.Linear(dim2, dim2), nn.ReLU(), nn.Dropout(p=dropout_rate))
self.post_fusion_dropout = nn.Dropout(p=dropout_rate)
self.encoder1 = nn.Sequential(nn.Linear((dim1+1)*(dim2+1), mmhid), nn.ReLU(), nn.Dropout(p=dropout_rate))
self.encoder2 = nn.Sequential(nn.Linear(mmhid+skip_dim, mmhid), nn.ReLU(), nn.Dropout(p=dropout_rate))
init_max_weights(self)
def forward(self, vec1, vec2):
### Gated Multimodal Units
if self.gate1:
h1 = self.linear_h1(vec1)
z1 = self.linear_z1(vec1, vec2) if self.use_bilinear else self.linear_z1(torch.cat((vec1, vec2), dim=1))
o1 = self.linear_o1(nn.Sigmoid()(z1)*h1)
else:
o1 = self.linear_o1(vec1)
if self.gate2:
h2 = self.linear_h2(vec2)
z2 = self.linear_z2(vec1, vec2) if self.use_bilinear else self.linear_z2(torch.cat((vec1, vec2), dim=1))
o2 = self.linear_o2(nn.Sigmoid()(z2)*h2)
else:
o2 = self.linear_o2(vec2)
### Fusion
o1 = torch.cat((o1, torch.cuda.FloatTensor(o1.shape[0], 1).fill_(1)), 1)
o2 = torch.cat((o2, torch.cuda.FloatTensor(o2.shape[0], 1).fill_(1)), 1)
o12 = torch.bmm(o1.unsqueeze(2), o2.unsqueeze(1)).flatten(start_dim=1) # BATCH_SIZE X 1024
out = self.post_fusion_dropout(o12)
out = self.encoder1(out)
if self.skip: out = torch.cat((out, o1, o2), 1)
out = self.encoder2(out)
return out
class TrilinearFusion_A(nn.Module):
def __init__(self, skip=1, use_bilinear=1, gate1=1, gate2=1, gate3=1, dim1=32, dim2=32, dim3=32, scale_dim1=1, scale_dim2=1, scale_dim3=1, mmhid=96, dropout_rate=0.25):
super(TrilinearFusion_A, self).__init__()
self.skip = skip
self.use_bilinear = use_bilinear
self.gate1 = gate1
self.gate2 = gate2
self.gate3 = gate3
dim1_og, dim2_og, dim3_og, dim1, dim2, dim3 = dim1, dim2, dim3, dim1//scale_dim1, dim2//scale_dim2, dim3//scale_dim3
skip_dim = dim1+dim2+dim3+3 if skip else 0
### Path
self.linear_h1 = nn.Sequential(nn.Linear(dim1_og, dim1), nn.ReLU())
self.linear_z1 = nn.Bilinear(dim1_og, dim3_og, dim1) if use_bilinear else nn.Sequential(nn.Linear(dim1_og+dim3_og, dim1))
self.linear_o1 = nn.Sequential(nn.Linear(dim1, dim1), nn.ReLU(), nn.Dropout(p=dropout_rate))
### Graph
self.linear_h2 = nn.Sequential(nn.Linear(dim2_og, dim2), nn.ReLU())
self.linear_z2 = nn.Bilinear(dim2_og, dim3_og, dim2) if use_bilinear else nn.Sequential(nn.Linear(dim2_og+dim3_og, dim2))
self.linear_o2 = nn.Sequential(nn.Linear(dim2, dim2), nn.ReLU(), nn.Dropout(p=dropout_rate))
### Omic
self.linear_h3 = nn.Sequential(nn.Linear(dim3_og, dim3), nn.ReLU())
self.linear_z3 = nn.Bilinear(dim1_og, dim3_og, dim3) if use_bilinear else nn.Sequential(nn.Linear(dim1_og+dim3_og, dim3))
self.linear_o3 = nn.Sequential(nn.Linear(dim3, dim3), nn.ReLU(), nn.Dropout(p=dropout_rate))
self.post_fusion_dropout = nn.Dropout(p=0.25)
self.encoder1 = nn.Sequential(nn.Linear((dim1+1)*(dim2+1)*(dim3+1), mmhid), nn.ReLU(), nn.Dropout(p=dropout_rate))
self.encoder2 = nn.Sequential(nn.Linear(mmhid+skip_dim, mmhid), nn.ReLU(), nn.Dropout(p=dropout_rate))
init_max_weights(self)
def forward(self, vec1, vec2, vec3):
### Gated Multimodal Units
if self.gate1:
h1 = self.linear_h1(vec1)
z1 = self.linear_z1(vec1, vec3) if self.use_bilinear else self.linear_z1(torch.cat((vec1, vec3), dim=1)) # Gate Path with Omic
o1 = self.linear_o1(nn.Sigmoid()(z1)*h1)
else:
o1 = self.linear_o1(vec1)
if self.gate2:
h2 = self.linear_h2(vec2)
z2 = self.linear_z2(vec2, vec3) if self.use_bilinear else self.linear_z2(torch.cat((vec2, vec3), dim=1)) # Gate Graph with Omic
o2 = self.linear_o2(nn.Sigmoid()(z2)*h2)
else:
o2 = self.linear_o2(vec2)
if self.gate3:
h3 = self.linear_h3(vec3)
z3 = self.linear_z3(vec1, vec3) if self.use_bilinear else self.linear_z3(torch.cat((vec1, vec3), dim=1)) # Gate Omic With Path
o3 = self.linear_o3(nn.Sigmoid()(z3)*h3)
else:
o3 = self.linear_o3(vec3)
### Fusion
o1 = torch.cat((o1, torch.cuda.FloatTensor(o1.shape[0], 1).fill_(1)), 1)
o2 = torch.cat((o2, torch.cuda.FloatTensor(o2.shape[0], 1).fill_(1)), 1)
o3 = torch.cat((o3, torch.cuda.FloatTensor(o3.shape[0], 1).fill_(1)), 1)
o12 = torch.bmm(o1.unsqueeze(2), o2.unsqueeze(1)).flatten(start_dim=1)
o123 = torch.bmm(o12.unsqueeze(2), o3.unsqueeze(1)).flatten(start_dim=1)
out = self.post_fusion_dropout(o123)
out = self.encoder1(out)
if self.skip: out = torch.cat((out, o1, o2, o3), 1)
out = self.encoder2(out)
return out
class TrilinearFusion_B(nn.Module):
def __init__(self, skip=1, use_bilinear=1, gate1=1, gate2=1, gate3=1, dim1=32, dim2=32, dim3=32, scale_dim1=1, scale_dim2=1, scale_dim3=1, mmhid=96, dropout_rate=0.25):
super(TrilinearFusion_B, self).__init__()
self.skip = skip
self.use_bilinear = use_bilinear
self.gate1 = gate1
self.gate2 = gate2
self.gate3 = gate3
dim1_og, dim2_og, dim3_og, dim1, dim2, dim3 = dim1, dim2, dim3, dim1//scale_dim1, dim2//scale_dim2, dim3//scale_dim3
skip_dim = dim1+dim2+dim3+3 if skip else 0
### Path
self.linear_h1 = nn.Sequential(nn.Linear(dim1_og, dim1), nn.ReLU())
self.linear_z1 = nn.Bilinear(dim1_og, dim3_og, dim1) if use_bilinear else nn.Sequential(nn.Linear(dim1_og+dim3_og, dim1))
self.linear_o1 = nn.Sequential(nn.Linear(dim1, dim1), nn.ReLU(), nn.Dropout(p=dropout_rate))
### Graph
self.linear_h2 = nn.Sequential(nn.Linear(dim2_og, dim2), nn.ReLU())
self.linear_z2 = nn.Bilinear(dim2_og, dim1_og, dim2) if use_bilinear else nn.Sequential(nn.Linear(dim2_og+dim1_og, dim2))
self.linear_o2 = nn.Sequential(nn.Linear(dim2, dim2), nn.ReLU(), nn.Dropout(p=dropout_rate))
### Omic
self.linear_h3 = nn.Sequential(nn.Linear(dim3_og, dim3), nn.ReLU())
self.linear_z3 = nn.Bilinear(dim1_og, dim3_og, dim3) if use_bilinear else nn.Sequential(nn.Linear(dim1_og+dim3_og, dim3))
self.linear_o3 = nn.Sequential(nn.Linear(dim3, dim3), nn.ReLU(), nn.Dropout(p=dropout_rate))
self.post_fusion_dropout = nn.Dropout(p=0.25)
self.encoder1 = nn.Sequential(nn.Linear((dim1+1)*(dim2+1)*(dim3+1), mmhid), nn.ReLU(), nn.Dropout(p=dropout_rate))
self.encoder2 = nn.Sequential(nn.Linear(mmhid+skip_dim, mmhid), nn.ReLU(), nn.Dropout(p=dropout_rate))
init_max_weights(self)
def forward(self, vec1, vec2, vec3):
### Gated Multimodal Units
if self.gate1:
h1 = self.linear_h1(vec1)
z1 = self.linear_z1(vec1, vec3) if self.use_bilinear else self.linear_z1(torch.cat((vec1, vec3), dim=1)) # Gate Path with Omic
o1 = self.linear_o1(nn.Sigmoid()(z1)*h1)
else:
o1 = self.linear_o1(vec1)
if self.gate2:
h2 = self.linear_h2(vec2)
z2 = self.linear_z2(vec2, vec1) if self.use_bilinear else self.linear_z2(torch.cat((vec2, vec1), dim=1)) # Gate Graph with Omic
o2 = self.linear_o2(nn.Sigmoid()(z2)*h2)
else:
o2 = self.linear_o2(vec2)
if self.gate3:
h3 = self.linear_h3(vec3)
z3 = self.linear_z3(vec1, vec3) if self.use_bilinear else self.linear_z3(torch.cat((vec1, vec3), dim=1)) # Gate Omic With Path
o3 = self.linear_o3(nn.Sigmoid()(z3)*h3)
else:
o3 = self.linear_o3(vec3)
### Fusion
o1 = torch.cat((o1, torch.cuda.FloatTensor(o1.shape[0], 1).fill_(1)), 1)
o2 = torch.cat((o2, torch.cuda.FloatTensor(o2.shape[0], 1).fill_(1)), 1)
o3 = torch.cat((o3, torch.cuda.FloatTensor(o3.shape[0], 1).fill_(1)), 1)
o12 = torch.bmm(o1.unsqueeze(2), o2.unsqueeze(1)).flatten(start_dim=1)
o123 = torch.bmm(o12.unsqueeze(2), o3.unsqueeze(1)).flatten(start_dim=1)
out = self.post_fusion_dropout(o123)
out = self.encoder1(out)
if self.skip: out = torch.cat((out, o1, o2, o3), 1)
out = self.encoder2(out)
return out
| 9,580
| 48.901042
| 172
|
py
|
PathomicFusion
|
PathomicFusion-master/run_cox_baselines.py
|
# Base / Native
import os
import pickle
# Numerical / Array
from lifelines.utils import concordance_index
from lifelines import CoxPHFitter
import numpy as np
import pandas as pd
pd.options.display.max_rows = 999
# Env
from utils import CI_pm
from utils import cox_log_rank
from utils import getCleanAllDataset, addHistomolecularSubtype
from utils import makeKaplanMeierPlot
def trainCox(dataroot = './data/TCGA_GBMLGG/', ckpt_name='./checkpoints/surv_15_cox/', model='cox_omic', penalizer=1e-4):
### Creates Checkpoint Directory
if not os.path.exists(ckpt_name): os.makedirs(ckpt_name)
if not os.path.exists(os.path.join(ckpt_name, model)): os.makedirs(os.path.join(ckpt_name, model))
### Load PNAS Splits
pnas_splits = pd.read_csv(dataroot+'pnas_splits.csv')
pnas_splits.columns = ['TCGA ID']+[str(k) for k in range(1, 16)]
pnas_splits.index = pnas_splits['TCGA ID']
pnas_splits = pnas_splits.drop(['TCGA ID'], axis=1)
### Loads Data
ignore_missing_moltype = True if model in ['cox_omic', 'cox_moltype', 'cox_grade+moltype', 'all'] else False
ignore_missing_histype = True if model in ['cox_histype', 'cox_grade', 'cox_grade+moltype', 'all'] else False
all_dataset = getCleanAllDataset(dataroot=dataroot, ignore_missing_moltype=ignore_missing_moltype,
ignore_missing_histype=ignore_missing_histype)[1]
model_feats = {'cox_omic':['TCGA ID', 'Histology', 'Grade', 'Molecular subtype', 'Histomolecular subtype'],
'cox_moltype':['Survival months', 'censored', 'codeletion', 'idh mutation'],
'cox_histype':['Survival months', 'censored', 'Histology'],
'cox_grade':['Survival months', 'censored', 'Grade'],
'cox_grade+moltype':['Survival months', 'censored', 'codeletion', 'idh mutation', 'Grade'],
'cox_all':['TCGA ID', 'Histomolecular subtype']}
cv_results = []
for k in pnas_splits.columns:
pat_train = list(set(pnas_splits.index[pnas_splits[k] == 'Train']).intersection(all_dataset.index))
pat_test = list(set(pnas_splits.index[pnas_splits[k] == 'Test']).intersection(all_dataset.index))
feats = all_dataset.columns.drop(model_feats[model]) if model == 'cox_omic' or model == 'cox_all' else model_feats[model]
train = all_dataset.loc[pat_train]
test = all_dataset.loc[pat_test]
cph = CoxPHFitter(penalizer=penalizer)
cph.fit(train[feats], duration_col='Survival months', event_col='censored', show_progress=False)
cin = concordance_index(test['Survival months'], -cph.predict_partial_hazard(test[feats]), test['censored'])
cv_results.append(cin)
train.insert(loc=0, column='Hazard', value=-cph.predict_partial_hazard(train))
test.insert(loc=0, column='Hazard', value=-cph.predict_partial_hazard(test))
pickle.dump(train, open(os.path.join(ckpt_name, model, '%s_%s_pred_train.pkl' % (model, k)), 'wb'))
pickle.dump(test, open(os.path.join(ckpt_name, model, '%s_%s_pred_test.pkl' % (model, k)), 'wb'))
pickle.dump(cv_results, open(os.path.join(ckpt_name, model, '%s_results.pkl' % model), 'wb'))
print("C-Indices across Splits", cv_results)
print("Average C-Index: %f" % CI_pm(cv_results))
print('1. Omic Only. Ignore missing molecular subtypes')
trainCox(model='cox_omic', penalizer=1e-1)
print('2. molecular subtype only. Ignore missing molecular subtypes')
trainCox(model='cox_moltype', penalizer=0)
print('3. histology subtype only. Ignore missing histology subtypes')
trainCox(model='cox_histype', penalizer=0)
print('4. histologic grade only. Ignore missing histology subtypes')
trainCox(model='cox_grade', penalizer=0)
print('5. grade + molecular subtype. Ignore all NAs')
trainCox(model='cox_grade+moltype', penalizer=0)
print('6. All. Ignore all NAs')
trainCox(model='cox_all', penalizer=1e-1)
print('7. KM-Curves')
for model in ['cox_omic', 'cox_moltype', 'cox_histype', 'cox_grade', 'cox_grade+moltype', 'cox_all']:
makeKaplanMeierPlot(ckpt_name='./checkpoints/surv_15_cox/', model=model, split='test')
| 4,162
| 50.395062
| 129
|
py
|
PathomicFusion
|
PathomicFusion-master/utils.py
|
# Base / Native
import math
import os
import pickle
import re
import warnings
warnings.filterwarnings('ignore')
# Numerical / Array
import lifelines
from lifelines.utils import concordance_index
from lifelines import CoxPHFitter
from lifelines.datasets import load_regression_dataset
from lifelines.utils import k_fold_cross_validation
from lifelines.statistics import logrank_test
from imblearn.over_sampling import RandomOverSampler
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import numpy as np
import pandas as pd
from PIL import Image
import pylab
import scipy
import seaborn as sns
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import average_precision_score, auc, f1_score, roc_curve, roc_auc_score
from sklearn.preprocessing import LabelBinarizer
from scipy import interp
mpl.rcParams['axes.linewidth'] = 3 #set the value globally
# Torch
import torch
import torch.nn as nn
from torch.nn import init, Parameter
from torch.utils.data._utils.collate import *
from torch.utils.data.dataloader import default_collate
import torch_geometric
from torch_geometric.data import Batch
################
# Regularization
################
def regularize_weights(model, reg_type=None):
l1_reg = None
for W in model.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
return l1_reg
def regularize_path_weights(model, reg_type=None):
l1_reg = None
for W in model.module.classifier.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
for W in model.module.linear.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
return l1_reg
def regularize_MM_weights(model, reg_type=None):
l1_reg = None
if model.module.__hasattr__('omic_net'):
for W in model.module.omic_net.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
if model.module.__hasattr__('linear_h_path'):
for W in model.module.linear_h_path.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
if model.module.__hasattr__('linear_h_omic'):
for W in model.module.linear_h_omic.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
if model.module.__hasattr__('linear_h_grph'):
for W in model.module.linear_h_grph.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
if model.module.__hasattr__('linear_z_path'):
for W in model.module.linear_z_path.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
if model.module.__hasattr__('linear_z_omic'):
for W in model.module.linear_z_omic.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
if model.module.__hasattr__('linear_z_grph'):
for W in model.module.linear_z_grph.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
if model.module.__hasattr__('linear_o_path'):
for W in model.module.linear_o_path.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
if model.module.__hasattr__('linear_o_omic'):
for W in model.module.linear_o_omic.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
if model.module.__hasattr__('linear_o_grph'):
for W in model.module.linear_o_grph.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
if model.module.__hasattr__('encoder1'):
for W in model.module.encoder1.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
if model.module.__hasattr__('encoder2'):
for W in model.module.encoder2.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
if model.module.__hasattr__('classifier'):
for W in model.module.classifier.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
return l1_reg
def regularize_MM_omic(model, reg_type=None):
l1_reg = None
if model.module.__hasattr__('omic_net'):
for W in model.module.omic_net.parameters():
if l1_reg is None:
l1_reg = torch.abs(W).sum()
else:
l1_reg = l1_reg + torch.abs(W).sum() # torch.abs(W).sum() is equivalent to W.norm(1)
return l1_reg
################
# Network Initialization
################
def init_weights(net, init_type='orthogonal', init_gain=0.02):
    """Initialize network weights in place.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    'normal' was used in the original pix2pix/CycleGAN paper; xavier and
    kaiming may work better for some applications — feel free to experiment.
    """
    def _apply_init(m):
        # Invoked once per submodule by net.apply().
        cls_name = m.__class__.__name__
        is_weighted_layer = hasattr(m, 'weight') and ('Conv' in cls_name or 'Linear' in cls_name)
        if is_weighted_layer:
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in cls_name:
            # BatchNorm weights are not matrices; only a normal draw applies.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)
    print('initialize network with %s' % init_type)
    net.apply(_apply_init)  # walk every submodule
def init_max_weights(module):
    """Self-normalizing init: N(0, 1/sqrt(fan_in)) weights and zero biases for
    every nn.Linear in *module* (subclasses of Linear are deliberately skipped,
    matching the exact-type check)."""
    for layer in module.modules():
        if type(layer) == nn.Linear:
            fan_in = layer.weight.size(1)
            layer.weight.data.normal_(0, 1. / math.sqrt(fan_in))
            layer.bias.data.zero_()
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Register a network on the requested GPU(s) and initialize its weights.

    Parameters:
        net (network)      -- the network to be initialized
        init_type (str)    -- initialization method: normal | xavier | kaiming | orthogonal | max | none
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns the (possibly DataParallel-wrapped) network.
    """
    if gpu_ids:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs

    if init_type == 'none':
        print("Init Type: Not initializing networks.")
    elif init_type == 'max':
        # 'max' weights are expected to be set via init_max_weights elsewhere.
        print("Init Type: Self-Normalizing Weights")
    else:
        print("Init Type:", init_type)
        init_weights(net, init_type, init_gain=init_gain)
    return net
################
# Freeze / Unfreeze
################
def unfreeze_unimodal(opt, model, epoch):
    """At epoch 5, unfreeze the unimodal sub-networks relevant to opt.mode.

    Modes map to the sub-networks they thaw (omic and/or graph); any other
    mode, or any other epoch, is a no-op.
    """
    if epoch != 5:
        return
    thaw_schedule = {
        'graphomic': ('omic', 'grph'),
        'pathomic': ('omic',),
        'pathgraph': ('grph',),
        'pathgraphomic': ('omic', 'grph'),
        'omicomic': ('omic',),
        'graphgraph': ('grph',),
    }
    display = {'omic': 'Omic', 'grph': 'Graph'}
    for key in thaw_schedule.get(opt.mode, ()):
        dfs_unfreeze(getattr(model.module, '%s_net' % key))
        print("Unfreezing %s" % display[key])
def dfs_freeze(model):
    """Recursively disable gradient updates for all parameters below *model*."""
    for child in model.children():
        for param in child.parameters():
            param.requires_grad = False
        dfs_freeze(child)
def dfs_unfreeze(model):
    """Recursively re-enable gradient updates for all parameters below *model*."""
    for child in model.children():
        for param in child.parameters():
            param.requires_grad = True
        dfs_unfreeze(child)
def print_if_frozen(module):
    """Print, for each direct child of *module*, whether its parameters train.

    Each child is reported once per parameter it owns: 'Learnable!!!' for
    parameters with requires_grad set, 'Still Frozen' for the rest.
    """
    for idx, child in enumerate(module.children()):
        for param in child.parameters():
            # Idiom fix: truth-test the flag instead of comparing '== True'.
            if param.requires_grad:
                print("Learnable!!! %d:" % idx, child)
            else:
                print("Still Frozen %d:" % idx, child)
def unfreeze_vgg_features(model, epoch):
    """Unfreeze VGG feature layers past a schedule-determined index.

    epoch_schedule maps an epoch to the last layer index that stays frozen;
    epochs without an entry raise KeyError.
    """
    epoch_schedule = {30:45}
    frozen_upto = epoch_schedule[epoch]
    for idx, child in enumerate(model.features.children()):
        if idx <= frozen_upto:
            print("Still Frozen %d:" %idx, child)
        else:
            print("Unfreezing %d:" %idx, child)
            for param in child.parameters():
                param.requires_grad = True
################
# Collate Utils
################
def mixed_collate(batch):
    """Collate a batch whose samples mix torch_geometric graphs and tensors.

    Transposes the batch (list of per-sample tuples -> per-modality lists) and
    batches each modality: torch_geometric Data objects go through
    Batch.from_data_list, everything else through torch's default_collate.
    """
    # Fixed: dropped the unused elem / elem_type locals from the original.
    transposed = zip(*batch)
    return [Batch.from_data_list(samples, []) if type(samples[0]) is torch_geometric.data.data.Data
            else default_collate(samples) for samples in transposed]
################
# Survival Utils
################
def CoxLoss(survtime, censor, hazard_pred, device):
    """Negative partial log-likelihood of the Cox proportional hazards model.

    Credit: Travers Ching, cox-nnet (https://github.com/traversc/cox-nnet) —
    "Cox-nnet: An artificial neural network method for prognosis prediction
    of high-throughput omics data".
    """
    n_obs = len(survtime)
    # Risk-set indicator: entry (i, j) is 1 iff subject j is still at risk at
    # subject i's event time, i.e. survtime[j] >= survtime[i].
    risk_rows = [[int(survtime[j] >= survtime[i]) for j in range(n_obs)]
                 for i in range(n_obs)]
    risk_set = torch.FloatTensor(risk_rows).to(device)
    theta = hazard_pred.reshape(-1)
    partial_sums = torch.sum(torch.exp(theta) * risk_set, dim=1)
    # Only uncensored subjects (censor == 1) contribute to the likelihood.
    return -torch.mean((theta - torch.log(partial_sums)) * censor)
def accuracy(output, labels):
    """Fraction of rows of *output* whose argmax matches *labels* (as a tensor)."""
    predictions = output.max(1)[1].type_as(labels)
    n_correct = predictions.eq(labels).double().sum()
    return n_correct / len(labels)
def accuracy_cox(hazardsdata, labels):
    """Accuracy of median-dichotomized hazard predictions against true event labels.

    Hazards strictly above the cohort median predict an event (1); at or below
    predict none (0).
    """
    cutoff = np.median(hazardsdata)
    predicted = (np.asarray(hazardsdata) > cutoff).astype(int)
    return np.sum(predicted == labels) / len(labels)
def cox_log_rank(hazardsdata, labels, survtime_all):
    """Log-rank test p-value between low/high risk groups split at the median hazard."""
    cutoff = np.median(hazardsdata)
    is_high = np.asarray(hazardsdata) > cutoff
    # Group 1: predicted low-risk; group 2: predicted high-risk.
    T1, T2 = survtime_all[~is_high], survtime_all[is_high]
    E1, E2 = labels[~is_high], labels[is_high]
    results = logrank_test(T1, T2, event_observed_A=E1, event_observed_B=E2)
    return results.p_value
def CIndex(hazards, labels, survtime_all):
    """Concordance index over comparable pairs.

    A pair (i, j) is comparable when subject i had an event (labels[i] == 1)
    and subject j survived longer. Concordant pairs (shorter survival gets the
    higher hazard) score 1; ties in predicted hazard score 0.5.
    """
    concord = 0.
    total = 0.
    N_test = labels.shape[0]
    for i in range(N_test):
        if labels[i] == 1:
            for j in range(N_test):
                if survtime_all[j] > survtime_all[i]:
                    total += 1
                    if hazards[j] < hazards[i]:
                        concord += 1
                    # Fixed: the original repeated `hazards[j] < hazards[i]`
                    # here, making the tie branch unreachable; ties in the
                    # predicted hazard conventionally score 0.5.
                    elif hazards[j] == hazards[i]:
                        concord += 0.5
    return concord / total
def CIndex_lifeline(hazards, labels, survtime_all):
    """Concordance index via lifelines; hazards are negated so a higher hazard
    means a shorter predicted survival."""
    predicted_scores = -hazards
    return concordance_index(survtime_all, predicted_scores, labels)
################
# Data Utils
################
def addHistomolecularSubtype(data):
    """
    Insert a 'Histomolecular subtype' column derived from molecular + histology codes.
    Molecular Subtype: IDHwt == 0, IDHmut-non-codel == 1, IDHmut-codel == 2
    Histology Subtype: astrocytoma == 0, oligoastrocytoma == 1, oligodendroglioma == 2, glioblastoma == 3
    Rows matching no rule keep the placeholder value 1.0.
    """
    subtyped = data.copy()
    subtyped.insert(loc=0, column='Histomolecular subtype', value=np.ones(len(data)))
    # Astrocytoma or glioblastoma histology, split by IDH status.
    is_astro_or_gbm = np.logical_or(data['Histology'] == 0, data['Histology'] == 3)
    subtyped.loc[np.logical_and(data['Molecular subtype'] == 0, is_astro_or_gbm), 'Histomolecular subtype'] = 'idhwt_ATC'
    subtyped.loc[np.logical_and(data['Molecular subtype'] == 1, is_astro_or_gbm), 'Histomolecular subtype'] = 'idhmut_ATC'
    # IDHmut-codel oligodendroglioma.
    subtyped.loc[np.logical_and(data['Molecular subtype'] == 2, data['Histology'] == 2), 'Histomolecular subtype'] = 'ODG'
    return subtyped
def changeHistomolecularSubtype(data):
    """
    Recompute the 'Histomolecular subtype' column from the molecular/histology codes.
    Molecular Subtype: IDHwt == 0, IDHmut-non-codel == 1, IDHmut-codel == 2
    Histology Subtype: astrocytoma == 0, oligoastrocytoma == 1, oligodendroglioma == 2, glioblastoma == 3
    """
    # Drop the stale column, then rebuild it with the shared helper — the
    # original duplicated addHistomolecularSubtype's rule set verbatim.
    return addHistomolecularSubtype(data.drop(['Histomolecular subtype'], axis=1))
def getCleanAllDataset(dataroot='./data/TCGA_GBMLGG/', ignore_missing_moltype=False, ignore_missing_histype=False, use_rnaseq=False):
    """Load, join, and clean the TCGA GBM/LGG clinical + genomic tables.

    Parameters:
        dataroot (str)                -- directory holding all_dataset.csv, grade_data.csv, and RNA-Seq exports
        ignore_missing_moltype (bool) -- drop patients lacking molecular-subtype data instead of keeping them
        ignore_missing_histype (bool) -- drop patients lacking histology/grade instead of imputing
        use_rnaseq (bool)             -- join the GBM/LGG RNA-Seq z-score matrices as extra feature columns

    Returns:
        (metadata, all_dataset): list of metadata column names, and the cleaned
        DataFrame indexed by 'TCGA ID'.
    """
    ### 1. Joining all_datasets.csv with grade data. Looks at columns with missing samples
    metadata = ['Histology', 'Grade', 'Molecular subtype', 'TCGA ID', 'censored', 'Survival months']
    all_dataset = pd.read_csv(os.path.join(dataroot, 'all_dataset.csv')).drop('indexes', axis=1)
    all_dataset.index = all_dataset['TCGA ID']
    all_grade = pd.read_csv(os.path.join(dataroot, 'grade_data.csv'))
    all_grade['Histology'] = all_grade['Histology'].str.replace('astrocytoma (glioblastoma)', 'glioblastoma', regex=False)
    all_grade.index = all_grade['TCGA ID']
    assert pd.Series(all_dataset.index).equals(pd.Series(sorted(all_grade.index)))
    all_dataset = all_dataset.join(all_grade[['Histology', 'Grade', 'Molecular subtype']], how='inner')
    # Move the three newly joined columns to the front.
    cols = all_dataset.columns.tolist()
    cols = cols[-3:] + cols[:-3]
    all_dataset = all_dataset[cols]
    if use_rnaseq:
        # Join GBM + LGG RNA-Seq z-scores: drop all-NaN columns, tag gene names
        # with '_rnaseq', truncate barcodes to the 12-char patient ID, and
        # de-duplicate patients before the inner join.
        gbm = pd.read_csv(os.path.join(dataroot, 'mRNA_Expression_z-Scores_RNA_Seq_RSEM.txt'), sep='\t', skiprows=1, index_col=0)
        lgg = pd.read_csv(os.path.join(dataroot, 'mRNA_Expression_Zscores_RSEM.txt'), sep='\t', skiprows=1, index_col=0)
        gbm = gbm[gbm.columns[~gbm.isnull().all()]]
        lgg = lgg[lgg.columns[~lgg.isnull().all()]]
        glioma_RNAseq = gbm.join(lgg, how='inner').T
        glioma_RNAseq = glioma_RNAseq.dropna(axis=1)
        glioma_RNAseq.columns = [gene+'_rnaseq' for gene in glioma_RNAseq.columns]
        glioma_RNAseq.index = [patname[:12] for patname in glioma_RNAseq.index]
        glioma_RNAseq = glioma_RNAseq.iloc[~glioma_RNAseq.index.duplicated()]
        glioma_RNAseq.index.name = 'TCGA ID'
        all_dataset = all_dataset.join(glioma_RNAseq, how='inner')
    # Sanity checks: the three molecular columns must be missing for the same patients.
    pat_missing_moltype = all_dataset[all_dataset['Molecular subtype'].isna()].index
    pat_missing_idh = all_dataset[all_dataset['idh mutation'].isna()].index
    pat_missing_1p19q = all_dataset[all_dataset['codeletion'].isna()].index
    print("# Missing Molecular Subtype:", len(pat_missing_moltype))
    print("# Missing IDH Mutation:", len(pat_missing_idh))
    print("# Missing 1p19q Codeletion:", len(pat_missing_1p19q))
    assert pat_missing_moltype.equals(pat_missing_idh)
    assert pat_missing_moltype.equals(pat_missing_1p19q)
    pat_missing_grade = all_dataset[all_dataset['Grade'].isna()].index
    pat_missing_histype = all_dataset[all_dataset['Histology'].isna()].index
    print("# Missing Histological Subtype:", len(pat_missing_histype))
    print("# Missing Grade:", len(pat_missing_grade))
    assert pat_missing_histype.equals(pat_missing_grade)
    ### 2. Impute Missing Genomic Data: Removes patients with missing molecular subtype / idh mutation / 1p19q. Else imputes with median value of each column. Fills missing Molecular subtype with "Missing"
    if ignore_missing_moltype:
        all_dataset = all_dataset[all_dataset['Molecular subtype'].isna() == False]
        # NOTE(review): this imputation loop is nested inside the
        # ignore_missing_moltype branch, so it only runs when rows were already
        # dropped; the section comment above reads as if it were meant to be
        # the *else* case. Confirm intended behavior before changing.
        for col in all_dataset.drop(metadata, axis=1).columns:
            all_dataset['Molecular subtype'] = all_dataset['Molecular subtype'].fillna('Missing')
            all_dataset[col] = all_dataset[col].fillna(all_dataset[col].median())
    ### 3. Impute Missing Histological Data: Removes patients with missing histological subtype / grade. Else imputes with "missing" / grade -1
    if ignore_missing_histype:
        all_dataset = all_dataset[all_dataset['Histology'].isna() == False]
    else:
        all_dataset['Grade'] = all_dataset['Grade'].fillna(1)
        all_dataset['Histology'] = all_dataset['Histology'].fillna('Missing')
    all_dataset['Grade'] = all_dataset['Grade'] - 2  # shift grades II/III/IV -> 0/1/2 (imputed 1 -> -1)
    ### 4. Adds Histomolecular subtype
    ms2int = {'Missing':-1, 'IDHwt':0, 'IDHmut-non-codel':1, 'IDHmut-codel':2}
    all_dataset[['Molecular subtype']] = all_dataset[['Molecular subtype']].applymap(lambda s: ms2int.get(s) if s in ms2int else s)
    hs2int = {'Missing':-1, 'astrocytoma':0, 'oligoastrocytoma':1, 'oligodendroglioma':2, 'glioblastoma':3}
    all_dataset[['Histology']] = all_dataset[['Histology']].applymap(lambda s: hs2int.get(s) if s in hs2int else s)
    all_dataset = addHistomolecularSubtype(all_dataset)
    metadata.extend(['Histomolecular subtype'])
    all_dataset['censored'] = 1 - all_dataset['censored']  # flip so 1 == event observed
    return metadata, all_dataset
################
# Analysis Utils
################
def count_parameters(model):
    """Number of trainable (requires_grad) parameters in *model*."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable)
def hazard2grade(hazard, p):
    """Bin a hazard score into {0, 1, 2} using the two cut points in *p*."""
    for grade, cutoff in enumerate((p[0], p[1])):
        if hazard < cutoff:
            return grade
    return 2
def p(n):
    """Build a percentile aggregator named 'p<n>' (the name labels pandas .agg columns)."""
    def _agg(values):
        return np.percentile(values, n)
    _agg.__name__ = 'p%s' % n
    return _agg
def natural_sort(l):
    """Sort strings so embedded integers compare numerically ('a2' < 'a10')."""
    def _key(s):
        return [int(tok) if tok.isdigit() else tok.lower()
                for tok in re.split('([0-9]+)', s)]
    return sorted(l, key=_key)
def CI_pm(data, confidence=0.95):
    """Mean ± half-width of the *confidence* CI (Student's t), formatted as a string."""
    arr = 1.0 * np.array(data)
    n = len(arr)
    mean = np.mean(arr)
    halfwidth = scipy.stats.sem(arr) * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
    return "{0:.4f} ± ".format(mean) + "{0:.3f}".format(halfwidth)
def CI_interval(data, confidence=0.95):
    """Lower and upper bounds of the *confidence* CI (Student's t), formatted as a string."""
    arr = 1.0 * np.array(data)
    n = len(arr)
    mean = np.mean(arr)
    halfwidth = scipy.stats.sem(arr) * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
    return "{0:.3f}, ".format(mean - halfwidth) + "{0:.3f}".format(mean + halfwidth)
def poolSurvTestPD(ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='pathgraphomic_fusion', split='test', zscore=False, agg_type='Hazard_mean'):
    """Pool per-patient hazard predictions across all 15 CV folds into one DataFrame.

    For each fold k: loads the pickled predictions, verifies they line up with
    the split's metadata, aggregates patch-level hazards per patient (mean /
    median / max / quartiles; selected by agg_type), optionally z-scores them,
    and finally concatenates all folds and refreshes the subtype column.

    Returns a DataFrame indexed by 'TCGA ID' with a 'Hazard' column plus metadata.
    """
    all_dataset_regstrd_pooled = []
    # These flags mirror how the checkpoints were trained; they select which
    # prediction/split pickles to read from disk.
    ignore_missing_moltype = 1 if 'omic' in model else 0
    ignore_missing_histype = 1 if 'grad' in ckpt_name else 0
    use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if ((('path' in model) or ('graph' in model)) and ('cox' not in model)) else ('_', 'all_st', 0)
    use_rnaseq = '_rnaseq' if ('rnaseq' in ckpt_name and 'path' != model and 'pathpath' not in model and 'graph' != model and 'graphgraph' not in model) else ''
    for k in range(1,16):
        pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
        if 'cox' not in model:
            # pred holds [hazard, survtime, censor, grade_pred, grade]; drop the
            # grade logits (index 3) and tabulate the remaining arrays.
            surv_all = pd.DataFrame(np.stack(np.delete(np.array(pred), 3))).T
            surv_all.columns = ['Hazard', 'Survival months', 'censored', 'Grade']
        data_cv = pickle.load(open('./data/TCGA_GBMLGG/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (roi_dir, ignore_missing_moltype, ignore_missing_histype, use_vgg_features, use_rnaseq), 'rb'))
        data_cv_splits = data_cv['cv_splits']
        data_cv_split_k = data_cv_splits[k]
        assert np.all(data_cv_split_k[split]['t'] == pred[1]) # Data is correctly registered
        all_dataset = data_cv['data_pd'].drop('TCGA ID', axis=1)
        all_dataset_regstrd = all_dataset.loc[data_cv_split_k[split]['x_patname']] # Subset of "all_datasets" (metadata) that is registered with "pred" (predictions)
        assert np.all(np.array(all_dataset_regstrd['Survival months']) == pred[1])
        assert np.all(np.array(all_dataset_regstrd['censored']) == pred[2])
        assert np.all(np.array(all_dataset_regstrd['Grade']) == pred[4])
        all_dataset_regstrd.insert(loc=0, column='Hazard', value = np.array(surv_all['Hazard']))
        all_dataset_regstrd.index.name = 'TCGA ID'
        # Collapse patch-level hazards to one value per patient, keep only the
        # requested aggregate, and re-attach the patient metadata.
        hazard_agg = all_dataset_regstrd.groupby('TCGA ID').agg({'Hazard': ['mean', 'median', max, p(0.25), p(0.75)]})
        hazard_agg.columns = ["_".join(x) for x in hazard_agg.columns.ravel()]
        hazard_agg = hazard_agg[[agg_type]]
        hazard_agg.columns = ['Hazard']
        pred = hazard_agg.join(all_dataset, how='inner')
        if zscore: pred['Hazard'] = scipy.stats.zscore(np.array(pred['Hazard']))
        all_dataset_regstrd_pooled.append(pred)
    all_dataset_regstrd_pooled = pd.concat(all_dataset_regstrd_pooled)
    all_dataset_regstrd_pooled = changeHistomolecularSubtype(all_dataset_regstrd_pooled)
    return all_dataset_regstrd_pooled
def getAggHazardCV(ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='pathgraphomic_fusion', split='test', agg_type='Hazard_mean'):
    """Concordance index per CV fold after aggregating patch hazards per patient.

    Mirrors poolSurvTestPD's loading/registration logic, but instead of pooling
    the folds it computes CIndex_lifeline on each fold's aggregated hazards.

    Returns a list of 15 C-index values (one per fold).
    """
    result = []
    # Flags mirror how the checkpoints were trained; they select which
    # prediction/split pickles to read from disk.
    ignore_missing_moltype = 1 if 'omic' in model else 0
    ignore_missing_histype = 1 if 'grad' in ckpt_name else 0
    use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if (('path' in model) or ('graph' in model)) else ('_', 'all_st', 0)
    use_rnaseq = '_rnaseq' if ('rnaseq' in ckpt_name and 'path' != model and 'pathpath' not in model and 'graph' != model and 'graphgraph' not in model) else ''
    for k in range(1,16):
        pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
        # pred holds [hazard, survtime, censor, grade_pred, grade]; drop the
        # grade logits (index 3) and tabulate the remaining arrays.
        surv_all = pd.DataFrame(np.stack(np.delete(np.array(pred), 3))).T
        surv_all.columns = ['Hazard', 'Survival months', 'censored', 'Grade']
        data_cv = pickle.load(open('./data/TCGA_GBMLGG/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (roi_dir, ignore_missing_moltype, ignore_missing_histype, use_vgg_features, use_rnaseq), 'rb'))
        data_cv_splits = data_cv['cv_splits']
        data_cv_split_k = data_cv_splits[k]
        assert np.all(data_cv_split_k[split]['t'] == pred[1]) # Data is correctly registered
        all_dataset = data_cv['data_pd'].drop('TCGA ID', axis=1)
        all_dataset_regstrd = all_dataset.loc[data_cv_split_k[split]['x_patname']] # Subset of "all_datasets" (metadata) that is registered with "pred" (predictions)
        assert np.all(np.array(all_dataset_regstrd['Survival months']) == pred[1])
        assert np.all(np.array(all_dataset_regstrd['censored']) == pred[2])
        assert np.all(np.array(all_dataset_regstrd['Grade']) == pred[4])
        all_dataset_regstrd.insert(loc=0, column='Hazard', value = np.array(surv_all['Hazard']))
        all_dataset_regstrd.index.name = 'TCGA ID'
        # Collapse patch-level hazards to one value per patient, then score.
        hazard_agg = all_dataset_regstrd.groupby('TCGA ID').agg({'Hazard': ['mean', max, p(0.75)]})
        hazard_agg.columns = ["_".join(x) for x in hazard_agg.columns.ravel()]
        hazard_agg = hazard_agg[[agg_type]]
        hazard_agg.columns = ['Hazard']
        all_dataset_hazard = hazard_agg.join(all_dataset, how='inner')
        cin = CIndex_lifeline(all_dataset_hazard['Hazard'], all_dataset_hazard['censored'], all_dataset_hazard['Survival months'])
        result.append(cin)
    return result
def calcGradMetrics(ckpt_name='./checkpoints/grad_15/', model='pathgraphomic_fusion', split='test', avg='micro'):
    """Grade-classification metrics pooled over the 15 CV folds.

    Returns an array of four "mean ± half-CI" strings: ROC AUC, average
    precision, F1 (averaged per *avg*), and F1 for grade IV (class index 2).
    """
    auc_all = []
    ap_all = []
    f1_all = []
    f1_gradeIV_all = []
    # Flags mirror how the checkpoints were trained; they select which
    # prediction pickles to read from disk.
    ignore_missing_moltype = 1 if 'omic' in model else 0
    ignore_missing_histype = 1 if 'grad' in ckpt_name else 0
    use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if (('path' in model) or ('graph' in model)) else ('_', 'all_st', 0)
    for k in range(1,16):
        pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
        grade_pred, grade = np.array(pred[3]), np.array(pred[4])
        enc = LabelBinarizer()
        enc.fit(grade)
        grade_oh = enc.transform(grade)
        # Fixed: `average` must be passed as a keyword — it is keyword-only in
        # current scikit-learn, so the positional form raised TypeError.
        rocauc = roc_auc_score(grade_oh, grade_pred, average=avg)
        ap = average_precision_score(grade_oh, grade_pred, average=avg)
        # NOTE(review): arguments are (predictions, labels) — swapped relative
        # to sklearn's (y_true, y_pred). Per-class and micro F1 are symmetric
        # under the swap, so results are unchanged, but confirm intent.
        f1 = f1_score(grade_pred.argmax(axis=1), grade, average=avg)
        f1_gradeIV = f1_score(grade_pred.argmax(axis=1), grade, average=None)[2]
        auc_all.append(rocauc)
        ap_all.append(ap)
        f1_all.append(f1)
        f1_gradeIV_all.append(f1_gradeIV)
    return np.array([CI_pm(auc_all), CI_pm(ap_all), CI_pm(f1_all), CI_pm(f1_gradeIV_all)])
################
# Plot Utils
################
def makeKaplanMeierPlot(ckpt_name='./checkpoints/surv_15_rnaseq/', model='omic', split='test', zscore=False, agg_type='Hazard_mean'):
    """Save Kaplan-Meier curves comparing predicted risk groups to true grades.

    Pools CV predictions via poolSurvTestPD, then writes one PNG per
    histomolecular subtype plus one for the whole cohort
    (<model>_KM_<subtype>.png in ckpt_name).
    """
    def hazard2KMCurve(data, subtype):
        # Tertile cut points on the pooled hazards define low/mid/high risk groups.
        p = np.percentile(data['Hazard'], [33, 66])
        if p[0] == p[1]: p[0] = 2.99997  # NOTE(review): magic fallback when tertiles collide — confirm origin
        data.insert(0, 'grade_pred', [hazard2grade(hazard, p) for hazard in data['Hazard']])
        kmf_pred = lifelines.KaplanMeierFitter()
        kmf_gt = lifelines.KaplanMeierFitter()
        def get_name(model):
            # Legend display name for the model string.
            mode2name = {'pathgraphomic':'Pathomic F.', 'pathomic':'Pathomic F.', 'graphomic':'Pathomic F.', 'path':'Histology CNN', 'graph':'Histology GCN', 'omic':'Genomic SNN'}
            for mode in mode2name.keys():
                if mode in model: return mode2name[mode]
            return 'N/A'
        fig = plt.figure(figsize=(10, 10), dpi=600)
        ax = plt.subplot()
        censor_style = {'ms': 20, 'marker': '+'}
        # For each grade: ground-truth curve dashed, predicted-risk curve solid.
        temp = data[data['Grade']==0]
        kmf_gt.fit(temp['Survival months']/365, temp['censored'], label="Grade II")
        kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='g', linewidth=3, ls='--', markerfacecolor='black', censor_styles=censor_style)
        temp = data[data['grade_pred']==0]
        kmf_pred.fit(temp['Survival months']/365, temp['censored'], label="%s (Low)" % get_name(model))
        kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c='g', linewidth=4, ls='-', markerfacecolor='black', censor_styles=censor_style)
        temp = data[data['Grade']==1]
        kmf_gt.fit(temp['Survival months']/365, temp['censored'], label="Grade III")
        kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='b', linewidth=3, ls='--', censor_styles=censor_style)
        temp = data[data['grade_pred']==1]
        kmf_pred.fit(temp['Survival months']/365, temp['censored'], label="%s (Mid)" % get_name(model))
        kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c='b', linewidth=4, ls='-', censor_styles=censor_style)
        if subtype != 'ODG':
            # The grade IV / high-risk pair is skipped for the ODG subtype.
            temp = data[data['Grade']==2]
            kmf_gt.fit(temp['Survival months']/365, temp['censored'], label="Grade IV")
            kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='r', linewidth=3, ls='--', censor_styles=censor_style)
            temp = data[data['grade_pred']==2]
            kmf_pred.fit(temp['Survival months']/365, temp['censored'], label="%s (High)" % get_name(model))
            kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c='r', linewidth=4, ls='-', censor_styles=censor_style)
        ax.set_xlabel('')
        ax.set_ylim(0, 1)
        ax.set_yticks(np.arange(0, 1.001, 0.5))
        ax.tick_params(axis='both', which='major', labelsize=40)
        plt.legend(fontsize=32, prop=font_manager.FontProperties(family='Arial', style='normal', size=32))
        # Only the idhwt_ATC panel keeps its legend.
        if subtype != 'idhwt_ATC': ax.get_legend().remove()
        return fig
    data = poolSurvTestPD(ckpt_name, model, split, zscore, agg_type)
    for subtype in ['idhwt_ATC', 'idhmut_ATC', 'ODG']:
        fig = hazard2KMCurve(data[data['Histomolecular subtype'] == subtype], subtype)
        fig.savefig(ckpt_name+'/%s_KM_%s.png' % (model, subtype))
    fig = hazard2KMCurve(data, 'all')
    fig.savefig(ckpt_name+'/%s_KM_%s.png' % (model, 'all'))
def makeHazardSwarmPlot(ckpt_name='./checkpoints/surv_15_rnaseq/', model='path', split='test', zscore=True, agg_type='Hazard_mean'):
    """Save a swarm plot of (z-scored) hazards per histomolecular subtype,
    colored by true grade, as <model>_HSP.png in ckpt_name."""
    mpl.rcParams['font.family'] = "arial"
    data = poolSurvTestPD(ckpt_name=ckpt_name, model=model, split=split, zscore=zscore, agg_type=agg_type)
    # Drop rows with unknown grade / subtype, then map codes to display labels.
    data = data[data['Grade'] != -1]
    data = data[data['Histomolecular subtype'] != -1]
    data['Grade'] = data['Grade'].astype(int).astype(str)
    data['Grade'] = data['Grade'].str.replace('0', 'Grade II', regex=False)
    data['Grade'] = data['Grade'].str.replace('1', 'Grade III', regex=False)
    data['Grade'] = data['Grade'].str.replace('2', 'Grade IV', regex=False)
    data['Histomolecular subtype'] = data['Histomolecular subtype'].str.replace('idhwt_ATC', 'IDH-wt \n astryocytoma', regex=False)
    data['Histomolecular subtype'] = data['Histomolecular subtype'].str.replace('idhmut_ATC', 'IDH-mut \n astrocytoma', regex=False)
    data['Histomolecular subtype'] = data['Histomolecular subtype'].str.replace('ODG', 'Oligodendroglioma', regex=False)
    fig, ax = plt.subplots(dpi=600)
    ax.set_ylim([-2, 2.5]) # plt.ylim(-2, 2)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.set_yticks(np.arange(-2, 2.001, 1))
    sns.swarmplot(x = 'Histomolecular subtype', y='Hazard', data=data, hue='Grade',
                  palette={"Grade II":"#AFD275" , "Grade III":"#7395AE", "Grade IV":"#E7717D"},
                  size = 4, alpha = 0.9, ax=ax)
    ax.set_xlabel('') # ax.set_xlabel('Histomolecular subtype', size=16)
    ax.set_ylabel('') # ax.set_ylabel('Hazard (Z-Score)', size=16)
    ax.tick_params(axis='y', which='both', labelsize=20)
    ax.tick_params(axis='x', which='both', labelsize=15)
    # Fixed: labelbottom takes a bool in Matplotlib >= 2.2; the string 'off'
    # (self-flagged "doesn't work??" in the original) silently failed.
    ax.tick_params(axis='x', which='both', labelbottom=False)
    ax.legend(prop={'size': 8})
    fig.savefig(ckpt_name+'/%s_HSP.png' % (model))
def makeHazardBoxPlot(ckpt_name='./checkpoints/surv_15_rnaseq/', model='omic', split='test', zscore=True, agg_type='Hazard_mean'):
    """Save per-subtype box+strip plots of (z-scored) hazards grouped by true
    grade, as <model>_HBP.png in ckpt_name."""
    mpl.rcParams['font.family'] = "arial"
    # Fixed: forward agg_type — the original hard-coded 'Hazard_mean' and
    # silently ignored the caller's choice.
    data = poolSurvTestPD(ckpt_name, model, split, zscore, agg_type)
    data['Grade'] = data['Grade'].astype(int).astype(str)
    data['Grade'] = data['Grade'].str.replace('0', 'II', regex=False)
    data['Grade'] = data['Grade'].str.replace('1', 'III', regex=False)
    data['Grade'] = data['Grade'].str.replace('2', 'IV', regex=False)
    fig, axes = plt.subplots(nrows=1, ncols=3, gridspec_kw={'width_ratios': [3, 3, 2]}, dpi=600)
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.ylim(-2, 2)
    plt.yticks(np.arange(-2, 2.001, 1))
    color_dict = ['#F76C6C', '#A8D0E6', '#F8E9A1']
    subtypes = ['idhwt_ATC', 'idhmut_ATC', 'ODG']
    for i in range(len(subtypes)):
        # One panel per subtype; the 2nd and 3rd panels hide their y-axes.
        axes[i].spines["top"].set_visible(False)
        axes[i].spines["right"].set_visible(False)
        axes[i].xaxis.grid(False)
        axes[i].yaxis.grid(False)
        if i > 0:
            axes[i].get_yaxis().set_visible(False)
            axes[i].spines["left"].set_visible(False)
        # The ODG panel omits the grade IV column.
        order = ["II","III","IV"] if subtypes[i] != 'ODG' else ["II", "III"]
        axes[i].xaxis.label.set_visible(False)
        axes[i].yaxis.label.set_visible(False)
        axes[i].tick_params(axis='y', which='both', labelsize=20)
        axes[i].tick_params(axis='x', which='both', labelsize=15)
        datapoints = data[data['Histomolecular subtype'] == subtypes[i]]
        sns.boxplot(y='Hazard', x="Grade", data=datapoints, ax = axes[i], color=color_dict[i], order=order)
        sns.stripplot(y='Hazard', x='Grade', data=datapoints, alpha=0.2, jitter=0.2, color='k', ax = axes[i], order=order)
        axes[i].set_ylim(-2.5, 2.5)
        axes[i].set_yticks(np.arange(-2.0, 2.1, 1))
    fig.savefig(ckpt_name+'/%s_HBP.png' % (model))
def makeAUROCPlot(ckpt_name='./checkpoints/grad_15/', model_list=['path', 'omic', 'pathgraphomic_fusion'], split='test', avg='micro', use_zoom=False):
    """Save per-class (and micro-averaged) ROC curves comparing several models.

    For each class in {0, 1, 2, avg} one figure is created; each model's mean
    ROC over the 15 CV folds is drawn with a ±1 std band. Figures are written
    to ckpt_name as AUC_<class>[_zoom].png.
    """
    mpl.rcParams['font.family'] = "arial"
    colors = {'path':'dodgerblue', 'graph':'orange', 'omic':'green', 'pathgraphomic_fusion':'crimson'}
    names = {'path':'Histology CNN', 'graph':'Histology GCN', 'omic':'Genomic SNN', 'pathgraphomic_fusion':'Pathomic F.'}
    # Axis windows used when use_zoom is set, keyed by class.
    zoom_params = {0:([0.2, 0.4], [0.8, 1.0]),
                   1:([0.25, 0.45], [0.75, 0.95]),
                   2:([0.0, 0.2], [0.8, 1.0]),
                   'micro':([0.15, 0.35], [0.8, 1.0])}
    mean_fpr = np.linspace(0, 1, 100)
    classes = [0, 1, 2, avg]
    ### 1. Looping over classes
    for i in classes:
        print("Class: " + str(i))
        fi = pylab.figure(figsize=(10,10), dpi=600, linewidth=0.2)
        axi = plt.subplot()
        ### 2. Looping over models
        for m, model in enumerate(model_list):
            # Flags mirror how the checkpoints were trained.
            ignore_missing_moltype = 1 if 'omic' in model else 0
            ignore_missing_histype = 1 if 'grad' in ckpt_name else 0
            use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if (('path' in model) or ('graph' in model)) else ('_', 'all_st', 0)
            ### 3. Looping over all splits
            tprs, pres, aucrocs, rocaucs = [], [], [], []
            for k in range(1,16):
                pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
                grade_pred, grade = np.array(pred[3]), np.array(pred[4])
                enc = LabelBinarizer()
                enc.fit(grade)
                grade_oh = enc.transform(grade)
                if i != avg:
                    pres.append(average_precision_score(grade_oh[:, i], grade_pred[:, i])) # from https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html
                    fpr, tpr, thresh = roc_curve(grade_oh[:,i], grade_pred[:,i], drop_intermediate=False)
                    aucrocs.append(auc(fpr, tpr)) # https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
                    rocaucs.append(roc_auc_score(grade_oh[:,i], grade_pred[:,i])) # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score
                    # NOTE(review): interp is presumably scipy's alias of
                    # np.interp (imported at file head) — confirm.
                    tprs.append(interp(mean_fpr, fpr, tpr))
                    tprs[-1][0] = 0.0
                else:
                    # A "micro-average": quantifying score on all classes jointly
                    pres.append(average_precision_score(grade_oh, grade_pred, average=avg))
                    fpr, tpr, thresh = roc_curve(grade_oh.ravel(), grade_pred.ravel())
                    aucrocs.append(auc(fpr, tpr))
                    # Fixed: pass average as a keyword — it is keyword-only in
                    # current scikit-learn, so the positional form raised TypeError.
                    rocaucs.append(roc_auc_score(grade_oh, grade_pred, average=avg))
                    tprs.append(interp(mean_fpr, fpr, tpr))
                    tprs[-1][0] = 0.0
            mean_tpr = np.mean(tprs, axis=0)
            mean_tpr[-1] = 1.0
            # Mean AUC over folds (not AUC of the mean curve).
            mean_auc = np.mean(aucrocs)
            std_auc = np.std(aucrocs)
            print('\t'+'%s - AUC: %0.3f ± %0.3f' % (model, mean_auc, std_auc))
            if use_zoom:
                alpha, lw = (0.8, 6) if model =='pathgraphomic_fusion' else (0.5, 6)
                plt.plot(mean_fpr, mean_tpr, color=colors[model],
                         label=r'%s (AUC = %0.3f $\pm$ %0.3f)' % (names[model], mean_auc, std_auc), lw=lw, alpha=alpha)
                std_tpr = np.std(tprs, axis=0)
                tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
                tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
                plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color=colors[model], alpha=0.1)
                plt.xlim([zoom_params[i][0][0]-0.005, zoom_params[i][0][1]+0.005])
                plt.ylim([zoom_params[i][1][0]-0.005, zoom_params[i][1][1]+0.005])
                axi.set_xticks(np.arange(zoom_params[i][0][0], zoom_params[i][0][1]+0.001, 0.05))
                axi.set_yticks(np.arange(zoom_params[i][1][0], zoom_params[i][1][1]+0.001, 0.05))
                axi.tick_params(axis='both', which='major', labelsize=26)
            else:
                alpha, lw = (0.8, 4) if model =='pathgraphomic_fusion' else (0.5, 3)
                plt.plot(mean_fpr, mean_tpr, color=colors[model],
                         label=r'%s (AUC = %0.3f $\pm$ %0.3f)' % (names[model], mean_auc, std_auc), lw=lw, alpha=alpha)
                std_tpr = np.std(tprs, axis=0)
                tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
                tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
                plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color=colors[model], alpha=0.1)
                plt.xlim([-0.05, 1.05])
                plt.ylim([-0.05, 1.05])
                axi.set_xticks(np.arange(0, 1.001, 0.2))
                axi.set_yticks(np.arange(0, 1.001, 0.2))
                axi.legend(loc="lower right", prop={'size': 20})
                axi.tick_params(axis='both', which='major', labelsize=30)
    # Save every open figure (one per class), in creation order.
    figures = [manager.canvas.figure
               for manager in mpl._pylab_helpers.Gcf.get_all_fig_managers()]
    zoom = '_zoom' if use_zoom else ''
    for i, fig in enumerate(figures):
        fig.savefig(ckpt_name+'/AUC_%s%s.png' % (classes[i], zoom))
| 41,241
| 45.235426
| 205
|
py
|
PathomicFusion
|
PathomicFusion-master/networks.py
|
# Base / Native
import csv
from collections import Counter
import copy
import json
import functools
import gc
import logging
import math
import os
import pdb
import pickle
import random
import sys
import tables
import time
from tqdm import tqdm
# Numerical / Array
import numpy as np
# Torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch import Tensor
from torch.autograd import Variable
from torch.nn import init, Parameter
from torch.utils.data import DataLoader
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from torchvision import datasets, transforms
import torch.optim.lr_scheduler as lr_scheduler
from torch_geometric.nn import GCNConv, SAGEConv, GraphConv, GatedGraphConv, GATConv
from torch_geometric.nn import GraphConv, TopKPooling, SAGPooling
from torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp
from torch_geometric.transforms.normalize_features import NormalizeFeatures
# Env
from fusion import *
from options import parse_args
from utils import *
################
# Network Utils
################
def define_net(opt, k):
    """Instantiate the network selected by opt.mode (CV fold *k*) and initialize it.

    Parameters:
        opt -- parsed options; opt.mode picks the architecture, and the
               remaining attributes are forwarded to the constructors.
        k   -- cross-validation fold index (forwarded to the fusion networks).

    Returns the network after init_net (device placement + weight init).
    """
    net = None
    act = define_act_layer(act_type=opt.act_type)
    init_max = True if opt.init_type == "max" else False

    if opt.mode == "path":
        net = get_vgg(path_dim=opt.path_dim, act=act, label_dim=opt.label_dim)
    elif opt.mode == "graph":
        net = GraphNet(grph_dim=opt.grph_dim, dropout_rate=opt.dropout_rate, GNN=opt.GNN, use_edges=opt.use_edges, pooling_ratio=opt.pooling_ratio, act=act, label_dim=opt.label_dim, init_max=init_max)
    elif opt.mode == "omic":
        net = MaxNet(input_dim=opt.input_size_omic, omic_dim=opt.omic_dim, dropout_rate=opt.dropout_rate, act=act, label_dim=opt.label_dim, init_max=init_max)
    elif opt.mode == "graphomic":
        net = GraphomicNet(opt=opt, act=act, k=k)
    elif opt.mode == "pathomic":
        net = PathomicNet(opt=opt, act=act, k=k)
    elif opt.mode == "pathgraphomic":
        net = PathgraphomicNet(opt=opt, act=act, k=k)
    elif opt.mode == "pathpath":
        net = PathpathNet(opt=opt, act=act, k=k)
    elif opt.mode == "graphgraph":
        net = GraphgraphNet(opt=opt, act=act, k=k)
    elif opt.mode == "omicomic":
        net = OmicomicNet(opt=opt, act=act, k=k)
    else:
        # Fixed: the message previously formatted opt.model, which does not
        # exist (every branch dispatches on opt.mode), so the error path
        # itself raised AttributeError.
        raise NotImplementedError('model [%s] is not implemented' % opt.mode)
    return init_net(net, opt.init_type, opt.init_gain, opt.gpu_ids)
def define_optimizer(opt, model):
    """Build the optimizer named by ``opt.optimizer_type`` for ``model``.

    Supported: 'adabound', 'adam', 'adagrad'.

    Raises:
        NotImplementedError: for an unknown ``opt.optimizer_type``.
    """
    optimizer = None
    if opt.optimizer_type == 'adabound':
        # NOTE(review): ``adabound`` is not imported in this file's visible
        # import block — confirm it is imported elsewhere before using this
        # branch.
        optimizer = adabound.AdaBound(model.parameters(), lr=opt.lr, final_lr=opt.final_lr)
    elif opt.optimizer_type == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2), weight_decay=opt.weight_decay)
    elif opt.optimizer_type == 'adagrad':
        optimizer = torch.optim.Adagrad(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay, initial_accumulator_value=0.1)
    else:
        # BUG FIX: was ``opt.optimizer`` (nonexistent attribute), which raised
        # AttributeError instead of the intended message.
        raise NotImplementedError('initialization method [%s] is not implemented' % opt.optimizer_type)
    return optimizer
def define_reg(opt, model):
    """Return the weight-regularization term selected by ``opt.reg_type``.

    'none' yields 0; the remaining kinds delegate to the ``regularize_*``
    helpers from utils.

    Raises:
        NotImplementedError: for an unknown ``opt.reg_type``.
    """
    dispatch = {
        'none': lambda m: 0,
        'path': lambda m: regularize_path_weights(model=m),
        'mm': lambda m: regularize_MM_weights(model=m),
        'all': lambda m: regularize_weights(model=m),
        'omic': lambda m: regularize_MM_omic(model=m),
    }
    if opt.reg_type not in dispatch:
        raise NotImplementedError('reg method [%s] is not implemented' % opt.reg_type)
    return dispatch[opt.reg_type](model)
def define_scheduler(opt, optimizer):
    """Build the learning-rate scheduler named by ``opt.lr_policy``.

    Supported policies: 'linear', 'exp', 'step', 'plateau', 'cosine'.

    Raises:
        NotImplementedError: for an unknown ``opt.lr_policy``.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            # Constant LR for ``niter`` epochs, then linear decay to 0 over
            # the following ``niter_decay`` epochs.
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'exp':
        scheduler = lr_scheduler.ExponentialLR(optimizer, 0.1, last_epoch=-1)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        # BUG FIX: the original *returned* a NotImplementedError instance
        # instead of raising it, handing callers an exception object where a
        # scheduler was expected.
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
def define_act_layer(act_type='Tanh'):
    """Build the output activation layer named by ``act_type``.

    ``"none"`` yields ``None`` (no activation applied by callers).

    Raises:
        NotImplementedError: for an unknown ``act_type``.
    """
    factories = {
        'Tanh': nn.Tanh,
        'ReLU': nn.ReLU,
        'Sigmoid': nn.Sigmoid,
        'LSM': lambda: nn.LogSoftmax(dim=1),
        'none': lambda: None,
    }
    if act_type not in factories:
        raise NotImplementedError('activation layer [%s] is not found' % act_type)
    return factories[act_type]()
def define_bifusion(fusion_type, skip=1, use_bilinear=1, gate1=1, gate2=1, dim1=32, dim2=32, scale_dim1=1, scale_dim2=1, mmhid=64, dropout_rate=0.25):
    """Construct a two-modality tensor-fusion module.

    Only the 'pofusion' (gated bilinear pathomic fusion) type is supported.

    Raises:
        NotImplementedError: for any other ``fusion_type``.
    """
    if fusion_type != 'pofusion':
        raise NotImplementedError('fusion type [%s] is not found' % fusion_type)
    return BilinearFusion(skip=skip, use_bilinear=use_bilinear, gate1=gate1, gate2=gate2,
                          dim1=dim1, dim2=dim2, scale_dim1=scale_dim1, scale_dim2=scale_dim2,
                          mmhid=mmhid, dropout_rate=dropout_rate)
def define_trifusion(fusion_type, skip=1, use_bilinear=1, gate1=1, gate2=1, gate3=3, dim1=32, dim2=32, dim3=32, scale_dim1=1, scale_dim2=1, scale_dim3=1, mmhid=96, dropout_rate=0.25):
fusion = None
if fusion_type == 'pofusion_A':
fusion = TrilinearFusion_A(skip=skip, use_bilinear=use_bilinear, gate1=gate1, gate2=gate2, gate3=gate3, dim1=dim1, dim2=dim2, dim3=dim3, scale_dim1=scale_dim1, scale_dim2=scale_dim2, scale_dim3=scale_dim3, mmhid=mmhid, dropout_rate=dropout_rate)
elif fusion_type == 'pofusion_B':
fusion = TrilinearFusion_B(skip=skip, use_bilinear=use_bilinear, gate1=gate1, gate2=gate2, gate3=gate3, dim1=dim1, dim2=dim2, dim3=dim3, scale_dim1=scale_dim1, scale_dim2=scale_dim2, scale_dim3=scale_dim3, mmhid=mmhid, dropout_rate=dropout_rate)
else:
raise NotImplementedError('fusion type [%s] is not found' % fusion_type)
return fusion
############
# Omic Model
############
class MaxNet(nn.Module):
    """Self-normalizing feed-forward encoder for genomic features.

    Four identical (Linear -> ELU -> AlphaDropout) stages compress the input
    to an ``omic_dim``-dimensional embedding, followed by a single linear
    classifier head. An optional output activation is applied; after a
    Sigmoid, outputs are affinely rescaled from (0, 1) to (-3, 3).
    """

    def __init__(self, input_dim=80, omic_dim=32, dropout_rate=0.25, act=None, label_dim=1, init_max=True):
        super(MaxNet, self).__init__()
        hidden = [64, 48, 32, 32]
        self.act = act

        # Stage widths: input -> 64 -> 48 -> 32 -> omic_dim.
        widths = [input_dim, hidden[0], hidden[1], hidden[2], omic_dim]
        stages = [
            nn.Sequential(
                nn.Linear(widths[i], widths[i + 1]),
                nn.ELU(),
                nn.AlphaDropout(p=dropout_rate, inplace=False))
            for i in range(4)
        ]
        self.encoder = nn.Sequential(*stages)
        self.classifier = nn.Sequential(nn.Linear(omic_dim, label_dim))

        if init_max:
            init_max_weights(self)

        # Affine rescaling applied after a Sigmoid: maps (0, 1) into (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

    def forward(self, **kwargs):
        features = self.encoder(kwargs['x_omic'])
        out = self.classifier(features)
        if self.act is not None:
            out = self.act(out)
            if isinstance(self.act, nn.Sigmoid):
                out = out * self.output_range + self.output_shift
        return features, out
############
# Graph Model
############
class NormalizeFeaturesV2(object):
    r"""Scale each node-feature column of ``data.x`` by its column maximum.

    NOTE(review): this definition is shadowed by the re-definition of the
    same class name immediately below, so this version is effectively dead
    code; retained for compatibility.
    """

    def __call__(self, data):
        col_max = data.x.max(0, keepdim=True)[0]
        data.x = data.x / col_max
        return data

    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
class NormalizeFeaturesV2(object):
    r"""Scale the first 12 node-feature columns by their column-wise maxima.

    Also casts ``data.x`` to ``torch.cuda.FloatTensor``, so a CUDA device is
    required. NOTE(review): this re-definition shadows the class of the same
    name defined immediately above; only this version is used at runtime.
    """
    def __call__(self, data):
        # Only the first 12 columns are max-normalized; the remaining columns
        # pass through unscaled (presumably already-normalized learned
        # features — TODO confirm against the graph-building pipeline).
        data.x[:, :12] = data.x[:, :12] / data.x[:, :12].max(0, keepdim=True)[0]
        data.x = data.x.type(torch.cuda.FloatTensor)
        return data
    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
class NormalizeEdgesV2(object):
    r"""Scale each edge-attribute column of ``data.edge_attr`` by its
    column-wise maximum.

    Casts the attributes to ``torch.cuda.FloatTensor`` first, so a CUDA
    device is required.
    """
    def __call__(self, data):
        data.edge_attr = data.edge_attr.type(torch.cuda.FloatTensor)
        data.edge_attr = data.edge_attr / data.edge_attr.max(0, keepdim=True)[0]#.type(torch.cuda.FloatTensor)
        return data
    def __repr__(self):
        return '{}()'.format(self.__class__.__name__)
class GraphNet(torch.nn.Module):
    """Hierarchical graph encoder for cell-graph inputs.

    Three SAGEConv + SAGPooling stages; after each stage the graph is
    summarized by concatenated global max- and mean-pooling, and the three
    summaries are summed before a small MLP head produces a
    ``grph_dim``-dimensional embedding plus a label output. After a Sigmoid
    activation, outputs are affinely rescaled from (0, 1) to (-3, 3).
    """

    def __init__(self, features=1036, nhid=128, grph_dim=32, nonlinearity=torch.tanh,
                 dropout_rate=0.25, GNN='GCN', use_edges=0, pooling_ratio=0.20, act=None, label_dim=1, init_max=True):
        super(GraphNet, self).__init__()

        self.dropout_rate = dropout_rate
        self.use_edges = use_edges
        self.act = act

        self.conv1 = SAGEConv(features, nhid)
        self.pool1 = SAGPooling(nhid, ratio=pooling_ratio, gnn=GNN)
        self.conv2 = SAGEConv(nhid, nhid)
        self.pool2 = SAGPooling(nhid, ratio=pooling_ratio, gnn=GNN)
        self.conv3 = SAGEConv(nhid, nhid)
        self.pool3 = SAGPooling(nhid, ratio=pooling_ratio, gnn=GNN)

        self.lin1 = torch.nn.Linear(nhid*2, nhid)
        self.lin2 = torch.nn.Linear(nhid, grph_dim)
        self.lin3 = torch.nn.Linear(grph_dim, label_dim)

        # Affine rescaling applied after a Sigmoid: maps (0, 1) into (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

        if init_max:
            init_max_weights(self)
            # BUG FIX: corrected log-message typo ("Initialzing" ->
            # "Initializing"), consistent with get_vgg's message.
            print("Initializing with Max")

    def forward(self, **kwargs):
        data = kwargs['x_grph']
        # Normalize node features / edge attributes in place (casts to CUDA).
        data = NormalizeFeaturesV2()(data)
        data = NormalizeEdgesV2()(data)
        x, edge_index, edge_attr, batch = data.x, data.edge_index, data.edge_attr, data.batch

        x = F.relu(self.conv1(x, edge_index))
        x, edge_index, edge_attr, batch, _ = self.pool1(x, edge_index, edge_attr, batch)
        x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv2(x, edge_index))
        x, edge_index, edge_attr, batch, _ = self.pool2(x, edge_index, edge_attr, batch)
        x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        x = F.relu(self.conv3(x, edge_index))
        x, edge_index, edge_attr, batch, _ = self.pool3(x, edge_index, edge_attr, batch)
        x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)

        # Sum the three per-stage graph summaries (jumping-knowledge style).
        x = x1 + x2 + x3

        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout_rate, training=self.training)
        features = F.relu(self.lin2(x))
        out = self.lin3(features)
        if self.act is not None:
            out = self.act(out)
            if isinstance(self.act, nn.Sigmoid):
                out = out * self.output_range + self.output_shift
        return features, out
############
# Path Model
############
# torchvision download URLs for ImageNet-pretrained VGG checkpoints; used by
# ``get_vgg`` below to initialize the PathNet convolutional backbone.
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class PathNet(nn.Module):
    """VGG-style CNN for histology ROI images.

    Wraps a frozen convolutional feature extractor with adaptive 7x7 pooling
    and a small MLP that produces a ``path_dim``-dimensional embedding plus a
    linear output head. After a Sigmoid activation, outputs are affinely
    rescaled from (0, 1) to (-3, 3).
    """

    def __init__(self, features, path_dim=32, act=None, num_classes=1):
        super(PathNet, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 1024),
            nn.ReLU(True),
            nn.Dropout(0.25),
            nn.Linear(1024, 1024),
            nn.ReLU(True),
            nn.Dropout(0.25),
            nn.Linear(1024, path_dim),
            nn.ReLU(True),
            nn.Dropout(0.05)
        )
        self.linear = nn.Linear(path_dim, num_classes)
        self.act = act

        # Affine rescaling applied after a Sigmoid: maps (0, 1) into (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

        # The convolutional backbone stays frozen; only the MLP head trains.
        dfs_freeze(self.features)

    def forward(self, **kwargs):
        out = self.features(kwargs['x_path'])
        out = self.avgpool(out)
        out = torch.flatten(out, 1)
        features = self.classifier(out)
        hazard = self.linear(features)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return features, hazard
def make_layers(cfg, batch_norm=False):
    """Translate a VGG configuration list into an ``nn.Sequential`` stack.

    Each int entry adds a 3x3 same-padding conv (optionally followed by
    BatchNorm) plus an in-place ReLU; each 'M' entry adds a 2x2 max-pool.
    The input is assumed to have 3 channels (RGB).
    """
    layers = []
    in_channels = 3
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(in_channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(spec))
        layers.append(nn.ReLU(inplace=True))
        in_channels = spec
    return nn.Sequential(*layers)
# VGG layer configurations: ints are conv output channels, 'M' is a 2x2
# max-pool. 'A'=VGG11, 'B'=VGG13, 'D'=VGG16, 'E'=VGG19 ('E' is get_vgg's
# default).
cfgs = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def get_vgg(arch='vgg19_bn', cfg='E', act=None, batch_norm=True, label_dim=1, pretrained=True, progress=True, **kwargs):
    """Build a PathNet on a VGG backbone, optionally ImageNet-pretrained.

    The checkpoint's classifier weights are discarded (PathNet's MLP head has
    a different shape); only the convolutional weights are loaded, with
    ``strict=False``.
    """
    model = PathNet(make_layers(cfgs[cfg], batch_norm=batch_norm), act=act, num_classes=label_dim, **kwargs)
    if pretrained:
        pretrained_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        # Drop the ImageNet classifier head; shapes do not match PathNet's.
        for key in list(pretrained_dict.keys()):
            if 'classifier' in key:
                pretrained_dict.pop(key)
        model.load_state_dict(pretrained_dict, strict=False)
        print("Initializing Path Weights")
    return model
##############################################################################
# Graph + Omic
##############################################################################
class GraphomicNet(nn.Module):
    """Late-fusion model combining the graph (GraphNet) and genomic (MaxNet)
    branches.

    Both unimodal sub-networks are restored from their fold-``k`` checkpoints
    and frozen via ``dfs_freeze``; only the bilinear fusion module and the
    final linear classifier remain trainable.
    """
    def __init__(self, opt, act, k):
        super(GraphomicNet, self).__init__()
        self.grph_net = GraphNet(grph_dim=opt.grph_dim, dropout_rate=opt.dropout_rate, use_edges=1, pooling_ratio=0.20, label_dim=opt.label_dim, init_max=False)
        self.omic_net = MaxNet(input_dim=opt.input_size_omic, omic_dim=opt.omic_dim, dropout_rate=opt.dropout_rate, act=act, label_dim=opt.label_dim, init_max=False)
        if k is not None:
            # Restore the best unimodal weights for cross-validation fold k;
            # checkpoints are mapped to CPU here.
            pt_fname = '_%d.pt' % k
            best_grph_ckpt = torch.load(os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname), map_location=torch.device('cpu'))
            best_omic_ckpt = torch.load(os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic'+pt_fname), map_location=torch.device('cpu'))
            self.grph_net.load_state_dict(best_grph_ckpt['model_state_dict'])
            self.omic_net.load_state_dict(best_omic_ckpt['model_state_dict'])
            print("Loading Models:\n", os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname), "\n", os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic'+pt_fname))
        self.fusion = define_bifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear, gate1=opt.grph_gate, gate2=opt.omic_gate, dim1=opt.grph_dim, dim2=opt.omic_dim, scale_dim1=opt.grph_scale, scale_dim2=opt.omic_scale, mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        # Freeze both unimodal branches; only fusion + classifier train.
        dfs_freeze(self.grph_net)
        dfs_freeze(self.omic_net)
        # Affine rescaling applied after a Sigmoid: maps (0, 1) into (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)
    def forward(self, **kwargs):
        grph_vec, _ = self.grph_net(x_grph=kwargs['x_grph'])
        omic_vec, _ = self.omic_net(x_omic=kwargs['x_omic'])
        features = self.fusion(grph_vec, omic_vec)
        hazard = self.classifier(features)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return features, hazard
    def __hasattr__(self, name):
        # NOTE(review): ``__hasattr__`` is not a special method recognized by
        # Python — hasattr() never calls it — so this method is effectively
        # dead code; retained unchanged.
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in _parameters:
                return True
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                return True
        if '_modules' in self.__dict__:
            modules = self.__dict__['_modules']
            if name in modules:
                return True
        return False
##############################################################################
# Path + Omic
##############################################################################
class PathomicNet(nn.Module):
    """Late fusion of histology embeddings (precomputed VGG features passed in
    as ``x_path``) with the genomic branch (MaxNet).

    The genomic branch is restored from its fold-``k`` checkpoint and frozen;
    only the bilinear fusion module and the classifier head are trainable.
    """

    def __init__(self, opt, act, k):
        super(PathomicNet, self).__init__()
        self.omic_net = MaxNet(input_dim=opt.input_size_omic, omic_dim=opt.omic_dim, dropout_rate=opt.dropout_rate, act=act, label_dim=opt.label_dim, init_max=False)

        if k is not None:
            suffix = '_%d.pt' % k
            ckpt_path = os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic' + suffix)
            best_omic_ckpt = torch.load(ckpt_path, map_location=torch.device('cpu'))
            self.omic_net.load_state_dict(best_omic_ckpt['model_state_dict'])
            print("Loading Models:\n", ckpt_path)

        self.fusion = define_bifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear, gate1=opt.path_gate, gate2=opt.omic_gate, dim1=opt.path_dim, dim2=opt.omic_dim, scale_dim1=opt.path_scale, scale_dim2=opt.omic_scale, mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        dfs_freeze(self.omic_net)

        # Affine rescaling applied after a Sigmoid: maps (0, 1) into (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

    def forward(self, **kwargs):
        omic_vec, _ = self.omic_net(x_omic=kwargs['x_omic'])
        features = self.fusion(kwargs['x_path'], omic_vec)
        hazard = self.classifier(features)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return features, hazard

    def __hasattr__(self, name):
        # NOTE(review): ``__hasattr__`` is not a Python special method and is
        # never invoked by hasattr(); kept for interface parity only.
        for store in ('_parameters', '_buffers', '_modules'):
            if store in self.__dict__ and name in self.__dict__[store]:
                return True
        return False
#############################################################################
# Path + Graph + Omic
##############################################################################
class PathgraphomicNet(nn.Module):
    """Trilinear late fusion of histology embeddings (``x_path``), the graph
    branch (GraphNet), and the genomic branch (MaxNet).

    Graph and genomic sub-networks are restored from their fold-``k``
    checkpoints and frozen; only the trilinear fusion module and the final
    classifier remain trainable.
    """
    def __init__(self, opt, act, k):
        super(PathgraphomicNet, self).__init__()
        self.grph_net = GraphNet(grph_dim=opt.grph_dim, dropout_rate=opt.dropout_rate, use_edges=1, pooling_ratio=0.20, label_dim=opt.label_dim, init_max=False)
        self.omic_net = MaxNet(input_dim=opt.input_size_omic, omic_dim=opt.omic_dim, dropout_rate=opt.dropout_rate, act=act, label_dim=opt.label_dim, init_max=False)
        if k is not None:
            # Restore the best unimodal weights for cross-validation fold k.
            pt_fname = '_%d.pt' % k
            best_grph_ckpt = torch.load(os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname), map_location=torch.device('cpu'))
            best_omic_ckpt = torch.load(os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic'+pt_fname), map_location=torch.device('cpu'))
            self.grph_net.load_state_dict(best_grph_ckpt['model_state_dict'])
            self.omic_net.load_state_dict(best_omic_ckpt['model_state_dict'])
            print("Loading Models:\n", os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname), "\n", os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic'+pt_fname))
        self.fusion = define_trifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear, gate1=opt.path_gate, gate2=opt.grph_gate, gate3=opt.omic_gate, dim1=opt.path_dim, dim2=opt.grph_dim, dim3=opt.omic_dim, scale_dim1=opt.path_scale, scale_dim2=opt.grph_scale, scale_dim3=opt.omic_scale, mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        # Freeze both loaded branches; only fusion + classifier train.
        dfs_freeze(self.grph_net)
        dfs_freeze(self.omic_net)
        # Affine rescaling applied after a Sigmoid: maps (0, 1) into (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)
    def forward(self, **kwargs):
        path_vec = kwargs['x_path']
        grph_vec, _ = self.grph_net(x_grph=kwargs['x_grph'])
        omic_vec, _ = self.omic_net(x_omic=kwargs['x_omic'])
        features = self.fusion(path_vec, grph_vec, omic_vec)
        hazard = self.classifier(features)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return features, hazard
    def __hasattr__(self, name):
        # NOTE(review): ``__hasattr__`` is not a special method recognized by
        # Python — hasattr() never calls it — so this is effectively dead
        # code; retained unchanged.
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in _parameters:
                return True
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                return True
        if '_modules' in self.__dict__:
            modules = self.__dict__['_modules']
            if name in modules:
                return True
        return False
##############################################################################
# Ensembling Effects
##############################################################################
class PathgraphNet(nn.Module):
    """Bilinear late fusion of histology embeddings (``x_path``) with the
    graph branch (GraphNet).

    The graph sub-network is restored from its fold-``k`` checkpoint and
    frozen; only the fusion module and the classifier head train.
    """
    def __init__(self, opt, act, k):
        super(PathgraphNet, self).__init__()
        self.grph_net = GraphNet(grph_dim=opt.grph_dim, dropout_rate=opt.dropout_rate, use_edges=1, pooling_ratio=0.20, label_dim=opt.label_dim, init_max=False)
        if k is not None:
            # Restore the best graph weights for cross-validation fold k.
            pt_fname = '_%d.pt' % k
            best_grph_ckpt = torch.load(os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname), map_location=torch.device('cpu'))
            self.grph_net.load_state_dict(best_grph_ckpt['model_state_dict'])
            print("Loading Models:\n", os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname))
        self.fusion = define_bifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear, gate1=opt.path_gate, gate2=opt.grph_gate, dim1=opt.path_dim, dim2=opt.grph_dim, scale_dim1=opt.path_scale, scale_dim2=opt.grph_scale, mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        dfs_freeze(self.grph_net)
        # Affine rescaling applied after a Sigmoid: maps (0, 1) into (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)
    def forward(self, **kwargs):
        path_vec = kwargs['x_path']
        grph_vec, _ = self.grph_net(x_grph=kwargs['x_grph'])
        features = self.fusion(path_vec, grph_vec)
        hazard = self.classifier(features)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return features, hazard
    def __hasattr__(self, name):
        # NOTE(review): ``__hasattr__`` is not a special method recognized by
        # Python — hasattr() never calls it — so this is effectively dead
        # code; retained unchanged.
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in _parameters:
                return True
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                return True
        if '_modules' in self.__dict__:
            modules = self.__dict__['_modules']
            if name in modules:
                return True
        return False
class PathpathNet(nn.Module):
    """Ablation model: fuses the histology embedding with itself.

    Used to isolate the "ensembling" effect of the fusion module when only
    the path modality is present; the second gate is the complement of the
    first when gating is enabled.
    """

    def __init__(self, opt, act, k):
        super(PathpathNet, self).__init__()
        second_gate = 1 - opt.path_gate if opt.path_gate else 0
        self.fusion = define_bifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear,
                                      gate1=opt.path_gate, gate2=second_gate,
                                      dim1=opt.path_dim, dim2=opt.path_dim,
                                      scale_dim1=opt.path_scale, scale_dim2=opt.path_scale,
                                      mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        # Affine rescaling applied after a Sigmoid: maps (0, 1) into (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

    def forward(self, **kwargs):
        embedding = kwargs['x_path']
        features = self.fusion(embedding, embedding)
        hazard = self.classifier(features)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return features, hazard

    def __hasattr__(self, name):
        # NOTE(review): ``__hasattr__`` is not a Python special method and is
        # never invoked by hasattr(); kept for interface parity only.
        for store in ('_parameters', '_buffers', '_modules'):
            if store in self.__dict__ and name in self.__dict__[store]:
                return True
        return False
class GraphgraphNet(nn.Module):
    """Ablation model: fuses the graph embedding with itself.

    The graph sub-network is restored from its fold-``k`` checkpoint and
    frozen; the second fusion gate is the complement of the first when gating
    is enabled.
    """
    def __init__(self, opt, act, k):
        super(GraphgraphNet, self).__init__()
        self.grph_net = GraphNet(grph_dim=opt.grph_dim, dropout_rate=opt.dropout_rate, use_edges=1, pooling_ratio=0.20, label_dim=opt.label_dim, init_max=False)
        if k is not None:
            # Restore the best graph weights for cross-validation fold k.
            pt_fname = '_%d.pt' % k
            best_grph_ckpt = torch.load(os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname), map_location=torch.device('cpu'))
            self.grph_net.load_state_dict(best_grph_ckpt['model_state_dict'])
            print("Loading Models:\n", os.path.join(opt.checkpoints_dir, opt.exp_name, 'graph', 'graph'+pt_fname))
        self.fusion = define_bifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear, gate1=opt.grph_gate, gate2=1-opt.grph_gate if opt.grph_gate else 0,
                                      dim1=opt.grph_dim, dim2=opt.grph_dim, scale_dim1=opt.grph_scale, scale_dim2=opt.grph_scale, mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        dfs_freeze(self.grph_net)
        # Affine rescaling applied after a Sigmoid: maps (0, 1) into (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)
    def forward(self, **kwargs):
        grph_vec, _ = self.grph_net(x_grph=kwargs['x_grph'])
        features = self.fusion(grph_vec, grph_vec)
        hazard = self.classifier(features)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return features, hazard
    def __hasattr__(self, name):
        # NOTE(review): ``__hasattr__`` is not a special method recognized by
        # Python — hasattr() never calls it — so this is effectively dead
        # code; retained unchanged.
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in _parameters:
                return True
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                return True
        if '_modules' in self.__dict__:
            modules = self.__dict__['_modules']
            if name in modules:
                return True
        return False
class OmicomicNet(nn.Module):
    """Ablation model: fuses the genomic embedding with itself.

    The genomic sub-network is restored from its fold-``k`` checkpoint and
    frozen; the second fusion gate is the complement of the first when gating
    is enabled.
    """
    def __init__(self, opt, act, k):
        super(OmicomicNet, self).__init__()
        self.omic_net = MaxNet(input_dim=opt.input_size_omic, omic_dim=opt.omic_dim, dropout_rate=opt.dropout_rate, act=act, label_dim=opt.label_dim, init_max=False)
        if k is not None:
            # Restore the best omic weights for cross-validation fold k.
            pt_fname = '_%d.pt' % k
            best_omic_ckpt = torch.load(os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic'+pt_fname), map_location=torch.device('cpu'))
            self.omic_net.load_state_dict(best_omic_ckpt['model_state_dict'])
            print("Loading Models:\n", os.path.join(opt.checkpoints_dir, opt.exp_name, 'omic', 'omic'+pt_fname))
        self.fusion = define_bifusion(fusion_type=opt.fusion_type, skip=opt.skip, use_bilinear=opt.use_bilinear, gate1=opt.omic_gate, gate2=1-opt.omic_gate if opt.omic_gate else 0,
                                      dim1=opt.omic_dim, dim2=opt.omic_dim, scale_dim1=opt.omic_scale, scale_dim2=opt.omic_scale, mmhid=opt.mmhid, dropout_rate=opt.dropout_rate)
        self.classifier = nn.Sequential(nn.Linear(opt.mmhid, opt.label_dim))
        self.act = act
        dfs_freeze(self.omic_net)
        # Affine rescaling applied after a Sigmoid: maps (0, 1) into (-3, 3).
        self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
        self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)
    def forward(self, **kwargs):
        omic_vec, _ = self.omic_net(x_omic=kwargs['x_omic'])
        features = self.fusion(omic_vec, omic_vec)
        hazard = self.classifier(features)
        if self.act is not None:
            hazard = self.act(hazard)
            if isinstance(self.act, nn.Sigmoid):
                hazard = hazard * self.output_range + self.output_shift
        return features, hazard
    def __hasattr__(self, name):
        # NOTE(review): ``__hasattr__`` is not a special method recognized by
        # Python — hasattr() never calls it — so this is effectively dead
        # code; retained unchanged.
        if '_parameters' in self.__dict__:
            _parameters = self.__dict__['_parameters']
            if name in _parameters:
                return True
        if '_buffers' in self.__dict__:
            _buffers = self.__dict__['_buffers']
            if name in _buffers:
                return True
        if '_modules' in self.__dict__:
            modules = self.__dict__['_modules']
            if name in modules:
                return True
        return False
# ---------------------------------------------------------------------------
# End of networks.py (PathomicFusion). The lines that stood here were
# dataset-extraction metadata (file length / line-length statistics and the
# path "PathomicFusion-master/make_splits.py"), not Python source. The
# remainder of this file is the concatenated source of make_splits.py.
# ---------------------------------------------------------------------------
### data_loaders.py
import argparse
import os
import pickle
import numpy as np
import pandas as pd
from PIL import Image
from sklearn import preprocessing
# Env
from networks import define_net
from utils import getCleanAllDataset
import torch
from torchvision import transforms
from options import parse_gpuids
### Initializes parser and data
"""
all_st
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 0 --use_vgg_features 0 --roi_dir all_st # for training Surv Path, Surv Graph, and testing Surv Graph
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 1 --use_vgg_features 0 --roi_dir all_st # for training Grad Path, Grad Graph, and testing Surv_graph
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 0 --roi_dir all_st # for training Surv Omic, Surv Graphomic
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 0 --roi_dir all_st # for training Grad Omic, Grad Graphomic
all_st_patches_512 (no VGG)
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 0 --use_vgg_features 0 --roi_dir all_st_patches_512 # for testing Surv Path
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 1 --use_vgg_features 0 --roi_dir all_st_patches_512 # for testing Grad Path
all_st_patches_512 (use VGG)
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 0 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name surv_15 --gpu_ids 0 # for Surv Pathgraph
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 1 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name grad_15 --act_type LSM --label_dim 3 --gpu_ids 1 # for Grad Pathgraph
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name surv_15 --gpu_ids 2 # for Surv Pathomic, Pathgraphomic
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name grad_15 --act_type LSM --label_dim 3 --gpu_ids 3 # for Grad Pathomic, Pathgraphomic
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 1 --make_all_train 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 0 --roi_dir all_st --use_rnaseq 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 0 --roi_dir all_st --use_rnaseq 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name surv_15 --use_rnaseq 1 --gpu_ids 2
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name grad_15 --use_rnaseq 1 --act_type LSM --label_dim 3 --gpu_ids 3
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 0 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name surv_15_rnaseq --gpu_ids 0
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name surv_15_rnaseq --use_rnaseq 1 --gpu_ids 0
python make_splits.py --ignore_missing_moltype 0 --ignore_missing_histype 1 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name grad_15 --act_type LSM --label_dim 3 --gpu_ids 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name grad_15 --use_rnaseq 1 --act_type LSM --label_dim 3 --gpu_ids 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 0 --roi_dir all_st --use_rnaseq 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 0 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name surv_15_rnaseq --gpu_ids 2
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 0 --roi_dir all_st --use_rnaseq 1
python make_splits.py --ignore_missing_moltype 1 --ignore_missing_histype 1 --use_vgg_features 1 --roi_dir all_st_patches_512 --exp_name grad_15 --act_type LSM --label_dim 3 --gpu_ids 3
"""
def parse_args():
    """Parse command-line options for split creation.

    Returns:
        argparse.Namespace with GPU ids expanded by ``parse_gpuids``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot', type=str, default='./data/TCGA_GBMLGG/', help="datasets")
    parser.add_argument('--roi_dir', type=str, default='all_st')
    parser.add_argument('--graph_feat_type', type=str, default='cpc', help="graph features to use")
    parser.add_argument('--ignore_missing_moltype', type=int, default=0, help="Ignore data points with missing molecular subtype")
    # BUG FIX: corrected help-text typo ("missign" -> "missing").
    parser.add_argument('--ignore_missing_histype', type=int, default=0, help="Ignore data points with missing histology subtype")
    parser.add_argument('--make_all_train', type=int, default=0)
    parser.add_argument('--use_vgg_features', type=int, default=0)
    parser.add_argument('--use_rnaseq', type=int, default=0)
    parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints/TCGA_GBMLGG/', help='models are saved here')
    parser.add_argument('--exp_name', type=str, default='surv_15_rnaseq', help='name of the project. It decides where to store samples and models')
    parser.add_argument('--gpu_ids', type=str, default='0,1,2,3', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
    parser.add_argument('--mode', type=str, default='path', help='mode')
    parser.add_argument('--model_name', type=str, default='path', help='mode')
    parser.add_argument('--task', type=str, default='surv', help='surv | grad')
    parser.add_argument('--act_type', type=str, default='Sigmoid', help='activation function')
    parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
    parser.add_argument('--label_dim', type=int, default=1, help='size of output')
    parser.add_argument('--batch_size', type=int, default=32, help="Number of batches to train/test for. Default: 256")
    parser.add_argument('--path_dim', type=int, default=32)
    parser.add_argument('--init_type', type=str, default='none', help='network initialization [normal | xavier | kaiming | orthogonal | max]. Max seems to work well')
    parser.add_argument('--dropout_rate', default=0.25, type=float, help='0 - 0.25. Increasing dropout_rate helps overfitting. Some people have gone as high as 0.5. You can try adding more regularization')

    # parse_known_args: tolerate extra flags passed through by callers.
    opt = parser.parse_known_args()[0]
    opt = parse_gpuids(opt)
    return opt
opt = parse_args()
# Run on the first requested GPU if any were specified; otherwise on CPU.
device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')
# metadata: names of the non-feature (clinical/label) columns;
# all_dataset: merged clinical + molecular table indexed by TCGA ID.
metadata, all_dataset = getCleanAllDataset(opt.dataroot, opt.ignore_missing_moltype, opt.ignore_missing_histype, opt.use_rnaseq)
### Creates a mapping from TCGA ID -> Image ROI
# The first 12 characters of each ROI filename form the TCGA patient barcode.
img_fnames = os.listdir(os.path.join(opt.dataroot, opt.roi_dir))
pat2img = {}
for pat, img_fname in zip([img_fname[:12] for img_fname in img_fnames], img_fnames):
    if pat not in pat2img.keys(): pat2img[pat] = []
    pat2img[pat].append(img_fname)
### Dictionary file containing split information
data_dict = {}
data_dict['data_pd'] = all_dataset
#data_dict['pat2img'] = pat2img
#data_dict['img_fnames'] = img_fnames
cv_splits = {}
### Extracting K-Fold Splits
# pnas_splits.csv: one row per patient; columns '1'..'15' label each patient
# Train/Test for the 15 cross-validation folds.
pnas_splits = pd.read_csv(opt.dataroot+'pnas_splits.csv')
pnas_splits.columns = ['TCGA ID']+[str(k) for k in range(1, 16)]
pnas_splits.index = pnas_splits['TCGA ID']
pnas_splits = pnas_splits.drop(['TCGA ID'], axis=1)
### get path_feats
def get_vgg_features(model, device, img_path):
    """Return CNN features for one ROI image, or the raw path when no model is given.

    When ``model`` is None the caller wants lazy loading, so the image path is
    passed through unchanged. Otherwise the image is normalized to [-1, 1],
    batched, and run through the pretrained network; only the feature vector
    (not the hazard prediction) is returned, as a detached numpy array.
    """
    if model is None:
        # No pretrained extractor available: defer to the raw image path.
        return img_path
    preprocess = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    image = Image.open(img_path).convert('RGB')
    batch = preprocess(image).unsqueeze(0)  # add a leading batch dimension
    features, hazard = model(x_path=batch.to(device))
    return features.cpu().detach().numpy()
### method for constructing aligned
def getAlignedMultimodalData(opt, model, device, all_dataset, pat_split, pat2img):
    """Collect aligned pathology / graph / omic samples for one patient split.

    For every ROI image of every patient in ``pat_split`` (skipping patients
    missing from ``all_dataset``), appends one aligned record of:
    patient name, path feature (or image path), graph .pt file path, omic
    feature vector, censor status, survival time, and grade.

    Returns:
        tuple of 7 parallel lists: (x_patname, x_path, x_grph, x_omic, e, t, g)

    Relies on the module-level ``metadata`` list of non-feature columns.
    """
    x_patname, x_path, x_grph, x_omic, e, t, g = [], [], [], [], [], [], []
    grph_dir = os.path.join(opt.dataroot, '%s_%s' % (opt.roi_dir, opt.graph_feat_type))
    # Hoisted: one directory scan with O(1) membership instead of an
    # os.listdir() call per image.
    grph_listing = set(os.listdir(grph_dir))
    for pat_name in pat_split:
        if pat_name not in all_dataset.index: continue
        # Filter the patient's row once instead of re-filtering per column.
        row = all_dataset[all_dataset['TCGA ID'] == pat_name]
        assert row.shape[0] == 1
        for img_fname in pat2img[pat_name]:
            # BUGFIX: the original used img_fname.rstrip('.png'), but rstrip
            # strips a *character set* (any trailing '.', 'p', 'n', 'g'), which
            # corrupts stems ending in those characters. Remove the suffix
            # explicitly instead.
            stem = img_fname[:-len('.png')] if img_fname.endswith('.png') else img_fname
            grph_fname = stem + '.pt'
            assert grph_fname in grph_listing
            x_patname.append(pat_name)
            x_path.append(get_vgg_features(model, device, os.path.join(opt.dataroot, opt.roi_dir, img_fname)))
            x_grph.append(os.path.join(grph_dir, grph_fname))
            x_omic.append(np.array(row.drop(metadata, axis=1)))
            e.append(int(row['censored']))
            t.append(int(row['Survival months']))
            g.append(int(row['Grade']))
    return x_patname, x_path, x_grph, x_omic, e, t, g
print(all_dataset.shape)
# Build one train/test fold per PNAS split column. Each fold stores aligned
# pathology / graph / omic data plus survival labels, optionally replacing raw
# ROI images with embeddings from a split-specific pretrained CNN.
for k in pnas_splits.columns:
    print('Creating Split %s' % k)
    # make_all_train == 1 folds every patient into the training set.
    pat_train = pnas_splits.index[pnas_splits[k] == 'Train'] if opt.make_all_train == 0 else pnas_splits.index
    pat_test = pnas_splits.index[pnas_splits[k] == 'Test']
    cv_splits[int(k)] = {}
    model = None
    if opt.use_vgg_features:
        # Load the pretrained CNN checkpoint for this fold so ROI images can be
        # replaced by embedding vectors via get_vgg_features.
        load_path = os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%s.pt' % (opt.model_name, k))
        model_ckpt = torch.load(load_path, map_location=device)
        model_state_dict = model_ckpt['model_state_dict']
        # Stale serialization metadata can break load_state_dict.
        if hasattr(model_state_dict, '_metadata'): del model_state_dict._metadata
        model = define_net(opt, None)
        if isinstance(model, torch.nn.DataParallel): model = model.module
        print('Loading the model from %s' % load_path)
        model.load_state_dict(model_state_dict)
        model.eval()
    train_x_patname, train_x_path, train_x_grph, train_x_omic, train_e, train_t, train_g = getAlignedMultimodalData(opt, model, device, all_dataset, pat_train, pat2img)
    test_x_patname, test_x_path, test_x_grph, test_x_omic, test_e, test_t, test_g = getAlignedMultimodalData(opt, model, device, all_dataset, pat_test, pat2img)
    train_x_omic, train_e, train_t = np.array(train_x_omic).squeeze(axis=1), np.array(train_e, dtype=np.float64), np.array(train_t, dtype=np.float64)
    test_x_omic, test_e, test_t = np.array(test_x_omic).squeeze(axis=1), np.array(test_e, dtype=np.float64), np.array(test_t, dtype=np.float64)
    # Standardize omic features using statistics fitted on the training fold
    # only (no test-set leakage).
    scaler = preprocessing.StandardScaler().fit(train_x_omic)
    train_x_omic = scaler.transform(train_x_omic)
    test_x_omic = scaler.transform(test_x_omic)
    train_data = {'x_patname': train_x_patname,
                  'x_path':np.array(train_x_path),
                  'x_grph':train_x_grph,
                  'x_omic':train_x_omic,
                  'e':np.array(train_e, dtype=np.float64),
                  't':np.array(train_t, dtype=np.float64),
                  'g':np.array(train_g, dtype=np.float64)}
    test_data = {'x_patname': test_x_patname,
                 'x_path':np.array(test_x_path),
                 'x_grph':test_x_grph,
                 'x_omic':test_x_omic,
                 'e':np.array(test_e, dtype=np.float64),
                 't':np.array(test_t, dtype=np.float64),
                 'g':np.array(test_g, dtype=np.float64)}
    dataset = {'train':train_data, 'test':test_data}
    cv_splits[int(k)] = dataset
    if opt.make_all_train: break
data_dict['cv_splits'] = cv_splits
# Persist all folds to a single pickle keyed by the preprocessing options.
pickle.dump(data_dict, open('%s/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (opt.dataroot, opt.roi_dir, opt.ignore_missing_moltype, opt.ignore_missing_histype, opt.use_vgg_features, '_rnaseq' if opt.use_rnaseq else ''), 'wb'))
| 12,069
| 59.049751
| 221
|
py
|
PathomicFusion
|
PathomicFusion-master/train_cv.py
|
import os
import logging
import numpy as np
import random
import pickle
import torch
# Env
from data_loaders import *
from options import parse_args
from train_test import train, test
### 1. Initializes parser and device
opt = parse_args()
device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')
print("Using device:", device)
# Create the full checkpoint directory in one race-free call (replaces three
# exists()+makedirs() pairs, which could TOCTOU-race across processes).
os.makedirs(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name), exist_ok=True)
### 2. Initializes Data
# Split-file naming must mirror make_splits.py exactly so the right pickle is found.
ignore_missing_histype = 1 if 'grad' in opt.task else 0
ignore_missing_moltype = 1 if 'omic' in opt.mode else 0
use_patch, roi_dir = ('_patch_', 'all_st_patches_512') if opt.use_vgg_features else ('_', 'all_st')
use_rnaseq = '_rnaseq' if opt.use_rnaseq else ''
data_cv_path = '%s/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (opt.dataroot, roi_dir, ignore_missing_moltype, ignore_missing_histype, opt.use_vgg_features, use_rnaseq)
print("Loading %s" % data_cv_path)
data_cv = pickle.load(open(data_cv_path, 'rb'))
data_cv_splits = data_cv['cv_splits']
results = []
### 3. Sets-Up Main Loop
for k, data in data_cv_splits.items():
    print("*******************************************")
    print("************** SPLIT (%d/%d) **************" % (k, len(data_cv_splits.items())))
    print("*******************************************")
    # Skip folds that have already been trained and evaluated.
    if os.path.exists(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d_patch_pred_train.pkl' % (opt.model_name, k))):
        print("Train-Test Split already made.")
        continue
    ### 3.1 Trains Model
    model, optimizer, metric_logger = train(opt, data, device, k)
    ### 3.2 Evalutes Train + Test Error, and Saves Model
    loss_train, cindex_train, pvalue_train, surv_acc_train, grad_acc_train, pred_train = test(opt, model, data, 'train', device)
    loss_test, cindex_test, pvalue_test, surv_acc_test, grad_acc_test, pred_test = test(opt, model, data, 'test', device)
    if opt.task == 'surv':
        print("[Final] Apply model to training set: C-Index: %.10f, P-Value: %.10e" % (cindex_train, pvalue_train))
        logging.info("[Final] Apply model to training set: C-Index: %.10f, P-Value: %.10e" % (cindex_train, pvalue_train))
        print("[Final] Apply model to testing set: C-Index: %.10f, P-Value: %.10e" % (cindex_test, pvalue_test))
        # BUGFIX: message previously read "cC-Index" (typo).
        logging.info("[Final] Apply model to testing set: C-Index: %.10f, P-Value: %.10e" % (cindex_test, pvalue_test))
        results.append(cindex_test)
    elif opt.task == 'grad':
        print("[Final] Apply model to training set: Loss: %.10f, Acc: %.4f" % (loss_train, grad_acc_train))
        logging.info("[Final] Apply model to training set: Loss: %.10f, Acc: %.4f" % (loss_train, grad_acc_train))
        print("[Final] Apply model to testing set: Loss: %.10f, Acc: %.4f" % (loss_test, grad_acc_test))
        logging.info("[Final] Apply model to testing set: Loss: %.10f, Acc: %.4f" % (loss_test, grad_acc_test))
        results.append(grad_acc_test)
    ### 3.3 Saves Model
    # state_dict must come from .module when wrapped in DataParallel.
    if len(opt.gpu_ids) > 0 and torch.cuda.is_available():
        model_state_dict = model.module.cpu().state_dict()
    else:
        model_state_dict = model.cpu().state_dict()
    torch.save({
        'split':k,
        'opt': opt,
        'epoch': opt.niter+opt.niter_decay,
        'data': data,
        'model_state_dict': model_state_dict,
        'optimizer_state_dict': optimizer.state_dict(),
        'metrics': metric_logger},
        os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d.pt' % (opt.model_name, k)))
    print()
    pickle.dump(pred_train, open(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d%spred_train.pkl' % (opt.model_name, k, use_patch)), 'wb'))
    pickle.dump(pred_test, open(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d%spred_test.pkl' % (opt.model_name, k, use_patch)), 'wb'))
print('Split Results:', results)
print("Average:", np.array(results).mean())
pickle.dump(results, open(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_results.pkl' % opt.model_name), 'wb'))
| 4,216
| 46.920455
| 164
|
py
|
PathomicFusion
|
PathomicFusion-master/options.py
|
import argparse
import os
import torch
### Parser
def parse_args():
    """Parse command-line options for training/evaluation.

    Uses parse_known_args so unrecognized flags are silently ignored, prints
    the resolved options (and saves them to <checkpoints>/<exp>/<model>/train_opt.txt),
    and converts the gpu_ids string into a list of ints.

    Returns:
        argparse.Namespace with all parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataroot', default='./data/TCGA_GBMLGG', help="datasets")
    parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints/TCGA_GBMLGG', help='models are saved here')
    parser.add_argument('--exp_name', type=str, default='exp_name', help='name of the project. It decides where to store samples and models')
    parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2. use -1 for CPU')
    parser.add_argument('--mode', type=str, default='omic', help='mode')
    # BUGFIX: help text previously said 'mode' (copy-paste from --mode).
    parser.add_argument('--model_name', type=str, default='omic', help='model name (used in checkpoint and result paths)')
    parser.add_argument('--use_vgg_features', type=int, default=0, help='Use pretrained embeddings')
    parser.add_argument('--use_rnaseq', type=int, default=0, help='Use RNAseq data.')
    parser.add_argument('--task', type=str, default='surv', help='surv | grad')
    parser.add_argument('--useRNA', type=int, default=0)  # Doesn't work at the moment...:(
    parser.add_argument('--useSN', type=int, default=1)
    parser.add_argument('--act_type', type=str, default='Sigmoid', help='activation function')
    parser.add_argument('--input_size_omic', type=int, default=80, help="input_size for omic vector")
    parser.add_argument('--input_size_path', type=int, default=512, help="input_size for path images")
    parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
    # BUGFIX: help text was placeholder junk ('adsfasdf').
    parser.add_argument('--save_at', type=int, default=20, help='epoch at which to save the model')
    parser.add_argument('--label_dim', type=int, default=1, help='size of output')
    parser.add_argument('--measure', default=1, type=int, help='disables measure while training (make program faster)')
    parser.add_argument('--verbose', default=1, type=int)
    parser.add_argument('--print_every', default=0, type=int)
    parser.add_argument('--optimizer_type', type=str, default='adam')
    parser.add_argument('--beta1', type=float, default=0.9, help='0.9, 0.5 | 0.25 | 0')
    parser.add_argument('--beta2', type=float, default=0.999, help='0.9, 0.5 | 0.25 | 0')
    parser.add_argument('--lr_policy', default='linear', type=str, help='5e-4 for Adam | 1e-3 for AdaBound')
    parser.add_argument('--finetune', default=1, type=int, help='5e-4 for Adam | 1e-3 for AdaBound')
    parser.add_argument('--final_lr', default=0.1, type=float, help='Used for AdaBound')
    parser.add_argument('--reg_type', default='omic', type=str, help="regularization type")
    parser.add_argument('--niter', type=int, default=0, help='# of iter at starting learning rate')
    parser.add_argument('--niter_decay', type=int, default=25, help='# of iter to linearly decay learning rate to zero')
    parser.add_argument('--epoch_count', type=int, default=1, help='start of epoch')
    parser.add_argument('--batch_size', type=int, default=32, help="Number of batches to train/test for. Default: 256")
    # Loss weights: Cox partial likelihood, L1 regularization, NLL (grade task).
    parser.add_argument('--lambda_cox', type=float, default=1)
    parser.add_argument('--lambda_reg', type=float, default=3e-4)
    parser.add_argument('--lambda_nll', type=float, default=1)
    parser.add_argument('--fusion_type', type=str, default="pofusion", help='concat | pofusion')
    parser.add_argument('--skip', type=int, default=0)
    parser.add_argument('--use_bilinear', type=int, default=1)
    parser.add_argument('--path_gate', type=int, default=1)
    parser.add_argument('--grph_gate', type=int, default=1)
    parser.add_argument('--omic_gate', type=int, default=1)
    parser.add_argument('--path_dim', type=int, default=32)
    parser.add_argument('--grph_dim', type=int, default=32)
    parser.add_argument('--omic_dim', type=int, default=32)
    parser.add_argument('--path_scale', type=int, default=1)
    parser.add_argument('--grph_scale', type=int, default=1)
    parser.add_argument('--omic_scale', type=int, default=1)
    parser.add_argument('--mmhid', type=int, default=64)
    parser.add_argument('--init_type', type=str, default='none', help='network initialization [normal | xavier | kaiming | orthogonal | max]. Max seems to work well')
    parser.add_argument('--dropout_rate', default=0.25, type=float, help='0 - 0.25. Increasing dropout_rate helps overfitting. Some people have gone as high as 0.5. You can try adding more regularization')
    parser.add_argument('--use_edges', default=1, type=float, help='Using edge_attr')
    parser.add_argument('--pooling_ratio', default=0.2, type=float, help='pooling ratio for SAGPOOl')
    parser.add_argument('--lr', default=2e-3, type=float, help='5e-4 for Adam | 1e-3 for AdaBound')
    parser.add_argument('--weight_decay', default=4e-4, type=float, help='Used for Adam. L2 Regularization on weights. I normally turn this off if I am using L1. You should try')
    parser.add_argument('--GNN', default='GCN', type=str, help='GCN | GAT | SAG. graph conv mode for pooling')
    parser.add_argument('--patience', default=0.005, type=float)

    opt = parser.parse_known_args()[0]
    print_options(parser, opt)
    opt = parse_gpuids(opt)
    return opt
def print_options(parser, opt):
    """Print and save options

    It will print both current options and default values(if different).
    It will save options into a text file / [checkpoints_dir] / opt.txt
    """
    rendered = ['----------------- Options ---------------']
    for key, value in sorted(vars(opt).items()):
        default = parser.get_default(key)
        # Annotate only options that differ from their parser default.
        suffix = '' if value == default else '\t[default: %s]' % str(default)
        rendered.append('{:>25}: {:<30}{}'.format(str(key), str(value), suffix))
    rendered.append('----------------- End -------------------')
    message = '\n'.join(rendered)
    print(message)

    # save to the disk
    expr_dir = os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name)
    mkdirs(expr_dir)
    file_name = os.path.join(expr_dir, '{}_opt.txt'.format('train'))
    with open(file_name, 'wt') as opt_file:
        opt_file.write(message)
        opt_file.write('\n')
def parse_gpuids(opt):
    """Convert ``opt.gpu_ids`` from a comma-separated string to a list of ints.

    Negative ids are discarded (so '-1' means CPU-only). When at least one GPU
    remains, the first one becomes the current CUDA device.
    """
    tokens = opt.gpu_ids.split(',')
    parsed = [int(tok) for tok in tokens]
    opt.gpu_ids = [gpu_id for gpu_id in parsed if gpu_id >= 0]
    if len(opt.gpu_ids) > 0:
        torch.cuda.set_device(opt.gpu_ids[0])
    return opt
def mkdirs(paths):
    """create empty directories if they don't exist

    Parameters:
        paths (str list) -- a list of directory paths
    """
    is_path_list = isinstance(paths, list) and not isinstance(paths, str)
    if not is_path_list:
        mkdir(paths)
        return
    for directory in paths:
        mkdir(directory)
def mkdir(path):
    """create a single empty directory if it didn't exist

    Parameters:
        path (str) -- a single directory path
    """
    # exist_ok avoids the TOCTOU race of the previous exists()-then-makedirs()
    # pattern when multiple processes create the same checkpoint directory.
    os.makedirs(path, exist_ok=True)
| 6,901
| 49.014493
| 205
|
py
|
PathomicFusion
|
PathomicFusion-master/test_cv.py
|
import os
import logging
import numpy as np
import random
import pickle
import torch
# Env
from networks import define_net
from data_loaders import *
from options import parse_args
from train_test import train, test
### 1. Initializes parser and device
opt = parse_args()
device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')
print("Using device:", device)
# Create the full checkpoint directory in one race-free call (replaces three
# exists()+makedirs() pairs, which could TOCTOU-race across processes).
os.makedirs(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name), exist_ok=True)
### 2. Initializes Data
# Split-file naming must mirror make_splits.py exactly so the right pickle is found.
ignore_missing_histype = 1 if 'grad' in opt.task else 0
ignore_missing_moltype = 1 if 'omic' in opt.mode else 0
use_patch, roi_dir = ('_patch_', 'all_st_patches_512') if opt.use_vgg_features else ('_', 'all_st')
use_rnaseq = '_rnaseq' if opt.use_rnaseq else ''
data_cv_path = '%s/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (opt.dataroot, roi_dir, ignore_missing_moltype, ignore_missing_histype, opt.use_vgg_features, use_rnaseq)
print("Loading %s" % data_cv_path)
data_cv = pickle.load(open(data_cv_path, 'rb'))
data_cv_splits = data_cv['cv_splits']
results = []
### 3. Sets-Up Main Loop
for k, data in data_cv_splits.items():
    print("*******************************************")
    print("************** SPLIT (%d/%d) **************" % (k, len(data_cv_splits.items())))
    print("*******************************************")
    load_path = os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d.pt' % (opt.model_name, k))
    model_ckpt = torch.load(load_path, map_location=device)
    #### Loading Env
    model_state_dict = model_ckpt['model_state_dict']
    # Stale serialization metadata can break load_state_dict.
    if hasattr(model_state_dict, '_metadata'): del model_state_dict._metadata
    model = define_net(opt, None)
    if isinstance(model, torch.nn.DataParallel): model = model.module
    print('Loading the model from %s' % load_path)
    model.load_state_dict(model_state_dict)
    ### 3.2 Evalutes Train + Test Error, and Saves Model
    loss_test, cindex_test, pvalue_test, surv_acc_test, grad_acc_test, pred_test = test(opt, model, data, 'test', device)
    if opt.task == 'surv':
        print("[Final] Apply model to testing set: C-Index: %.10f, P-Value: %.10e" % (cindex_test, pvalue_test))
        # BUGFIX: message previously read "cC-Index" (typo).
        logging.info("[Final] Apply model to testing set: C-Index: %.10f, P-Value: %.10e" % (cindex_test, pvalue_test))
        results.append(cindex_test)
    elif opt.task == 'grad':
        print("[Final] Apply model to testing set: Loss: %.10f, Acc: %.4f" % (loss_test, grad_acc_test))
        logging.info("[Final] Apply model to testing set: Loss: %.10f, Acc: %.4f" % (loss_test, grad_acc_test))
        results.append(grad_acc_test)
    ### 3.3 Saves Model
    pickle.dump(pred_test, open(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d%spred_test.pkl' % (opt.model_name, k, use_patch)), 'wb'))
print('Split Results:', results)
print("Average:", np.array(results).mean())
pickle.dump(results, open(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_results.pkl' % opt.model_name), 'wb'))
| 3,233
| 43.30137
| 164
|
py
|
PathomicFusion
|
PathomicFusion-master/train_test.py
|
import random
from tqdm import tqdm
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torch.utils.data import RandomSampler
from data_loaders import PathgraphomicDatasetLoader, PathgraphomicFastDatasetLoader
from networks import define_net, define_reg, define_optimizer, define_scheduler
from utils import unfreeze_unimodal, CoxLoss, CIndex_lifeline, cox_log_rank, accuracy_cox, mixed_collate, count_parameters
#from GPUtil import showUtilization as gpu_usage
import pdb
import pickle
import os
def train(opt, data, device, k):
    """Train a model on one cross-validation split.

    Args:
        opt: parsed options namespace (task, mode, loss weights, schedule, ...).
        data: one fold's dict with 'train'/'test' sub-dicts of aligned modalities.
        device: torch device to run on.
        k: split index (used to name per-epoch prediction dumps).

    Returns:
        (model, optimizer, metric_logger) where metric_logger holds per-epoch
        train/test loss, C-index, p-value, survival accuracy and grade accuracy.
    """
    # Fix all RNG seeds so each split trains reproducibly.
    cudnn.deterministic = True
    torch.cuda.manual_seed_all(2019)
    torch.manual_seed(2019)
    random.seed(2019)

    model = define_net(opt, k)
    optimizer = define_optimizer(opt, model)
    scheduler = define_scheduler(opt, optimizer)
    print(model)
    print("Number of Trainable Parameters: %d" % count_parameters(model))
    print("Activation Type:", opt.act_type)
    print("Optimizer Type:", opt.optimizer_type)
    print("Regularization Type:", opt.reg_type)

    # Fast loader consumes precomputed VGG embeddings; slow loader reads raw data.
    use_patch, roi_dir = ('_patch_', 'all_st_patches_512') if opt.use_vgg_features else ('_', 'all_st')
    custom_data_loader = PathgraphomicFastDatasetLoader(opt, data, split='train', mode=opt.mode) if opt.use_vgg_features else PathgraphomicDatasetLoader(opt, data, split='train', mode=opt.mode)
    train_loader = torch.utils.data.DataLoader(dataset=custom_data_loader, batch_size=opt.batch_size, shuffle=True, collate_fn=mixed_collate)
    metric_logger = {'train':{'loss':[], 'pvalue':[], 'cindex':[], 'surv_acc':[], 'grad_acc':[]},
                      'test':{'loss':[], 'pvalue':[], 'cindex':[], 'surv_acc':[], 'grad_acc':[]}}

    for epoch in tqdm(range(opt.epoch_count, opt.niter+opt.niter_decay+1)):
        # Optionally unfreeze unimodal branches on a per-epoch schedule.
        if opt.finetune == 1:
            unfreeze_unimodal(opt, model, epoch)
        model.train()
        risk_pred_all, censor_all, survtime_all = np.array([]), np.array([]), np.array([])    # Used for calculating the C-Index
        loss_epoch, grad_acc_epoch = 0, 0

        for batch_idx, (x_path, x_grph, x_omic, censor, survtime, grade) in enumerate(train_loader):
            # Move only the labels the current task needs onto the device.
            censor = censor.to(device) if "surv" in opt.task else censor
            grade = grade.to(device) if "grad" in opt.task else grade
            _, pred = model(x_path=x_path.to(device), x_grph=x_grph.to(device), x_omic=x_omic.to(device))

            # Combined objective: Cox partial likelihood (surv) or NLL (grad),
            # plus L1 regularization, weighted by their lambdas.
            loss_cox = CoxLoss(survtime, censor, pred, device) if opt.task == "surv" else 0
            loss_reg = define_reg(opt, model)
            loss_nll = F.nll_loss(pred, grade) if opt.task == "grad" else 0
            loss = opt.lambda_cox*loss_cox + opt.lambda_nll*loss_nll + opt.lambda_reg*loss_reg
            loss_epoch += loss.data.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if opt.task == "surv":
                risk_pred_all = np.concatenate((risk_pred_all, pred.detach().cpu().numpy().reshape(-1)))   # Logging Information
                censor_all = np.concatenate((censor_all, censor.detach().cpu().numpy().reshape(-1)))   # Logging Information
                survtime_all = np.concatenate((survtime_all, survtime.detach().cpu().numpy().reshape(-1)))   # Logging Information
            elif opt.task == "grad":
                pred = pred.argmax(dim=1, keepdim=True)
                grad_acc_epoch += pred.eq(grade.view_as(pred)).sum().item()

            if opt.verbose > 0 and opt.print_every > 0 and (batch_idx % opt.print_every == 0 or batch_idx+1 == len(train_loader)):
                print("Epoch {:02d}/{:02d} Batch {:04d}/{:d}, Loss {:9.4f}".format(
                    epoch+1, opt.niter+opt.niter_decay, batch_idx+1, len(train_loader), loss.item()))

        scheduler.step()
        # lr = optimizer.param_groups[0]['lr']
        #print('learning rate = %.7f' % lr)

        # Compute/log metrics every epoch when opt.measure, else only at the end.
        if opt.measure or epoch == (opt.niter+opt.niter_decay - 1):
            loss_epoch /= len(train_loader)

            cindex_epoch = CIndex_lifeline(risk_pred_all, censor_all, survtime_all) if opt.task == 'surv' else None
            pvalue_epoch = cox_log_rank(risk_pred_all, censor_all, survtime_all) if opt.task == 'surv' else None
            surv_acc_epoch = accuracy_cox(risk_pred_all, censor_all) if opt.task == 'surv' else None
            grad_acc_epoch = grad_acc_epoch / len(train_loader.dataset) if opt.task == 'grad' else None
            loss_test, cindex_test, pvalue_test, surv_acc_test, grad_acc_test, pred_test = test(opt, model, data, 'test', device)

            metric_logger['train']['loss'].append(loss_epoch)
            metric_logger['train']['cindex'].append(cindex_epoch)
            metric_logger['train']['pvalue'].append(pvalue_epoch)
            metric_logger['train']['surv_acc'].append(surv_acc_epoch)
            metric_logger['train']['grad_acc'].append(grad_acc_epoch)

            metric_logger['test']['loss'].append(loss_test)
            metric_logger['test']['cindex'].append(cindex_test)
            metric_logger['test']['pvalue'].append(pvalue_test)
            metric_logger['test']['surv_acc'].append(surv_acc_test)
            metric_logger['test']['grad_acc'].append(grad_acc_test)

            # Dump per-epoch test predictions for later analysis/ensembling.
            pickle.dump(pred_test, open(os.path.join(opt.checkpoints_dir, opt.exp_name, opt.model_name, '%s_%d%s%d_pred_test.pkl' % (opt.model_name, k, use_patch, epoch)), 'wb'))

            if opt.verbose > 0:
                if opt.task == 'surv':
                    print('[{:s}]\t\tLoss: {:.4f}, {:s}: {:.4f}'.format('Train', loss_epoch, 'C-Index', cindex_epoch))
                    print('[{:s}]\t\tLoss: {:.4f}, {:s}: {:.4f}\n'.format('Test', loss_test, 'C-Index', cindex_test))
                elif opt.task == 'grad':
                    print('[{:s}]\t\tLoss: {:.4f}, {:s}: {:.4f}'.format('Train', loss_epoch, 'Accuracy', grad_acc_epoch))
                    print('[{:s}]\t\tLoss: {:.4f}, {:s}: {:.4f}\n'.format('Test', loss_test, 'Accuracy', grad_acc_test))

            # Early stop on the grade task once training loss drops below patience.
            if opt.task == 'grad' and loss_epoch < opt.patience:
                print("Early stopping at Epoch %d" % epoch)
                break

    return model, optimizer, metric_logger
def test(opt, model, data, split, device):
    """Evaluate a trained model on one split ('train' or 'test') of a fold.

    Returns:
        (loss, cindex, pvalue, surv_acc, grad_acc, pred) where the
        survival metrics are None for the grade task and vice versa, and
        pred = [risk_pred_all, survtime_all, censor_all, probs_all, gt_all].
    """
    model.eval()
    custom_data_loader = PathgraphomicFastDatasetLoader(opt, data, split, mode=opt.mode) if opt.use_vgg_features else PathgraphomicDatasetLoader(opt, data, split=split, mode=opt.mode)
    test_loader = torch.utils.data.DataLoader(dataset=custom_data_loader, batch_size=opt.batch_size, shuffle=False, collate_fn=mixed_collate)

    risk_pred_all, censor_all, survtime_all = np.array([]), np.array([]), np.array([])
    probs_all, gt_all = None, np.array([])
    loss_test, grad_acc_test = 0, 0

    # NOTE(review): no torch.no_grad() here — gradients are built and discarded
    # during evaluation; consider wrapping the loop if memory becomes an issue.
    for batch_idx, (x_path, x_grph, x_omic, censor, survtime, grade) in enumerate(test_loader):
        censor = censor.to(device) if "surv" in opt.task else censor
        grade = grade.to(device) if "grad" in opt.task else grade
        _, pred = model(x_path=x_path.to(device), x_grph=x_grph.to(device), x_omic=x_omic.to(device))

        # Same combined objective as in train(): Cox / NLL plus L1 regularization.
        loss_cox = CoxLoss(survtime, censor, pred, device) if opt.task == "surv" else 0
        loss_reg = define_reg(opt, model)
        loss_nll = F.nll_loss(pred, grade) if opt.task == "grad" else 0
        loss = opt.lambda_cox*loss_cox + opt.lambda_nll*loss_nll + opt.lambda_reg*loss_reg
        loss_test += loss.data.item()

        gt_all = np.concatenate((gt_all, grade.detach().cpu().numpy().reshape(-1)))   # Logging Information

        if opt.task == "surv":
            risk_pred_all = np.concatenate((risk_pred_all, pred.detach().cpu().numpy().reshape(-1)))   # Logging Information
            censor_all = np.concatenate((censor_all, censor.detach().cpu().numpy().reshape(-1)))   # Logging Information
            survtime_all = np.concatenate((survtime_all, survtime.detach().cpu().numpy().reshape(-1)))   # Logging Information
        elif opt.task == "grad":
            grade_pred = pred.argmax(dim=1, keepdim=True)
            grad_acc_test += grade_pred.eq(grade.view_as(grade_pred)).sum().item()
            probs_np = pred.detach().cpu().numpy()
            probs_all = probs_np if probs_all is None else np.concatenate((probs_all, probs_np), axis=0)   # Logging Information

    ###################################################
    # ==== Measuring Test Loss, C-Index, P-Value ==== #
    ###################################################
    loss_test /= len(test_loader)
    cindex_test = CIndex_lifeline(risk_pred_all, censor_all, survtime_all) if opt.task == 'surv' else None
    pvalue_test = cox_log_rank(risk_pred_all, censor_all, survtime_all) if opt.task == 'surv' else None
    surv_acc_test = accuracy_cox(risk_pred_all, censor_all) if opt.task == 'surv' else None
    grad_acc_test = grad_acc_test / len(test_loader.dataset) if opt.task == 'grad' else None
    pred_test = [risk_pred_all, survtime_all, censor_all, probs_all, gt_all]

    return loss_test, cindex_test, pvalue_test, surv_acc_test, grad_acc_test, pred_test
| 9,077
| 54.018182
| 193
|
py
|
PathomicFusion
|
PathomicFusion-master/core/utils_models.py
|
# Base / Native
import math
import os
import pickle
import re
import warnings
warnings.filterwarnings('ignore')
# Numerical / Array
import lifelines
from lifelines.utils import concordance_index
from lifelines import CoxPHFitter
from lifelines.datasets import load_regression_dataset
from lifelines.utils import k_fold_cross_validation
from lifelines.statistics import logrank_test
from imblearn.over_sampling import RandomOverSampler
import numpy as np
# Torch
import torch
import torch.nn as nn
from torch.nn import init, Parameter
from torch.utils.data._utils.collate import *
from torch.utils.data.dataloader import default_collate
import torch_geometric
from torch_geometric.data import Batch
################
# Regularization
################
def regularize_weights(model, reg_type=None):
    """Return the L1 norm of every parameter in *model*.

    Returns None when the model has no parameters (matching the original
    accumulator behaviour). ``reg_type`` is unused, kept for API parity.
    """
    total = None
    for param in model.parameters():
        term = torch.abs(param).sum()  # equivalent to param.norm(1)
        total = term if total is None else total + term
    return total
def regularize_path_weights(model, reg_type=None):
    """Return the L1 norm over the ``classifier`` and ``linear`` submodules.

    Returns None when both submodules are empty (matching the original
    accumulator behaviour). ``reg_type`` is unused, kept for API parity.
    """
    total = None
    for group in (model.classifier.parameters(), model.linear.parameters()):
        for param in group:
            term = torch.abs(param).sum()  # equivalent to param.norm(1)
            total = term if total is None else total + term
    return total
def regularize_MM_weights(model, reg_type=None):
    """Return the L1 norm over all multimodal-fusion submodules of a wrapped model.

    ``model`` is expected to expose its network at ``model.module`` (e.g. a
    DataParallel wrapper). Each named submodule contributes the sum of absolute
    values of its parameters; missing submodules are skipped. Returns None if
    no listed submodule exists (matching the original accumulator behaviour).
    ``reg_type`` is unused, kept for API parity.

    BUGFIX: the original called ``model.module.__hasattr__('name')`` — Python
    objects define no ``__hasattr__`` method (the builtin is ``hasattr``), so
    every call raised AttributeError. The 13 copy-pasted accumulation blocks
    are also collapsed into one loop over the submodule names, preserving the
    original accumulation order.
    """
    submodule_names = (
        'omic_net',
        'linear_h_path', 'linear_h_omic', 'linear_h_grph',
        'linear_z_path', 'linear_z_omic', 'linear_z_grph',
        'linear_o_path', 'linear_o_omic', 'linear_o_grph',
        'encoder1', 'encoder2', 'classifier',
    )
    net = model.module
    l1_reg = None
    for name in submodule_names:
        if not hasattr(net, name):
            continue
        for W in getattr(net, name).parameters():
            term = torch.abs(W).sum()  # torch.abs(W).sum() is equivalent to W.norm(1)
            l1_reg = term if l1_reg is None else l1_reg + term
    return l1_reg
def regularize_MM_omic(model, reg_type=None):
    """Return the L1 norm over the wrapped model's ``omic_net`` submodule.

    Returns None when ``model.module`` has no ``omic_net`` (or it has no
    parameters). ``reg_type`` is unused, kept for API parity.

    BUGFIX: the original called ``model.module.__hasattr__('omic_net')`` —
    Python objects define no ``__hasattr__`` method (the builtin is
    ``hasattr``), so the call raised AttributeError at runtime.
    """
    l1_reg = None
    if hasattr(model.module, 'omic_net'):
        for W in model.module.omic_net.parameters():
            term = torch.abs(W).sum()  # torch.abs(W).sum() is equivalent to W.norm(1)
            l1_reg = term if l1_reg is None else l1_reg + term
    return l1_reg
################
# Network Initialization
################
def init_weights(net, init_type='orthogonal', init_gain=0.02):
    """Initialize network weights.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    'normal' was used in the original pix2pix and CycleGAN papers; xavier and
    kaiming might work better for some applications. Feel free to try yourself.
    """
    def _initialize(m):
        # Dispatch on the layer's class name, mirroring the pix2pix convention.
        cls_name = m.__class__.__name__
        is_weighted = hasattr(m, 'weight') and (cls_name.find('Conv') != -1 or cls_name.find('Linear') != -1)
        if is_weighted:
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif cls_name.find('BatchNorm2d') != -1:
            # BatchNorm weight is not a matrix; only a normal distribution applies.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    print('initialize network with %s' % init_type)
    net.apply(_initialize)
def init_max_weights(module):
    """Self-normalizing initialization: every nn.Linear in `module` gets
    N(0, 1/sqrt(fan_in)) weights and a zero bias."""
    for layer in module.modules():
        if type(layer) != nn.Linear:
            continue
        stdv = 1. / math.sqrt(layer.weight.size(1))
        layer.weight.data.normal_(0, stdv)
        layer.bias.data.zero_()
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Register a network on CPU/GPU(s) and initialize its weights.

    Parameters:
        net (network)     -- the network to be initialized
        init_type (str)   -- normal | xavier | kaiming | orthogonal | max | none
        init_gain (float) -- scaling factor for normal, xavier and orthogonal
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns the (possibly DataParallel-wrapped) network.
    """
    if gpu_ids:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)  # multi-GPUs
    if init_type == 'none':
        print("Init Type: Not initializing networks.")
    elif init_type == 'max':
        # Only announces the scheme; self-normalizing init is applied elsewhere.
        print("Init Type: Self-Normalizing Weights")
    else:
        print("Init Type:", init_type)
        init_weights(net, init_type, init_gain=init_gain)
    return net
################
# Freeze / Unfreeze
################
def unfreeze_unimodal(opt, model, epoch):
    """At epoch 5, unfreeze the pretrained unimodal subnetwork(s) that
    opt.mode's fusion model depends on. No-op at every other epoch and for
    modes without frozen subnetworks."""
    targets = {
        'graphomic': ('omic', 'grph'),
        'pathomic': ('omic',),
        'pathgraph': ('grph',),
        'pathgraphomic': ('omic', 'grph'),
        'omicomic': ('omic',),
        'graphgraph': ('grph',),
    }
    if epoch != 5:
        return
    for which in targets.get(opt.mode, ()):
        if which == 'omic':
            dfs_unfreeze(model.module.omic_net)
            print("Unfreezing Omic")
        else:
            dfs_unfreeze(model.module.grph_net)
            print("Unfreezing Graph")
def dfs_freeze(model):
    """Recursively disable gradient tracking for every parameter under `model`."""
    for _, submodule in model.named_children():
        for weight in submodule.parameters():
            weight.requires_grad = False
        dfs_freeze(submodule)
def dfs_unfreeze(model):
    """Recursively re-enable gradient tracking for every parameter under `model`."""
    for _, submodule in model.named_children():
        for weight in submodule.parameters():
            weight.requires_grad = True
        dfs_unfreeze(submodule)
def print_if_frozen(module):
    """Log, for each parameter of each direct child, whether it is trainable."""
    for idx, child in enumerate(module.children()):
        for param in child.parameters():
            template = "Learnable!!! %d:" if param.requires_grad else "Still Frozen %d:"
            print(template % idx, child)
def unfreeze_vgg_features(model, epoch):
    """Unfreeze the trailing layers of model.features per a fixed epoch schedule.

    The schedule currently only maps epoch 30 -> index 45, i.e. feature layers
    with index > 45 become trainable; any other epoch raises KeyError.
    """
    epoch_schedule = {30: 45}
    unfreeze_index = epoch_schedule[epoch]
    for idx, child in enumerate(model.features.children()):
        if idx <= unfreeze_index:
            print("Still Frozen %d:" % idx, child)
            continue
        print("Unfreezing %d:" % idx, child)
        for param in child.parameters():
            param.requires_grad = True
################
# Collate Utils
################
def mixed_collate(batch):
    """Collate a batch whose per-sample fields mix torch_geometric graphs
    with plain tensors.

    Transposes the batch (list of samples -> list of per-field sample lists)
    and collates each field: torch_geometric Data objects are merged with
    Batch.from_data_list, everything else goes through PyTorch's
    default_collate.

    Returns a list with one collated object per field.
    """
    # The original assigned unused locals `elem`/`elem_type`; they are removed.
    transposed = zip(*batch)
    return [Batch.from_data_list(samples, []) if type(samples[0]) is torch_geometric.data.data.Data else default_collate(samples) for samples in transposed]
################
# Survival Utils
################
def CoxLoss(survtime, censor, hazard_pred, device):
# This calculation credit to Travers Ching https://github.com/traversc/cox-nnet
# Cox-nnet: An artificial neural network method for prognosis prediction of high-throughput omics data
current_batch_len = len(survtime)
R_mat = np.zeros([current_batch_len, current_batch_len], dtype=int)
for i in range(current_batch_len):
for j in range(current_batch_len):
R_mat[i,j] = survtime[j] >= survtime[i]
R_mat = torch.FloatTensor(R_mat).to(device)
theta = hazard_pred.reshape(-1)
exp_theta = torch.exp(theta)
loss_cox = -torch.mean((theta - torch.log(torch.sum(exp_theta*R_mat, dim=1))) * censor)
return loss_cox
def accuracy(output, labels):
    """Fraction of rows of `output` whose argmax matches `labels`."""
    predictions = output.max(1)[1].type_as(labels)
    n_correct = predictions.eq(labels).double().sum()
    return n_correct / len(labels)
def accuracy_cox(hazardsdata, labels):
    """Accuracy of median-dichotomized hazard predictions against the true
    survival events: hazards above the cohort median predict an event (1)."""
    cutoff = np.median(hazardsdata)
    predicted_events = (hazardsdata > cutoff).astype(int)
    return np.sum(predicted_events == labels) / len(labels)
def cox_log_rank(hazardsdata, labels, survtime_all):
    """Log-rank test p-value between low- and high-hazard groups obtained by
    splitting the predicted hazards at the cohort median."""
    cutoff = np.median(hazardsdata)
    high_risk = np.zeros([len(hazardsdata)], dtype=int)
    high_risk[hazardsdata > cutoff] = 1
    low_mask = high_risk == 0
    results = logrank_test(survtime_all[low_mask], survtime_all[~low_mask],
                           event_observed_A=labels[low_mask],
                           event_observed_B=labels[~low_mask])
    return results.p_value
def CIndex(hazards, labels, survtime_all):
    """Concordance index over all comparable pairs.

    For every uncensored subject i (labels[i] == 1) and every subject j that
    survives longer, the pair is concordant (1) when j's predicted hazard is
    lower, and gets half credit (0.5) when the hazards tie.

    Bug fix: the original's `elif` repeated the `<` comparison verbatim, so
    the 0.5 tie credit was unreachable; it now tests equality as intended.

    Raises ZeroDivisionError when there are no comparable pairs.
    """
    concord = 0.
    total = 0.
    N_test = labels.shape[0]
    for i in range(N_test):
        if labels[i] == 1:
            for j in range(N_test):
                if survtime_all[j] > survtime_all[i]:
                    total += 1
                    if hazards[j] < hazards[i]:
                        concord += 1
                    elif hazards[j] == hazards[i]:
                        concord += 0.5
    return concord / total
def CIndex_lifeline(hazards, labels, survtime_all):
    """Concordance index via lifelines; hazards are negated because a higher
    hazard predicts a shorter survival time."""
    return concordance_index(survtime_all, -hazards, labels)
| 13,960
| 34.524173
| 156
|
py
|
PathomicFusion
|
PathomicFusion-master/core/utils_data.py
|
import os
import pandas as pd
import numpy as np
################
# Data Utils
################
def addHistomolecularSubtype(data):
    """
    Insert a 'Histomolecular subtype' column derived from the encoded
    molecular and histology subtypes.

    Molecular Subtype: IDHwt == 0, IDHmut-non-codel == 1, IDHmut-codel == 2
    Histology Subtype: astrocytoma == 0, oligoastrocytoma == 1, oligodendroglioma == 2, glioblastoma == 3

    Rows matching no rule keep the default value 1.0.
    """
    subtyped = data.copy()
    subtyped.insert(loc=0, column='Histomolecular subtype', value=np.ones(len(data)))
    # Astrocytoma/glioblastoma histology mask, shared by both IDH rules below.
    is_ATC = np.logical_or(data['Histology'] == 0, data['Histology'] == 3)
    subtyped.loc[np.logical_and(data['Molecular subtype'] == 0, is_ATC), 'Histomolecular subtype'] = 'idhwt_ATC'
    subtyped.loc[np.logical_and(data['Molecular subtype'] == 1, is_ATC), 'Histomolecular subtype'] = 'idhmut_ATC'
    subtyped.loc[np.logical_and(data['Molecular subtype'] == 2, data['Histology'] == 2), 'Histomolecular subtype'] = 'ODG'
    return subtyped
def changeHistomolecularSubtype(data):
    """
    Drop the existing 'Histomolecular subtype' column and recompute it from
    the encoded molecular and histology subtypes.

    Molecular Subtype: IDHwt == 0, IDHmut-non-codel == 1, IDHmut-codel == 2
    Histology Subtype: astrocytoma == 0, oligoastrocytoma == 1, oligodendroglioma == 2, glioblastoma == 3

    Rows matching no rule keep the default value 1.0.
    """
    data = data.drop(['Histomolecular subtype'], axis=1)
    relabeled = data.copy()
    relabeled.insert(loc=0, column='Histomolecular subtype', value=np.ones(len(data)))
    is_ATC = np.logical_or(data['Histology'] == 0, data['Histology'] == 3)
    relabeled.loc[np.logical_and(data['Molecular subtype'] == 0, is_ATC), 'Histomolecular subtype'] = 'idhwt_ATC'
    relabeled.loc[np.logical_and(data['Molecular subtype'] == 1, is_ATC), 'Histomolecular subtype'] = 'idhmut_ATC'
    relabeled.loc[np.logical_and(data['Molecular subtype'] == 2, data['Histology'] == 2), 'Histomolecular subtype'] = 'ODG'
    return relabeled
def getCleanGBMLGG(dataroot='./data/TCGA_GBMLGG/', ignore_missing_moltype=False, ignore_missing_histype=False, use_rnaseq=False, use_ag=False):
    """Load and clean the joined TCGA GBM/LGG clinical + genomic table.

    Parameters:
        dataroot (str) -- directory holding all_dataset.csv, grade_data.csv and
                          (optionally) the two RNAseq z-score text files
        ignore_missing_moltype (bool) -- drop patients lacking a molecular
                                         subtype instead of imputing
        ignore_missing_histype (bool) -- drop patients lacking histology/grade
                                         instead of imputing
        use_rnaseq (bool) -- join per-gene RNAseq z-scores (columns '_rnaseq')
        use_ag (bool/int) -- when falsy, 'Age' and 'Gender' are added to the
                             metadata list (i.e. excluded from feature columns)

    Returns (metadata, all_dataset): the metadata column names and the cleaned
    DataFrame indexed by 'TCGA ID'. Note 'censored' is flipped at the end, so
    the returned column is an *event* indicator (1 = death observed).
    """
    ### 1. Joining all_datasets.csv with grade data. Looks at columns with misisng samples
    metadata = ['Histology', 'Grade', 'Molecular subtype', 'TCGA ID', 'censored', 'Survival months']
    all_dataset = pd.read_csv(os.path.join(dataroot, 'all_dataset.csv')).drop('indexes', axis=1)
    all_dataset.index = all_dataset['TCGA ID']
    all_grade = pd.read_csv(os.path.join(dataroot, 'grade_data.csv'))
    all_grade['Histology'] = all_grade['Histology'].str.replace('astrocytoma (glioblastoma)', 'glioblastoma', regex=False)
    all_grade.index = all_grade['TCGA ID']
    all_grade = all_grade.rename(columns={'Age at diagnosis': 'Age'})
    all_grade['Gender'] = all_grade['Gender'].replace({'male':0, 'female': 1})
    assert pd.Series(all_dataset.index).equals(pd.Series(sorted(all_grade.index)))
    all_dataset = all_dataset.join(all_grade[['Histology', 'Grade', 'Molecular subtype', 'Age', 'Gender']], how='inner')
    # Rotate the three clinical columns appended by the join to the front.
    cols = all_dataset.columns.tolist()
    cols = cols[-3:] + cols[:-3]
    all_dataset = all_dataset[cols]
    if use_rnaseq:
        gbm = pd.read_csv(os.path.join(dataroot, 'mRNA_Expression_z-Scores_RNA_Seq_RSEM.txt'), sep='\t', skiprows=1, index_col=0)
        lgg = pd.read_csv(os.path.join(dataroot, 'mRNA_Expression_Zscores_RSEM.txt'), sep='\t', skiprows=1, index_col=0)
        gbm = gbm[gbm.columns[~gbm.isnull().all()]]
        lgg = lgg[lgg.columns[~lgg.isnull().all()]]
        glioma_RNAseq = gbm.join(lgg, how='inner').T
        glioma_RNAseq = glioma_RNAseq.dropna(axis=1)
        glioma_RNAseq.columns = [gene+'_rnaseq' for gene in glioma_RNAseq.columns]
        # TCGA sample barcodes are truncated to the 12-char patient barcode.
        glioma_RNAseq.index = [patname[:12] for patname in glioma_RNAseq.index]
        glioma_RNAseq = glioma_RNAseq.iloc[~glioma_RNAseq.index.duplicated()]
        glioma_RNAseq.index.name = 'TCGA ID'
        all_dataset = all_dataset.join(glioma_RNAseq, how='inner')
    pat_missing_moltype = all_dataset[all_dataset['Molecular subtype'].isna()].index
    pat_missing_idh = all_dataset[all_dataset['idh mutation'].isna()].index
    pat_missing_1p19q = all_dataset[all_dataset['codeletion'].isna()].index
    print("# Missing Molecular Subtype:", len(pat_missing_moltype))
    print("# Missing IDH Mutation:", len(pat_missing_idh))
    print("# Missing 1p19q Codeletion:", len(pat_missing_1p19q))
    assert pat_missing_moltype.equals(pat_missing_idh)
    assert pat_missing_moltype.equals(pat_missing_1p19q)
    pat_missing_grade = all_dataset[all_dataset['Grade'].isna()].index
    pat_missing_histype = all_dataset[all_dataset['Histology'].isna()].index
    print("# Missing Histological Subtype:", len(pat_missing_histype))
    print("# Missing Grade:", len(pat_missing_grade))
    assert pat_missing_histype.equals(pat_missing_grade)
    ### 2. Impute Missing Genomic Data: Removes patients with missing molecular subtype / idh mutation / 1p19q. Else imputes with median value of each column. Fills missing Molecular subtype with "Missing"
    if ignore_missing_moltype:
        all_dataset = all_dataset[all_dataset['Molecular subtype'].isna() == False]
    # NOTE(review): the 'Molecular subtype' fillna is re-applied on every
    # iteration of this loop; it could be hoisted above it (same result).
    for col in all_dataset.drop(metadata, axis=1).columns:
        all_dataset['Molecular subtype'] = all_dataset['Molecular subtype'].fillna('Missing')
        all_dataset[col] = all_dataset[col].fillna(all_dataset[col].median())
    ### 3. Impute Missing Histological Data: Removes patients with missing histological subtype / grade. Else imputes with "missing" / grade -1
    if ignore_missing_histype:
        all_dataset = all_dataset[all_dataset['Histology'].isna() == False]
    else:
        all_dataset['Grade'] = all_dataset['Grade'].fillna(1)
        all_dataset['Histology'] = all_dataset['Histology'].fillna('Missing')
    # Shift WHO grades (II/III/IV) to 0-based class indices; an imputed
    # grade of 1 therefore becomes -1.
    all_dataset['Grade'] = all_dataset['Grade'] - 2
    ### 4. Adds Histomolecular subtype
    ms2int = {'Missing':-1, 'IDHwt':0, 'IDHmut-non-codel':1, 'IDHmut-codel':2}
    all_dataset[['Molecular subtype']] = all_dataset[['Molecular subtype']].applymap(lambda s: ms2int.get(s) if s in ms2int else s)
    hs2int = {'Missing':-1, 'astrocytoma':0, 'oligoastrocytoma':1, 'oligodendroglioma':2, 'glioblastoma':3}
    all_dataset[['Histology']] = all_dataset[['Histology']].applymap(lambda s: hs2int.get(s) if s in hs2int else s)
    all_dataset = addHistomolecularSubtype(all_dataset)
    metadata.extend(['Histomolecular subtype'])
    if use_ag == 0:
        metadata.extend(['Age', 'Gender'])
    all_dataset['censored'] = 1 - all_dataset['censored']
    return metadata, all_dataset
def getCleanKIRC(dataroot='./', rnaseq_cutoff='all', cnv_cutoff=7.0, mut_cutoff=5.0):
    """Assemble the TCGA-KIRC omic feature matrix (RNAseq + CNV + mutations).

    Parameters:
        dataroot (str) -- directory with the cBioPortal exports read below
        rnaseq_cutoff -- 'all' (any str) keeps every DEG; NOTE(review): the
                         numeric value computed from it is never used afterwards
        cnv_cutoff (float) -- minimum CNA frequency (%) for a gene to be kept
        mut_cutoff (float) -- NOTE(review): currently unused

    Returns a DataFrame of per-sample features with '_rnaseq', '_cnv' and
    '_mut' suffixed columns.
    """
    ### Clinical variables
    clinical = pd.read_table(os.path.join(dataroot, './kirc_tcga_pan_can_atlas_2018_clinical_data.tsv'), index_col=2)
    clinical.index.name = None
    # 'censored' here ends up as an event indicator: 1 - (LIVING=1/DECEASED=0).
    clinical['censored'] = clinical['Overall Survival Status']
    clinical['censored'] = clinical['censored'].replace('LIVING', 1)
    clinical['censored'] = clinical['censored'].replace('DECEASED', 0)
    clinical['censored'] = 1-clinical['censored']
    ### Select RNAseq Features
    rnaseq = pd.read_table(os.path.join(dataroot, 'data_RNA_Seq_v2_mRNA_median_Zscores.txt'), index_col=0)
    rnaseq = rnaseq[rnaseq.index.notnull()]
    rnaseq = rnaseq.drop(['Entrez_Gene_Id'], axis=1)
    rnaseq.index.name = None
    rnaseqDEGs = pd.read_csv(os.path.join(dataroot, 'dataDEGs_kirc.csv'), index_col=0)
    rnaseqDEGs = rnaseqDEGs.sort_values(['PValue', 'logFC'], ascending=False)
    rnaseq_cutoff = rnaseqDEGs.shape[0] if isinstance(rnaseq_cutoff, str) else rnaseq_cutoff
    # Keeps genes that appear in the DEG table (all of them, regardless of
    # rnaseq_cutoff -- see NOTE in the docstring).
    rnaseq = rnaseq.loc[rnaseq.index.intersection(rnaseqDEGs.index)].T
    rnaseq.columns = [g+"_rnaseq" for g in rnaseq.columns]
    ### Select CNV Features
    cnv = pd.read_table(os.path.join(dataroot, 'data_CNA.txt'), index_col=0)
    cnv = cnv[cnv.index.notnull()]
    cnv = cnv.drop(['Entrez_Gene_Id'], axis=1)
    cnv.index.name = None
    cnv_freq = pd.read_table(os.path.join(dataroot, 'CNA_Genes.txt'), index_col=0)
    cnv_freq = cnv_freq[['CNA', 'Profiled Samples', 'Freq']]
    cnv_freq['Freq'] = cnv_freq['Freq'].str.rstrip('%').astype(float)
    cnv_cutoff = cnv_freq.shape[0] if isinstance(cnv_cutoff, str) else cnv_cutoff
    cnv_freq = cnv_freq[cnv_freq['Freq'] >= cnv_cutoff]
    cnv = cnv.loc[cnv.index.intersection(cnv_freq.index)].T
    cnv.columns = [g+"_cnv" for g in cnv.columns]
    # One binary '_mut' column per per-gene mutation export in ./muts.
    mut = clinical[['Patient ID']].copy()
    for tsv in os.listdir(os.path.join(dataroot, 'muts')):
        if tsv.endswith('.tsv'):
            mut_samples = pd.read_table(os.path.join(dataroot, 'muts', tsv))['Patient ID']
            # NOTE(review): rstrip('.tsv') strips those *characters*, not the
            # suffix; gene names ending in t/s/v would be mangled.
            mut_gene = tsv.split('_')[2].rstrip('.tsv')+'_mut'
            mut[mut_gene] = 0
            # NOTE(review): mut.index[:-3] slices off the last three *rows*;
            # this looks like it was meant to be mut.index.str[:-3] (strip the
            # sample-suffix from each barcode) -- confirm against the data.
            mut.loc[mut.index[:-3].isin(mut_samples), mut_gene] = 1
    mut = mut.drop(['Patient ID'], axis=1)
    omic_features = rnaseq.join(cnv, how='inner').join(mut, how='inner')
    return omic_features
| 9,075
| 54.006061
| 205
|
py
|
PathomicFusion
|
PathomicFusion-master/core/utils_analysis.py
|
# Base / Native
import math
import os
import pickle
import re
import warnings
warnings.filterwarnings('ignore')
# Numerical / Array
import lifelines
from lifelines.utils import concordance_index
from lifelines import CoxPHFitter
from lifelines.datasets import load_regression_dataset
from lifelines.utils import k_fold_cross_validation
from lifelines.statistics import logrank_test
from imblearn.over_sampling import RandomOverSampler
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import numpy as np
import pandas as pd
from PIL import Image
import pylab
import scipy
import seaborn as sns
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, KFold
from sklearn.metrics import average_precision_score, auc, f1_score, roc_curve, roc_auc_score
from sklearn.preprocessing import LabelBinarizer
from scipy import interp
mpl.rcParams['axes.linewidth'] = 3 # set the axes border width globally for every figure in this module
def natural_sort(l):
    """Sort strings so that embedded integers compare numerically
    (e.g. 'a2' sorts before 'a10'); non-digit runs compare case-insensitively."""
    def _key(text):
        return [int(chunk) if chunk.isdigit() else chunk.lower()
                for chunk in re.split('([0-9]+)', text)]
    return sorted(l, key=_key)
### Grade Classification
# Glioma
def getGradTestPats_GBMLGG(ckpt_name='./checkpoints/TCGA_GBMLGG/grad_15/', model='pathgraphomic_fusion', split='test', use_rnaseq=False, agg_type='mean'):
    """Collect, per CV split (1..15), the index of TCGA IDs whose grade
    predictions were aggregated for `model`.

    Returns a dict {split_number: pandas Index of TCGA IDs}. Each split loads
    the model's prediction pickle and the matching CV-split pickle, asserts
    that predictions and metadata are registered on the same patients, then
    groups patch-level scores per patient.

    NOTE(review): pickle files are opened without being closed (leaked file
    handles), and p(0.75) feeds 0.75 to np.percentile (the 0.75th percentile,
    not the 75th) -- confirm against getPredAggGrad_GBMLGG, which uses p(0.90)
    for the same agg_type string.
    """
    pats = {}
    ignore_missing_moltype, ignore_missing_histype = 1, 1
    # Patch-level models store predictions under '_patch_' with VGG features.
    use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if (('path' in model) or ('graph' in model)) else ('_', 'all_st', 0)
    use_rnaseq = '_rnaseq' if use_rnaseq else ''
    data_cv_path = '../data/TCGA_GBMLGG/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (roi_dir, ignore_missing_moltype, ignore_missing_histype, use_vgg_features, use_rnaseq, )
    print(data_cv_path)
    for k in range(1,16):
        pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
        grad_all = pred[3].T
        grad_all = pd.DataFrame(np.stack(grad_all)).T
        grad_all.columns = ['score_0', 'score_1', 'score_2']
        data_cv = pickle.load(open(data_cv_path, 'rb'))
        data_cv_splits = data_cv['cv_splits']
        data_cv_split_k = data_cv_splits[k]
        assert np.all(data_cv_split_k[split]['g'] == pred[4]) # Data is correctly registered
        all_dataset = data_cv['data_pd'].drop('TCGA ID', axis=1)
        all_dataset_regstrd = all_dataset.loc[data_cv_split_k[split]['x_patname']] # Subset of "all_datasets" (metadata) that is registered with "pred" (predictions)
        assert np.all(np.array(all_dataset_regstrd['Grade']) == pred[4])
        grad_all.index = data_cv_split_k[split]['x_patname']
        grad_all.index.name = 'TCGA ID'
        fun = p(0.75) if agg_type == 'p0.75' else agg_type
        grad_all = grad_all.groupby('TCGA ID').agg({'score_0': [fun], 'score_1': [fun], 'score_2': [fun]})
        pats[k] = grad_all.index
    return pats
def getPredAggGrad_GBMLGG(ckpt_name='./checkpoints/TCGA_GBMLGG/grad_15/', model='pathgraphomic_fusion', split='test', use_rnaseq=False,
                          agg_type='max', test_pats=getGradTestPats_GBMLGG(), label='all'):
    """Per CV split (1..15), aggregate patch-level grade predictions per
    patient and return the lists (y_label, y_pred) of binarized ground-truth
    labels and predicted score arrays.

    NOTE(review): the default test_pats=getGradTestPats_GBMLGG() executes at
    import time, loading 15 prediction pickles as a side effect of importing
    this module -- consider a None sentinel with lazy evaluation.

    NOTE(review): agg_type == 'p0.75' maps to p(0.90) here but p(0.75) in
    getGradTestPats_GBMLGG -- one of the two looks wrong; confirm.
    """
    y_label, y_pred = [], []
    ignore_missing_moltype = 1 if 'omic' in model else 0
    ignore_missing_histype = 1 if 'grad' in ckpt_name else 0
    use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if (('path' in model) or ('graph' in model)) else ('_', 'all_st', 0)
    use_rnaseq = '_rnaseq' if use_rnaseq else ''
    data_cv_path = '../data/TCGA_GBMLGG/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (roi_dir, ignore_missing_moltype, ignore_missing_histype, use_vgg_features, use_rnaseq, )
    #print(data_cv_path)
    for k in range(1,16):
        ### Loads Prediction Pickle File. Registers predictions with TCGA IDs for the test split.
        pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
        grad_pred = pred[3].T
        grad_pred = pd.DataFrame(np.stack(grad_pred)).T
        grad_pred.columns = ['score_0', 'score_1', 'score_2']
        data_cv = pickle.load(open(data_cv_path, 'rb'))
        data_cv_splits = data_cv['cv_splits']
        data_cv_split_k = data_cv_splits[k]
        assert np.all(data_cv_split_k[split]['g'] == pred[4]) # Data is correctly registered
        all_dataset = data_cv['data_pd'].drop('TCGA ID', axis=1)
        all_dataset_regstrd = all_dataset.loc[data_cv_split_k[split]['x_patname']] # Subset of "all_datasets" (metadata) that is registered with "pred" (predictions)
        assert np.all(np.array(all_dataset_regstrd['Grade']) == pred[4])
        grad_pred.index = data_cv_split_k[split]['x_patname']
        grad_pred.index.name = 'TCGA ID'
        ### Amalgamates predictions together.
        fun = p(0.90) if agg_type == 'p0.75' else agg_type
        grad_pred = grad_pred.groupby('TCGA ID').agg({'score_0': [fun], 'score_1': [fun], 'score_2': [fun]})
        test_pat = test_pats[k]
        grad_pred = grad_pred.loc[test_pat]
        grad_gt = np.array(all_dataset.loc[test_pat]['Grade'])
        grad_pred = np.array(grad_pred)
        enc = LabelBinarizer()
        enc.fit(grad_gt)
        grad_gt = enc.transform(grad_gt)
        y_label.append(grad_gt)
        y_pred.append(grad_pred)
    return y_label, y_pred
    # NOTE(review): everything below is unreachable because of the return
    # above, which also leaves the `label` parameter effectively unused.
    y_label, y_pred = np.vstack(y_label), np.vstack(y_pred)
    if isinstance(label, int):
        return y_label[:,label], y_pred[:,label]
    return y_label, y_pred
def calcGradMetrics(y_label_all, y_pred_all, avg='micro'):
    """Per-split ROC AUC for grade classification.

    Parameters:
        y_label_all, y_pred_all -- sequences (one entry per CV split) of
            binarized ground-truth labels and predicted score arrays
        avg (str) -- averaging mode forwarded to roc_auc_score

    Returns an np.array with one ROC AUC per split.

    Changes vs. original: the unused ap_all/f1_all/f1_gradeIV_all
    accumulators are removed; `average` is passed by keyword because it is
    keyword-only in recent scikit-learn; the hard-coded 15-split loop is
    generalized to however many splits are supplied (identical for 15).
    """
    rocauc_all = []
    for y_label, y_pred in zip(y_label_all, y_pred_all):
        rocauc_all.append(roc_auc_score(y_label, y_pred, average=avg))
    return np.array(rocauc_all)
def calcAggGradMetrics(y_label_all, y_pred_all, avg='micro'):
    """95% CI summaries ('mean ± half-width' strings via CI_pm) of ROC AUC,
    average precision, F1 and grade-IV F1 across the 15 CV splits."""
    rocauc_all, ap_all, f1_all, f1_gradeIV_all = [], [], [], []
    for split in range(15):
        y_label, y_pred = y_label_all[split], y_pred_all[split]
        pred_cls = y_pred.argmax(axis=1)
        true_cls = np.argmax(y_label, axis=1)
        rocauc_all.append(roc_auc_score(y_label, y_pred, avg))
        ap_all.append(average_precision_score(y_label, y_pred, average=avg))
        f1_all.append(f1_score(pred_cls, true_cls, average=avg))
        # Index 2 corresponds to the grade-IV class.
        f1_gradeIV_all.append(f1_score(pred_cls, true_cls, average=None)[2])
    return np.array([CI_pm(rocauc_all), CI_pm(ap_all), CI_pm(f1_all), CI_pm(f1_gradeIV_all)])
def makeAUROCPlot(ckpt_name='./checkpoints/TCGA_GBMLGG/grad_15/', model_list=['path', 'pathgraphomic_fusion'], split='test', avg='micro', use_zoom=False):
    """Plot mean ROC curves (with ±1 std bands over the 15 CV splits) for each
    model, one figure per class (0, 1, 2) plus the micro-average, and save
    them under ckpt_name as AUC_<class>[ _zoom].png.

    When use_zoom is True, each figure is cropped to a per-class window
    (zoom_params) and the legend is omitted.
    """
    mpl.rcParams['font.family'] = "arial"
    colors = {'path':'dodgerblue', 'graph':'orange', 'omic':'green', 'pathgraphomic_fusion':'crimson'}
    names = {'path':'Histology CNN', 'graph':'Histology GCN', 'omic':'Genomic SNN', 'pathgraphomic_fusion':'Pathomic F.'}
    # (x-window, y-window) per class for the zoomed variant of each figure.
    zoom_params = {0:([0.2, 0.4], [0.8, 1.0]),
                   1:([0.25, 0.45], [0.75, 0.95]),
                   2:([0.0, 0.2], [0.8, 1.0]),
                   'micro':([0.15, 0.35], [0.8, 1.0])}
    mean_fpr = np.linspace(0, 1, 100)
    classes = [0, 1, 2, avg]
    ### 1. Looping over classes
    for i in classes:
        print("Class: " + str(i))
        fi = pylab.figure(figsize=(10,10), dpi=600, linewidth=0.2)
        axi = plt.subplot()
        ### 2. Looping over models
        for m, model in enumerate(model_list):
            ###. 3. Looping over all splits
            tprs, pres, aucrocs, rocaucs, = [], [], [], []
            y_label_all, y_pred_all = getPredAggGrad_GBMLGG(model=model, agg_type='max')
            for k in range(15):
                y_label, y_pred = y_label_all[k], y_pred_all[k]
                if i != avg:
                    pres.append(average_precision_score(y_label[:, i], y_pred[:, i])) # from https://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html
                    fpr, tpr, thresh = roc_curve(y_label[:,i], y_pred[:,i], drop_intermediate=False)
                    aucrocs.append(auc(fpr, tpr)) # https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
                    rocaucs.append(roc_auc_score(y_label[:,i],y_pred[:,i])) # https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score
                    # Interpolate each split's TPR onto a common FPR grid so
                    # curves can be averaged point-wise.
                    tprs.append(interp(mean_fpr, fpr, tpr))
                    tprs[-1][0] = 0.0
                else:
                    # A "micro-average": quantifying score on all classes jointly
                    pres.append(average_precision_score(y_label, y_pred, average=avg))
                    fpr, tpr, thresh = roc_curve(y_label.ravel(), y_pred.ravel())
                    aucrocs.append(auc(fpr, tpr))
                    rocaucs.append(roc_auc_score(y_label, y_pred, avg))
                    tprs.append(interp(mean_fpr, fpr, tpr))
                    tprs[-1][0] = 0.0
            mean_tpr = np.mean(tprs, axis=0)
            mean_tpr[-1] = 1.0
            #mean_auc = auc(mean_fpr, mean_tpr)
            mean_auc = np.mean(aucrocs)
            std_auc = np.std(aucrocs)
            print('\t'+'%s - AUC: %0.3f ± %0.3f' % (model, mean_auc, std_auc))
            if use_zoom:
                alpha, lw = (0.8, 6) if model =='pathgraphomic_fusion' else (0.5, 6)
                plt.plot(mean_fpr, mean_tpr, color=colors[model],
                     label=r'%s (AUC = %0.3f $\pm$ %0.3f)' % (names[model], mean_auc, std_auc), lw=lw, alpha=alpha)
                std_tpr = np.std(tprs, axis=0)
                tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
                tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
                plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color=colors[model], alpha=0.1)
                plt.xlim([zoom_params[i][0][0]-0.005, zoom_params[i][0][1]+0.005])
                plt.ylim([zoom_params[i][1][0]-0.005, zoom_params[i][1][1]+0.005])
                axi.set_xticks(np.arange(zoom_params[i][0][0], zoom_params[i][0][1]+0.001, 0.05))
                axi.set_yticks(np.arange(zoom_params[i][1][0], zoom_params[i][1][1]+0.001, 0.05))
                axi.tick_params(axis='both', which='major', labelsize=26)
            else:
                alpha, lw = (0.8, 4) if model =='pathgraphomic_fusion' else (0.5, 3)
                plt.plot(mean_fpr, mean_tpr, color=colors[model],
                     label=r'%s (AUC = %0.3f $\pm$ %0.3f)' % (names[model], mean_auc, std_auc), lw=lw, alpha=alpha)
                std_tpr = np.std(tprs, axis=0)
                tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
                tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
                plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color=colors[model], alpha=0.1)
                plt.xlim([-0.05, 1.05])
                plt.ylim([-0.05, 1.05])
                axi.set_xticks(np.arange(0, 1.001, 0.2))
                axi.set_yticks(np.arange(0, 1.001, 0.2))
                axi.legend(loc="lower right", prop={'size': 20})
            axi.tick_params(axis='both', which='major', labelsize=30)
    #plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='navy', alpha=.8)
    # NOTE(review): relies on the private matplotlib API mpl._pylab_helpers to
    # enumerate all open figures; may break across matplotlib versions.
    figures = [manager.canvas.figure
           for manager in mpl._pylab_helpers.Gcf.get_all_fig_managers()]
    zoom = '_zoom' if use_zoom else ''
    for i, fig in enumerate(figures):
        fig.savefig(ckpt_name+'/AUC_%s%s.png' % (classes[i], zoom), bbox_inches='tight')
### Survival Outcome Prediction
def hazard2grade(hazard, p):
    """Bucket a hazard value against the ascending cutoffs `p`: return the
    index of the first cutoff exceeding it, or len(p) if none does."""
    for bucket, cutoff in enumerate(p):
        if hazard < cutoff:
            return bucket
    return len(p)
def CI_pm(data, confidence=0.95):
    """Format the mean and t-based confidence half-width of `data` as
    'mean ± half_width' (4 and 3 decimal places respectively)."""
    values = 1.0 * np.array(data)
    count = len(values)
    mean = np.mean(values)
    half_width = scipy.stats.sem(values) * scipy.stats.t.ppf((1 + confidence) / 2., count - 1)
    return str("{0:.4f} ± ".format(mean) + "{0:.3f}".format(half_width))
def CI_interval(data, confidence=0.95):
    """Format the t-based confidence interval of `data` as 'lower, upper'
    (3 decimal places)."""
    values = 1.0 * np.array(data)
    count = len(values)
    mean = np.mean(values)
    half_width = scipy.stats.sem(values) * scipy.stats.t.ppf((1 + confidence) / 2., count - 1)
    return str("{0:.3f}, ".format(mean - half_width) + "{0:.3f}".format(mean + half_width))
def p(n):
    """Build a named percentile aggregation function.

    The returned callable computes the n-th percentile of its input and is
    named 'p<n>', so pandas groupby/agg output columns get a useful label.
    """
    def _agg(values):
        return np.percentile(values, n)
    _agg.__name__ = 'p%s' % n
    return _agg
def trainCox_GBMLGG(dataroot = './data/TCGA_GBMLGG/', ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='cox_omic', use_rnaseq=False, normalize=False, penalizer=0):
    """Fit a Cox proportional hazards baseline on each of the 15 PNAS splits.

    For every split: fit lifelines' CoxPHFitter on the train patients using
    the feature set implied by `model`, score the test patients (C-index and
    log-rank p-value), and pickle the per-split train/test predictions plus
    the list of C-indices under ckpt_name/model.

    NOTE(review): model_feats below has no 'cox_omic' or 'cox_all' key, so the
    feats expression raises KeyError for the default model -- presumably those
    entries exist in another version of this dict; confirm before use.
    """
    ### Creates Checkpoint Directory
    if not os.path.exists(ckpt_name): os.makedirs(ckpt_name)
    if not os.path.exists(os.path.join(ckpt_name, model)): os.makedirs(os.path.join(ckpt_name, model))
    ### Load PNAS Splits
    pnas_splits = pd.read_csv(dataroot+'pnas_splits.csv')
    pnas_splits.columns = ['TCGA ID']+[str(k) for k in range(1, 16)]
    pnas_splits.index = pnas_splits['TCGA ID']
    pnas_splits = pnas_splits.drop(['TCGA ID'], axis=1)
    ### Loads Data
    ignore_missing_moltype = True if model in ['cox_omic', 'cox_moltype', 'cox_molgrade', 'all'] else False
    ignore_missing_histype = True if model in ['cox_histype', 'cox_grade', 'cox_molgrade', 'all'] else False
    all_dataset = getCleanGBMLGG(dataroot=dataroot, ignore_missing_moltype=ignore_missing_moltype,
                                 ignore_missing_histype=ignore_missing_histype, use_rnaseq=use_rnaseq)[1]
    # Covariate columns per baseline model ('Survival months'/'censored' are
    # always included as the duration/event columns).
    model_feats = {'cox_agegender':['Survival months', 'censored', 'Age', 'Gender'],
                   'cox_moltype':['Survival months', 'censored', 'codeletion', 'idh mutation'],
                   'cox_grade':['Survival months', 'censored', 'Grade'],
                   'cox_molgrade':['Survival months', 'censored', 'Grade', 'codeletion', 'idh mutation'],
                   'cox_covariates':['Survival months', 'censored', 'codeletion', 'idh mutation', 'Grade', 'Age', 'Gender', 'Histology']}
    cv_results = []
    cv_pvals = []
    for k in pnas_splits.columns:
        pat_train = list(set(pnas_splits.index[pnas_splits[k] == 'Train']).intersection(all_dataset.index))
        pat_test = list(set(pnas_splits.index[pnas_splits[k] == 'Test']).intersection(all_dataset.index))
        # 'cox_omic'/'cox_all' use every non-listed column; others use their list.
        feats = all_dataset.columns.drop(model_feats[model]) if model == 'cox_omic' or model == 'cox_all' else model_feats[model]
        train = all_dataset.loc[pat_train]
        test = all_dataset.loc[pat_test]
        if normalize:
            # Scaler is fit on train only to avoid test-set leakage.
            scaler = preprocessing.StandardScaler().fit(train[feats])
            train[feats] = scaler.transform(train[feats])
            test[feats] = scaler.transform(test[feats])
        cph = CoxPHFitter(penalizer=penalizer)
        cph.fit(train[feats], duration_col='Survival months', event_col='censored', show_progress=False)
        # Hazards are negated: higher hazard = shorter survival for C-index.
        cin = concordance_index(test['Survival months'], -cph.predict_partial_hazard(test[feats]), test['censored'])
        pval = cox_log_rank(np.array(-cph.predict_partial_hazard(test[feats])).reshape(-1),
                            np.array(test['censored']).reshape(-1),
                            np.array(test['Survival months']).reshape(-1))
        cv_results.append(cin)
        cv_pvals.append(pval)
        train.insert(loc=0, column='Hazard', value=-cph.predict_partial_hazard(train))
        test.insert(loc=0, column='Hazard', value=-cph.predict_partial_hazard(test))
        pickle.dump(train, open(os.path.join(ckpt_name, model, '%s_%s_pred_train.pkl' % (model, k)), 'wb'))
        pickle.dump(test, open(os.path.join(ckpt_name, model, '%s_%s_pred_test.pkl' % (model, k)), 'wb'))
    pickle.dump(cv_results, open(os.path.join(ckpt_name, model, '%s_results.pkl' % model), 'wb'))
    print("C-Indices across Splits", cv_results)
    print("Average C-Index: %s" % CI_pm(cv_results))
def getSurvTestPats_GBMLGG(ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='pathgraphomic_fusion', split='test', use_rnaseq=True, agg_type='Hazard_mean'):
    """Collect, per CV split (1..15), the index of TCGA IDs with an aggregated
    survival-hazard prediction for `model`.

    Each split loads the model's prediction pickle and the matching CV-split
    pickle, asserts that predictions and metadata are registered on the same
    patients, then aggregates per-patient hazards with the statistic named by
    agg_type ('Hazard_mean' | 'Hazard_median' | 'Hazard_max').

    Returns a dict {split_number: pandas Index of TCGA IDs}.
    NOTE(review): pickle files are opened without being closed.
    """
    pats = {}
    print(model)
    ignore_missing_moltype = 1
    ignore_missing_histype = 0
    use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if (('path' in model) or ('graph' in model)) else ('_', 'all_st', 0)
    use_rnaseq = '_rnaseq' if use_rnaseq else ''
    data_cv_path = './data/TCGA_GBMLGG/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (roi_dir, ignore_missing_moltype, ignore_missing_histype, use_vgg_features, use_rnaseq, )
    print(data_cv_path)
    for k in range(1,16):
        pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
        # pred slot 3 (grade scores) is dropped; the rest stack into columns.
        surv_all = pd.DataFrame(np.stack(np.delete(np.array(pred), 3))).T
        surv_all.columns = ['Hazard', 'Survival months', 'censored', 'Grade']
        data_cv = pickle.load(open(data_cv_path, 'rb'))
        data_cv_splits = data_cv['cv_splits']
        data_cv_split_k = data_cv_splits[k]
        assert np.all(data_cv_split_k[split]['t'] == pred[1]) # Data is correctly registered
        all_dataset = data_cv['data_pd'].drop('TCGA ID', axis=1)
        all_dataset_regstrd = all_dataset.loc[data_cv_split_k[split]['x_patname']] # Subset of "all_datasets" (metadata) that is registered with "pred" (predictions)
        assert np.all(np.array(all_dataset_regstrd['Survival months']) == pred[1])
        assert np.all(np.array(all_dataset_regstrd['censored']) == pred[2])
        assert np.all(np.array(all_dataset_regstrd['Grade']) == pred[4])
        all_dataset_regstrd.insert(loc=0, column='Hazard', value = np.array(surv_all['Hazard']))
        hazard_agg = all_dataset_regstrd.groupby('TCGA ID').agg({'Hazard': ['mean', 'median', max]})
        hazard_agg.columns = ["_".join(x) for x in hazard_agg.columns.ravel()]
        hazard_agg = hazard_agg[[agg_type]]
        hazard_agg.columns = ['Hazard']
        all_dataset_hazard = hazard_agg.join(all_dataset, how='inner')
        pats[k] = all_dataset_hazard.index
    return pats
def getPValAggSurv_GBMLGG_Binary(ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='pathgraphomic_fusion', percentile=(50,)):
    """Log-rank test p-value between low- and high-hazard patient groups.

    Aggregated hazards for `model` are split at the given percentile cutoff
    (median by default); the two groups' survival curves are then compared.

    Returns a 1-element np.array containing the low-vs-high p-value.
    """
    # The default is a tuple rather than the original list to avoid the shared
    # mutable-default pitfall; np.percentile accepts either.
    data = getDataAggSurv_GBMLGG(ckpt_name=ckpt_name, model=model)
    cuts = np.percentile(data['Hazard'], percentile)
    data.insert(0, 'grade_pred', [hazard2grade(hazard, cuts) for hazard in data['Hazard']])
    low = data['grade_pred'] == 0
    high = data['grade_pred'] == 1
    T_low, T_high = data['Survival months'][low], data['Survival months'][high]
    E_low, E_high = data['censored'][low], data['censored'][high]
    low_vs_high = logrank_test(durations_A=T_low, durations_B=T_high, event_observed_A=E_low, event_observed_B=E_high).p_value
    return np.array([low_vs_high])
def getPValAggSurv_GBMLGG_Multi(ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='pathgraphomic_fusion', percentile=(33, 66)):
    """Log-rank test p-values between adjacent hazard tertiles.

    Aggregated hazards for `model` are split at the two percentile cutoffs
    (tertiles by default) into low/mid/high groups.

    Returns np.array([low_vs_mid_pvalue, mid_vs_high_pvalue]).
    """
    # Tuple default (vs. the original list) avoids the shared mutable-default
    # pitfall; np.percentile accepts either.
    data = getDataAggSurv_GBMLGG(ckpt_name=ckpt_name, model=model)
    cuts = np.percentile(data['Hazard'], percentile)
    if cuts[0] == cuts[1]:
        # Degenerate split: nudge the lower cutoff so the middle bin is
        # non-empty (magic value kept from the original -- TODO confirm).
        cuts[0] = 2.99997
    data.insert(0, 'grade_pred', [hazard2grade(hazard, cuts) for hazard in data['Hazard']])
    groups = [data['grade_pred'] == g for g in (0, 1, 2)]
    T = [data['Survival months'][g] for g in groups]
    E = [data['censored'][g] for g in groups]
    low_vs_mid = logrank_test(durations_A=T[0], durations_B=T[1], event_observed_A=E[0], event_observed_B=E[1]).p_value
    mid_vs_high = logrank_test(durations_A=T[1], durations_B=T[2], event_observed_A=E[1], event_observed_B=E[2]).p_value
    return np.array([low_vs_mid, mid_vs_high])
def getPredAggSurv_GBMLGG(ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='pathgraphomic_fusion',
                          split='test', use_rnaseq=True, agg_type='Hazard_mean', test_pats=None):
    """Return the c-index of a GBMLGG survival model for each of the 15 CV splits.

    Cox baselines use their saved predictions directly. Deep models have their
    (possibly patch-level) hazards re-registered against split metadata,
    aggregated per patient, restricted to the evaluation patients, and scored.

    Args:
        ckpt_name: checkpoint directory containing per-model prediction pickles.
        model: model subdirectory name; names containing 'cox' take the fast path.
        split: which predictions to load ('train' or 'test').
        use_rnaseq: unused; the effective flag is re-derived from ckpt_name/model below.
        agg_type: per-patient aggregation ('Hazard_mean' / 'Hazard_median' / 'Hazard_max').
        test_pats: dict split -> patient index. Computed lazily when None —
            previously this was an eagerly-evaluated default, which ran the
            expensive getSurvTestPats_GBMLGG() at import time.

    Returns:
        list of 15 c-index floats.
    """
    if test_pats is None:
        test_pats = getSurvTestPats_GBMLGG()  # lazy: avoid heavy work at import
    results = []
    if 'cox' in model:
        for k in range(1,16):
            pred = pickle.load(open(ckpt_name+'/%s/%s_%d_pred_%s.pkl' % (model, model, k, split), 'rb'))
            # Cox hazards are stored with opposite sign convention; negate for c-index.
            cin = CIndex_lifeline(-pred['Hazard'], pred['censored'], pred['Survival months'])
            results.append(cin)
        return results
    else:
        ignore_missing_moltype = 1 if 'omic' in model else 0
        ignore_missing_histype = 1 if 'grad' in ckpt_name else 0
        # Path/graph models were trained on 512px ROI patches with VGG features.
        use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if (('path' in model) or ('graph' in model)) else ('_', 'all_st', 0)
        use_rnaseq = '_rnaseq' if ('rnaseq' in ckpt_name and 'path' != model and 'pathpath' not in model and 'graph' != model and 'graphgraph' not in model) else ''
        data_cv_path = './data/TCGA_GBMLGG/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (roi_dir, ignore_missing_moltype, ignore_missing_histype, use_vgg_features, use_rnaseq, )
        data_cv = pickle.load(open(data_cv_path, 'rb'))
        for k in range(1,16):
            pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
            # pred layout: [hazard, survtime, censor, <unused>, grade]; drop slot 3.
            surv_all = pd.DataFrame(np.stack(np.delete(np.array(pred), 3))).T
            surv_all.columns = ['Hazard', 'Survival months', 'censored', 'Grade']
            data_cv_splits = data_cv['cv_splits']
            data_cv_split_k = data_cv_splits[k]
            assert np.all(data_cv_split_k[split]['t'] == pred[1]) # Data is correctly registered
            all_dataset = data_cv['data_pd'].drop('TCGA ID', axis=1)
            all_dataset_regstrd = all_dataset.loc[data_cv_split_k[split]['x_patname']] # Subset of "all_datasets" (metadata) that is registered with "pred" (predictions)
            assert np.all(np.array(all_dataset_regstrd['Survival months']) == pred[1])
            assert np.all(np.array(all_dataset_regstrd['censored']) == pred[2])
            assert np.all(np.array(all_dataset_regstrd['Grade']) == pred[4])
            all_dataset_regstrd.insert(loc=0, column='Hazard', value = np.array(surv_all['Hazard']))
            # Collapse patch-level hazards to one value per patient, keep agg_type.
            hazard_agg = all_dataset_regstrd.groupby('TCGA ID').agg({'Hazard': ['mean', 'median', max]})
            hazard_agg.columns = ["_".join(x) for x in hazard_agg.columns.ravel()]
            hazard_agg = hazard_agg[[agg_type]]
            hazard_agg.columns = ['Hazard']
            all_dataset_hazard = hazard_agg.join(all_dataset, how='inner')
            all_dataset_hazard = all_dataset_hazard.loc[test_pats[k]]
            cin = CIndex_lifeline(all_dataset_hazard['Hazard'], all_dataset_hazard['censored'], all_dataset_hazard['Survival months'])
            results.append(cin)
        return results
def getDataAggSurv_GBMLGG(ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='pathgraphomic_fusion',
                          split='test', use_rnaseq=True, agg_type='Hazard_mean', zscore=False, test_pats=None):
    """Collect per-patient hazard predictions across the 15 GBMLGG CV splits.

    Hazards are registered against split metadata, aggregated per patient,
    restricted to the evaluation patients, tagged with their split index, and
    concatenated into one DataFrame (with histomolecular subtype relabeling).

    Args:
        ckpt_name, model, split, agg_type: as in getPredAggSurv_GBMLGG.
        use_rnaseq: unused; the effective flag is re-derived from ckpt_name/model below.
        zscore: if True, z-score each split's hazards independently.
        test_pats: dict split -> patient index. Computed lazily when None —
            previously this was an eagerly-evaluated default, which ran the
            expensive getSurvTestPats_GBMLGG() at import time.

    Returns:
        pd.DataFrame of metadata + 'Hazard' + 'split' columns (one row per
        patient per split), or the concatenated Cox predictions.
    """
    if test_pats is None:
        test_pats = getSurvTestPats_GBMLGG()  # lazy: avoid heavy work at import
    data = []
    if 'cox' in model:
        for k in range(1,16):
            pred = pickle.load(open(ckpt_name+'/%s/%s_%d_pred_%s.pkl' % (model, model, k, split), 'rb'))
            data.append(pred)
        return pd.concat(data)
    else:
        ignore_missing_moltype = 1 if 'omic' in model else 0
        ignore_missing_histype = 1 if 'grad' in ckpt_name else 0
        # Path/graph models were trained on 512px ROI patches with VGG features.
        use_patch, roi_dir, use_vgg_features = ('_patch_', 'all_st_patches_512', 1) if (('path' in model) or ('graph' in model)) else ('_', 'all_st', 0)
        use_rnaseq = '_rnaseq' if ('rnaseq' in ckpt_name and 'path' != model and 'pathpath' not in model and 'graph' != model and 'graphgraph' not in model) else ''
        data_cv_path = './data/TCGA_GBMLGG/splits/gbmlgg15cv_%s_%d_%d_%d%s.pkl' % (roi_dir, ignore_missing_moltype, ignore_missing_histype, use_vgg_features, use_rnaseq, )
        data_cv = pickle.load(open(data_cv_path, 'rb'))
        for k in range(1,16):
            pred = pickle.load(open(ckpt_name+'/%s/%s_%d%spred_%s.pkl' % (model, model, k, use_patch, split), 'rb'))
            # pred layout: [hazard, survtime, censor, <unused>, grade]; drop slot 3.
            surv_all = pd.DataFrame(np.stack(np.delete(np.array(pred), 3))).T
            surv_all.columns = ['Hazard', 'Survival months', 'censored', 'Grade']
            data_cv_splits = data_cv['cv_splits']
            data_cv_split_k = data_cv_splits[k]
            assert np.all(data_cv_split_k[split]['t'] == pred[1]) # Data is correctly registered
            all_dataset = data_cv['data_pd'].drop('TCGA ID', axis=1)
            all_dataset_regstrd = all_dataset.loc[data_cv_split_k[split]['x_patname']] # Subset of "all_datasets" (metadata) that is registered with "pred" (predictions)
            assert np.all(np.array(all_dataset_regstrd['Survival months']) == pred[1])
            assert np.all(np.array(all_dataset_regstrd['censored']) == pred[2])
            assert np.all(np.array(all_dataset_regstrd['Grade']) == pred[4])
            all_dataset_regstrd.insert(loc=0, column='Hazard', value = np.array(surv_all['Hazard']))
            # Collapse patch-level hazards to one value per patient, keep agg_type.
            hazard_agg = all_dataset_regstrd.groupby('TCGA ID').agg({'Hazard': ['mean', 'median', max]})
            hazard_agg.columns = ["_".join(x) for x in hazard_agg.columns.ravel()]
            hazard_agg = hazard_agg[[agg_type]]
            hazard_agg.columns = ['Hazard']
            all_dataset_hazard = hazard_agg.join(all_dataset, how='inner')
            all_dataset_hazard = all_dataset_hazard.loc[test_pats[k]]
            all_dataset_hazard['split'] = k
            if zscore: all_dataset_hazard['Hazard'] = scipy.stats.zscore(np.array(all_dataset_hazard['Hazard']))
            data.append(all_dataset_hazard)
        data = pd.concat(data)
        data = changeHistomolecularSubtype(data)
        return data
def getHazardHistogramPlot_GBMLGG(ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='pathgraphomic_fusion',
                                  split='test', zscore=True, agg_type='Hazard_mean', c=[(-1.5, -0.5), (1, 1.25), (1.25, 1.5)]):
    """Plot overlaid hazard histograms (short vs. long survivors) and summarize
    the grade / subtype composition of selected hazard density regions.

    Uncensored patients are split at 5-year survival; the two hazard histograms
    are saved to ``<ckpt_name>/<model>_HHP_V2.png``. For every open interval
    (low, high) in *c*, the fraction of grades II/III/IV and of the three
    histomolecular subtypes among patients whose hazard falls inside it is
    tabulated. (The original hard-coded exactly three copy-pasted region
    blocks; this version loops, so *c* may have any number of regions.)

    Args:
        ckpt_name, model, split, agg_type, zscore: forwarded to getDataAggSurv_GBMLGG.
        c: list of (low, high) hazard bounds defining the density regions.

    Returns:
        pd.DataFrame of percentages, one row per region in *c*.
    """
    data = getDataAggSurv_GBMLGG(ckpt_name=ckpt_name, model=model, split=split, use_rnaseq=True, agg_type=agg_type, zscore=zscore)
    norm = True
    fig, ax = plt.subplots(dpi=600)
    # Uncensored (event observed) patients only; split at 5-year survival.
    low = data[data['Survival months'] <= 365*5]
    low = low[low['censored'] == 1]
    high = data[data['Survival months'] > 365*5]
    high = high[high['censored'] == 1]
    sns.distplot(low['Hazard'], bins=15, kde=False, norm_hist=norm,
                 hist_kws={'histtype':'stepfilled', "linewidth": 1, "alpha": 0.5, "color": "r"}, ax=ax)
    sns.distplot(high['Hazard'], bins=15, kde=False, norm_hist=norm,
                 hist_kws={'histtype':'stepfilled', "linewidth": 1, "alpha": 0.5, "color": "b"}, ax=ax)
    ax.set_xlabel('')
    ax.spines["right"].set_visible(False)
    ax.spines["top"].set_visible(False)
    ax.tick_params(axis='y', which='both', labelsize=15)
    ax.tick_params(axis='x', which='both', labelsize=15)
    ax.set_xticks(np.arange(-1.5, 1.51, 0.5))
    plt.xlim([-1.75, 1.75])
    if norm:
        ax.set_yticks(np.arange(0, 2.1, 1))
        plt.ylim([0, 2])
    fig.savefig(ckpt_name+'/%s_HHP_V2.png' % (model))
    # Composition of each hazard density region.
    summaries = []
    for lo, hi in c:
        cluster = data[(data['Hazard'] > lo) & (data['Hazard'] < hi)]
        n_cluster = cluster.shape[0]
        summaries.append([
            (cluster['Grade'] == 0).sum() / n_cluster,
            (cluster['Grade'] == 1).sum() / n_cluster,
            (cluster['Grade'] == 2).sum() / n_cluster,
            (cluster['Histomolecular subtype'] == 'ODG').sum() / n_cluster,
            (cluster['Histomolecular subtype'] == 'idhmut_ATC').sum() / n_cluster,
            (cluster['Histomolecular subtype'] == 'idhwt_ATC').sum() / n_cluster,
        ])
    cluster_results = pd.DataFrame(summaries)
    cluster_results.index = ['%0.2f < Hazard < %0.2f' % bounds for bounds in c]
    cluster_results.index.name = 'Density Region'
    cluster_results.columns = ['Grade II (%)', 'Grade III (%)', 'Grade IV (%)', 'ODG (%)', 'IDHmut ATC (%)', 'IDHwt ATC (%)']
    cluster_results *= 100
    pd.options.display.float_format = '{:.2f}'.format
    return cluster_results
def makeHazardSwarmPlot(ckpt_name='./checkpoints/surv_15_rnaseq/', model='path', split='test', zscore=True, agg_type='Hazard_mean'):
    """Swarm plot of per-patient hazards, grouped by histomolecular subtype and
    colored by WHO grade; saved to ``<ckpt_name>/<model>_HSP.png``."""
    mpl.rcParams['font.family'] = "arial"
    data = getDataAggSurv_GBMLGG(ckpt_name=ckpt_name, model=model, split=split, zscore=zscore, agg_type=agg_type)
    # Drop patients with missing grade / subtype labels.
    data = data[data['Grade'] != -1]
    data = data[data['Histomolecular subtype'] != -1]
    data['Grade'] = data['Grade'].astype(int).astype(str)
    for numeral, label in (('0', 'Grade II'), ('1', 'Grade III'), ('2', 'Grade IV')):
        data['Grade'] = data['Grade'].str.replace(numeral, label, regex=False)
    for raw, pretty in (('idhwt_ATC', 'IDH-wt \n astryocytoma'),
                        ('idhmut_ATC', 'IDH-mut \n astrocytoma'),
                        ('ODG', 'Oligodendroglioma')):
        data['Histomolecular subtype'] = data['Histomolecular subtype'].str.replace(raw, pretty, regex=False)
    fig, ax = plt.subplots(dpi=600)
    ax.set_ylim([-2, 2.5])
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.set_yticks(np.arange(-2, 2.001, 1))
    grade_palette = {"Grade II": "#AFD275", "Grade III": "#7395AE", "Grade IV": "#E7717D"}
    sns.swarmplot(x='Histomolecular subtype', y='Hazard', data=data, hue='Grade',
                  palette=grade_palette, size=4, alpha=0.9, ax=ax)
    ax.set_xlabel('')
    ax.set_ylabel('')
    ax.tick_params(axis='y', which='both', labelsize=20)
    ax.tick_params(axis='x', which='both', labelsize=15)
    ax.tick_params(axis='x', which='both', labelbottom='off')  # NOTE: 'off' string appears ineffective on newer matplotlib
    ax.legend(prop={'size': 8})
    fig.savefig(ckpt_name+'/%s_HSP.png' % (model))
    plt.close()
def makeHazardBoxPlot(ckpt_name='./checkpoints/surv_15_rnaseq/', model='omic', split='test', zscore=True, agg_type='Hazard_mean'):
    """Box + strip plots of hazards per grade, one panel per histomolecular
    subtype; saved to ``<ckpt_name>/<model>_HBP.png``."""
    mpl.rcParams['font.family'] = "arial"
    data = getDataAggSurv_GBMLGG(ckpt_name=ckpt_name, model=model, split=split, zscore=zscore, agg_type=agg_type)
    data['Grade'] = data['Grade'].astype(int).astype(str)
    for numeral, roman in (('0', 'II'), ('1', 'III'), ('2', 'IV')):
        data['Grade'] = data['Grade'].str.replace(numeral, roman, regex=False)
    fig, axes = plt.subplots(nrows=1, ncols=3, gridspec_kw={'width_ratios': [3, 3, 2]}, dpi=600)
    plt.subplots_adjust(wspace=0, hspace=0)
    plt.ylim(-2, 2)
    plt.yticks(np.arange(-2, 2.001, 1))
    panel_colors = ['#F76C6C', '#A8D0E6', '#F8E9A1']
    subtypes = ['idhwt_ATC', 'idhmut_ATC', 'ODG']
    for idx, subtype in enumerate(subtypes):
        panel = axes[idx]
        panel.spines["top"].set_visible(False)
        panel.spines["right"].set_visible(False)
        panel.xaxis.grid(False)
        panel.yaxis.grid(False)
        if idx > 0:
            # Only the leftmost panel keeps its y-axis; the rest share it visually.
            panel.get_yaxis().set_visible(False)
            panel.spines["left"].set_visible(False)
        # ODG has no grade-IV cases, so its panel shows only II and III.
        order = ["II","III","IV"] if subtype != 'ODG' else ["II", "III"]
        panel.xaxis.label.set_visible(False)
        panel.yaxis.label.set_visible(False)
        panel.tick_params(axis='y', which='both', labelsize=20)
        panel.tick_params(axis='x', which='both', labelsize=15)
        datapoints = data[data['Histomolecular subtype'] == subtype]
        sns.boxplot(y='Hazard', x="Grade", data=datapoints, ax=panel, color=panel_colors[idx], order=order)
        sns.stripplot(y='Hazard', x='Grade', data=datapoints, alpha=0.2, jitter=0.2, color='k', ax=panel, order=order)
        panel.set_ylim(-2.5, 2.5)
        panel.set_yticks(np.arange(-2.0, 2.1, 1))
    fig.savefig(ckpt_name+'/%s_HBP.png' % (model))
    plt.close()
def makeKaplanMeierPlot(ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='omic', split='test', zscore=False, agg_type='Hazard_mean', plot_gt=True):
    """Save Kaplan-Meier curves for predicted hazard strata (and, optionally,
    ground-truth WHO grades) per GBMLGG histomolecular subtype and overall.

    Figures are written to ``<ckpt_name>/<model>_KM_<subtype>.png``.
    """
    def hazard2KMCurve(data, subtype, plot_gt=True):
        # Stratify into low/intermediate/high risk at the 33rd/66th hazard percentiles.
        p = np.percentile(data['Hazard'], [33, 66])
        # Degenerate cutoffs would collapse the middle stratum; nudge the lower one.
        if p[0] == p[1]: p[0] = 2.99997
        data.insert(0, 'grade_pred', [hazard2grade(hazard, p) for hazard in data['Hazard']])
        kmf_pred = lifelines.KaplanMeierFitter()
        kmf_gt = lifelines.KaplanMeierFitter()
        def get_name(model):
            # Map a model identifier to its legend display name.
            mode2name = {'pathgraphomic':'Pathomic F.', 'pathomic':'Pathomic F.', 'graphomic':'Pathomic F.', 'path':'Histology CNN', 'graph':'Histology GCN', 'omic':'Genomic SNN'}
            for mode in mode2name.keys():
                if mode in model: return mode2name[mode]
            return 'N/A'
        fig = plt.figure(figsize=(10, 10), dpi=600)
        ax = plt.subplot()
        censor_style = {'ms': 20, 'marker': '+'}
        # Low-risk stratum (green): dashed = ground-truth Grade II, solid = predicted low.
        if plot_gt:
            temp = data[data['Grade']==0]
            kmf_gt.fit(temp['Survival months']/365, temp['censored'], label="Grade II")
            kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='g', linewidth=3, ls='--', markerfacecolor='black', censor_styles=censor_style)
        temp = data[data['grade_pred']==0]
        kmf_pred.fit(temp['Survival months']/365, temp['censored'], label="%s (Low)" % get_name(model))
        kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c='g', linewidth=4, ls='-', markerfacecolor='black', censor_styles=censor_style)
        # Intermediate stratum (blue).
        if plot_gt:
            temp = data[data['Grade']==1]
            kmf_gt.fit(temp['Survival months']/365, temp['censored'], label="Grade III")
            kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='b', linewidth=3, ls='--', censor_styles=censor_style)
        temp = data[data['grade_pred']==1]
        kmf_pred.fit(temp['Survival months']/365, temp['censored'], label="%s (Int.)" % get_name(model))
        kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c='b', linewidth=4, ls='-', censor_styles=censor_style)
        # High-risk stratum (red); ODG is skipped because it has no Grade IV cases.
        if subtype != 'ODG':
            if plot_gt:
                temp = data[data['Grade']==2]
                kmf_gt.fit(temp['Survival months']/365, temp['censored'], label="Grade IV")
                kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='r', linewidth=3, ls='--', censor_styles=censor_style)
            temp = data[data['grade_pred']==2]
            kmf_pred.fit(temp['Survival months']/365, temp['censored'], label="%s (High)" % get_name(model))
            kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c='r', linewidth=4, ls='-', censor_styles=censor_style)
        ax.set_xlabel('')
        ax.set_ylim(0, 1)
        ax.set_yticks(np.arange(0, 1.001, 0.5))
        ax.tick_params(axis='both', which='major', labelsize=40)
        plt.legend(fontsize=24, prop=font_manager.FontProperties(family='Arial', style='normal', size=24))
        # Keep the legend only on the idhwt panel when ground truth is drawn.
        if subtype != 'idhwt_ATC' and plot_gt: ax.get_legend().remove()
        return fig
    data = getDataAggSurv_GBMLGG(ckpt_name=ckpt_name, model=model)
    # Per-subtype panels (only meaningful when ground-truth curves are drawn).
    for subtype in ['idhwt_ATC', 'idhmut_ATC', 'ODG']:
        if plot_gt:
            fig = hazard2KMCurve(data[data['Histomolecular subtype'] == subtype], subtype)
            fig.savefig(ckpt_name+'/%s_KM_%s.png' % (model, subtype), bbox_inches='tight')
            plt.close()
    # Whole-cohort panel.
    fig = hazard2KMCurve(data, 'all', plot_gt=plot_gt)
    fig.savefig(ckpt_name+'/%s_KM_%s%s.png' % (model, 'all', '' if plot_gt else '_nogt'), bbox_inches='tight')
    plt.close()
def makeKaplanMeierPlot_Baseline(ckpt_name='./checkpoints/TCGA_GBMLGG/surv_15_rnaseq/', model='Grade'):
    """Save KM curves for a clinical baseline stratification ('Grade' or
    'Histomolecular subtype'), overlaid on dashed ground-truth grade curves.

    Figure is written to ``<ckpt_name>/<model>_KM_all.png``.
    """
    def hazard2KMCurve(data, model):
        fig = plt.figure(figsize=(10, 10), dpi=600)
        ax = plt.subplot()
        censor_style = {'ms': 20, 'marker': '+'}
        kmf = lifelines.KaplanMeierFitter()
        kmf_gt = lifelines.KaplanMeierFitter()
        # Column values and matching legend labels for each supported baseline.
        baseline = {'Grade':[0,1,2],
                    'Histomolecular subtype':['ODG', 'idhmut_ATC', 'idhwt_ATC']}
        baseline_name = {'Grade':['Grade II', 'Grade III', 'Grade IV'],
                        'Histomolecular subtype':['Oligodendroglioma', 'IDHmut Astrocytoma', 'IDHwt Astrocytoma']}
        # Lowest-risk group (green): dashed ground truth vs solid baseline stratum.
        temp = data[data['Grade']==0]
        kmf_gt.fit(temp['Survival months']/365, temp['censored'], label="Grade II")
        kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='g', linewidth=3, ls='--', markerfacecolor='black', censor_styles=censor_style)
        temp = data[data[model]==baseline[model][0]]
        kmf.fit(temp['Survival months']/365, temp['censored'], label=baseline_name[model][0])
        kmf.plot(ax=ax, show_censors=True, ci_show=False, c='g', linewidth=4, ls='-', markerfacecolor='black', censor_styles=censor_style)
        # Middle group (blue).
        temp = data[data['Grade']==1]
        kmf_gt.fit(temp['Survival months']/365, temp['censored'], label="Grade III")
        kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='b', linewidth=3, ls='--', censor_styles=censor_style)
        temp = data[data[model]==baseline[model][1]]
        kmf.fit(temp['Survival months']/365, temp['censored'], label=baseline_name[model][1])
        kmf.plot(ax=ax, show_censors=True, ci_show=False, c='b', linewidth=4, ls='-', censor_styles=censor_style)
        # Highest-risk group (red).
        temp = data[data['Grade']==2]
        kmf_gt.fit(temp['Survival months']/365, temp['censored'], label="Grade IV")
        kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='r', linewidth=3, ls='--', censor_styles=censor_style)
        temp = data[data[model]==baseline[model][2]]
        kmf.fit(temp['Survival months']/365, temp['censored'], label=baseline_name[model][2])
        kmf.plot(ax=ax, show_censors=True, ci_show=False, c='r', linewidth=4, ls='-', censor_styles=censor_style)
        ax.set_xlabel('')
        ax.set_ylim(0, 1)
        ax.set_yticks(np.arange(0, 1.001, 0.5))
        ax.tick_params(axis='both', which='major', labelsize=40)
        plt.legend(fontsize=24, prop=font_manager.FontProperties(family='Arial', style='normal', size=24))
        return fig
    # Metadata/hazard table is reused from the fusion model's aggregation.
    data = getDataAggSurv_GBMLGG(ckpt_name=ckpt_name, model='pathgraphomic_fusion')
    fig = hazard2KMCurve(data, model)
    fig.savefig(ckpt_name+'/%s_KM_%s.png' % (model, 'all'), bbox_inches='tight')
### KIRC
def getDataAggSurv_KIRC(ckpt_name='./checkpoints/TCGA_KIRC/surv_15/', model='pathgraphomic_fusion',
                        split='test', use_rnaseq=True, agg_type='Hazard_mean', zscore=False):
    """Collect per-patient hazard predictions across the 15 KIRC CV splits.

    Predictions are registered against the split metadata, aggregated per
    patient (per ``agg_type``), tagged with their split index, concatenated,
    and joined with neoplasm histologic grade.

    Note: ``use_rnaseq`` is accepted for signature parity with the GBMLGG
    variant but is not used in this function.
    """
    data = []
    if 'cox' in model:
        # Cox baselines: just concatenate their saved prediction frames.
        for k in range(1,16):
            pred = pickle.load(open(ckpt_name+'/%s/%s_%d_pred_%s.pkl' % (model, model, k, split), 'rb'))
            data.append(pred)
        return pd.concat(data)
    else:
        data_cv = pickle.load(open('./data/TCGA_KIRC/splits/KIRC_st_1.pkl', 'rb'))
        data_cv_splits = data_cv['split']
        for k in range(1,16):
            pred = pickle.load(open(ckpt_name+'/%s/%s_%d_pred_%s.pkl' % (model, model, k, split), 'rb'))
            # pred layout: [hazard, survtime, censor, <unused>, grade]; drop slot 3.
            surv_all = pd.DataFrame(np.stack(np.delete(np.array(pred), 3))).T
            surv_all.columns = ['Hazard', 'OS_month', 'censored', 'Grade']
            data_cv_split_k = data_cv_splits[k]
            assert np.all(data_cv_split_k[split]['t'] == pred[1]) # Data is correctly registered
            all_dataset = data_cv['all_dataset']
            # Truncate barcodes to 12-char patient IDs (mutates the loaded frame
            # in place; idempotent across loop iterations).
            all_dataset.index = all_dataset.index.str[:12]
            patnames = data_cv_split_k[split]['x_patname']
            patnames = [pat[:12] for pat in patnames]
            all_dataset_regstrd = all_dataset.loc[patnames] # Subset of "all_datasets" (metadata) that is registered with "pred" (predictions)
            assert np.all(np.array(all_dataset_regstrd['censored']) == pred[2])
            all_dataset_regstrd.insert(loc=0, column='Hazard', value = np.array(surv_all['Hazard']))
            all_dataset_regstrd['TCGA ID'] = all_dataset_regstrd.index
            # Collapse sample-level hazards to one value per patient; keep agg_type.
            hazard_agg = all_dataset_regstrd.groupby('TCGA ID').agg({'Hazard': ['mean', 'median', max]})
            hazard_agg.columns = ["_".join(x) for x in hazard_agg.columns.ravel()]
            hazard_agg = hazard_agg[[agg_type]]
            hazard_agg.columns = ['Hazard']
            all_dataset_hazard = hazard_agg.join(all_dataset, how='inner')
            all_dataset_hazard['split'] = k
            if zscore: all_dataset_hazard['Hazard'] = scipy.stats.zscore(np.array(all_dataset_hazard['Hazard']))
            data.append(all_dataset_hazard)
        # Attach 'Neoplasm Histologic Grade' from the clinical TSV.
        data = addNeoplasmGrade(pd.concat(data))
        return data
def getPredAggSurv_KIRC(ckpt_name='./checkpoints/TCGA_KIRC/surv_15/', model='pathgraphomic_fusion',
                        split='test', use_rnaseq=True, agg_type='Hazard_mean', test_pats=None):
    """Return the c-index of a KIRC survival model for each of the 15 CV splits.

    Note: ``use_rnaseq`` and ``test_pats`` are accepted for signature parity
    with the GBMLGG variant but are not used in this function.
    """
    results = []
    if 'cox' in model:
        for k in range(1,16):
            pred = pickle.load(open(ckpt_name+'/%s/%s_%d_pred_%s.pkl' % (model, model, k, split), 'rb'))
            # Cox hazards are stored with opposite sign convention; negate for c-index.
            cin = CIndex_lifeline(-pred['Hazard'], pred['censored'], pred['OS_month'])
            results.append(cin)
        return results
    else:
        data_cv = pickle.load(open('./data/TCGA_KIRC/splits/KIRC_st_1.pkl', 'rb'))
        data_cv_splits = data_cv['split']
        for k in range(1,16):
            pred = pickle.load(open(ckpt_name+'/%s/%s_%d_pred_%s.pkl' % (model, model, k, split), 'rb'))
            # pred layout: [hazard, survtime, censor, <unused>, grade]; drop slot 3.
            surv_all = pd.DataFrame(np.stack(np.delete(np.array(pred), 3))).T
            surv_all.columns = ['Hazard', 'OS_month', 'censored', 'Grade']
            data_cv_split_k = data_cv_splits[k]
            assert np.all(data_cv_split_k[split]['t'] == pred[1]) # Data is correctly registered
            all_dataset = data_cv['all_dataset']
            #return all_dataset
            patnames = data_cv_split_k[split]['x_patname']
            patnames = [pat[:12] for pat in patnames]
            # NOTE(review): unlike getDataAggSurv_KIRC, the index is not truncated
            # to 12 chars here before .loc[patnames] — presumably it is already
            # patient-level in this pickle; verify against the data file.
            all_dataset_regstrd = all_dataset.loc[patnames] # Subset of "all_datasets" (metadata) that is registered with "pred" (predictions)
            assert np.all(np.array(all_dataset_regstrd['censored']) == pred[2])
            all_dataset_regstrd.insert(loc=0, column='Hazard', value = np.array(surv_all['Hazard']))
            all_dataset_regstrd['TCGA ID'] = all_dataset_regstrd.index
            # Collapse sample-level hazards to one value per patient; keep agg_type.
            hazard_agg = all_dataset_regstrd.groupby('TCGA ID').agg({'Hazard': ['mean', 'median', max]})
            hazard_agg.columns = ["_".join(x) for x in hazard_agg.columns.ravel()]
            hazard_agg = hazard_agg[[agg_type]]
            hazard_agg.columns = ['Hazard']
            all_dataset_hazard = addNeoplasmGrade(hazard_agg.join(all_dataset, how='inner'))
            cin = CIndex_lifeline(all_dataset_hazard['Hazard'], all_dataset_hazard['censored'], all_dataset_hazard['OS_month'])
            results.append(cin)
        return results
def hazard2grade(hazard, p):
    """Map *hazard* to a risk stratum index given ascending cutoffs *p*.

    Returns the index of the first cutoff strictly greater than *hazard*,
    or ``len(p)`` when no cutoff exceeds it.
    """
    return next((i for i, cutoff in enumerate(p) if hazard < cutoff), len(p))
def getPValAggSurv_KIRC_Binary(ckpt_name='./checkpoints/TCGA_KIRC/surv_15/', model='pathgraphomic_fusion', percentile=[50]):
    """Log-rank p-value between low- and high-hazard KIRC strata.

    Patients are split at the given hazard percentile (default: median).

    Returns:
        np.ndarray with a single p-value.
    """
    data = getDataAggSurv_KIRC(ckpt_name=ckpt_name, model=model)
    cutoffs = np.percentile(data['Hazard'], percentile)
    data.insert(0, 'grade_pred', [hazard2grade(h, cutoffs) for h in data['Hazard']])
    low = data[data['grade_pred'] == 0]
    high = data[data['grade_pred'] == 1]
    pval = logrank_test(durations_A=low['OS_month'], durations_B=high['OS_month'],
                        event_observed_A=low['censored'], event_observed_B=high['censored']).p_value
    return np.array([pval])
def getPValAggSurv_KIRC_Multi(ckpt_name='./checkpoints/TCGA_KIRC/surv_15/', model='pathgraphomic_fusion', percentile=[26,51,76]):
    """Log-rank p-values between adjacent KIRC hazard strata.

    Patients are split into four strata at the given hazard percentiles and
    each neighboring pair of strata is compared.

    Returns:
        np.ndarray of three p-values (low/mid-low, mid-low/mid-high, mid-high/high).
    """
    data = getDataAggSurv_KIRC(ckpt_name=ckpt_name, model=model)
    cutoffs = np.percentile(data['Hazard'], percentile)
    data.insert(0, 'grade_pred', [hazard2grade(h, cutoffs) for h in data['Hazard']])
    strata = [data[data['grade_pred'] == g] for g in range(4)]
    pvals = [logrank_test(durations_A=a['OS_month'], durations_B=b['OS_month'],
                          event_observed_A=a['censored'], event_observed_B=b['censored']).p_value
             for a, b in zip(strata[:-1], strata[1:])]
    return np.array(pvals)
def addNeoplasmGrade(data):
    """Inner-join KIRC neoplasm histologic grade from the cBioPortal clinical
    TSV onto *data*, dropping rows with a missing or 'GX' grade.

    The leading 'G' is stripped so grades become the strings '1'..'4'.
    """
    clinical = pd.read_csv('./data/TCGA_KIRC/kirc_tcga_pan_can_atlas_2018_clinical_data.tsv', sep='\t', index_col=1)
    clinical.index.name = None
    merged = data.join(clinical[['Neoplasm Histologic Grade']], how='inner')
    merged['Neoplasm Histologic Grade'] = merged['Neoplasm Histologic Grade'].str.lstrip('G')
    merged = merged[~merged['Neoplasm Histologic Grade'].isnull()]
    merged = merged[merged['Neoplasm Histologic Grade'] != 'X']
    return merged
def CI_pm(data, confidence=0.95):
    """Format the mean of *data* with its t-distribution confidence-interval
    half-width, e.g. ``'0.8251 ± 0.012'``."""
    values = 1.0 * np.array(data)
    count = len(values)
    mean = np.mean(values)
    sem = scipy.stats.sem(values)
    # Two-sided t critical value with count-1 degrees of freedom.
    half_width = sem * scipy.stats.t.ppf((1 + confidence) / 2., count - 1)
    return str("{0:.4f} ± ".format(mean) + "{0:.3f}".format(half_width))
def trainCox_KIRC(dataroot = './data/TCGA_KIRC/', ckpt_name='./checkpoints/TCGA_KIRC/surv_15/', model='cox_agegender', normalize=False, penalizer=1e-4):
    """Train Cox proportional-hazards baselines on KIRC clinical features.

    For each of the 15 predefined splits, fits a CoxPH model on the feature
    set selected by *model* ('cox_agegender' / 'cox_grade' / 'cox_all'),
    pickles per-split train/test predictions (with negated partial hazards in
    a 'Hazard' column) plus the c-index list, and prints summary statistics.
    """
    ### Creates Checkpoint Directory
    if not os.path.exists(ckpt_name): os.makedirs(ckpt_name)
    if not os.path.exists(os.path.join(ckpt_name, model)): os.makedirs(os.path.join(ckpt_name, model))
    clinical = pd.read_table(os.path.join(dataroot, './kirc_tcga_pan_can_atlas_2018_clinical_data.tsv'), index_col=2)
    clinical.index.name = None
    clinical = clinical[['Center of sequencing', 'Overall Survival Status', 'Overall Survival (Months)', 'Diagnosis Age', 'Sex', 'Neoplasm Histologic Grade']].copy()
    clinical = clinical.rename(columns={'Center of sequencing':'CoS', 'Overall Survival Status':'censored', 'Overall Survival (Months)':'OS_month', 'Diagnosis Age':'Age', 'Neoplasm Histologic Grade':'Grade'})
    clinical['Sex'] = clinical['Sex'].replace({'Male':0, 'Female': 1})
    # 'censored' actually stores the event indicator: 0 = alive (censored),
    # 1 = deceased (event observed) — hence the original "actually uncensored" remarks.
    clinical['censored'] = clinical['censored'].replace('LIVING', 0) # actually uncensored
    clinical['censored'] = clinical['censored'].replace('DECEASED', 1) # actually uncensored
    # Mark the training cohort by sequencing center (2 largest + 16 smallest centers).
    clinical['train'] = 0
    train_cohort = list(clinical['CoS'].value_counts().index[0:2]) + list(clinical['CoS'].value_counts().index[-16:])
    clinical.loc[clinical['CoS'].isin(train_cohort), 'train'] = 1
    clinical = clinical.sort_values(['train', 'CoS'], ascending=False)
    # Normalize grade to '1'..'4'; drop missing and 'GX' (ungradable) rows.
    clinical['Grade'] = clinical['Grade'].str.lstrip('G')
    clinical = clinical[~clinical['Grade'].isnull()]
    clinical = clinical[clinical['Grade'] != 'X']
    clinical = clinical.drop(['CoS'], axis=1)
    all_dataset = clinical
    # Strip the trailing sample suffix so barcodes match the split file's patient IDs.
    all_dataset.index = all_dataset.index.str[:-3]
    model_feats = {'cox_agegender':['OS_month', 'censored', 'Age', 'Sex'],
                   'cox_grade':['OS_month', 'censored', 'Grade'],
                   'cox_all':['OS_month', 'censored', 'Age', 'Sex', 'Grade']}
    cv_results = []
    cv_pvals = []
    splits = pd.read_csv(os.path.join(dataroot, 'kirc_splits.csv'), index_col=0)
    splits.columns = [str(k) for k in range(1, 16)]
    for k in range(1,16):
        feats = model_feats[model]
        pat_train = splits.index[splits[str(k)] == 'Train']
        pat_test = splits.index[splits[str(k)] == 'Test']
        train = all_dataset.loc[pat_train.intersection(all_dataset.index)]
        test = all_dataset.loc[pat_test.intersection(all_dataset.index)]
        if normalize:
            # Scaler is fit on train only, then applied to both sets.
            scaler = preprocessing.StandardScaler().fit(train[feats])
            train[feats] = scaler.transform(train[feats])
            test[feats] = scaler.transform(test[feats])
        cph = CoxPHFitter(penalizer=penalizer)
        cph.fit(train[feats], duration_col='OS_month', event_col='censored', show_progress=False)
        # Negate partial hazards so higher values mean longer survival for the c-index.
        cin = concordance_index(test['OS_month'], -cph.predict_partial_hazard(test[feats]), test['censored'])
        pval = cox_log_rank(np.array(-cph.predict_partial_hazard(test[feats])).reshape(-1),
                            np.array(test['censored']).reshape(-1),
                            np.array(test['OS_month']).reshape(-1))
        cv_results.append(cin)
        cv_pvals.append(pval)
        train.insert(loc=0, column='Hazard', value=-cph.predict_partial_hazard(train))
        test.insert(loc=0, column='Hazard', value=-cph.predict_partial_hazard(test))
        pickle.dump(train, open(os.path.join(ckpt_name, model, '%s_%s_pred_train.pkl' % (model, k)), 'wb'))
        pickle.dump(test, open(os.path.join(ckpt_name, model, '%s_%s_pred_test.pkl' % (model, k)), 'wb'))
    pickle.dump(cv_results, open(os.path.join(ckpt_name, model, '%s_results.pkl' % model), 'wb'))
    print("C-Indices across Splits", cv_results)
    print("Average C-Index: %s" % CI_pm(cv_results))
    print("Average P-Value: " + str(np.mean(cv_pvals)))
def getHazardHistogramPlot_KIRC(ckpt_name='./checkpoints/TCGA_KIRC/surv_15/', model='pathgraphomic_fusion',
                                split='test', zscore=True, agg_type='Hazard_mean', c=[(-1.5, -0.75), (-0.75, 0), (0, 0.75), (0.75, 1.5)]):
    """Plot overlaid KIRC hazard histograms (short vs. long survivors) and
    summarize the grade composition of each hazard density region in *c*.

    Uncensored patients are split at 3.5-year survival; histograms are saved
    to ``<ckpt_name>/<model>_HHP.png``. Returns a DataFrame with one row per
    (low, high) region in *c* holding grade 1-4 percentages.
    """
    data = getDataAggSurv_KIRC(ckpt_name=ckpt_name, model=model, split=split, use_rnaseq=True, agg_type=agg_type, zscore=zscore)
    norm = True
    fig, ax = plt.subplots(dpi=600)
    # Uncensored (event observed) patients only; split at 3.5-year survival.
    low = data[data['OS_month'] <= 12*3.5]
    low = low[low['censored'] == 1]
    high = data[data['OS_month'] > 12*3.5]
    high = high[high['censored'] == 1]
    sns.distplot(low['Hazard'], bins=15, kde=False, norm_hist=norm,
                 #kde_kws={"color": "k", "lw": 2},
                 hist_kws={'histtype':'stepfilled', "linewidth": 1, "alpha": 0.5, "color": "r"}, ax=ax)
    sns.distplot(high['Hazard'], bins=15, kde=False, norm_hist=norm,
                 #kde_kws={"color": "k", "lw": 2},
                 hist_kws={'histtype':'stepfilled', "linewidth": 1, "alpha": 0.5, "color": "b"}, ax=ax)
    ax.set_xlabel('')
    ax.spines["right"].set_visible(False)
    ax.spines["top"].set_visible(False)
    ax.tick_params(axis='y', which='both', labelsize=15)
    ax.tick_params(axis='x', which='both', labelsize=15)
    ax.set_xticks(np.arange(-1.5, 1.51, 0.5))
    plt.xlim([-1.75, 1.75])
    if norm:
        ax.set_yticks(np.arange(0, 2.1, 1))
        plt.ylim([0, 2])
    fig.savefig(ckpt_name+'/%s_HHP.png' % (model))
    # Grade composition of each hazard density region.
    clusters = []
    for i in range(len(c)):
        cluster = data[data['Hazard'] > c[i][0]]
        cluster = cluster[cluster['Hazard'] < c[i][1]]
        cluster_size = cluster.shape[0]
        cluster_I = (cluster['Neoplasm Histologic Grade'] == '1').sum() / cluster_size
        cluster_II = (cluster['Neoplasm Histologic Grade'] == '2').sum() / cluster_size
        cluster_III = (cluster['Neoplasm Histologic Grade'] == '3').sum() / cluster_size
        cluster_IV = (cluster['Neoplasm Histologic Grade'] == '4').sum() / cluster_size
        cluster_summary = [cluster_I, cluster_II, cluster_III, cluster_IV]
        clusters.append(cluster_summary)
    cluster_results = pd.DataFrame(clusters)
    cluster_results.index = ['%0.2f < Hazard < %0.2f' % c[i] for i in range(len(c))]
    cluster_results.index.name = 'Density Region'
    cluster_results.columns = ['Grade ' + str(g) + ' (%)' for g in range(1, 5)]
    cluster_results *= 100
    pd.options.display.float_format = '{:.2f}'.format
    return cluster_results
def hazard2grade(hazard, p):
    # NOTE(review): duplicate redefinition of hazard2grade (also defined earlier
    # in this file with identical behavior); this one wins at import time.
    """Return the index of the first cutoff in *p* exceeding *hazard*, or
    len(p) when no cutoff does."""
    for i in range(len(p)):
        if hazard < p[i]:
            return i
    return len(p)
def makeKaplanMeierPlot_KIRC_Binary(ckpt_name='./checkpoints/TCGA_KIRC/surv_15/', model='omic', split='test', zscore=False, agg_type='Hazard_mean', percentile=[50]):
    """Save KM curves for a binary (low/high) KIRC hazard stratification.

    Intended for a single cutoff (default: median); the "(Low)" curves are
    drawn once per cutoff, then a single "(High)" curve for the top stratum.
    """
    def hazard2KMCurve(data, percentile):
        p = np.percentile(data['Hazard'], percentile)
        data.insert(0, 'grade_pred', [hazard2grade(hazard, p) for hazard in data['Hazard']])
        kmf_pred = lifelines.KaplanMeierFitter()
        kmf_gt = lifelines.KaplanMeierFitter()
        def get_name(model):
            # Map a model identifier to its legend display name.
            mode2name = {'pathgraphomic':'Pathomic F.', 'pathomic':'Pathomic F.', 'graphomic':'Pathomic F.', 'path':'Histology CNN', 'graph':'Histology GCN', 'omic':'Genomic SNN'}
            for mode in mode2name.keys():
                if mode in model: return mode2name[mode]
            return 'N/A'
        fig = plt.figure(figsize=(10, 10), dpi=600)
        ax = plt.subplot()
        censor_style = {'ms': 20, 'marker': '+'}
        # All strata below the top one are drawn in blue with the "(Low)" label;
        # with the default single cutoff this loop runs exactly once.
        for i in range(len(p)):
            temp = data[data['grade_pred']==i]
            kmf_pred.fit(temp['OS_month']/12, temp['censored'], label="%s (Low)" % get_name(model))
            kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c='b', linewidth=4, ls='-', markerfacecolor='black', censor_styles=censor_style)
        # Top stratum in red.
        temp = data[data['grade_pred']==len(p)]
        kmf_pred.fit(temp['OS_month']/12, temp['censored'], label="%s (High)" % get_name(model))
        kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c='r', linewidth=4, ls='-', censor_styles=censor_style)
        ax.set_xlabel('')
        ax.set_ylim(0, 1)
        ax.set_yticks(np.arange(0, 1.001, 0.5))
        ax.tick_params(axis='both', which='major', labelsize=40)
        plt.legend(fontsize=32, prop=font_manager.FontProperties(family='Arial', style='normal', size=32))
        return fig
    data = getDataAggSurv_KIRC(ckpt_name=ckpt_name, model=model)
    fig = hazard2KMCurve(data, percentile=percentile)
    # NOTE(review): the filename prefix is derived from a hard-coded default path,
    # not from ckpt_name — wrong prefix when a different checkpoint dir is passed.
    fig.savefig(ckpt_name+'/%s_%s_KM_%s.png' % ('./checkpoints/TCGA_KIRC/surv_15/'.split('/')[2], model, 'all'), bbox_inches='tight')
    plt.close()
def makeKaplanMeierPlot_KIRC_Multi(ckpt_name='./checkpoints/TCGA_KIRC/surv_15/', model='omic', split='test', zscore=False, agg_type='Hazard_mean', percentile=(25, 50, 75)):
    """Plot quartile-stratified Kaplan-Meier curves for TCGA-KIRC.

    Predicted-hazard strata (solid lines) are overlaid with the ground-truth
    'Neoplasm Histologic Grade' groups (dashed lines); the figure is saved
    under ``ckpt_name``.

    Args:
        ckpt_name: checkpoint directory; also used for the output filename.
        model: model key used to build legend labels.
        split, zscore, agg_type: kept for interface compatibility (unused here).
        percentile: hazard percentiles defining the strata boundaries.
    """
    def hazard2KMCurve(data, percentile):
        p = np.percentile(data['Hazard'], percentile)
        # Bug fix: a leftover debug `print(p); return None` here made the
        # entire plotting body unreachable.
        data.insert(0, 'grade_pred', [hazard2grade(hazard, p) for hazard in data['Hazard']])
        kmf_pred = lifelines.KaplanMeierFitter()
        kmf_gt = lifelines.KaplanMeierFitter()

        def get_name(model):
            # Map a model key to its display name for the legend.
            mode2name = {'pathgraphomic': 'Pathomic F.', 'pathomic': 'Pathomic F.', 'graphomic': 'Pathomic F.', 'path': 'Hist. CNN', 'graph': 'Histology GCN', 'omic': 'Genomic SNN'}
            for mode in mode2name.keys():
                if mode in model: return mode2name[mode]
            return 'N/A'

        fig = plt.figure(figsize=(10, 10), dpi=600)
        ax = plt.subplot()
        censor_style = {'ms': 20, 'marker': '+'}
        colors = ['g', 'b', 'm', 'r']
        stage = ['G1', 'G2', 'G3', 'G4']
        pred = ['<25%', '25-50%', '50-75%', '>75%']
        for i in range(len(p)):
            # Ground-truth grade curve (dashed) ...
            temp = data[data['Neoplasm Histologic Grade'] == str(i + 1)]
            kmf_gt.fit(temp['OS_month'] / 12, temp['censored'], label=stage[i])
            kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c=colors[i], linewidth=3, ls='--', markerfacecolor='black', censor_styles=censor_style)
            # ... and the matching predicted-hazard stratum (solid).
            temp = data[data['grade_pred'] == i]
            kmf_pred.fit(temp['OS_month'] / 12, temp['censored'], label="%s (%s)" % (get_name(model), pred[i]))
            kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c=colors[i], linewidth=4, ls='-', markerfacecolor='black', censor_styles=censor_style)
        # Highest stratum. Bug fix: stage/pred were previously indexed with
        # the stale loop variable `i` instead of the last group index.
        top = len(p)
        temp = data[data['Neoplasm Histologic Grade'] == str(4)]
        kmf_gt.fit(temp['OS_month'] / 12, temp['censored'], label=stage[top])
        kmf_gt.plot(ax=ax, show_censors=True, ci_show=False, c='r', linewidth=3, ls='--', markerfacecolor='black', censor_styles=censor_style)
        temp = data[data['grade_pred'] == top]
        kmf_pred.fit(temp['OS_month'] / 12, temp['censored'], label="%s (%s)" % (get_name(model), pred[top]))
        kmf_pred.plot(ax=ax, show_censors=True, ci_show=False, c='r', linewidth=4, ls='-', censor_styles=censor_style)
        ax.set_xlabel('')
        ax.set_ylim(0, 1)
        ax.set_yticks(np.arange(0, 1.001, 0.5))
        ax.tick_params(axis='both', which='major', labelsize=40)
        plt.legend(fontsize=16, prop=font_manager.FontProperties(family='Arial', style='normal', size=16))
        # ax.get_legend().remove()
        return fig

    data = getDataAggSurv_KIRC(ckpt_name=ckpt_name, model=model)
    fig = hazard2KMCurve(data, percentile=percentile)
    # Bug fixes: a debug `return None` previously skipped saving; the
    # filename also split a hard-coded literal instead of ckpt_name.
    fig.savefig(ckpt_name + '/%s_%s_KM_%s.png' % (ckpt_name.split('/')[2] + "_Multi", model, 'all'), bbox_inches='tight')
    plt.close()
| 59,192
| 53.55576
| 208
|
py
|
PathomicFusion
|
PathomicFusion-master/CellGraph/pixelcnn.py
|
import torch.nn as nn
from layers_custom import maskConv0, MaskConvBlock
import torch
class MaskCNN(nn.Module):
    """PixelCNN-style fully convolutional aggregator.

    A down-shifted 7x7 masked convolution restricts each output neuron's
    receptive field to rows above it; ten gated residual masked-conv blocks
    follow, and a 1x1 convolution projects back to ``n_channel`` features.
    Spatial dimensions are preserved throughout.
    """

    def __init__(self, n_channel=1024, h=128):
        super(MaskCNN, self).__init__()
        # Large masked entry convolution with image down-shift.
        self.MaskConv0 = maskConv0(n_channel, h, k_size=7, stride=1, pad=3)
        # Stack of ten gated residual masked-conv blocks.
        self.MaskConv = nn.Sequential(
            *[MaskConvBlock(h, k_size=3, stride=1, pad=1) for _ in range(10)])
        # 1x1 conv to upsample back to the requested channel count.
        self.out = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(h, n_channel, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(n_channel),
            nn.ReLU()
        )

    def forward(self, x):
        """Map [batch, channel, height, width] -> tensor of the same shape."""
        return self.out(self.MaskConv(self.MaskConv0(x)))
if __name__ == '__main__':
    # Smoke test for the masked CNN.
    from torchsummary import summary
    # Bug fix: the class defined above is MaskCNN; `PixelCNN` was undefined.
    model = MaskCNN(1024, 128)
    summary(model, (1024, 7, 7))
    x = torch.rand(2, 1024, 7, 7)
    x = model(x)
    print(x.shape)
| 1,544
| 27.090909
| 149
|
py
|
PathomicFusion
|
PathomicFusion-master/CellGraph/resnet.py
|
'''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight)
class LambdaLayer(nn.Module):
    """Adapter that lifts a plain callable into an ``nn.Module``."""

    def __init__(self, lambd):
        """Store *lambd*, the function applied in ``forward``."""
        super(LambdaLayer, self).__init__()
        self._fn = lambd

    def forward(self, x):
        # Delegate straight to the wrapped callable.
        return self._fn(x)
class BasicBlock(nn.Module):
    """CIFAR ResNet basic block: two 3x3 convs with BN and a shortcut."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        needs_projection = stride != 1 or in_planes != planes
        if needs_projection:
            if option == 'A':
                # Option A of the CIFAR ResNet paper: spatial subsampling
                # plus zero-padding of the channel dimension.
                self.shortcut = LambdaLayer(
                    lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes // 4, planes // 4), "constant", 0))
            elif option == 'B':
                # Option B: learned 1x1 projection.
                self.shortcut = nn.Sequential(
                    nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(self.expansion * planes),
                )

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + self.shortcut(x)
        return F.relu(y)
class ResNet(nn.Module):
    """CIFAR-style ResNet backbone: conv stem plus three residual stages.

    NOTE(review): ``self.linear`` is created but never applied in ``forward``,
    so the network returns pooled 64-dim features rather than class logits.
    Confirm this is intentional (feature extractor) before relying on it.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        # Three stages; spatial resolution halves at stages 2 and 3.
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_classes)
        self.apply(_weights_init)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage may downsample; the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Global average pool over the remaining spatial extent, then flatten.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        return out
def resnet20():
    """20-layer CIFAR ResNet (~0.27M params)."""
    return ResNet(BasicBlock, [3] * 3)


def resnet32():
    """32-layer CIFAR ResNet (~0.46M params)."""
    return ResNet(BasicBlock, [5] * 3)


def resnet44():
    """44-layer CIFAR ResNet (~0.66M params)."""
    return ResNet(BasicBlock, [7] * 3)


def resnet56():
    """56-layer CIFAR ResNet (~0.85M params)."""
    return ResNet(BasicBlock, [9] * 3)


def resnet110():
    """110-layer CIFAR ResNet (~1.7M params)."""
    return ResNet(BasicBlock, [18] * 3)


def resnet1202():
    """1202-layer CIFAR ResNet (~19.4M params)."""
    return ResNet(BasicBlock, [200] * 3)
def test(net):
    """Print the trainable-parameter count and weight-layer count of *net*."""
    import numpy as np
    trainable = [p for p in net.parameters() if p.requires_grad]
    total_params = sum(np.prod(p.data.numpy().shape) for p in trainable)
    print("Total number of params", total_params)
    # Count only weight tensors (rank > 1), i.e. skip biases.
    print("Total layers", len([p for p in trainable if len(p.data.size()) > 1]))
if __name__ == "__main__":
    # Smoke-test every exported constructor: build it and report its size.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            # Look up the factory by name in module globals and size-check it.
            test(globals()[net_name]())
            print()
| 4,971
| 29.881988
| 120
|
py
|
PathomicFusion
|
PathomicFusion-master/CellGraph/model.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from resnet_custom import *
import pdb
import math
from pixelcnn import MaskCNN
device=torch.device("cuda" if torch.cuda.is_available() else "cpu")
def initialize_weights(module):
    """Kaiming-initialize conv/linear weights and reset norm layers.

    args:
        module: any pytorch module with trainable parameters
    """
    for layer in module.modules():
        if isinstance(layer, nn.Conv2d):
            nn.init.kaiming_normal_(layer.weight, nonlinearity='relu')
            if layer.bias is not None:
                layer.bias.data.zero_()
        elif isinstance(layer, nn.Linear):
            nn.init.kaiming_normal_(layer.weight, mode='fan_out', nonlinearity='relu')
            layer.bias.data.zero_()
        elif isinstance(layer, nn.BatchNorm2d):
            nn.init.constant_(layer.weight, 1)
            nn.init.constant_(layer.bias, 0)
class CPC_model(nn.Module):
    """Contrastive Predictive Coding over a 7x7 grid of image patches.

    Encodes each 64x64 patch with a ResNet, aggregates context with a masked
    CNN, and predicts encodings of rows below the context row; trained with
    an InfoNCE loss.
    """

    def __init__(self, input_size = 1024, hidden_size = 128, k = 3, ln = False):
        """
        args:
            input_size: input size to autoregresser (encoding size)
            hidden_size: number of hidden units in the MaskedCNN aggregator
            k: prediction length (number of future rows to predict)
            ln: if True, use the LayerNorm ResNet-50 encoder variant
        """
        super(CPC_model, self).__init__()
        ### Settings
        self.seq_len = 49 # 7 x 7 grid of overlapping 64 x 64 patches extracted from each 256 x 256 image
        self.k = k
        self.input_size = input_size
        self.hidden_size=hidden_size
        ### Networks
        if ln:
            self.encoder = resnet50_ln(pretrained=False)
        else:
            self.encoder = resnet50(pretrained=False)
        self.reg = MaskCNN(n_channel=self.input_size, h=self.hidden_size)
        network_pred = [nn.Linear(input_size, input_size) for i in range(self.k)] # an independent linear layer predicts each future row
        self.network_pred= nn.ModuleList(network_pred)
        # initialize linear network and context network
        initialize_weights(self.network_pred)
        initialize_weights(self.reg)
        ### Activation functions
        self.softmax = nn.Softmax(dim=1)
        self.lsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        """Compute the InfoNCE loss and per-step prediction accuracy.

        Args:
            x: patch batch of shape [bs * 7 * 7, 3, 64, 64].
        Returns:
            (nce, accuracy): scalar loss tensor and np.array of k accuracies.
        """
        # input = [bs * 7 * 7, 3, 64, 64]
        # compute batch_size
        bs = x.size(0) // (self.seq_len)
        rows = int(math.sqrt(self.seq_len))
        cols = int(math.sqrt(self.seq_len))
        # compute latent representation for each patch
        z = self.encoder(x)
        # z.shape: [bs * 7 * 7, 1024]
        # reshape z into feature grid: [bs, 7, 7, 1024]
        z = z.contiguous().view(bs, rows, cols, self.input_size)
        device = z.device
        # randomly draw a row to predict what is k rows below it, using information in current row and above
        if self.training:
            pred_id = torch.randint(rows - self.k, size=(1,)).long() # low is 0, high is 3 (predicts row 4, 5, 6)
        else:
            # deterministic row at evaluation time
            pred_id = torch.tensor([3]).long()
        # feature predictions for the next k rows e.g. pred[i] is [bs * cols, 1024] for i in k
        pred = [torch.empty(bs * cols, self.input_size).float().to(device) for i in range(self.k)]
        # ground truth encodings for the next k rows e.g. encode_samples[i] is [bs * cols, 1024] for i in k
        encode_samples = [torch.empty(bs * cols, self.input_size).float().to(device) for i in range(self.k)]
        for i in np.arange(self.k):
            # add ground truth encodings
            start_row = pred_id.item()+i+1
            encode_samples[i] = z[:,start_row, :, :].contiguous().view(bs * cols, self.input_size)
        # reshape feature grid to channel first (required by Pytorch convolution convention)
        z = z.permute(0, 3, 1, 2)
        # z.shape: from [bs, 7, 7, 1024] --> [bs, 1024, 7, 7]
        # apply aggregation to compute context
        output = self.reg(z)
        # reg is fully convolutional --> output size is [bs, 1024, 7, 7]
        output = output.permute(0, 2, 3, 1) # reshape back to feature grid
        # output.shape: [bs, row, col, 1024]
        # context for each patch in the row
        c_t = output[:,pred_id + 1,:, :]
        # c_t.shape: [bs, 1, 7, 1024]
        # reshape for linear classification:
        c_t = c_t.contiguous().view(bs * cols, self.input_size)
        # c_t.shape: [bs * cols, 1024]
        # linear prediction: Wk*c_t
        for i in np.arange(0, self.k):
            if type(self.network_pred) == nn.DataParallel:
                pred[i] = self.network_pred.module[i](c_t)
            else:
                pred[i] = self.network_pred[i](c_t) # e.g. size [bs * cols, 1024]
        nce = 0 # average over prediction length, cols, and batch
        accuracy = np.zeros((self.k,))
        for i in np.arange(0, self.k):
            """
            goal: can network correctly match predicted features with ground truth features among negative targets
            i.e. match z_i+k,j with W_k * c_i,j
            postivie target: patch with the correct groundtruth encoding
            negative targets: patches with wrong groundtruth encodings (sampled from other patches in the same image, or other images in the minibatch)
            1) dot product for each k to obtain raw prediction logits
               total = (a_ij) = [bs * col, bs * col], where a_ij is the logit of ith patch prediction matching jth patch encoding
            2) apply softmax along each row to get probability that ith patch prediction matches jth patch encoding
               we want ith patch prediction to correctly match ith patch encoding, therefore target has 1s along diagnol, and 0s off diagnol
            3) we take the argmax along softmaxed rows to get the patch prediction for the ith patch, this value should be i
            4) compute nce loss as the cross-entropy of classifying the positive sample correctly (sum of logsoftmax along diagnol)
            5) normalize loss by batchsize and k and number of patches in a row
            """
            total = torch.mm(pred[i], torch.transpose(encode_samples[i],0,1)) # e.g. size [bs * col, bs * col]
            accuracy[i] = torch.sum(torch.eq(torch.argmax(self.softmax(total), dim=1), torch.arange(0, bs * cols).to(device))).item()
            accuracy[i] /= 1. * (bs * cols)
            nce += torch.sum(torch.diag(self.lsoftmax(total))) # nce is a tensor
        nce /= -1. * bs * cols * self.k
        # accuracy = 1.*correct.item() / (bs * cols * self.k)
        return nce, np.array(accuracy)
def cropdata(data, num_channels=3, kernel_size=64, stride=32):
    """Slice images into overlapping square patches.

    Accepts a (C, H, W) image or an (N, C, H, W) batch and returns a
    (num_patches, num_channels, kernel_size, kernel_size) tensor whose
    patches are taken in row-major grid order with the given stride
    (default: 64x64 windows with 32-pixel overlap).
    """
    batch = data.unsqueeze(0) if data.dim() == 3 else data
    # Sliding windows along height then width: (N, C, rows, cols, k, k).
    patches = batch.unfold(2, kernel_size, stride).unfold(3, kernel_size, stride)
    # Bring the grid axes before the channel axis, then flatten the grid.
    patches = patches.permute(0, 2, 3, 1, 4, 5)
    return patches.contiguous().view(-1, num_channels, kernel_size, kernel_size)
if __name__ == '__main__':
    # Smoke test: crop a random batch into 7x7 patch grids and run CPC once.
    torch.set_printoptions(threshold=1e6)
    x = torch.rand(2, 3, 256, 256)
    x = cropdata(x)
    print(x.shape)
    model = CPC_model(1024, 256)
    nce, accuracy = model(x)
| 6,484
| 32.427835
| 142
|
py
|
PathomicFusion
|
PathomicFusion-master/CellGraph/layers_custom.py
|
import torch
import torch.nn as nn
import pdb
def down_shift(x, pad=None):
    """Shift a (N, C, H, W) tensor down one row, zero-filling the top row.

    The bottom row is dropped so the spatial size is preserved; *pad* may
    supply an alternative padding module.
    """
    height = int(x.size(2))
    # Drop the last row before padding so H stays constant.
    trimmed = x[:, :, :height - 1, :]
    # ZeroPad2d takes (left, right, top, bottom): one zero row on top.
    padder = nn.ZeroPad2d((0, 0, 1, 0)) if pad is None else pad
    return padder(trimmed)
class MaskedConv2d(nn.Conv2d):
    """Conv2d whose kernel rows strictly below the center are zeroed.

    Mask layout (H x W kernel):
        rows 0 .. H//2   -> 1 (visible)
        rows H//2+1 ..   -> 0 (masked)
    so each output neuron only sees pixels at or above its own row. With
    *use_down_shift* the input is shifted down one row first, which also
    excludes the current row from the receptive field.
    """

    def __init__(self, c_in, c_out, k_size, stride, pad, use_down_shift=False):
        super(MaskedConv2d, self).__init__(
            c_in, c_out, k_size, stride, pad, bias=False)
        _, _, kh, _ = self.weight.size()
        mask = torch.ones_like(self.weight.data)
        mask[:, :, kh // 2 + 1:] = 0
        self.register_buffer('mask', mask)
        self.use_down_shift = use_down_shift

    def forward(self, x):
        # Re-apply the mask every call so gradient updates cannot unmask weights.
        self.weight.data *= self.mask
        inp = down_shift(x) if self.use_down_shift else x
        return super(MaskedConv2d, self).forward(inp)
def maskConv0(c_in=3, c_out=256, k_size=7, stride=1, pad=3):
    """First PixelCNN layer: down-shifted masked conv -> BN -> gated activation.

    The conv emits 2*c_out channels because Gate halves them again.
    """
    return nn.Sequential(
        MaskedConv2d(c_in, 2 * c_out, k_size, stride, pad, use_down_shift=True),
        nn.BatchNorm2d(2 * c_out),
        Gate(),
    )
class Gate(nn.Module):
    """Gated activation: split channels in half, return tanh(a) * sigmoid(b)."""

    def __init__(self):
        super(Gate, self).__init__()

    def forward(self, x):
        # First half carries content, second half gates it.
        content, gate = torch.chunk(x, 2, dim=1)
        return torch.tanh(content) * torch.sigmoid(gate)
class MaskConvBlock(nn.Module):
    """Residual gated masked-conv block: MaskedConv2d -> BN -> Gate, plus skip."""

    def __init__(self, h=128, k_size=3, stride=1, pad=1):
        super(MaskConvBlock, self).__init__()
        # Conv doubles channels; Gate halves them back to h.
        self.net = nn.Sequential(
            MaskedConv2d(h, 2 * h, k_size, stride, pad),
            nn.BatchNorm2d(2 * h),
            Gate(),
        )

    def forward(self, x):
        # Residual connection around the gated masked convolution.
        return self.net(x) + x
if __name__ == '__main__':
    # Demonstration of the PixelCNN "blind spot": compare plain convolution,
    # masked convolution, and the down-shift trick by tracking how a single
    # hot pixel propagates through repeated 3x3 convolutions.
    def conv(x, kernel):
        return nn.functional.conv2d(x, kernel, padding=1)
    x = torch.ones((1, 1, 5, 5)) * 0.1
    x[:,:,1,0] = 1000  # marker pixel to trace through the receptive field
    print("blindspot experiment")
    normal_kernel = torch.ones(1, 1, 3, 3)
    mask_kernel = torch.zeros(1, 1, 3, 3)
    mask_kernel[:,:,0,:] = 1  # type-A mask: only the row above is visible
    mask_b = mask_kernel.clone()
    mask_b[:,:,1,1] = 1  # type-B mask: also sees the center position
    # mask_kernel[:,:,1,1] = 1
    print("unmasked kernel:", "\n",normal_kernel.squeeze(), "\n")
    print("masked kernel:", "\n", mask_kernel.squeeze(), "\n")
    print("normal conv")
    print("orig image", "\n", x.squeeze(), "\n")
    y = conv(x, normal_kernel)
    print(y[:,0, :,:], "\n")
    y = conv(y, normal_kernel)
    print(y[:,0, :,:], "\n")
    print("with mask")
    print("orig image", "\n", x.squeeze(), "\n")
    y = conv(x, mask_kernel)
    print(y[:,0, :,:], "\n")
    y = conv(y, mask_b)
    print(y[:,0, :,:], "\n")
    y = conv(y, mask_b)
    print(y[:,0, :,:],"\n")
    print("with down_shift")
    print("orig image", x.squeeze(), "\n")
    # NOTE: c_kernel aliases mask_kernel, so this also mutates mask_kernel.
    c_kernel = mask_kernel
    c_kernel[:,:,1,:] = 1
    print("custom kernel:", "\n", c_kernel.squeeze(), "\n")
    y = conv(down_shift(x), c_kernel)
    print(y[:,0, :,:],"\n")
    y = conv(y, c_kernel)
    print(y[:,0, :,:],"\n")
    y = conv(y, c_kernel)
    print(y[:,0, :,:],"\n")
    y = conv(y, c_kernel)
    print(y[:,0, :,:],"\n")
| 3,879
| 28.172932
| 80
|
py
|
PathomicFusion
|
PathomicFusion-master/CellGraph/resnet_custom.py
|
# modified from Pytorch official resnet.py
# oops
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch
from torchsummary import summary
import torch.nn.functional as F
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
# Torchvision pretrained-weight URLs, consumed by neq_load() for partial loading.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Norm-free basic residual block: two 3x3 convs with optional downsample.

    BatchNorm is deliberately omitted in this variant (see module header).
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity shortcut, projected when shapes differ.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.conv1(x))
        out = self.conv2(out)
        out = out + identity
        return self.relu(out)
class Bottleneck(nn.Module):
    """Norm-free 1x1 -> 3x3 -> 1x1 bottleneck block with 4x channel expansion.

    BatchNorm is deliberately omitted in this variant (see module header).
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity shortcut, projected when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.conv1(x))
        out = self.relu(self.conv2(out))
        out = self.conv3(out)
        out = out + shortcut
        return self.relu(out)
class LayerNorm(nn.Module):
    """Parameter-free layer norm over every dimension except the batch axis."""

    def __init__(self):
        super(LayerNorm, self).__init__()

    def forward(self, x):
        # Normalize across all non-batch dimensions of x.
        normalized_shape = x.size()[1:]
        return F.layer_norm(x, normalized_shape)
class Bottleneck_LN(nn.Module):
    """Bottleneck block that layer-normalizes after every conv (and a projected shortcut)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck_LN, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.ln = LayerNorm()
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        out = self.relu(self.ln(self.conv1(x)))
        out = self.relu(self.ln(self.conv2(out)))
        out = self.ln(self.conv3(out))
        if self.downsample is not None:
            # The projected shortcut is layer-normalized as well.
            shortcut = self.ln(self.downsample(x))
        else:
            shortcut = x
        out = out + shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """Truncated norm-free ResNet encoder (stem + stages 1-3 + global pooling).

    Stage 4 and the classification head of the reference implementation are
    removed; ``forward`` returns one flat feature vector per image.
    """

    def __init__(self, block, layers
                 # num_classes=1000
                 ):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # He-initialize every conv. The BatchNorm branch is kept for safety,
        # though this variant builds no BatchNorm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack *blocks* residual units; only the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
            )
        units = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            units.append(block(self.inplanes, planes))
        return nn.Sequential(*units)

    def forward(self, x):
        x = self.maxpool(self.relu(self.conv1(x)))
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(x)
        return x.view(x.size(0), -1)
def resnet18(pretrained=False, **kwargs):
    """Construct a ResNet-18 encoder.

    Args:
        pretrained (bool): If True, partially load ImageNet weights via neq_load.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    return neq_load(model, 'resnet18') if pretrained else model


def resnet34(pretrained=False, **kwargs):
    """Construct a ResNet-34 encoder.

    Args:
        pretrained (bool): If True, partially load ImageNet weights via neq_load.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    return neq_load(model, 'resnet34') if pretrained else model


def resnet50(pretrained=False, **kwargs):
    """Construct a ResNet-50 encoder.

    Args:
        pretrained (bool): If True, partially load ImageNet weights via neq_load.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    return neq_load(model, 'resnet50') if pretrained else model
def resnet50_ln(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model with LayerNorm bottleneck blocks.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    Fix: the body forwarded ``**kwargs`` that the signature never declared,
    so every call raised NameError; the parameter is now accepted (matching
    the other resnet factory functions in this module).
    """
    model = ResNet(Bottleneck_LN, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model = neq_load(model, 'resnet50')
    return model
def resnet101(pretrained=False, **kwargs):
    """Construct a ResNet-101 encoder.

    Args:
        pretrained (bool): If True, partially load ImageNet weights via neq_load.
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    return neq_load(model, 'resnet101') if pretrained else model
def resnet101_wide(pretrained=False, ln=False):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet

    NOTE(review): ResNet_Wide / ResNet_Wide_LN / Bottleneck_Wide /
    Bottleneck_Wide_LN are not defined in this module; calling this as-is
    raises NameError. Presumably they live elsewhere — verify before use.
    """
    if ln:
        model = ResNet_Wide_LN(Bottleneck_LN, Bottleneck_Wide_LN, [3, 4, 46, 3])
    else:
        model = ResNet_Wide(Bottleneck, Bottleneck_Wide, [3, 4, 46, 3])
    if pretrained:
        model = neq_load(model, 'resnet101')
        # model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model
def resnet152(pretrained=False, **kwargs):
    """Construct a ResNet-152 encoder.

    Args:
        pretrained (bool): If True, partially load ImageNet weights via neq_load.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    return neq_load(model, 'resnet152') if pretrained else model
def neq_load(model, name):
    """Partially load pretrained weights for *name* into *model*.

    Only parameters whose keys exist in the (possibly modified) model are
    restored; everything else keeps its current initialization.
    """
    pretrained_dict = model_zoo.load_url(model_urls[name])
    current = model.state_dict()
    # Keep only the keys that still exist in the modified architecture.
    overlap = {key: value for key, value in pretrained_dict.items() if key in current}
    current.update(overlap)
    model.load_state_dict(current)
    return model
if __name__ == '__main__':
    # Smoke test for the LayerNorm encoder variant.
    # Bug fix: `resnet50_wide` does not exist in this module; the intended
    # LayerNorm factory is resnet50_ln.
    model = resnet50_ln()
    print(model)
    # summary(model, (3,64,64))
    x = torch.rand(49, 3, 64, 64)
    x = model(x).squeeze()
    print(x.shape)
    print(len(x.shape))
| 10,078
| 30.794953
| 90
|
py
|
restflow
|
restflow-master/tests/test.py
|
import unittest
import sys
from IPython.display import Image, display
import sympy
import os
script_path = os.path.abspath(__file__) # Get the absolute path of the script
script_dir = os.path.dirname(script_path) # Get the directory of the script
os.chdir(script_dir) # Change the current working directory to the directory of the script
sys.path.append('..') # path for local package
import restflow
from restflow import symvec
from restflow import symbolic
# parameters of model
c1, c2, c3, u, D, kappa = sympy.symbols('c1 c2 c3 u D kappa')
# dimension and reduced surface area
K_d, Lambda, delta_l, dim = sympy.symbols('K_d Lambda δl d')
# symbols for vectors and their pairwise dot products
_q, _p, _r, _k, dot_kq, dot_pk, dot_qp, dot_qr, dot_pr, dot_rk = sympy.symbols('q p r k (k·q) (k·p) (q·p) (q·r) (p·r) (r·k)')
# Register every dot product with the restflow context so vector algebra
# involving k, q, p, r can be reduced symbolically.
ctx = restflow.Context()
ctx.add_dot_product(_q, _k, dot_kq)
ctx.add_dot_product(_p, _k, dot_pk)
ctx.add_dot_product(_q, _p, dot_qp)
ctx.add_dot_product(_q, _r, dot_qr)
ctx.add_dot_product(_p, _r, dot_pr)
ctx.add_dot_product(_r, _k, dot_rk)
# create vectors
k, q, p, r = ctx.vector(_k), ctx.vector(_q), ctx.vector(_p), ctx.vector(_r)
'''------------------------------------------------------------------------------------------------------------'''
class AMBp:
    """Active Model B+ propagator and vertex functions used by the tests."""

    def __init__(self):
        # Quadratic propagator exponent and the model coupling symbols.
        self.alpha = 2
        self.c1, self.c2, self.c3, self.u, self.D, self.kappa = sympy.symbols('c1 c2 c3 u D kappa')

    def f(self, k):
        """Propagator: kappa * k**alpha."""
        return self.kappa * k**self.alpha

    def v2(self, k1, k2, q):
        """Two-point vertex, returned as a (numerator, denominator) pair."""
        expr = (self.c1*q**2 + 2*self.c2*k1*k2 - self.c3*(k2**2*q*k1 + k1**2*q*k2)/(q**2))*1/2
        return sympy.fraction(sympy.cancel(expr))

    def v3(self, k1, k2, k3, q):
        """Three-point vertex: the constant coupling -u over 1."""
        return (-self.u, 1)
class Testclass(unittest.TestCase):
    def test_integrate2(self):
        """Check Expression.integrate1 (one loop momentum) against closed forms."""
        model = AMBp()
        expr = symbolic.Expression(model.c1*q**2,sympy.sympify(2))
        self.assertEqual(expr.integrate1(5, k, q), model.c1*q**2/2*Lambda**dim*delta_l*K_d)
        expr = symbolic.Expression(dot_kq**2*model.c3,q**2)
        self.assertEqual(expr.integrate1(5, k, q), model.c3*Lambda**(dim+2)*delta_l*K_d/dim)
        # Odd powers of (k·q) integrate to zero over the angular average.
        expr = symbolic.Expression(dot_kq*model.c2,sympy.sympify(1))
        self.assertEqual(expr.integrate1(5, k, q), 0)
        expr = symbolic.Expression((1+dot_kq)*(2+dot_kq),sympy.sympify(1))
        self.assertEqual(expr.integrate1(5, k, q), K_d*Lambda**dim*delta_l*(Lambda**2*q**2 + 2*dim)/dim)
        # Denominator expansion of 1/(k+q)^2 at the cutoff shell.
        expr = symbolic.Expression(sympy.sympify(1),(k+q)**2)
        self.assertEqual(expr.integrate1(5, k, q), K_d*Lambda**(dim - 6)*delta_l*(Lambda**4*dim*(dim + 2) - Lambda**2*q**2*(dim - 4)*(dim + 2) + q**4*(dim*(dim + 2) - 12*dim + 24))/(dim*(dim + 2)))
    def test_integrate3(self):
        """Check Expression.integrate2 (two external momenta) against closed forms."""
        cs_psi = sympy.symbols('cos_psi')
        expr = symbolic.Expression((q-p-k)*(q-k),sympy.sympify(1))
        self.assertEqual(expr.integrate2(3, k, q, p), K_d*Lambda**dim*delta_l*(Lambda**2 - cs_psi*p.sym*q.sym + q**2))
        expr = symbolic.Expression(((k)*(q+p))**2,sympy.sympify(1))
        self.assertEqual(expr.integrate2(3, k, q, p), K_d*Lambda**(dim + 2)*delta_l*(2*cs_psi*p.sym*q.sym + p**2 + q**2)/dim)
        expr = symbolic.Expression(sympy.sympify(1),(q+k+p)**2)
        self.assertEqual(expr.integrate2(3, k, q, p), K_d*Lambda**(dim - 4)*delta_l*(Lambda**2*dim - (dim - 4)*(2*cs_psi*p.sym*q.sym + p**2 + q**2))/dim)
    def test_label_edges(self):
        """Compare automatic edge labeling against manually labeled graphs.

        Labels are compared squared, since edge orientation may flip signs.
        """
        # KPZ one-loop figure (3 vertices).
        v = [restflow.Vertex() for i in range(3)]
        v[0].link_vertex(v[1])
        v[0].link_vertex(v[2])
        v[1].link_vertex(v[2])
        v[1].add_outgoing(0)
        g = restflow.Graph(v)
        labels = [k, q]
        g.label_edges(labels)
        # Reference graph with edges labeled by hand.
        n = [restflow.Vertex() for i in range(3)]
        n[0].link_vertex(n[1])
        n[0]._out[0].label = q-k
        n[0].link_vertex(n[2])
        n[0]._out[1].label = k
        n[1].link_vertex(n[2])
        n[1]._out[0].label = -k
        n[1].add_outgoing()
        n[1]._out[1].label = q
        h = restflow.Graph(n)
        h.vertices[0]._in[0].label=q
        for i in range(len(g.vertices)):
            for j in range(len(g.vertices[i]._in)):
                self.assertEqual((g.vertices[i]._in[j].label)**2, (h.vertices[i]._in[j].label)**2)
        # Larger 5-vertex graph with three outgoing legs.
        v = [restflow.Vertex() for i in range(5)]
        v[0].link_vertex(v[1])
        v[0].link_vertex(v[2])
        v[2].link_vertex(v[3])
        v[3].link_vertex(v[4])
        v[4].link_vertex(v[1])
        v[2].add_outgoing()
        v[3].add_outgoing()
        v[4].add_outgoing()
        g = restflow.Graph(v)
        labels = [k, p,r,q-p-r]
        g.label_edges(labels)
        # Hand-labeled reference for the 5-vertex graph.
        n = [restflow.Vertex() for i in range(5)]
        n[0].link_vertex(n[1])
        n[0]._out[0].label = k
        n[0].link_vertex(n[2])
        n[0]._out[1].label = q-k
        n[2].link_vertex(n[3])
        n[2]._out[0].label = q-k-p
        n[3].link_vertex(n[4])
        n[3]._out[0].label = q-k-p-r
        n[4].link_vertex(n[1])
        n[4]._out[0].label = -k
        n[2].add_outgoing()
        n[2]._out[1].label = p
        n[3].add_outgoing()
        n[3]._out[1].label = r
        n[4].add_outgoing()
        n[4]._out[1].label = q-p-r
        h = restflow.Graph(n)
        h.vertices[0]._in[0].label = q
        for i in range(len(g.vertices)):
            for j in range(len(g.vertices[i]._in)):
                self.assertEqual((g.vertices[i]._in[j].label)**2, (h.vertices[i]._in[j].label)**2)
    def test_freq_integral(self):
        """Check Graph._calculate_freq_integral against hand-derived Q factors."""
        model = AMBp()
        # graph k
        v = [restflow.Vertex() for i in range(5)]
        v[0].link_vertex(v[1])
        v[0].link_vertex(v[2])
        v[2].link_vertex(v[3])
        v[3].link_vertex(v[4])
        v[4].link_vertex(v[1])
        v[2].add_outgoing()
        v[3].add_outgoing()
        v[4].add_outgoing()
        g1 = restflow.Graph(v)
        labels = [k, p, r, q-p-r]
        g1.label_edges(labels)
        # Expected frequency integral for graph k (simple product of poles).
        Q = 1/((model.f(k)+model.f(q-k))*(model.f(k)+model.f(q-p-k))*(model.f(k)+model.f(q-p-k-r)))
        self.assertEqual(g1._calculate_freq_integral(g1.k,model.f)[0]/g1._calculate_freq_integral(g1.k,model.f)[1], Q)
        # graph j
        v = [restflow.Vertex() for i in range(5)]
        v[0].link_vertex(v[1])
        v[1].link_vertex(v[2])
        v[2].link_vertex(v[3])
        v[0].link_vertex(v[4])
        v[4].link_vertex(v[3])
        v[1].add_outgoing()
        v[2].add_outgoing()
        v[4].add_outgoing()
        g2 = restflow.Graph(v)
        labels = [k, p, r, q-p-r]
        g2.label_edges(labels)
        # Graph j carries an additional mixed-pole factor Qppm.
        Qppm =(2*model.f(k)*(model.f(k)+model.f(p+r+k)+model.f(r+k)+model.f(q-p-r-k))+model.f(q-p-r-k)**2+model.f(p+r+k)*model.f(r+k)+model.f(p+r+k)*model.f(q-p-r-k)+model.f(r+k)*model.f(q-p-r-k))/((model.f(p+r+k)+model.f(q-p-r-k))*(model.f(r+k)+model.f(q-p-r-k)))
        Q = 1/((model.f(k)+model.f(p+r+k))*(model.f(k)+model.f(r+k))*(model.f(k)+model.f(q-p-r-k)))*Qppm
        self.assertEqual(g2._calculate_freq_integral(g2.k,model.f)[0]/g2._calculate_freq_integral(g2.k,model.f)[1], Q)
        # graph b
        v = [restflow.Vertex() for i in range(4)]
        v[0].link_vertex(v[1])
        v[0].link_vertex(v[2])
        v[2].link_vertex(v[3])
        v[3].link_vertex(v[1])
        v[2].add_outgoing()
        v[3].add_outgoing()
        g3 = restflow.Graph(v)
        labels = [k, p, q-p]
        g3.label_edges(labels)
        Q = 1/((model.f(k)+model.f(q-k))*(model.f(k)+model.f(q-p-k)))
        self.assertEqual(g3._calculate_freq_integral(g3.k,model.f)[0]/g3._calculate_freq_integral(g3.k,model.f)[1], Q)
        # graph a
        v = [restflow.Vertex() for i in range(4)]
        v[0].link_vertex(v[1])
        v[0].link_vertex(v[2])
        v[1].link_vertex(v[3])
        v[2].link_vertex(v[3])
        v[1].add_outgoing()
        v[2].add_outgoing()# example: Figure (a) from above graph
        g4 = restflow.Graph(v)
        labels = [k, p, q-p]
        g4.label_edges(labels)
        Q = 1/((model.f(k)+model.f(p+k))*(model.f(k)+model.f(q-p-k)))*(2*model.f(k)+model.f(p+k)+model.f(q-p-k))/(model.f(p+k)+model.f(q-p-k))
        self.assertEqual(g4._calculate_freq_integral(g4.k,model.f)[0]/g4._calculate_freq_integral(g4.k,model.f)[1], Q)
        # kpz diagram
        v = [restflow.Vertex() for i in range(3)]
        v[0].link_vertex(v[1])
        v[0].link_vertex(v[2])
        v[1].link_vertex(v[2])
        v[1].add_outgoing()
        g5 = restflow.Graph(v)
        labels = [k, q]
        g5.label_edges(labels)
        Q = 1/(model.f(q-k)+model.f(k))
        self.assertEqual(g5._calculate_freq_integral(g5.k,model.f)[0]/g5._calculate_freq_integral(g5.k,model.f)[1], Q)
  def test_multiplicity(self):
    """Check Graph.calculate_multiplicity on three reference graphs.

    The expected values (12, 8, 24) are the hand-counted numbers of
    distinguishable leg orderings for the graphs presumably labelled (d)
    and (j) in the accompanying text, plus a three-leg variant.
    """
    #graph d
    v = [restflow.Vertex() for i in range(3)]
    v[0].link_vertex(v[1])
    v[0].add_outgoing()
    v[0].link_vertex(v[2])
    v[2].link_vertex(v[1])
    v[2].add_outgoing()
    g1 = restflow.Graph(v)
    self.assertEqual(g1.calculate_multiplicity(), 12)
    #graph j
    v = [restflow.Vertex() for i in range(5)]
    v[0].link_vertex(v[1])
    v[1].link_vertex(v[2])
    v[2].link_vertex(v[3])
    v[0].link_vertex(v[4])
    v[4].link_vertex(v[3])
    v[1].add_outgoing()
    v[2].add_outgoing()
    v[4].add_outgoing()
    g2 = restflow.Graph(v)
    self.assertEqual(g2.calculate_multiplicity(), 8)
    # three-leg graph with an outgoing leg at the root vertex
    v = [restflow.Vertex() for i in range(4)]
    v[0].link_vertex(v[1])
    v[0].link_vertex(v[2])
    v[2].link_vertex(v[3])
    v[3].link_vertex(v[1])
    v[0].add_outgoing()
    v[2].add_outgoing()
    v[3].add_outgoing()
    g3 = restflow.Graph(v)
    self.assertEqual(g3.calculate_multiplicity(), 24)
  def test_integral(self):
    """Check Graph.convert against a manually assembled integrand.

    For each graph the expected integrand is the product of the frequency
    integral, the vertex factors v2/v3, the multiplicity, the noise
    amplitude D*k**alpha and the sink propagator 1/f(k), assembled by hand
    from the model definition.
    """
    model = AMBp()
    #kpz
    v = [restflow.Vertex() for i in range(3)]
    v[0].link_vertex(v[1])
    v[0].link_vertex(v[2])
    v[1].link_vertex(v[2])
    v[1].add_outgoing()
    g1 = restflow.Graph(v)
    labels = [k, q]
    g1.label_edges(labels)
    nom, den = g1.convert(model).num, g1.convert(model).den
    # two 2-vertices
    Itruth = model.D*g1.k**model.alpha*g1.calculate_multiplicity()*g1._calculate_freq_integral(g1.k,model.f)[0]*model.v2(k,q-k,q)[0]*model.v2(-k,q,q-k)[0]/(model.f(g1.k)*g1._calculate_freq_integral(g1.k,model.f)[1]*model.v2(k,q-k,q)[1]*model.v2(-k,q,q-k)[1])
    self.assertEqual(nom/den, Itruth)
    #figure c
    v = [restflow.Vertex() for i in range(3)]
    v[0].link_vertex(v[1])
    v[0].link_vertex(v[2])
    v[2].link_vertex(v[1])
    v[2].add_outgoing()
    v[2].add_outgoing()
    g2 = restflow.Graph(v)
    labels = [k, q-p, p]
    g2.label_edges(labels)
    nom, den = g2.convert(model).num, g2.convert(model).den
    # one 2-vertex and one 3-vertex
    Itruth = model.D*g2.k**model.alpha* g2.calculate_multiplicity()*g2._calculate_freq_integral(g2.k,model.f)[0]*model.v2(k,q-k,q)[0]*model.v3(-k,p,q-p,q-k)[0]/(model.f(g2.k)*g2._calculate_freq_integral(g2.k,model.f)[1]*model.v2(k,q-k,q)[1]*model.v3(-k,p,q-p,q-k)[1])
    self.assertEqual(nom/den, Itruth)
    #figure 3c
    v = [restflow.Vertex() for i in range(3)]
    v[0].link_vertex(v[1])
    v[0].link_vertex(v[2])
    v[2].link_vertex(v[1])
    v[0].add_outgoing()
    v[2].add_outgoing()
    v[2].add_outgoing()
    g3 = restflow.Graph(v)
    labels = [k, p, r, q-p-r]
    g3.label_edges(labels)
    nom, den = g3.convert(model).num, g3.convert(model).den
    # two 3-vertices
    Itruth = model.D*g3.k**model.alpha*g3.calculate_multiplicity()*g3._calculate_freq_integral(g3.k,model.f)[0]*model.v3(k,p,q-p-k,q)[0]*model.v3(r,q-p-r,-k,q-p-k)[0]/(model.f(g3.k)*g3._calculate_freq_integral(g3.k,model.f)[1]*model.v3(k,p,q-p-k,q)[1]*model.v3(r,q-p-r,-k,q-p-k)[1])
    self.assertEqual(nom/den, Itruth)
  def test_symmetrize(self):
    """Check that convert_perm + integrate equals the explicit average over
    the two labelings of graph (d).
    """
    # NOTE(review): several distinct Python names below are bound to sympy
    # symbols that all share the name '0'; sympy treats equally named
    # symbols as identical, so the corresponding dot products collapse onto
    # one symbol -- verify this is intended.
    _q, _k, _p, dot_kq, dot_pk, dot_qp = sympy.symbols('q k 0 (k·q) 0 0')
    _r, dot_qr, dot_pr, dot_rk = sympy.symbols('0 0 0 0')
    ctx = symvec.Context()
    ctx.add_dot_product(_q,_k,dot_kq)
    ctx.add_dot_product(_q,_p,dot_qp)
    ctx.add_dot_product(_p,_k,dot_pk)
    ctx.add_dot_product(_q,_r,dot_qr)
    ctx.add_dot_product(_p,_r,dot_pr)
    ctx.add_dot_product(_r,_k,dot_rk)
    # local vectors shadow the module-level k, q, p used by the other tests
    k = ctx.vector(_k)
    q = ctx.vector(_q)
    p = ctx.vector(_p)
    model = AMBp()
    # graph (d)
    v = [restflow.Vertex() for i in range(3)]
    v[0].link_vertex(v[1])
    v[0].link_vertex(v[2])
    v[2].link_vertex(v[1])
    v[0].add_outgoing()
    v[2].add_outgoing()
    g = restflow.Graph(v)
    # integrate over all leg permutations in one call ...
    exprs = g.convert_perm(model,[k,p,q-p])
    Ic = restflow.integrate(exprs,3,[k,p,q-p])
    # ... and compare against the explicit average of the two labelings
    labels = [k, p, q-p]
    g.label_edges(labels)
    expr = g.convert(model)
    Ic1 = restflow.integrate([expr],3,labels)
    labels = [k, q-p, p]
    g.label_edges(labels)
    expr = g.convert(model)
    Ic2 = restflow.integrate([expr],3,labels)
    Ictotal= sympy.simplify(sympy.Poly((Ic1+Ic2)/2,q.sym).as_expr())
    self.assertEqual(Ic, Ictotal)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| 13,164
| 39.137195
| 286
|
py
|
restflow
|
restflow-master/restflow/symtools.py
|
import itertools
import sympy
def taylor(function_expression, variable_list, evaluation_point, degree):
    """
    Multivariate Taylor expansion as a sympy expression.

    Returns the Taylor polynomial of ``function_expression`` around
    ``evaluation_point``, keeping terms up to total degree ``degree``.

    Arguments:
        function_expression (sympy): expression to expand
        variable_list (list): sympy symbols to expand in
        evaluation_point (list): expansion point, same order as variable_list
        degree (int): maximum total degree of the retained terms

    Returns:
        sympy: the Taylor polynomial
    """
    n_var = len(variable_list)
    point_coordinates = [(i, j) for i, j in (zip(variable_list, evaluation_point))]  # list of tuples with variables and their evaluation_point coordinates, to later perform substitution
    deriv_orders = list(itertools.product(range(degree + 1), repeat=n_var))  # all exponent tuples of the partial derivatives
    deriv_orders = [deriv_orders[i] for i in range(len(deriv_orders)) if sum(deriv_orders[i]) <= degree]  # discard tuples above the total degree
    n_terms = len(deriv_orders)
    deriv_orders_as_input = [list(sum(list(zip(variable_list, deriv_orders[i])), ())) for i in range(n_terms)]  # (var, order) pairs flattened into diff() arguments
    polynomial = 0
    for i in range(n_terms):
        partial_derivatives_at_point = function_expression.diff(*deriv_orders_as_input[i]).subs(point_coordinates)  # e.g. df/(dx*dy**2)
        denominator = sympy.prod([sympy.factorial(j) for j in deriv_orders[i]])  # e.g. (1! * 2!)
        distances_powered = sympy.prod([(sympy.Matrix(variable_list) - sympy.Matrix(evaluation_point))[j] ** deriv_orders[i][j] for j in range(n_var)])  # e.g. (x-x0)*(y-y0)**2
        polynomial += partial_derivatives_at_point / denominator * distances_powered
    return polynomial
def all_coeffs(expr,variables):
    """
    Returns a dictionary of all monomials of multivariate polynomials.

    (Extends the sympy function all_coeffs() to multivariate polynomials.)

    Arguments:
        expr (sympy): polynomial expression
        variables (iterable): sympy symbols treated as polynomial variables

    Returns:
        dict: maps each monomial in the variables to its coefficient
    """
    x = sympy.IndexedBase('x')
    expr = expr.expand()
    free = list(variables)
    # determine the maximum exponent of each variable in the expression
    pows = [p.as_base_exp() for p in expr.atoms(sympy.Pow,sympy.Symbol)]
    P = {}
    for p,e in pows:
        if p not in free:
            continue
        elif p not in P:
            P[p]=e
        elif e>P[p]:
            P[p] = e
    # replace the variables by indexed placeholders so that coeff() can be
    # evaluated per monomial, then map the placeholders back at the end
    reps = dict([(f, x[i]) for i,f in enumerate(free)])
    xzero = dict([(v,0) for k,v in reps.items()])
    e = expr.xreplace(reps); reps = {v:k for k,v in reps.items()}
    return dict([(m.xreplace(reps), e.coeff(m).xreplace(xzero) if m!=1 else e.xreplace(xzero)) for m in _monoms(*[P[f] for f in free])])
def _monoms(*orders):
    """Return all monomials x[0]**e0 * x[1]**e1 * ... with 0 <= ei <= orders[i].

    Helper for :func:`all_coeffs`; uses the same IndexedBase ``x`` so the
    monomials can be mapped back onto the caller's free symbols.

    Arguments:
        *orders (int): maximum exponent for each variable

    Returns:
        tuple: the monomial terms of the expanded product, as sympy exprs
    """
    # bug fix: the original loop variable shadowed the *o parameter; use a
    # distinct name and a comprehension instead
    x = sympy.IndexedBase('x')
    # Poly([1]*(n+1), x[i]) is 1 + x[i] + ... + x[i]**n; multiplying these
    # geometric-like factors and expanding enumerates every mixed monomial.
    factors = [sympy.Poly([1] * (n + 1), x[i]).as_expr() for i, n in enumerate(orders)]
    return sympy.Mul(*factors).expand().args
def func_coeff(polynomial, q, p):
    """
    Returns a dictionary of the monomials of the integrals.

    Decomposes ``polynomial`` into monomials in |q|, |p| and cos(psi).

    Arguments:
        polynomial (sympy): integrated graph expression
        q: incoming wave vector (only q.sym is used)
        p: outgoing wave vector (only p.sym is used)

    Returns:
        dict: monomial -> coefficient, as produced by all_coeffs()
    """
    # prepare
    cs_psi = sympy.symbols('cos_psi')
    polynomial = sympy.Poly(polynomial,(q.sym,p.sym,cs_psi)).as_expr()
    variables = [q.sym,p.sym,cs_psi]
    var_temp = []
    # following for loop removes the dependency of the polynomial on q,p or
    # cosψ if these monomials do not exist e.g. q^2+q has no p monomial
    for var in variables:
        if var not in polynomial.free_symbols:
            var_temp.append(var)
    variables = [element for element in variables if element not in var_temp]
    coeffs = all_coeffs(polynomial,variables)
    return coeffs
| 3,299
| 44.833333
| 186
|
py
|
restflow
|
restflow-master/restflow/graph.py
|
import math
import itertools
import sympy
import matplotlib.pyplot as plt
import feynman
from restflow import symbolic
class Edge:
  """A directed, labelled edge of a diagram.

  Attributes:
    start (Vertex): vertex the edge leaves from (None for external input)
    end (Vertex): vertex the edge points to (None for an external leg)
    label (Vector or VectorAdd): wave vector carried by the edge
    angle (real): drawing hint for the edge orientation, in units of 2π
  """
  def __init__(self, start=None, end=None, label=None, angle=0.0):
    self.start, self.end = start, end
    self.label = label
    self.angle = angle
  def _render_label(self):
    """Text displayed next to the edge when the diagram is drawn."""
    value = self.label.val
    return str(value)
class Vertex:
  """A vertex of a diagram.

  Attributes:
    _in (list): incoming edges (1 or 2)
    _out (list): outgoing edges (0, 2, or 3; 0 marks a correlation function)
  """
  def __init__(self):
    self._in = []
    self._out = []
  @property
  def degree(self):
    """Number of outgoing edges."""
    return len(self._out)
  def link_vertex(self,v,angle=None):
    """Link this vertex to another vertex with a new edge.

    Arguments:
      v (Vertex): target vertex
      angle (real): optional orientation hint for the edge in units of 2π
    """
    # idiom fix: compare to None with "is"; single construction site
    e = Edge(start=self, end=v) if angle is None else Edge(start=self, end=v, angle=angle)
    self._out.append(e)
    v._in.append(e)
  def add_outgoing(self,angle=None):
    """Attach a dangling outgoing edge (an external leg) to this vertex."""
    e = Edge(start=self) if angle is None else Edge(start=self, angle=angle)
    self._out.append(e)
  def _assign_label(self):
    """
    Recursively assigns the sum of outgoing wave vectors to the incoming
    leg ("momentum conservation"). Call on the root vertex.
    """
    # renamed from `sum` to avoid shadowing the builtin
    total = None
    for e in self._out:
      if e.label is None:
        e.end._assign_label()
      total = e.label if total is None else total + e.label
    self._in[0].label = total
  def _render(self,diagram,v):
    """
    Recursively add this vertex and its outgoing edges to a feynman diagram.

    Arguments:
      diagram: feynman.Diagram being assembled
      v: the already-placed feynman vertex corresponding to self
    """
    dx = 0.2
    if self._in[0].start is None:
      # root vertex: also draw the incoming external line
      v0 = diagram.vertex(v._xy,dxy=(-dx,0))
      l = diagram.line(vstart=v0,vend=v)
      l.text(self._in[0]._render_label(),horizontalalignment='center')
    for e in self._out:
      if e.end and e.end.degree == 0: # end is a correlation function
        if hasattr(e.end,'_g'):
          # reuse the marker placed when the other incoming edge was drawn
          ve = e.end._g
        else:
          ve = diagram.vertex(v._xy,dxy=(.5*dx,dx),marker='o',markerfacecolor='white',markeredgewidth=2)
          e.end._g = ve
      else:
        ve = diagram.vertex(v._xy,angle=e.angle,radius=dx)
      l = diagram.line(vstart=v,vend=ve)
      l.text(e._render_label(),horizontalalignment='center')
      if e.end:
        e.end._render(diagram,ve)
class Graph:
  """Represents a single one-loop graph composed of vertices.

  Attributes:
    k (Vector): internal (loop) wave vector, set by label_edges
    vertices (list): list of vertices with the root at position 0
    root (Vertex): convenience alias for vertices[0]
  """
  def __init__(self,vertices):
    self.k = None
    self.vertices = vertices
    self.root = vertices[0]
    # attach the incoming external edge to the root (mutates the vertex)
    self.root._in.append(Edge(end=self.root))
  def _reset_labels(self):
    """Deletes all the labels of the edges."""
    for vertex in self.vertices:
      for e_in in vertex._in:
        e_in.label = None
      for e_out in vertex._out:
        e_out.label = None
  def label_edges(self,labels):
    """Label all edges with the corresponding wave vector.

    Requires a single-loop graph with a single correlation function.

    Arguments:
      labels (list): internal wave vector k followed by the n outgoing
        wave vectors (Vector or VectorAdd)
    """
    self._reset_labels() # clear the previous labels before relabelling
    k = labels[0]
    self.k = labels[0]
    # consumable copy of the outgoing wave vectors (was copied twice before)
    p = list(labels[1:])
    # label external legs
    for v in self.vertices:
      if v.degree == 0: # correlation function carries +k and -k
        v._in[0].label = k
        v._in[1].label = -k
      else:
        for e in v._out:
          if e.end is None: # is leaf
            e.label = p.pop(0)
    # now label all internal edges obeying "momentum conservation"
    self.root._assign_label()
  def _ext_vec(self):
    """Return the squares of the labels of all external legs (leaves plus
    the root's incoming edge)."""
    ext_array = []
    for v in self.vertices:
      if v.degree > 0:
        for leaf in v._out:
          if leaf.end is None:
            ext_array.append(leaf.label)
      if v._in[0].start is None: # the root's incoming edge is external too
        ext_array.append(v._in[0].label)
    return [element**2 for element in ext_array]
  def _calculate_freq_integral(self,k,f):
    """Determines the result of the frequency integration.

    Arguments:
      k (Vector): internal wave vector
      f (func): propagator function

    Returns:
      tuple: (numerator,denominator)
    """
    k_edge, majority, minority = [], [], []
    for v in self.vertices:
      # collect intermediate edge wave vectors, excluding external legs
      if v.degree > 0 and v._in[0].end is not None and v._in[0].start is not None:
        k_edge.append(v._in[0].label)
    # squares of the external-leg labels
    ext_array = self._ext_vec()
    # project every intermediate wave vector onto the sink vector
    k2_edge = [_k*k for _k in k_edge]
    # extract the sign of the k wave vector in each propagator
    num_prop = len(k_edge)
    signs = [0]*num_prop
    for i in range(len(k2_edge)):
      k2_edge[i] = k2_edge[i].subs([(external,0) for external in ext_array]) # set external legs^2 to 0
      for term in k2_edge[i].as_ordered_terms(): # for the remaining monomials
        coeff, _ = term.as_coeff_Mul()
        expr = term**2 # to avoid vector complications
        if expr.is_Pow and expr.exp == 4: # the monomial is x^4
          signs[i] = coeff # extract its coefficient (the sign of k)
    majority = [i for i, x in enumerate(signs) if x==max(set(signs), key = signs.count)]
    minority = [i for i, x in enumerate(signs) if x==min(set(signs), key = signs.count)]
    # pad so the tuple constructions below never index out of range
    majority.extend([0,0,0,0])
    minority.extend([0,0,0,0])
    k_edge.extend([0,0,0,0])
    # Q-function tuples of (numerator,denominator)
    Qpm = (
      2*f(k)+f(k_edge[0])+f(k_edge[1]),
      f(k_edge[0])+f(k_edge[1])
    )
    Qppm = (
      2*f(k)*(f(k)+f(k_edge[majority[0]])+f(k_edge[majority[1]])+f(k_edge[minority[0]]))+f(k_edge[minority[0]])**2+f(k_edge[majority[0]])*f(k_edge[majority[1]])+f(k_edge[majority[0]])*f(k_edge[minority[0]])+f(k_edge[majority[1]])*f(k_edge[minority[0]]),
      (f(k_edge[majority[0]])+f(k_edge[minority[0]]))*(f(k_edge[majority[1]])+f(k_edge[minority[0]]))
    )
    # dictionary of (numerator,denominator) keyed by the pair (number of
    # propagators, |sum of signs|), e.g. (2,0) -> Qpm, (3,1) -> Qppm
    dict_freq = {
      (0,0): (1,1),
      (1,1): (1,f(k_edge[0])+f(k)),
      (2,2): (1,(f(k)+f(k_edge[0]))*(f(k)+f(k_edge[1]))),
      (2,0): (Qpm[0],(f(k)+f(k_edge[0]))*(f(k)+f(k_edge[1]))*Qpm[1]),
      (3,3): (1,(f(k)+f(k_edge[0]))*(f(k)+f(k_edge[1]))*(f(k)+f(k_edge[2]))),
      (3,1): (Qppm[0],((f(k)+f(k_edge[0]))*(f(k)+f(k_edge[1]))*(f(k)+f(k_edge[2])))*Qppm[1])
    }
    return dict_freq[(len(signs),int(abs(sum(signs))))]
  def calculate_multiplicity(self):
    """Calculates the multiplicity of the symmetrized graph.

    For every vertex, multiplies in the multinomial coefficient counting
    the distinguishable orderings of its outgoing edges, grouped into
    external legs (E), internal branches (B) and legs ending in the
    correlation function (C).

    Returns:
      int: the multiplicity
    """
    mult = 1
    for v in self.vertices:
      num_E = 0 # external legs (no end vertex)
      num_B = 0 # internal branches
      num_C = 0 # legs into the correlation function
      for e in v._out:
        if e.end is None:
          num_E += 1
        elif len(e.end._out) == 0:
          num_C += 1
        else:
          num_B += 1
      mult *= math.factorial(len(v._out))/(math.factorial(num_E)*math.factorial(num_B)*math.factorial(num_C))
    return int(mult)
  def convert(self, model):
    """Converts the labeled graph into a symbolic expression.

    Requires the graph to be labeled (see label_edges). Numerator and
    denominator are tracked separately.

    Arguments:
      model: object holding the model definition (f, v2, v3, D, alpha)

    Returns:
      Expression: the integrand
    """
    num,den = self._calculate_freq_integral(self.k,model.f)
    for v in self.vertices:
      if v.degree == 2:
        v2_num,v2_den = model.v2(v._out[0].label,v._out[1].label,v._in[0].label)
        num *= v2_num
        den *= v2_den
      elif v.degree == 3:
        v3_num,v3_den = model.v3(v._out[0].label,v._out[1].label,v._out[2].label,v._in[0].label)
        num *= v3_num
        den *= v3_den
    num *= self.calculate_multiplicity()*model.D*self.k**model.alpha
    den *= model.f(self.k)
    return symbolic.Expression(num,den)
  def convert_perm(self,model, labels):
    """Converts the graph into a list of symbolic expressions, one for
    each permutation of the outgoing wave vectors.

    Note that this method relabels graph edges.

    Arguments:
      model: object holding the model definition
      labels (list): internal wave vector k followed by the n outgoing
        wave vectors (Vector)

    Returns:
      list: of Expression objects
    """
    exprs = []
    k = labels[0]
    self.k = labels[0]
    p = list(labels[1:])
    for _p in itertools.permutations(p):
      self.label_edges([k]+list(_p))
      exprs.append(self.convert(model))
    return exprs
  def plot_graph(self):
    """Plots the graph using the feynman package.

    Useful to verify visually that the constructed graph is the intended
    one.
    """
    diagram = feynman.Diagram()
    self.root._render(diagram,diagram.vertex(xy=(.25,.5)))
    diagram.plot()
    plt.show()
  def export_latex_graph(self,filename):
    """Writes the graph as a standalone tikz-feynman TeX file.

    Arguments:
      filename (str): output path without the '.tex' extension

    Bug fix: the filename argument was previously ignored (the f-string
    contained no placeholder), so every graph overwrote the same file.
    """
    with open(f'{filename}.tex','w') as file:
      file.write(r'\documentclass[11pt,a4paper,border={1pt 1pt 16pt 1pt},varwidth]{standalone}' '\n' r'\usepackage[top=15mm,bottom=12mm,left=30mm,right=30mm,head=12mm,includeheadfoot]{geometry}' '\n' r'\usepackage{graphicx,color,soul}' '\n' r'\usepackage[compat=1.1.0]{tikz-feynman}' '\n' r'\usepackage{XCharter}' '\n' r'\begin{document}' '\n' r'\thispagestyle{empty}' '\n' r'\begin{figure*}[t]' '\n \t' r'\hspace{-0.4cm}\feynmandiagram [small,horizontal=root to v0] {' '\n')
      num_leaf=0
      file.write('\t \t root -- [fermion] v0, \n')
      for i in range(len(self.vertices)):
        if len(self.vertices[i]._out) != 0:
          # ids of neighbouring vertices (None for leaf ends); hoisted out
          # of the inner loop since it does not depend on j
          id_out = [self.vertices.index(e.end) if e.end in self.vertices else None for e in self.vertices[i]._out]
          # loop through all neighbors of the selected vertex
          for j in range(len(self.vertices[i]._out)):
            if self.vertices[i]._out[j].end is None:
              # end of a leaf: synthesize a unique node name per leaf
              file.write('\t \t v{0} -- [fermion] v{1}{2}, \n'.format(i, id_out[j], num_leaf))
              num_leaf+=1
            elif len(self.vertices[i]._out[j].end._in) == 2:
              # sink (correlation function): drawn as an empty dot
              file.write('\t \t v{0} -- [fermion] v{1} [/tikzfeynman/empty dot], \n'.format(i, id_out[j]))
            else:
              file.write('\t \t v{0} -- [fermion] v{1}, \n'.format(i, id_out[j]))
      file.write('\t }; \n')
      # '\\e' avoids the invalid escape sequence '\e' (same output bytes)
      file.write('\\end{figure*} \n')
      file.write('\\end{document}')
| 11,176
| 32.972644
| 475
|
py
|
restflow
|
restflow-master/restflow/symvec.py
|
import copy
"""Implementation of symbolic vectors with sympy."""
class Context:
  """Registry of symbolic dot products shared by a family of vectors."""
  def __init__(self):
    self.dots = {}
  def add_dot_product(self, s1, s2, s_dot):
    """Declare that the product s1·s2 is represented by the symbol s_dot."""
    key = frozenset((s1, s2))
    self.dots[key] = s_dot
  def vector(self, sym):
    """Create a Vector bound to this context."""
    return Vector(self, sym)
class VectorAdd:
  """
  Represents the sum of two vectors (or of nested sums).

  Attributes:
    a: left summand (Vector or VectorAdd)
    b: right summand (always a Vector by construction)
  """
  def __init__(self, a, b):
    self.a = a
    self.b = b
  @property
  def val(self):
    """Scalar-symbol representation of the sum."""
    return self.a.val + self.b.val
  def __add__(self, rhs):
    # bug fix: the old test `type(rhs) is Vector or VectorAdd` was always
    # truthy (the class object VectorAdd is true), so the TypeError branch
    # was unreachable and non-vector operands were silently accepted
    if type(rhs) is Vector or type(rhs) is VectorAdd:
      return VectorAdd(self, rhs)
    else:
      raise TypeError(rhs)
  def __sub__(self, rhs):
    # same bug fix as in __add__
    if type(rhs) is Vector or type(rhs) is VectorAdd:
      return VectorAdd(self, -1*rhs)
    else:
      raise TypeError(rhs)
  def __mul__(self, rhs):
    # distribute: (a+b)·x = a·x + b·x; scalars multiply both summands
    if type(rhs) is Vector or type(rhs) is VectorAdd:
      return self.a*rhs + self.b*rhs
    else:
      return VectorAdd(self.a*rhs, self.b*rhs)
  def __rmul__(self, lhs):
    return VectorAdd(lhs*self.a, lhs*self.b)
  def __neg__(self):
    return VectorAdd(-1*self.a, -1*self.b)
  def __pow__(self, p):
    # only the powers arising from scalar products are supported
    if p == 2:
      return self*self
    elif p == 4:
      return (self*self)**2
    elif p == 0:
      return 1
    else:
      raise ValueError
  def free_symvec(self):
    """
    Method to find the free symbolic vectors of an expression.

    Returns:
      new_list (array): symbolic vectors composing the expression, with
      unit prefactors and duplicates removed
    """
    monomials = [] # monomial vectors of the expression
    copy_vec = copy.deepcopy(self)
    element = copy_vec.a # VectorAdd.b is always a Vector, VectorAdd.a not always
    monomials.append(copy_vec.a)
    monomials.append(copy_vec.b)
    while type(element) is VectorAdd: # walk down until .a is a plain Vector
      monomials.append(element.a)
      monomials.append(element.b)
      element = element.a
    monomials = [item for item in monomials if type(item) is not VectorAdd] # keep only the Vector leaves
    for item in monomials:
      item.factor = 1 # strip coefficients (safe: we operate on a deep copy)
    new_list, new_list2 = [], []
    for item in monomials:
      if item**2 not in new_list2: # drop duplicate vectors (compared via squares)
        new_list.append(item)
        new_list2.append(item**2)
    return new_list
class Vector:
  """
  A symbolic vector with a scalar prefactor.

  Multiplying two vectors yields the scalar product: equal symbols give
  the squared magnitude, distinct symbols are looked up in the shared
  Context's dot-product registry.
  """
  def __init__(self, ctx, sym, factor=1):
    self.ctx = ctx
    self.sym = sym
    self.factor = factor
  @property
  def val(self):
    """Scalar-symbol representation, prefactor included."""
    return self.factor * self.sym
  def __add__(self, rhs):
    if type(rhs) is Vector or type(rhs) is VectorAdd:
      return VectorAdd(self, rhs)
    raise TypeError(rhs)
  def __sub__(self, rhs):
    if type(rhs) is Vector or type(rhs) is VectorAdd:
      return VectorAdd(self, -1 * rhs)
    raise TypeError(rhs)
  def __mul__(self, rhs):
    if type(rhs) is Vector:
      assert(self.ctx == rhs.ctx)
      if self.sym == rhs.sym:
        # same direction: plain square of the magnitudes
        return self.factor * self.sym * rhs.factor * rhs.sym
      # distinct directions: use the registered dot-product symbol
      key = frozenset((self.sym, rhs.sym))
      return self.factor * rhs.factor * self.ctx.dots[key]
    if type(rhs) is VectorAdd:
      return rhs.a * self + rhs.b * self
    # anything else is treated as a scalar prefactor
    return Vector(self.ctx, self.sym, rhs * self.factor)
  def __rmul__(self, lhs):
    return Vector(self.ctx, self.sym, lhs * self.factor)
  def __neg__(self):
    return Vector(self.ctx, self.sym, -1 * self.factor)
  def __pow__(self, p):
    # only the powers arising from scalar products are supported
    if p == 2:
      return self * self
    if p == 4:
      return (self * self) ** 2
    if p == 0:
      return 1
    raise ValueError
| 4,135
| 28.542857
| 115
|
py
|
restflow
|
restflow-master/restflow/symbolic.py
|
import sympy
from restflow import symtools
from restflow import symvec
# global symbols shared by all integrals: hypersphere surface-area factor
# K_d, UV cutoff Lambda, RG step δl, and spatial dimension d
K_d, Lambda, delta_l, dim = sympy.symbols('K_d Lambda δl d')
def _integrate_theta(expr,cs,d):
"""Symbolically replace powers of cos by integrated expression."""
expr = expr.subs(cs**4,3/(d*(d+2)))
expr = expr.subs(cs**3,0)
expr = expr.subs(cs**2,1/d)
expr = expr.subs(cs,0)
return expr
def _integrate_magnitude(expr,k,d):
  """
  Evaluate the radial wave-vector integral on the momentum shell.

  Sets |k| to the cutoff Lambda, multiplies by the shell measure
  Lambda**d * δl and by the hypersphere surface-area factor K_d (from the
  angular integral), then simplifies the result.
  """
  shell = expr.subs(k.sym, Lambda) * Lambda**d * delta_l * K_d
  return sympy.simplify(shell)
class Expression:
  """A graph integrand stored as separate numerator and denominator.

  Keeping the two apart allows the denominator to be Taylor-expanded
  independently before the angular integration.
  """
  def __init__(self,num,den):
    # num/den: sympy expressions (or exact numbers)
    self.num = num
    self.den = den
  def integrate1(self,n,k,q):
    """Perform symbolic angular integration of k with one fixed q.

    Expands up to order n of external wave vector q. Only treats scalar
    products up to power of 4.

    Arguments:
      n (int): expansion order
      k (Vector): integration wave vector
      q (Vector): external wave vector

    Returns:
      sympy: scalar symbolic expression
    """
    # prepare
    cs = sympy.Symbol('cos_theta')
    dot = k.ctx.dots[frozenset((q.sym,k.sym))]
    num = sympy.sympify(self.num)
    den = sympy.sympify(self.den)
    expr = num/den
    # replace the dot product by |q||k|cos(theta)
    expr = expr.subs(dot,q.sym*k.sym*cs)
    # expand in orders of q
    expr = sympy.series(expr,q.sym,x0=0,n=n).removeO()
    expr = sympy.cancel(expr)
    expr = sympy.Poly(expr,q.sym).as_expr()
    # angular average over theta, then the radial shell integral
    expr = _integrate_theta(expr,cs,dim)
    return _integrate_magnitude(expr,k,dim)
  def integrate2(self,n,k,q,p):
    """Perform symbolic angular integration of k with two external wave
    vectors.

    Arguments:
      n (int): expansion order
      k (Vector): integration wave vector
      q (Vector): external incoming
      p (Vector): external outgoing

    Returns:
      sympy: scalar symbolic expression
    """
    O = sympy.O
    # prepare: psi is the fixed angle between q and p; x collects the
    # azimuthal dependence sin(theta)cos(phi)
    cs_psi, si_psi = sympy.symbols('cos_psi sin_psi')
    cs, x = sympy.symbols('cos_theta (sin_theta·cos_phi)')
    dot_qk = k.ctx.dots[frozenset((q.sym,k.sym))]
    dot_pk = k.ctx.dots[frozenset((p.sym,k.sym))]
    dot_qp = k.ctx.dots[frozenset((q.sym,p.sym))]
    num = self.num
    den = self.den
    # rewrite all dot products in spherical coordinates aligned with q
    num, den = num.subs(dot_qk,q.sym*k.sym*cs), den.subs(dot_qk,q.sym*k.sym*cs)
    num, den = num.subs(dot_pk,p.sym*k.sym*(cs_psi*cs+si_psi*x)), den.subs(dot_pk,p.sym*k.sym*(cs_psi*cs+si_psi*x))
    num, den = num.subs(dot_qp,q.sym*p.sym*cs_psi), den.subs(dot_qp,q.sym*p.sym*cs_psi)
    # expand the expression before the angular integration
    # cancel common factors like q**2 before taylor expansion wrt q
    num = sympy.expand(num)
    den = sympy.expand(den)
    # truncate mixed orders q^i p^(n-i) beyond total order n
    num = num + O(q.sym**n)+O(p.sym**n)+sum([O(q.sym**i*p.sym**(n-i)) for i in range(0,n)])
    num = num.removeO()
    den = den + O(q.sym**n)+O(p.sym**n)+sum([O(q.sym**i*p.sym**(n-i)) for i in range(0,n)])
    den = den.removeO()
    # multiply numerator by the Taylor expansion of 1/denominator
    expr = num*symtools.taylor(den**(-1), [q.sym,p.sym], [0,0], n)
    # expand, discarding higher-order terms again
    expr = sympy.expand(expr)+O(q.sym**n) + O(p.sym**n) + sum([O(q.sym**i*p.sym**(n-i)) for i in range(0,n)])
    expr = expr.removeO()
    # keep only the cs_psi dependence
    expr = expr.subs(si_psi**2,1-cs_psi**2)
    # azimuthal average: <x^2> = 1/d, <x> = 0
    expr = expr.subs(x**2,1/dim)
    expr = expr.subs(x,0)
    # treat remaining cos_theta
    expr = _integrate_theta(expr,cs,dim)
    # factorize the expression in powers of q and p
    expr = sympy.Poly(expr,q.sym,p.sym).as_expr()
    return _integrate_magnitude(expr,k,dim)
def _find_int_variable(labels):
  """Identify the integration vector (k), the incoming vector (q) and an
  outgoing vector (p) among the edge labels.

  Relies on multiplicities: k occurs exactly once among all free vectors,
  while q occurs exactly once among the outgoing legs (momentum
  conservation).

  Arguments:
    labels (array): input vectors [v1, v2, ..., vn] with v1 the sink
      vector and v2+...+vn = q

  Returns:
    k, q, p: three Vector objects (p is None when there is only one
    external leg)
  """
  # free vectors making up the sink vector
  if isinstance(labels[0], symvec.Vector):
    sink_vectors = [labels[0]]
  else:
    sink_vectors = labels[0].free_symvec()
  # free vectors of every external leg
  leg_vectors = []
  for leg in labels[1:]:
    if type(leg) == symvec.Vector:
      leg_vectors.append(leg)
    else:
      leg_vectors.extend(leg.free_symvec())
  all_vectors = sink_vectors + leg_vectors
  # compare via squares, since distinct Vector objects represent the same
  # wave vector
  leg_squares = [v**2 for v in leg_vectors]
  all_squares = [v**2 for v in all_vectors]
  # q: the unique free vector among the outgoing legs
  index_q = next((i for i, sq in enumerate(leg_squares) if leg_squares.count(sq) == 1), -1)
  q = leg_vectors[index_q]
  q_square = q**2
  # k: the unique free vector overall, excluding q (relevant when there is
  # only one external leg)
  index_k = next((i for i, sq in enumerate(all_squares) if all_squares.count(sq) == 1 and sq != q_square), -1)
  k = all_vectors[index_k]
  if len(leg_vectors) == 1:
    p = None
  else:
    p = leg_vectors[index_q - 1] # q occurs only once, so a neighbour is a p
  return k, q, p
def integrate(exprs, n, labels):
  """
  Integrate all symbolic graph expressions and average them.

  Arguments:
    exprs (list): of symbolic.Expression
    n (int): expansion order
    labels (list): sink vector followed by the outgoing wave vectors

  Returns:
    sympy: the averaged, simplified integral as a polynomial in the
    external wave vectors
  """
  k, q, p = _find_int_variable(labels)
  if p is None:
    # single external leg: one-vector angular integral
    total = sum(e.integrate1(n, k, q) for e in exprs)
    poly_vars = (q.sym,)
  else:
    # two external legs: integral depending on both q and p
    total = sum(e.integrate2(n, k, q, p) for e in exprs)
    poly_vars = (q.sym, p.sym)
  averaged = total / len(exprs)
  return sympy.simplify(sympy.Poly(averaged, *poly_vars).as_expr())
| 6,204
| 36.379518
| 183
|
py
|
restflow
|
restflow-master/restflow/__init__.py
|
from restflow.graph import *
from restflow.symvec import Context
from restflow.symbolic import integrate
| 104
| 34
| 39
|
py
|
cc
|
cc-master/optimise.py
|
import tensorflow as tf
from utils import *
from sklearn.model_selection import KFold
from models import *
import time
import datetime
import hyperopt
class FLAGS:
    """Static configuration namespace (stands in for tf.app.flags).

    NOTE(review): main() overwrites several of these fields per hyperopt
    trial (batch_size, dropout_rate_hidden, learning_rate,
    gradient_clipping_value); the values here are only defaults.
    """
    dir = "/data"  # base directory for datasets, embeddings and run output
    training_file = "clickbait17-validation-170630"
    validation_file = "clickbait17-train-170331"
    epochs = 20
    batch_size = 64
    filter_sizes = "3,4,5"  # comma-separated CNN filter widths
    num_filters = 100  # CNN feature maps per filter size
    dropout_rate_hidden = 0.5
    dropout_rate_cell = 0.3
    dropout_rate_embedding = 0.2
    state_size = 64  # RNN cell state size
    hidden_size = 0  # extra hidden-layer size -- presumably 0 disables it; confirm in models.py
    timestamp = "0715"  # run identifier used in the checkpoint path
    y_len = 4  # number of truth classes
    model = "SAN"  # one of DAN / CNN / BiRNN / SAN (dispatched in main)
    use_target_description = False
    use_image = False
    learning_rate = 0.01
    embedding_size = 100  # GloVe embedding dimension (glove.6B.<size>d.txt)
    gradient_clipping_value = 1  # clip gradients to [-v, v]; falsy disables clipping
def main(argv=None):
FLAGS.batch_size = argv["batch_size"]
FLAGS.dropout_rate_hidden = argv["dropout_rate_hidden"]
FLAGS.learning_rate = argv["learning_rate"]
FLAGS.gradient_clipping_value = argv["gradient_clipping_value"]
np.random.seed(81)
word2id, embedding = load_embeddings(fp=os.path.join(FLAGS.dir, "glove.6B."+str(FLAGS.embedding_size)+"d.txt"), embedding_size=FLAGS.embedding_size)
with open(os.path.join(FLAGS.dir, 'word2id.json'), 'w') as fout:
json.dump(word2id, fp=fout)
# vocab_size = embedding.shape[0]
# embedding_size = embedding.shape[1]
ids, post_texts, truth_classes, post_text_lens, truth_means, target_descriptions, target_description_lens, image_features = read_data(word2id=word2id, fps=[os.path.join(FLAGS.dir, FLAGS.training_file)], y_len=FLAGS.y_len, use_target_description=FLAGS.use_target_description, use_image=FLAGS.use_image)
post_texts = np.array(post_texts)
truth_classes = np.array(truth_classes)
post_text_lens = np.array(post_text_lens)
truth_means = np.array(truth_means)
shuffle_indices = np.random.permutation(np.arange(len(post_texts)))
post_texts = post_texts[shuffle_indices]
truth_classes = truth_classes[shuffle_indices]
post_text_lens = post_text_lens[shuffle_indices]
truth_means = truth_means[shuffle_indices]
max_post_text_len = max(post_text_lens)
print max_post_text_len
post_texts = pad_sequences(post_texts, max_post_text_len)
target_descriptions = np.array(target_descriptions)
target_description_lens = np.array(target_description_lens)
target_descriptions = target_descriptions[shuffle_indices]
target_description_lens = target_description_lens[shuffle_indices]
max_target_description_len = max(target_description_lens)
print max_target_description_len
target_descriptions = pad_sequences(target_descriptions, max_target_description_len)
image_features = np.array(image_features)
data = np.array(list(zip(post_texts, truth_classes, post_text_lens, truth_means, target_descriptions, target_description_lens, image_features)))
kf = KFold(n_splits=5)
round = 1
val_scores = []
for train, validation in kf.split(data):
train_data, validation_data = data[train], data[validation]
g = tf.Graph()
with g.as_default() as g:
tf.set_random_seed(81)
with tf.Session(graph=g) as sess:
if FLAGS.model == "DAN":
model = DAN(x1_maxlen=max_post_text_len, y_len=len(truth_classes[0]), x2_maxlen=max_target_description_len, embedding=embedding, filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))), num_filters=FLAGS.num_filters, hidden_size=FLAGS.hidden_size, state_size=FLAGS.state_size, x3_size=len(image_features[0]))
if FLAGS.model == "CNN":
model = CNN(x1_maxlen=max_post_text_len, y_len=len(truth_classes[0]), x2_maxlen=max_target_description_len, embedding=embedding, filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))), num_filters=FLAGS.num_filters, hidden_size=FLAGS.hidden_size, state_size=FLAGS.state_size, x3_size=len(image_features[0]))
if FLAGS.model == "BiRNN":
model = BiRNN(x1_maxlen=max_post_text_len, y_len=len(truth_classes[0]), x2_maxlen=max_target_description_len, embedding=embedding, filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))), num_filters=FLAGS.num_filters, hidden_size=FLAGS.hidden_size, state_size=FLAGS.state_size, x3_size=len(image_features[0]))
if FLAGS.model == "SAN":
model = SAN(x1_maxlen=max_post_text_len, y_len=len(truth_classes[0]), x2_maxlen=max_target_description_len, embedding=embedding, filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))), num_filters=FLAGS.num_filters, hidden_size=FLAGS.hidden_size, state_size=FLAGS.state_size, x3_size=len(image_features[0]), attention_size=2*FLAGS.state_size)
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
grads_and_vars = optimizer.compute_gradients(model.loss)
if FLAGS.gradient_clipping_value:
grads_and_vars = [(tf.clip_by_value(grad, -FLAGS.gradient_clipping_value, FLAGS.gradient_clipping_value), var) for grad, var in grads_and_vars]
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
out_dir = os.path.join(FLAGS.dir, "runs", FLAGS.timestamp)
# loss_summary = tf.summary.scalar("loss", model.loss)
# acc_summary = tf.summary.scalar("accuracy", model.accuracy)
# train_summary_op = tf.summary.merge([loss_summary, acc_summary])
# train_summary_dir = os.path.join(out_dir, "summaries", "train")
# train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# val_summary_op = tf.summary.merge([loss_summary, acc_summary])
# val_summary_dir = os.path.join(out_dir, "summaries", "validation")
# val_summary_writer = tf.summary.FileWriter(val_summary_dir, sess.graph)
checkpoint_dir = os.path.join(out_dir, "checkpoints")
checkpoint_prefix = os.path.join(checkpoint_dir, FLAGS.model+str(round))
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
    def train_step(input_x1, input_y, input_x1_len, input_z, input_x2, input_x2_len, input_x3):
        # Run one optimisation step on a mini-batch and log loss / MSE / accuracy.
        # Closes over `model`, `sess`, `train_op`, `global_step` and FLAGS from
        # the enclosing training function.
        feed_dict = {model.input_x1: input_x1,
                     model.input_y: input_y,
                     model.input_x1_len: input_x1_len,
                     model.input_z: input_z,
                     model.dropout_rate_hidden: FLAGS.dropout_rate_hidden,
                     model.dropout_rate_cell: FLAGS.dropout_rate_cell,
                     model.dropout_rate_embedding: FLAGS.dropout_rate_embedding,
                     model.batch_size: len(input_x1),
                     model.input_x2: input_x2,
                     model.input_x2_len: input_x2_len,
                     model.input_x3: input_x3}
        _, step, loss, mse, accuracy = sess.run([train_op, global_step, model.loss, model.mse, model.accuracy], feed_dict)
        time_str = datetime.datetime.now().isoformat()
        print("{}: step {}, loss {:g}, mse {:g}, acc {:g}".format(time_str, step, loss, mse, accuracy))
        # train_summary_writer.add_summary(summaries, step)
    def validation_step(input_x1, input_y, input_x1_len, input_z, input_x2, input_x2_len, input_x3, writer=None):
        # Evaluate the model on a batch without training (all dropout rates
        # forced to 0); logs metrics and returns the validation MSE.
        feed_dict = {model.input_x1: input_x1,
                     model.input_y: input_y,
                     model.input_x1_len: input_x1_len,
                     model.input_z: input_z,
                     model.dropout_rate_hidden: 0,
                     model.dropout_rate_cell: 0,
                     model.dropout_rate_embedding: 0,
                     model.batch_size: len(input_x1),
                     model.input_x2: input_x2,
                     model.input_x2_len: input_x2_len,
                     model.input_x3: input_x3}
        step, loss, mse, accuracy = sess.run([global_step, model.loss, model.mse, model.accuracy], feed_dict)
        time_str = datetime.datetime.now().isoformat()
        print("{}: step {}, loss {:g}, mse {:g}, acc {:g}".format(time_str, step, loss, mse, accuracy))
        # if writer:
        #     writer.add_summary(summaries, step)
        return mse
print("\nValidation: ")
post_text_val, truth_class_val, post_text_len_val, truth_mean_val, target_description_val, target_description_len_val, image_feature_val= zip(*validation_data)
validation_step(post_text_val, truth_class_val, post_text_len_val, truth_mean_val, target_description_val, target_description_len_val, image_feature_val)
print("\n")
min_mse_val = np.inf
for i in range(FLAGS.epochs):
batches = get_batch(train_data, FLAGS.batch_size)
for batch in batches:
post_text_batch, truth_class_batch, post_text_len_batch, truth_mean_batch, target_description_batch, target_description_len_batch, image_feature_batch = zip(*batch)
train_step(post_text_batch, truth_class_batch, post_text_len_batch, truth_mean_batch, target_description_batch, target_description_len_batch, image_feature_batch)
print("\nValidation: ")
mse_val = validation_step(post_text_val, truth_class_val, post_text_len_val, truth_mean_val, target_description_val, target_description_len_val, image_feature_val)
print("\n")
if mse_val < min_mse_val:
min_mse_val = mse_val
# saver.save(sess, checkpoint_prefix)
round += 1
val_scores.append(min_mse_val)
return np.mean(val_scores)
if __name__ == "__main__":
    # Hyper-parameter search: minimise the validation MSE returned by main()
    # over 100 TPE trials, then report the best configuration.
    space = {
        "batch_size": hyperopt.hp.choice("batch_size", [16, 32, 64, 128]),
        "dropout_rate_hidden": hyperopt.hp.choice("dropout_rate_hidden", [0.3, 0.5, 0.7]),
        "learning_rate": hyperopt.hp.choice("learning_rate", [0.001, 0.005, 0.01, 0.05]),
        "gradient_clipping_value": hyperopt.hp.choice("gradient_clipping_value", [0.5, 1, 2, 5, 10])
    }
    best_model = hyperopt.fmin(main, space, algo=hyperopt.tpe.suggest, max_evals=100)
    print(best_model)
    print(hyperopt.space_eval(space, best_model))
| 10,843
| 59.581006
| 366
|
py
|
cc
|
cc-master/utils.py
|
import numpy as np
import json
import os
import re
import nltk
from gensim.models import Word2Vec
from tweet_utils import *
from collections import Counter
from PIL import Image
import scipy.io
import tensorflow as tf
from scipy import ndimage
import hickle
PAD = "<pad>" # reserve 0 for pad
UNK = "<unk>" # reserve 1 for unknown
# tokeniser = nltk.tokenize.stanford.StanfordTokenizer(path_to_jar='./stanford-postagger.jar')
# java_path = "/Library/Java/JavaVirtualMachines/jdk1.8.0_51.jdk/Contents/Home"
# os.environ['JAVAHOME'] = java_path
nltk_tokeniser = nltk.tokenize.TweetTokenizer()
np.random.seed(81)
def process_tweet(text):
    """Normalise a raw tweet into GloVe-style markers (<url>, <user>, ...)."""
    FLAGS = re.MULTILINE | re.DOTALL

    def expand_hashtag(match):
        # "#CamelCase" -> "<hashtag>" plus the body split before capitals.
        hashtag_body = match.group()[1:]
        return " ".join(["<hashtag>"] + re.split(r"(?=[A-Z])", hashtag_body, flags=FLAGS))

    def lower_allcaps(match):
        # "WORD" -> "word <allcaps>"
        return match.group().lower() + " <allcaps>"

    eyes = r"[8:=;]"
    nose = r"['`\-]?"

    # The substitutions are order-sensitive; apply them in sequence.
    rules = [
        (r"https?:\/\/\S+\b|www\.(\w+\.)+\S*", "<url>"),
        (r"/", " / "),
        (r"@\w+", "<user>"),
        (r"{}{}[)dD]+|[)dD]+{}{}".format(eyes, nose, nose, eyes), "<smile>"),
        (r"{}{}p+".format(eyes, nose), "<lolface>"),
        (r"{}{}\(+|\)+{}{}".format(eyes, nose, nose, eyes), "<sadface>"),
        (r"{}{}[\/|l*]".format(eyes, nose), "<neutralface>"),
        (r"<3", "<heart>"),
        (r"[-+]?[.\d]*[\d]+[:,.\d]*", "<number>"),
        (r"#\S+", expand_hashtag),
        (r"([!?.]){2,}", r"\1 <repeat>"),
        (r"\b(\S*?)(.)\2{2,}\b", r"\1\2 <elong>"),
        ## -- I just don't understand why the Ruby script adds <allcaps> to
        ## everything so I limited the selection to runs of 2+ capitals.
        (r"([A-Z]){2,}", lower_allcaps),
    ]
    for pattern, repl in rules:
        text = re.sub(pattern, repl, text, flags=FLAGS)
    return text
def tokenise(text, with_process=True):
    """Tokenise tweet text, optionally running GloVe-style preprocessing first."""
    if not with_process:
        # return nltk_tokeniser.tokenize(text)
        # ark-twokenize on the raw, lower-cased text.
        return tweet_ark_tokenize(text.lower())
    return nltk_tokeniser.tokenize(process_tweet(text).lower())
def load_embeddings(fp, embedding_size, pad_token="<pad>", unk_token="<unk>"):
    """Load word vectors from a word2vec-format text file.

    Args:
        fp: path to the embedding file; each line is "word v1 v2 ... vN".
            A two-column line (the word2vec "count dim" header) is skipped.
        embedding_size: expected dimensionality N of each vector.
        pad_token: token mapped to index 0 (zero vector).
        unk_token: token mapped to index 1 (small random vector).

    Returns:
        (word2id, embedding): word2id maps word -> row index of the embedding
        matrix; vocabulary words occupy rows 2..len(vocab)+1.
    """
    embedding = []
    vocab = []
    # Open as UTF-8 text (the Python-2 version read bytes and decoded each line).
    with open(fp, 'r', encoding='utf-8') as f:
        for each_line in f:
            row = each_line.split(' ')
            if len(row) == 2:
                continue  # word2vec header line: "<vocab_size> <dim>"
            vocab.append(row[0])
            if len(row[1:]) != embedding_size:
                # Diagnostic for malformed lines (unexpected vector length).
                print(row[0])
                print(len(row[1:]))
            embedding.append(np.asarray(row[1:], dtype='float32'))
    # BUG FIX: was range(2, len(vocab)) — zip truncated to the shorter
    # sequence, silently dropping the last two vocabulary words.
    word2id = dict(zip(vocab, range(2, len(vocab) + 2)))
    word2id[pad_token] = 0
    word2id[unk_token] = 1
    extra_embedding = [np.zeros(embedding_size), np.random.uniform(-0.1, 0.1, embedding_size)]
    embedding = np.append(extra_embedding, embedding, 0)
    return word2id, embedding
def read_data(fps, word2id=None, y_len=1, use_target_description=False, use_image=False, delete_irregularities=False):
    """Load clickbait-challenge instances (and optional truth labels).

    NOTE(review): Python-2 code (`.iteritems()`, `print` statement); needs
    porting before it can run under Python 3.

    Args:
        fps: list of dataset directories, each containing instances.jsonl
            (plus truth.jsonl / image features when requested).
        word2id: vocabulary mapping; when given, texts are tokenised and
            mapped to ids (unknown -> 1, empty text -> [0]); otherwise the
            raw strings are kept as single-element lists.
        y_len: label encoding: 0 = unlabeled split, 1 = scalar class,
            2 = one-hot binary, 4 = distribution over four judgment bins.
        use_target_description: also process the article title.
        use_image: attach precomputed image features via id2imageidx.json
            and image_features.hkl in each directory.
        delete_irregularities: drop items whose class label contradicts
            their truthMean score (clickbait with mean < 0.5 or vice versa).

    Returns:
        Parallel lists: (ids, post_texts, truth_classes, post_text_lens,
        truth_means, target_descriptions, target_description_lens,
        image_features).
    """
    ids = []
    post_texts = []
    post_text_lens = []
    truth_means = []
    truth_classes = []
    id2truth_class = {}
    id2truth_mean = {}
    target_descriptions = []
    target_description_lens = []
    image_features = []
    num = 0  # count of instances skipped because they have no truth entry
    for fp in fps:
        if use_image:
            with open(os.path.join(fp, "id2imageidx.json"), "r") as fin:
                id2imageidx = json.load(fin)
            all_image_features = hickle.load(os.path.join(fp, "image_features.hkl"))
        if y_len:
            # First pass: index truth labels by instance id.
            with open(os.path.join(fp, 'truth.jsonl'), 'rb') as fin:
                for each_line in fin:
                    each_item = json.loads(each_line.decode('utf-8'))
                    if delete_irregularities:
                        # Skip items whose binary class disagrees with truthMean.
                        if each_item["truthClass"] == "clickbait" and float(each_item["truthMean"]) < 0.5 or each_item["truthClass"] != "clickbait" and float(each_item["truthMean"]) > 0.5:
                            continue
                    if y_len == 4:
                        # Empirical distribution over the four judgment bins
                        # (judgments are in [0, 1]; bin width 0.3, 5 judges).
                        each_label = [0, 0, 0, 0]
                        for each_key, each_value in Counter(each_item["truthJudgments"]).iteritems():
                            each_label[int(each_key//0.3)] = float(each_value)/5
                        id2truth_class[each_item["id"]] = each_label
                        # Sanity check: the distribution's mass must agree
                        # with the binary class.
                        if each_item["truthClass"] != "clickbait":
                            assert each_label[0]+each_label[1] > each_label[2]+each_label[3]
                        else:
                            assert each_label[0]+each_label[1] < each_label[2]+each_label[3]
                    if y_len == 2:
                        # One-hot: [clickbait, not-clickbait].
                        if each_item["truthClass"] == "clickbait":
                            id2truth_class[each_item["id"]] = [1, 0]
                        else:
                            id2truth_class[each_item["id"]] = [0, 1]
                    if y_len == 1:
                        if each_item["truthClass"] == "clickbait":
                            id2truth_class[each_item["id"]] = [1]
                        else:
                            id2truth_class[each_item["id"]] = [0]
                    id2truth_mean[each_item["id"]] = [float(each_item["truthMean"])]
        # Second pass: read the instances themselves.
        with open(os.path.join(fp, 'instances.jsonl'), 'rb') as fin:
            for each_line in fin:
                each_item = json.loads(each_line.decode('utf-8'))
                if each_item["id"] not in id2truth_class and y_len:
                    num += 1
                    continue
                ids.append(each_item["id"])
                each_post_text = " ".join(each_item["postText"])
                each_target_description = each_item["targetTitle"]
                if y_len:
                    truth_means.append(id2truth_mean[each_item["id"]])
                    truth_classes.append(id2truth_class[each_item["id"]])
                if word2id:
                    if (each_post_text+" ").isspace():
                        # Empty post text: encode as a single PAD token.
                        post_texts.append([0])
                        post_text_lens.append(1)
                    else:
                        each_post_tokens = tokenise(each_post_text)
                        post_texts.append([word2id.get(each_token, 1) for each_token in each_post_tokens])
                        post_text_lens.append(len(each_post_tokens))
                else:
                    post_texts.append([each_post_text])
                if use_target_description:
                    if word2id:
                        if (each_target_description+" ").isspace():
                            target_descriptions.append([0])
                            target_description_lens.append(1)
                        else:
                            each_target_description_tokens = tokenise(each_target_description)
                            target_descriptions.append([word2id.get(each_token, 1) for each_token in each_target_description_tokens])
                            target_description_lens.append(len(each_target_description_tokens))
                    else:
                        target_descriptions.append([each_target_description])
                else:
                    target_descriptions.append([])
                    target_description_lens.append(0)
                if use_image:
                    image_features.append(all_image_features[id2imageidx[each_item["id"]]].flatten())
                else:
                    image_features.append([])
    print "Deleted number of items: " + str(num)
    return ids, post_texts, truth_classes, post_text_lens, truth_means, target_descriptions, target_description_lens, image_features
def pad_sequences(sequences, maxlen):
    """Right-pad (or truncate) each sequence to exactly ``maxlen`` ids.

    Returns the input unchanged when ``maxlen <= 0``; otherwise returns an
    int matrix of shape (len(sequences), maxlen) padded with zeros.
    """
    if maxlen <= 0:
        return sequences
    padded = np.zeros((len(sequences), maxlen), dtype=int)
    for row, seq in enumerate(sequences):
        clipped = seq[:maxlen]
        padded[row, :len(clipped)] = clipped
    return padded
def get_batch(data, batch_size, shuffle=True):
    """Yield successive mini-batches (numpy arrays) covering one epoch.

    The final batch may be smaller than ``batch_size``. When ``shuffle``
    is set, the data order is permuted once per call.
    """
    data = np.array(data)
    n = len(data)
    if shuffle:
        data = data[np.random.permutation(np.arange(n))]
    num_batches = int((n - 1) / batch_size) + 1
    for b in range(num_batches):
        lo = b * batch_size
        hi = min(lo + batch_size, n)
        yield data[lo:hi]
def generate_embeddings(fp):
    """Train word2vec embeddings over every text field of the corpus splits.

    Reads instances.jsonl from the three hard-coded dataset directories,
    tokenises post text, target title/description, paragraphs and captions,
    trains a gensim Word2Vec model and writes the vectors in word2vec text
    format to ``fp``/s_clickbait.100.txt.
    """
    sentences = []
    files = ["/data/clickbait17-train-170331", "/data/clickbait17-validation-170630", "/data/clickbait17-unlabeled-170429"]
    for each_fp in files:
        with open(os.path.join(each_fp, 'instances.jsonl'), 'rb') as f:
            for each_line in f:
                each_item = json.loads(each_line.decode('utf-8'))
                for each_sentence in each_item["postText"]:
                    sentences.append(tokenise(each_sentence))
                if each_item["targetTitle"]:
                    sentences.append(tokenise(each_item["targetTitle"]))
                if each_item["targetDescription"]:
                    sentences.append(tokenise(each_item["targetDescription"]))
                for each_sentence in each_item["targetParagraphs"]:
                    sentences.append(tokenise(each_sentence))
                for each_sentence in each_item["targetCaptions"]:
                    sentences.append(tokenise(each_sentence))
    word2vec_model = Word2Vec(sentences)
    word2vec_model.wv.save_word2vec_format(os.path.join(fp, "s_clickbait.100.txt"), binary=False)
def extract_vgg_info(vgg_path):
    """Load the pretrained VGG-19 .mat file; return (pixel mean, layer weights)."""
    vgg_data = scipy.io.loadmat(vgg_path)
    normalization = vgg_data['normalization'][0][0][0]
    pixel_mean = np.mean(normalization, axis=(0, 1))
    layer_weights = vgg_data["layers"][0]
    return pixel_mean, layer_weights
def process_image(image_path, mat_mean):
    # Load an RGB image, resize it to the VGG input size and subtract the
    # pixel mean from the pretrained model.
    # NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.3;
    # porting forward requires PIL or imageio here — confirm target SciPy version.
    image = scipy.misc.imread(image_path, mode="RGB")
    image = scipy.misc.imresize(image, [224, 224])
    return image - mat_mean
# VGG-19 layer names in forward order, truncated after relu5_4 (no pooling
# of the last block and no fully-connected head). Index i is used to look up
# entry i of the weight array passed to VGG19 — presumably matching the
# layer order of the .mat weight file; confirm against the file.
vgg_layers = ['conv1_1', 'relu1_1',
              'conv1_2', 'relu1_2', 'pool1',
              'conv2_1', 'relu2_1',
              'conv2_2', 'relu2_2', 'pool2',
              'conv3_1', 'relu3_1',
              'conv3_2', 'relu3_2',
              'conv3_3', 'relu3_3',
              'conv3_4', 'relu3_4', 'pool3',
              'conv4_1', 'relu4_1',
              'conv4_2', 'relu4_2',
              'conv4_3', 'relu4_3',
              'conv4_4', 'relu4_4', 'pool4',
              'conv5_1', 'relu5_1',
              'conv5_2', 'relu5_2',
              'conv5_3', 'relu5_3',
              'conv5_4', 'relu5_4']
class VGG19(object):
    """TF-1.x graph replaying pretrained VGG-19 convolutional layers.

    Feed: ``images`` placeholder of shape (N, 224, 224, 3).
    Output: ``features`` — the conv5_3 activations reshaped to (N, 196, 512).
    """
    def __init__(self, network_weights):
        # network_weights: array of per-layer weight entries from the VGG
        # .mat file (see extract_vgg_info).
        self.images = tf.placeholder(tf.float32, [None, 224, 224, 3], "images")
        with tf.variable_scope("image_encoder"):
            for i, layer in enumerate(vgg_layers):
                layer_type = layer[:4]
                if layer_type == "conv":
                    weights, bias = network_weights[i][0][0][0][0]
                    # Swap the first two filter axes — presumably converting
                    # the .mat (w, h, in, out) layout to TF's (h, w, in, out);
                    # confirm against the weight file.
                    weights = np.transpose(weights, (1, 0, 2, 3))
                    bias = bias.reshape(-1)
                    if layer == "conv1_1":
                        h = self.images
                    h = tf.nn.bias_add(tf.nn.conv2d(h, tf.constant(weights), strides=[1, 1, 1, 1], padding="SAME"), bias)
                elif layer_type == "relu":
                    h = tf.nn.relu(h)
                elif layer_type == "pool":
                    h = tf.nn.max_pool(h, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
                if layer == "conv5_3":
                    self.features = tf.reshape(h, [-1, 196, 512])
def extract_image_features(fp="/data/clickbait17-train-170331"):
    """Precompute VGG-19 conv5_3 features for every image under ``fp``/media.

    Writes two files into ``fp``: id2imageidx.json (post id -> row in the
    feature matrix; row 0 is a random "no image" feature) and
    image_features.hkl (the (n_images+1, 196, 512) float32 matrix).
    """
    # # generate an blank image for no image occasions
    # image = Image.new("RGB", (224, 224))
    # image.save(os.path.join(fp, "media", "_.png"), "PNG")
    id2imageidx = {}
    image_names = [f for f in os.listdir(os.path.join(fp, "media")) if os.path.isfile(os.path.join(fp, "media", f))]
    # Map each post id to its image's row index (0 = no image).
    with open(os.path.join(fp, 'instances.jsonl'), 'rb') as f:
        for each_line in f:
            each_item = json.loads(each_line.decode('utf-8'))
            if each_item["postMedia"]:
                id2imageidx[each_item["id"]] = image_names.index(each_item["postMedia"][0].split("/")[1])+1 # index 0 reserved for no image
            else:
                id2imageidx[each_item["id"]] = 0
    with open(os.path.join(fp, 'id2imageidx.json'), 'w') as fout:
        json.dump(id2imageidx, fp=fout)
    batch_size = 100
    n_examples = len(image_names)
    all_image_features = np.ndarray([n_examples+1, 196, 512], dtype=np.float32)
    # Row 0: random feature vector used for posts without an image.
    all_image_features[0, :] = np.random.uniform(-0.1, 0.1, [196, 512])
    mat_mean, network_weights = extract_vgg_info("/data/imagenet-vgg-verydeep-19.mat")
    vggnet = VGG19(network_weights)
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for start, end in zip(range(0, n_examples, batch_size), range(batch_size, n_examples+batch_size, batch_size)):
            image_name_batch = image_names[start:end]
            # NOTE(review): bare map() is Python 2 usage — np.array over a
            # Python-3 map object yields a 0-d object array; wrap in list()
            # when porting.
            image_batch = np.array(map(lambda f: process_image(os.path.join(fp, "media", f), mat_mean), image_name_batch)).astype(np.float32)
            image_features_batch = sess.run(vggnet.features, feed_dict={vggnet.images: image_batch})
            all_image_features[start+1:end+1, :] = image_features_batch
    hickle.dump(all_image_features, os.path.join(fp, "image_features.hkl"))
if __name__ == '__main__':
    # Ad-hoc entry point: precompute VGG image features for one dataset split.
    # text = "I TEST alllll kinds of #hashtags and #HASHTAGS, @mentions and 3000 (http://t.co/dkfjkdf). w/ <3 :) haha!!!!!"
    # print(tokenise(text, True))
    # read_data(fp="/data/clickbait17-validation-170630", y_len=4)
    extract_image_features("/data/clickbait17-validation-170630")
| 14,046
| 43.312303
| 188
|
py
|
cc
|
cc-master/tweet_utils.py
|
# -*- coding: utf-8 -*-
"""
Twokenize -- a tokenizer designed for Twitter text in English and some other European languages.
This tokenizer code has gone through a long history:
(1) Brendan O'Connor wrote original version in Python, http://github.com/brendano/tweetmotif
TweetMotif: Exploratory Search and Topic Summarization for Twitter.
Brendan O'Connor, Michel Krieger, and David Ahn.
ICWSM-2010 (demo track), http://brenocon.com/oconnor_krieger_ahn.icwsm2010.tweetmotif.pdf
(2a) Kevin Gimpel and Daniel Mills modified it for POS tagging for the CMU ARK Twitter POS Tagger
(2b) Jason Baldridge and David Snyder ported it to Scala
(3) Brendan bugfixed the Scala port and merged with POS-specific changes
for the CMU ARK Twitter POS Tagger
(4) Tobi Owoputi ported it back to Java and added many improvements (2012-06)
Current home is http://github.com/brendano/ark-tweet-nlp and http://www.ark.cs.cmu.edu/TweetNLP
There have been at least 2 other Java ports, but they are not in the lineage for the code here.
Ported to Python by Myle Ott <myleott@gmail.com>.
"""
from __future__ import print_function
import operator
import re
import HTMLParser
def regex_or(*items):
    """Join regex alternatives into a single non-capturing group."""
    return "(?:{})".format("|".join(items))
Contractions = re.compile(u"(?i)(\w+)(n['’′]t|['’′]ve|['’′]ll|['’′]d|['’′]re|['’′]s|['’′]m)$", re.UNICODE)
Whitespace = re.compile(u"[\s\u0020\u00a0\u1680\u180e\u202f\u205f\u3000\u2000-\u200a]+", re.UNICODE)
punctChars = r"['\"“”‘’.?!…,:;]"
#punctSeq = punctChars+"+" #'anthem'. => ' anthem '.
punctSeq = r"['\"“”‘’]+|[.?!,…]+|[:;]+" #'anthem'. => ' anthem ' .
entity = r"&(?:amp|lt|gt|quot);"
# URLs
# BTO 2012-06: everyone thinks the daringfireball regex should be better, but they're wrong.
# If you actually empirically test it the results are bad.
# Please see https://github.com/brendano/ark-tweet-nlp/pull/9
urlStart1 = r"(?:https?://|\bwww\.)"
commonTLDs = r"(?:com|org|edu|gov|net|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|pro|tel|travel|xxx)"
ccTLDs = r"(?:ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|" + \
r"bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|" + \
r"er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|" + \
r"hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|" + \
r"lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|" + \
r"nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|sk|" + \
r"sl|sm|sn|so|sr|ss|st|su|sv|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|" + \
r"va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|za|zm|zw)" #TODO: remove obscure country domains?
urlStart2 = r"\b(?:[A-Za-z\d-])+(?:\.[A-Za-z0-9]+){0,3}\." + regex_or(commonTLDs, ccTLDs) + r"(?:\."+ccTLDs+r")?(?=\W|$)"
urlBody = r"(?:[^\.\s<>][^\s<>]*?)?"
urlExtraCrapBeforeEnd = regex_or(punctChars, entity) + "+?"
urlEnd = r"(?:\.\.+|[<>]|\s|$)"
url = regex_or(urlStart1, urlStart2) + urlBody + "(?=(?:"+urlExtraCrapBeforeEnd+")?"+urlEnd+")"
# Numeric
timeLike = r"\d+(?::\d+){1,2}"
#numNum = r"\d+\.\d+"
numberWithCommas = r"(?:(?<!\d)\d{1,3},)+?\d{3}" + r"(?=(?:[^,\d]|$))"
numComb = u"[\u0024\u058f\u060b\u09f2\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\ua838\ufdfc\ufe69\uff04\uffe0\uffe1\uffe5\uffe6\u00a2-\u00a5\u20a0-\u20b9]?\\d+(?:\\.\\d+)+%?".encode('utf-8')
# Abbreviations
boundaryNotDot = regex_or("$", r"\s", r"[“\"?!,:;]", entity)
aa1 = r"(?:[A-Za-z]\.){2,}(?=" + boundaryNotDot + ")"
aa2 = r"[^A-Za-z](?:[A-Za-z]\.){1,}[A-Za-z](?=" + boundaryNotDot + ")"
standardAbbreviations = r"\b(?:[Mm]r|[Mm]rs|[Mm]s|[Dd]r|[Ss]r|[Jj]r|[Rr]ep|[Ss]en|[Ss]t)\."
arbitraryAbbrev = regex_or(aa1, aa2, standardAbbreviations)
separators = "(?:--+|―|—|~|–|=)"
decorations = u"(?:[♫♪]+|[★☆]+|[♥❤♡]+|[\u2639-\u263b]+|[\ue001-\uebbb]+)".encode('utf-8')
thingsThatSplitWords = r"[^\s\.,?\"]"
embeddedApostrophe = thingsThatSplitWords+r"+['’′]" + thingsThatSplitWords + "*"
# Emoticons
# myleott: in Python the (?iu) flags affect the whole expression
#normalEyes = "(?iu)[:=]" # 8 and x are eyes but cause problems
normalEyes = "[:=]" # 8 and x are eyes but cause problems
wink = "[;]"
noseArea = "(?:|-|[^a-zA-Z0-9 ])" # doesn't get :'-(
happyMouths = r"[D\)\]\}]+"
sadMouths = r"[\(\[\{]+"
tongue = "[pPd3]+"
otherMouths = r"(?:[oO]+|[/\\]+|[vV]+|[Ss]+|[|]+)" # remove forward slash if http://'s aren't cleaned
# mouth repetition examples:
# @aliciakeys Put it in a love song :-))
# @hellocalyclops =))=))=)) Oh well
# myleott: try to be as case insensitive as possible, but still not perfect, e.g., o.O fails
#bfLeft = u"(♥|0|o|°|v|\\$|t|x|;|\u0ca0|@|ʘ|•|・|◕|\\^|¬|\\*)".encode('utf-8')
bfLeft = u"(♥|0|[oO]|°|[vV]|\\$|[tT]|[xX]|;|\u0ca0|@|ʘ|•|・|◕|\\^|¬|\\*)".encode('utf-8')
bfCenter = r"(?:[\.]|[_-]+)"
bfRight = r"\2"
s3 = r"(?:--['\"])"
s4 = r"(?:<|<|>|>)[\._-]+(?:<|<|>|>)"
s5 = "(?:[.][_]+[.])"
# myleott: in Python the (?i) flag affects the whole expression
#basicface = "(?:(?i)" +bfLeft+bfCenter+bfRight+ ")|" +s3+ "|" +s4+ "|" + s5
basicface = "(?:" +bfLeft+bfCenter+bfRight+ ")|" +s3+ "|" +s4+ "|" + s5
eeLeft = r"[\\\ƪԄ\((<>;ヽ\-=~\*]+"
eeRight= u"[\\-=\\);'\u0022<>ʃ)//ノノ丿╯σっµ~\\*]+".encode('utf-8')
eeSymbol = r"[^A-Za-z0-9\s\(\)\*:=-]"
eastEmote = eeLeft + "(?:"+basicface+"|" +eeSymbol+")+" + eeRight
oOEmote = r"(?:[oO]" + bfCenter + r"[oO])"
emoticon = regex_or(
# Standard version :) :( :] :D :P
"(?:>|>)?" + regex_or(normalEyes, wink) + regex_or(noseArea,"[Oo]") + regex_or(tongue+r"(?=\W|$|RT|rt|Rt)", otherMouths+r"(?=\W|$|RT|rt|Rt)", sadMouths, happyMouths),
# reversed version (: D: use positive lookbehind to remove "(word):"
# because eyes on the right side is more ambiguous with the standard usage of : ;
regex_or("(?<=(?: ))", "(?<=(?:^))") + regex_or(sadMouths,happyMouths,otherMouths) + noseArea + regex_or(normalEyes, wink) + "(?:<|<)?",
#inspired by http://en.wikipedia.org/wiki/User:Scapler/emoticons#East_Asian_style
eastEmote.replace("2", "1", 1), basicface,
# iOS 'emoji' characters (some smileys, some symbols) [\ue001-\uebbb]
# TODO should try a big precompiled lexicon from Wikipedia, Dan Ramage told me (BTO) he does this
# myleott: o.O and O.o are two of the biggest sources of differences
# between this and the Java version. One little hack won't hurt...
oOEmote
)
Hearts = "(?:<+/?3+)+" #the other hearts are in decorations
Arrows = regex_or(r"(?:<*[-―—=]*>+|<+[-―—=]*>*)", u"[\u2190-\u21ff]+".encode('utf-8'))
# BTO 2011-06: restored Hashtag, AtMention protection (dropped in original scala port) because it fixes
# "hello (#hashtag)" ==> "hello (#hashtag )" WRONG
# "hello (#hashtag)" ==> "hello ( #hashtag )" RIGHT
# "hello (@person)" ==> "hello (@person )" WRONG
# "hello (@person)" ==> "hello ( @person )" RIGHT
# ... Some sort of weird interaction with edgepunct I guess, because edgepunct
# has poor content-symbol detection.
# This also gets #1 #40 which probably aren't hashtags .. but good as tokens.
# If you want good hashtag identification, use a different regex.
Hashtag = "#[a-zA-Z0-9_]+" #optional: lookbehind for \b
#optional: lookbehind for \b, max length 15
AtMention = "[@@][a-zA-Z0-9_]+"
# I was worried this would conflict with at-mentions
# but seems ok in sample of 5800: 7 changes all email fixes
# http://www.regular-expressions.info/email.html
Bound = r"(?:\W|^|$)"
Email = regex_or("(?<=(?:\W))", "(?<=(?:^))") + r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,4}(?=" +Bound+")"
# We will be tokenizing using these regexps as delimiters
# Additionally, these things are "protected", meaning they shouldn't be further split themselves.
Protected = re.compile(
unicode(regex_or(
Hearts,
url,
Email,
timeLike,
#numNum,
numberWithCommas,
numComb,
emoticon,
Arrows,
entity,
punctSeq,
arbitraryAbbrev,
separators,
decorations,
embeddedApostrophe,
Hashtag,
AtMention
).decode('utf-8')), re.UNICODE)
# Edge punctuation
# Want: 'foo' => ' foo '
# While also: don't => don't
# the first is considered "edge punctuation".
# the second is word-internal punctuation -- don't want to mess with it.
# BTO (2011-06): the edgepunct system seems to be the #1 source of problems these days.
# I remember it causing lots of trouble in the past as well. Would be good to revisit or eliminate.
# Note the 'smart quotes' (http://en.wikipedia.org/wiki/Smart_quotes)
#edgePunctChars = r"'\"“”‘’«»{}\(\)\[\]\*&" #add \\p{So}? (symbols)
edgePunctChars = u"'\"“”‘’«»{}\\(\\)\\[\\]\\*&" #add \\p{So}? (symbols)
edgePunct = "[" + edgePunctChars + "]"
notEdgePunct = "[a-zA-Z0-9]" # content characters
offEdge = r"(^|$|:|;|\s|\.|,)" # colon here gets "(hello):" ==> "( hello ):"
EdgePunctLeft = re.compile(offEdge + "("+edgePunct+"+)("+notEdgePunct+")", re.UNICODE)
EdgePunctRight = re.compile("("+notEdgePunct+")("+edgePunct+"+)" + offEdge, re.UNICODE)
def splitEdgePunct(input):
    # Insert spaces between "edge" punctuation (quotes, brackets, ...) and
    # adjacent word content on either side, so it tokenises separately.
    for pattern, repl in ((EdgePunctLeft, r"\1\2 \3"), (EdgePunctRight, r"\1 \2\3")):
        input = pattern.sub(repl, input)
    return input
# The main work of tokenizing a tweet.
def simpleTokenize(text):
    """Core tokenizer: split ``text`` into tokens, protecting special spans.

    Spans matched by ``Protected`` (URLs, emoticons, numbers, hashtags,
    at-mentions, ...) are kept as single tokens; the stretches between them
    are split on spaces after edge punctuation has been spaced out.
    """
    # Do the no-brainers first
    splitPunctText = splitEdgePunct(text)
    textLength = len(splitPunctText)
    # BTO: the logic here got quite convoluted via the Scala porting detour
    # It would be good to switch back to a nice simple procedural style like in the Python version
    # ... Scala is such a pain. Never again.
    # Find the matches for subsequences that should be protected,
    # e.g. URLs, 1.0, U.N.K.L.E., 12:53
    bads = []
    badSpans = []
    for match in Protected.finditer(splitPunctText):
        # The spans of the "bads" should not be split.
        if (match.start() != match.end()): #unnecessary?
            bads.append( [splitPunctText[match.start():match.end()]] )
            badSpans.append( (match.start(), match.end()) )
    # Create a list of indices to create the "goods", which can be
    # split. We are taking "bad" spans like
    #     List((2,5), (8,10))
    # to create
    #     List(0, 2, 5, 8, 10, 12)
    # where, e.g., "12" here would be the textLength
    # has an even length and no indices are the same
    indices = [0]
    for (first, second) in badSpans:
        indices.append(first)
        indices.append(second)
    indices.append(textLength)
    # Group the indices and map them to their respective portion of the string
    splitGoods = []
    for i in range(0, len(indices), 2):
        goodstr = splitPunctText[indices[i]:indices[i+1]]
        splitstr = goodstr.strip().split(" ")
        splitGoods.append(splitstr)
    # Reinterpolate the 'good' and 'bad' Lists, ensuring that
    # additonal tokens from last good item get included
    zippedStr = []
    for i in range(len(bads)):
        zippedStr = addAllnonempty(zippedStr, splitGoods[i])
        zippedStr = addAllnonempty(zippedStr, bads[i])
    zippedStr = addAllnonempty(zippedStr, splitGoods[len(bads)])
    # BTO: our POS tagger wants "ur" and "you're" to both be one token.
    # Uncomment to get "you 're"
    #splitStr = []
    #for tok in zippedStr:
    #    splitStr.extend(splitToken(tok))
    #zippedStr = splitStr
    return zippedStr
def addAllnonempty(master, smaller):
    """Append the stripped, non-empty strings of ``smaller`` onto ``master``.

    Mutates and returns ``master``.
    """
    master.extend(s.strip() for s in smaller if s.strip())
    return master
# "foo bar " => "foo bar"
def squeezeWhitespace(input):
    """Collapse runs of (unicode) whitespace to single spaces and trim ends."""
    collapsed = Whitespace.sub(" ", input)
    return collapsed.strip()
# Final pass tokenization based on special patterns
def splitToken(token):
    # Split English contractions ("don't" -> ["do", "n't"]); anything else
    # is returned as a single-element list.
    match = Contractions.search(token)
    if match is None:
        return [token]
    return [match.group(1), match.group(2)]
# Assume 'text' has no HTML escaping.
def tweet_ark_tokenize(text):
    """Tokenize tweet text. Assumes ``text`` has no HTML escaping."""
    squeezed = squeezeWhitespace(text)
    return simpleTokenize(squeezed)
# Twitter text comes HTML-escaped, so unescape it.
# We also first unescape &'s, in case the text has been buggily double-escaped.
def normalizeTextForTagger(text):
    """Undo HTML escaping in Twitter text.

    Twitter text comes HTML-escaped; "&amp;" is unescaped first in case the
    text has been buggily double-escaped.
    """
    # Fix: the Python-2-only HTMLParser.HTMLParser().unescape() (module
    # renamed in Python 3; method removed in 3.9) is replaced by the
    # equivalent stdlib html.unescape().
    import html
    text = text.replace("&amp;", "&")
    return html.unescape(text)
# This is intended for raw tweet text -- we do some HTML entity unescaping before running the tagger.
#
# This function normalizes the input text BEFORE calling the tokenizer.
# So the tokens you get back may not exactly correspond to
# substrings of the original text.
def tokenizeRawTweetText(text):
    """Tokenize raw (HTML-escaped) tweet text.

    HTML entities are unescaped BEFORE tokenizing, so the returned tokens
    may not be exact substrings of the original text.
    """
    return tweet_ark_tokenize(normalizeTextForTagger(text))
if __name__ == '__main__':
    # Smoke test for the twokenizer on a kitchen-sink example.
    text = "I TEST alllll kinds of #hashtags and #HASHTAGS, @mentions and 3000 (http://t.co/dkfjkdf). w/ <3 :) haha!!!!!"
    print(tweet_ark_tokenize(text))
| 13,028
| 41.718033
| 185
|
py
|
cc
|
cc-master/models.py
|
import tensorflow as tf
class CNN:
    """TF-1.x text CNN for clickbait scoring (Kim-style conv + max-pool).

    Builds input placeholders, an embedding lookup over the post text,
    parallel conv/max-pool branches (one per filter size), optional
    image-feature fusion and hidden layer, and a prediction/loss head whose
    form depends on ``y_len``: 1 = sigmoid regression on truthMean,
    2 = binary softmax, 4 = softmax distribution over the judgment bins.
    ``x2_maxlen`` and ``state_size`` are accepted for signature parity with
    the RNN models but unused here.
    """
    def __init__(self, x1_maxlen, x2_maxlen, y_len, embedding, filter_sizes, num_filters, hidden_size, state_size, x3_size):
        # ---- inputs -----------------------------------------------------
        self.input_x1 = tf.placeholder(tf.int32, [None, x1_maxlen], name="post_text")
        self.input_x1_len = tf.placeholder(tf.int32, [None, ], name="post_text_len")
        self.input_x2 = tf.placeholder(tf.int32, [None, x2_maxlen], name="target_description")
        self.input_x2_len = tf.placeholder(tf.int32, [None, ], name="target_description_len")
        self.input_x3 = tf.placeholder(tf.float32, [None, x3_size], name="image_feature")
        self.input_y = tf.placeholder(tf.float32, [None, y_len], name="truth_class")
        self.input_z = tf.placeholder(tf.float32, [None, 1], name="truth_mean")
        self.dropout_rate_embedding = tf.placeholder(tf.float32, name="dropout_rate_embedding")
        self.dropout_rate_hidden = tf.placeholder(tf.float32, name="dropout_rate_hidden")
        self.dropout_rate_cell = tf.placeholder(tf.float32, name="dropout_rate_cell")
        self.batch_size = tf.placeholder(tf.int32, name="batch_size")
        # ---- embedding --------------------------------------------------
        self.W = tf.get_variable(shape=embedding.shape, initializer=tf.constant_initializer(embedding), name="embedding")
        self.embedded_input_x1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        # NOTE(review): tf.layers.dropout's ``rate`` is the DROP fraction;
        # using 1 - dropout_rate_* treats the placeholders as keep
        # probabilities — confirm this matches how the trainer feeds them.
        self.embedded_input_x1 = tf.layers.dropout(self.embedded_input_x1, rate=1-self.dropout_rate_embedding)
        self.embedded_input_x1_expanded = tf.expand_dims(self.embedded_input_x1, -1)
        # ---- conv / max-pool branches, one per filter size --------------
        pooled_outputs1 = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("1-conv-maxpool-%s" % filter_size):
                filter_shape = [filter_size, embedding.shape[1], 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="filter_weights")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="filter_biases")
                conv = tf.nn.conv2d(self.embedded_input_x1_expanded, W, strides=[1, 1, 1, 1], padding="VALID", name="conv")
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Max over time: pool across all x1_maxlen-filter_size+1 positions.
                pooled = tf.nn.max_pool(h, ksize=[1, x1_maxlen-filter_size+1, 1, 1], strides=[1, 1, 1, 1], padding="VALID", name="pool")
                pooled_outputs1.append(pooled)
        num_features = num_filters*len(filter_sizes)
        self.h_pool1 = tf.concat(pooled_outputs1, 3)
        self.h_pool_flat1 = tf.reshape(self.h_pool1, [-1, num_features])
        # ---- optional image-feature fusion and hidden layer -------------
        if x3_size:
            # Compress raw image features (e.g. flattened VGG maps) before fusing.
            self.compressed_input_x3 = tf.layers.dense(tf.layers.dense(self.input_x3, 1024, activation=tf.nn.relu), 256, activation=tf.nn.relu)
            self.h_pool_flat1 = tf.concat([self.h_pool_flat1, self.compressed_input_x3], axis=-1)
        if hidden_size:
            self.h_pool_flat1 = tf.layers.dense(self.h_pool_flat1, hidden_size, activation=tf.nn.relu)
        self.h_drop1 = tf.layers.dropout(self.h_pool_flat1, rate=1-self.dropout_rate_hidden)
        self.scores = tf.layers.dense(inputs=self.h_drop1, units=y_len)
        # ---- prediction / loss head, depending on label encoding --------
        if y_len == 1:
            # Regression on truthMean with a sigmoid output.
            self.predictions = tf.nn.sigmoid(self.scores, name="prediction")
            self.loss = tf.losses.mean_squared_error(self.input_z, self.predictions)
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.cast(tf.round(self.predictions), tf.int32), tf.cast(tf.round(self.input_y), tf.int32))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 2:
            # Binary softmax; prediction is the clickbait-class probability.
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y))
            self.predictions = tf.slice(tf.nn.softmax(self.scores), [0, 0], [-1, 1], name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(self.scores, 1), tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 4:
            # Distribution over the 4 judgment bins; prediction is its
            # expectation (bin centres 0, 1/3, 2/3, 1).
            self.normalised_scores = tf.nn.softmax(self.scores, name="distribution")
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.scores))
            self.predictions = tf.matmul(self.normalised_scores, tf.constant([0, 0.3333333333, 0.6666666666, 1.0], shape=[4, 1]), name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            # Accuracy: collapse the 4 bins into 2 classes (low vs high) on
            # both prediction and label, then compare argmax.
            correct_predictions = tf.equal(tf.argmax(tf.matmul(self.normalised_scores, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1), tf.argmax(tf.matmul(self.input_y, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
class DAN:
    """Deep Averaging Network over the post text.

    The post tokens are embedded, mean-pooled over the true sequence length
    (length-masked average), optionally passed through one dense ReLU layer,
    and projected to ``y_len`` scores.  The output head depends on ``y_len``:
    1 = sigmoid regression on the mean annotation, 2 = binary softmax,
    4 = softmax over the four annotation levels.  The x2/x3 placeholders and
    ``filter_sizes``/``num_filters``/``state_size`` are accepted only so all
    model classes share one constructor signature; they are not used in the
    computation here.
    """
    def __init__(self, x1_maxlen, x2_maxlen, y_len, embedding, filter_sizes, num_filters, hidden_size, state_size, x3_size):
        # Feed/fetch interface.  Tensor names ("post_text", "prediction",
        # "distribution", ...) are looked up by name when the saved graph is
        # restored at test time, so they must stay stable.
        self.input_x1 = tf.placeholder(tf.int32, [None, x1_maxlen], name="post_text")
        self.input_x1_len = tf.placeholder(tf.int32, [None, ], name="post_text_len")
        self.input_x2 = tf.placeholder(tf.int32, [None, x2_maxlen], name="target_description")
        self.input_x2_len = tf.placeholder(tf.int32, [None, ], name="target_description_len")
        self.input_x3 = tf.placeholder(tf.float32, [None, x3_size], name="image_feature")
        self.input_y = tf.placeholder(tf.float32, [None, y_len], name="truth_class")
        self.input_z = tf.placeholder(tf.float32, [None, 1], name="truth_mean")
        self.dropout_rate_embedding = tf.placeholder(tf.float32, name="dropout_rate_embedding")
        self.dropout_rate_hidden = tf.placeholder(tf.float32, name="dropout_rate_hidden")
        self.dropout_rate_cell = tf.placeholder(tf.float32, name="dropout_rate_cell")
        self.batch_size = tf.placeholder(tf.int32, name="batch_size")
        # Embedding matrix initialised from pre-trained vectors (trainable).
        self.W = tf.get_variable(shape=embedding.shape, initializer=tf.constant_initializer(embedding), name="embedding")
        self.embedded_input_x1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        # NOTE(review): tf.layers.dropout defaults to training=False, so the
        # dropout layers below are inert as written; also `rate` is the DROP
        # fraction, and the `1 - placeholder` form suggests the placeholder
        # carries a keep-probability -- confirm the intended semantics.
        self.embedded_input_x1 = tf.layers.dropout(self.embedded_input_x1, rate=1-self.dropout_rate_embedding)
        # self.avg_input_x1 = tf.reduce_mean(self.embedded_input_x1, axis=1)
        # Length mask broadcast over the embedding dimension, so padding
        # positions contribute nothing to the average below.
        mask = tf.cast(tf.contrib.keras.backend.repeat_elements(tf.expand_dims(tf.sequence_mask(self.input_x1_len, x1_maxlen), axis=-1), embedding.shape[1], axis=2), tf.float32)
        masked_embedded_input_x1 = tf.multiply(self.embedded_input_x1, mask)
        self.avg_input_x1 = tf.reduce_sum(masked_embedded_input_x1, axis=1)/tf.reduce_sum(mask, axis=1)
        if hidden_size:
            self.avg_input_x1 = tf.layers.dense(self.avg_input_x1, hidden_size, activation=tf.nn.relu)
        self.h_drop1 = tf.layers.dropout(self.avg_input_x1, rate=1-self.dropout_rate_hidden)
        self.scores = tf.layers.dense(inputs=self.h_drop1, units=y_len)
        if y_len == 1:
            # Regression head: sigmoid score trained against the mean
            # annotation; accuracy is computed on the rounded score.
            self.predictions = tf.nn.sigmoid(self.scores, name="prediction")
            self.loss = tf.losses.mean_squared_error(self.input_z, self.predictions)
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.cast(tf.round(self.predictions), tf.int32), tf.cast(tf.round(self.input_y), tf.int32))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 2:
            # Binary softmax head; "prediction" is the probability of the
            # first class (column 0 of the softmax output).
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y))
            self.predictions = tf.slice(tf.nn.softmax(self.scores), [0, 0], [-1, 1], name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(self.scores, 1), tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 4:
            # Four-level annotation distribution; "prediction" is its
            # expected value on the {0, 1/3, 2/3, 1} scale, and accuracy is
            # measured after merging the four levels into two (first two
            # columns vs last two, via the [4, 2] 0/1 matrix).
            self.normalised_scores = tf.nn.softmax(self.scores, name="distribution")
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.scores))
            self.predictions = tf.matmul(self.normalised_scores, tf.constant([0, 0.3333333333, 0.6666666666, 1.0], shape=[4, 1]), name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(tf.matmul(self.normalised_scores, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1), tf.argmax(tf.matmul(self.input_y, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
def extract_last(output, lengths):
    """Pick, for every batch row, the RNN output at its last valid step.

    ``output`` is a [batch, time, ...] tensor and ``lengths`` the per-row
    sequence lengths; returns ``output[b, lengths[b] - 1]`` for each b.
    """
    row_ids = tf.range(tf.shape(output)[0])
    gather_idx = tf.stack([row_ids, lengths - 1], axis=-1)
    return tf.gather_nd(output, gather_idx)
class BiRNN:
    """Bidirectional GRU encoder over the post text.

    Runs a forward and a backward GRU over the embedded post tokens and
    feeds the concatenation of the last valid forward output (via
    ``extract_last``) and the backward output at t=0 (the backward pass's
    final output) into the output layer.  ``filter_sizes``/``num_filters``/
    ``hidden_size`` and the x2/x3 placeholders exist only for constructor
    parity with the other model classes and are unused here.
    """
    def __init__(self, x1_maxlen, x2_maxlen, y_len, embedding, filter_sizes, num_filters, hidden_size, state_size, x3_size):
        # Feed/fetch interface; tensor names are restored by name at test time.
        self.input_x1 = tf.placeholder(tf.int32, [None, x1_maxlen], name="post_text")
        self.input_x1_len = tf.placeholder(tf.int32, [None, ], name="post_text_len")
        self.input_x2 = tf.placeholder(tf.int32, [None, x2_maxlen], name="target_description")
        self.input_x2_len = tf.placeholder(tf.int32, [None, ], name="target_description_len")
        self.input_x3 = tf.placeholder(tf.float32, [None, x3_size], name="image_feature")
        self.input_y = tf.placeholder(tf.float32, [None, y_len], name="truth_class")
        self.input_z = tf.placeholder(tf.float32, [None, 1], name="truth_mean")
        self.dropout_rate_embedding = tf.placeholder(tf.float32, name="dropout_rate_embedding")
        self.dropout_rate_hidden = tf.placeholder(tf.float32, name="dropout_rate_hidden")
        self.dropout_rate_cell = tf.placeholder(tf.float32, name="dropout_rate_cell")
        self.batch_size = tf.placeholder(tf.int32, [], name="batch_size")
        self.W = tf.get_variable(shape=embedding.shape, initializer=tf.constant_initializer(embedding), name="embedding")
        self.embedded_input_x1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        # NOTE(review): tf.layers.dropout defaults to training=False (inert
        # here), and `rate=1-x` implies the placeholders carry keep
        # probabilities -- confirm.  The DropoutWrapper below DOES apply.
        self.embedded_input_x1 = tf.layers.dropout(self.embedded_input_x1, rate=1-self.dropout_rate_embedding)
        # Forward and backward GRU cells with output dropout.
        cell_fw = tf.contrib.rnn.GRUCell(state_size)
        cell_dropout_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1-self.dropout_rate_cell)
        initial_state_fw = cell_fw.zero_state(self.batch_size, tf.float32)
        cell_bw = tf.contrib.rnn.GRUCell(state_size)
        cell_dropout_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1-self.dropout_rate_cell)
        initial_state_bw = cell_bw.zero_state(self.batch_size, tf.float32)
        outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_dropout_fw, cell_bw=cell_dropout_bw, inputs=self.embedded_input_x1, sequence_length=self.input_x1_len, initial_state_bw=initial_state_bw, initial_state_fw=initial_state_fw)
        bi_outputs = tf.concat(outputs, 2)
        # bi_outputs/mask are only needed by the commented-out mean/max
        # pooling variants below; the active path uses the two end states.
        mask = tf.cast(tf.contrib.keras.backend.repeat_elements(tf.expand_dims(tf.sequence_mask(self.input_x1_len, x1_maxlen), axis=-1), 2*state_size, axis=2), tf.float32)
        self.h_drop = tf.layers.dropout(tf.concat([extract_last(outputs[0], self.input_x1_len), outputs[1][:, 0, :]], -1), rate=1-self.dropout_rate_hidden)
        # self.h_drop = tf.layers.dropout(tf.reduce_sum(bi_outputs, axis=1)/tf.reduce_sum(mask, axis=1), rate=1-self.dropout_rate_hidden)
        #
        # self.h_drop = tf.layers.dropout(tf.reduce_max(bi_outputs, axis=1), rate=1-self.dropout_rate_hidden)
        self.scores = tf.layers.dense(inputs=self.h_drop, units=y_len)
        if y_len == 1:
            # Regression head: sigmoid score vs mean annotation.
            self.predictions = tf.nn.sigmoid(self.scores, name="prediction")
            self.loss = tf.losses.mean_squared_error(self.input_z, self.predictions)
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.cast(tf.round(self.predictions), tf.int32), tf.cast(tf.round(self.input_y), tf.int32))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 2:
            # Binary softmax head; "prediction" = probability of column 0.
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y))
            self.predictions = tf.slice(tf.nn.softmax(self.scores), [0, 0], [-1, 1], name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(self.scores, 1), tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 4:
            # Four-level distribution head; "prediction" is its expected
            # value on {0, 1/3, 2/3, 1}; accuracy merges 4 levels into 2.
            self.normalised_scores = tf.nn.softmax(self.scores, name="distribution")
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.scores))
            self.predictions = tf.matmul(self.normalised_scores, tf.constant([0, 0.3333333333, 0.6666666666, 1.0], shape=[4, 1]), name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(tf.matmul(self.normalised_scores, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1), tf.argmax(tf.matmul(self.input_y, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
class SAN:
    """Self-attention network over bidirectional GRU outputs.

    A biGRU encodes the post text; ``view_size`` attention distributions
    over the time axis (optionally conditioned on the image feature x3)
    pool the biGRU outputs into a fixed-size vector, and a penalty term
    ||A^T A - I||_F^2 pushes the attention views apart.  ``alpha`` is
    accepted but not used anywhere in this block (TODO confirm intent);
    with a single view the penalty weight ``beta`` is forced to 0.
    """
    def __init__(self, x1_maxlen, x2_maxlen, y_len, embedding, filter_sizes, num_filters, hidden_size, state_size, x3_size, attention_size, view_size=1, alpha=0, beta=0):
        # A one-view attention matrix is trivially orthonormal-able, so the
        # redundancy penalty is meaningless -- disable it.
        if view_size == 1:
            beta = 0
        # Feed/fetch interface; tensor names are restored by name at test time.
        self.input_x1 = tf.placeholder(tf.int32, [None, x1_maxlen], name="post_text")
        self.input_x1_len = tf.placeholder(tf.int32, [None, ], name="post_text_len")
        self.input_x2 = tf.placeholder(tf.int32, [None, x2_maxlen], name="target_description")
        self.input_x2_len = tf.placeholder(tf.int32, [None, ], name="target_description_len")
        self.input_x3 = tf.placeholder(tf.float32, [None, x3_size], name="image_feature")
        self.input_y = tf.placeholder(tf.float32, [None, y_len], name="truth_class")
        self.input_z = tf.placeholder(tf.float32, [None, 1], name="truth_mean")
        self.dropout_rate_embedding = tf.placeholder(tf.float32, name="dropout_rate_embedding")
        self.dropout_rate_hidden = tf.placeholder(tf.float32, name="dropout_rate_hidden")
        self.dropout_rate_cell = tf.placeholder(tf.float32, name="dropout_rate_cell")
        self.batch_size = tf.placeholder(tf.int32, [], name="batch_size")
        with tf.variable_scope("embedding"):
            self.W = tf.get_variable(shape=embedding.shape, initializer=tf.constant_initializer(embedding), name="embedding")
            self.embedded_input_x1 = tf.nn.embedding_lookup(self.W, self.input_x1)
            # NOTE(review): tf.layers.dropout defaults to training=False
            # (inert here); `rate=1-x` suggests the placeholders hold keep
            # probabilities -- confirm.  The DropoutWrapper below DOES apply.
            self.embedded_input_x1 = tf.layers.dropout(self.embedded_input_x1, rate=1-self.dropout_rate_embedding)
        with tf.variable_scope("biRNN"):
            cell_fw = tf.contrib.rnn.GRUCell(state_size)
            cell_dropout_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1-self.dropout_rate_cell)
            initial_state_fw = cell_fw.zero_state(self.batch_size, tf.float32)
            cell_bw = tf.contrib.rnn.GRUCell(state_size)
            cell_dropout_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1-self.dropout_rate_cell)
            initial_state_bw = cell_bw.zero_state(self.batch_size, tf.float32)
            outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_dropout_fw, cell_bw=cell_dropout_bw, inputs=self.embedded_input_x1, sequence_length=self.input_x1_len, initial_state_bw=initial_state_bw, initial_state_fw=initial_state_fw)
            bi_outputs = tf.concat(outputs, 2)
        with tf.variable_scope("attention"):
            # Two-layer attention scorer: tanh(H W_1 [+ image]) W_2, then a
            # softmax over the time axis (dim=1; `dim` is the older alias of
            # `axis` in tf.nn.softmax).
            W_1 = tf.get_variable(shape=[2*state_size, attention_size], initializer=tf.contrib.layers.xavier_initializer(), name="W_1")
            W_2 = tf.get_variable(shape=[attention_size, view_size], initializer=tf.contrib.layers.xavier_initializer(), name="W_2")
            reshaped_bi_outputs = tf.reshape(bi_outputs, shape=[-1, 2*state_size])
            if x3_size:
                # Image feature is projected and tiled across time steps so
                # it can bias every position's attention score.
                # self.compressed_input_x3 = tf.contrib.keras.backend.repeat(tf.layers.dense(tf.layers.dense(self.input_x3, 1024, activation=tf.nn.tanh), attention_size, activation=tf.nn.tanh), x1_maxlen)
                self.compressed_input_x3 = tf.contrib.keras.backend.repeat(tf.layers.dense(self.input_x3, attention_size, activation=tf.nn.tanh), x1_maxlen)
                self.compressed_input_x3 = tf.reshape(self.compressed_input_x3, shape=[-1, attention_size])
                self.attention = tf.nn.softmax(tf.reshape(tf.matmul(tf.nn.tanh(tf.matmul(reshaped_bi_outputs, W_1)+self.compressed_input_x3), W_2), shape=[self.batch_size, x1_maxlen, view_size]), dim=1)
            else:
                self.attention = tf.nn.softmax(tf.reshape(tf.matmul(tf.nn.tanh(tf.matmul(reshaped_bi_outputs, W_1)), W_2), shape=[self.batch_size, x1_maxlen, view_size]), dim=1)
            # Attention-weighted pooling: one 2*state_size vector per view,
            # flattened to [batch, view_size*2*state_size].
            attention_output = tf.reshape(tf.matmul(tf.transpose(bi_outputs, perm=[0, 2, 1]), self.attention), shape=[self.batch_size, view_size*2*state_size])
        with tf.variable_scope("penalty"):
            # Redundancy penalty: squared Frobenius norm of (A^T A - I),
            # encouraging the view attention vectors to be near-orthogonal.
            attention_t = tf.transpose(self.attention, perm=[0, 2, 1])
            attention_t_attention = tf.matmul(attention_t, self.attention)
            identity = tf.reshape(tf.tile(tf.diag(tf.ones([view_size])), [self.batch_size, 1]), shape=[self.batch_size, view_size, view_size])
            self.penalised_term = tf.square(tf.norm(attention_t_attention-identity, ord="euclidean", axis=[1, 2]))
        self.h_drop = tf.layers.dropout(attention_output, rate=1-self.dropout_rate_hidden)
        self.scores = tf.layers.dense(inputs=self.h_drop, units=y_len)
        if y_len == 1:
            # Regression head; loss = MSE + beta * penalty.
            self.predictions = tf.nn.sigmoid(self.scores, name="prediction")
            self.loss = tf.reduce_mean(tf.square(tf.subtract(self.input_z, self.predictions))+beta*self.penalised_term)
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.cast(tf.round(self.predictions), tf.int32), tf.cast(tf.round(self.input_y), tf.int32))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 2:
            # Binary softmax head; "prediction" = probability of column 0.
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)+beta*self.penalised_term)
            self.predictions = tf.slice(tf.nn.softmax(self.scores), [0, 0], [-1, 1], name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(self.scores, 1), tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
        elif y_len == 4:
            # Four-level distribution head; "prediction" is the expected
            # value on {0, 1/3, 2/3, 1}; accuracy merges 4 levels into 2.
            self.normalised_scores = tf.nn.softmax(self.scores, name="distribution")
            self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y, logits=self.scores)+beta*self.penalised_term)
            self.predictions = tf.matmul(self.normalised_scores, tf.constant([0, 0.3333333333, 0.6666666666, 1.0], shape=[4, 1]), name="prediction")
            self.mse = tf.losses.mean_squared_error(self.input_z, self.predictions)
            correct_predictions = tf.equal(tf.argmax(tf.matmul(self.normalised_scores, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1), tf.argmax(tf.matmul(self.input_y, tf.constant([1, 0, 1, 0, 0, 1, 0, 1], shape=[4, 2], dtype=tf.float32)), 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
| 20,233
| 80.58871
| 272
|
py
|
cc
|
cc-master/train.py
|
import tensorflow as tf
from utils import *
from sklearn.model_selection import KFold
from models import *
import time
import datetime
# ---- Command-line configuration (tf.app.flags) ----
# Data locations and run bookkeeping.
tf.app.flags.DEFINE_string("dir", "/data", "folder directory")
tf.app.flags.DEFINE_string("training_file", "clickbait17-validation-170630", "Training data file")
tf.app.flags.DEFINE_string("validation_file", "clickbait17-train-170331", "Validation data file")
# Optimisation hyper-parameters.
tf.app.flags.DEFINE_integer("epochs", 20, "epochs")
tf.app.flags.DEFINE_integer("batch_size", 32, "batch_size")
tf.app.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes")
tf.app.flags.DEFINE_integer("num_filters", 100, "Number of filters per filter size")
tf.app.flags.DEFINE_float("dropout_rate_hidden", 0.5, "Dropout rate of hidden layer")
tf.app.flags.DEFINE_float("dropout_rate_cell", 0.3, "Dropout rate of rnn cell")
tf.app.flags.DEFINE_float("dropout_rate_embedding", 0.2, "Dropout rate of word embedding")
tf.app.flags.DEFINE_integer("state_size", 64, "state_size")
tf.app.flags.DEFINE_integer("hidden_size", 0, "hidden_size")
tf.app.flags.DEFINE_string("timestamp", "0715", "Timestamp")
# Model selection and input modalities.
tf.app.flags.DEFINE_integer("y_len", 4, "how to interpret the annotation")
tf.app.flags.DEFINE_string("model", "SAN", "which model to use")
tf.app.flags.DEFINE_boolean("use_target_description", False, "whether to use the target description as input")
tf.app.flags.DEFINE_boolean("use_image", False, "whether to use the image as input")
tf.app.flags.DEFINE_float("learning_rate", 0.005, "learning rate")
tf.app.flags.DEFINE_integer("embedding_size", 100, "embedding size")
tf.app.flags.DEFINE_float("gradient_clipping_value", 2, "gradient clipping value")
FLAGS = tf.app.flags.FLAGS
def main(argv=None):
    """5-fold cross-validated training entry point (Python 2).

    Loads GloVe embeddings and the clickbait data, shuffles and pads the
    inputs, then for each fold builds a fresh graph/session, trains for
    FLAGS.epochs while tracking the best validation MSE (and the accuracy
    at that point), and finally prints the mean validation MSE/accuracy
    over the five folds.
    """
    np.random.seed(81)  # reproducible shuffle / fold split
    word2id, embedding = load_embeddings(fp=os.path.join(FLAGS.dir, "glove.6B."+str(FLAGS.embedding_size)+"d.txt"), embedding_size=FLAGS.embedding_size)
    # Persist the vocabulary so test_final.py can rebuild identical inputs.
    with open(os.path.join(FLAGS.dir, 'word2id.json'), 'w') as fout:
        json.dump(word2id, fp=fout)
    # vocab_size = embedding.shape[0]
    # embedding_size = embedding.shape[1]
    ids, post_texts, truth_classes, post_text_lens, truth_means, target_descriptions, target_description_lens, image_features = read_data(word2id=word2id, fps=[os.path.join(FLAGS.dir, FLAGS.training_file), os.path.join(FLAGS.dir, FLAGS.validation_file)], y_len=FLAGS.y_len, use_target_description=FLAGS.use_target_description, use_image=FLAGS.use_image)
    post_texts = np.array(post_texts)
    truth_classes = np.array(truth_classes)
    post_text_lens = np.array(post_text_lens)
    truth_means = np.array(truth_means)
    # One shared permutation so all parallel arrays stay aligned.
    shuffle_indices = np.random.permutation(np.arange(len(post_texts)))
    post_texts = post_texts[shuffle_indices]
    truth_classes = truth_classes[shuffle_indices]
    post_text_lens = post_text_lens[shuffle_indices]
    truth_means = truth_means[shuffle_indices]
    max_post_text_len = max(post_text_lens)
    print max_post_text_len
    post_texts = pad_sequences(post_texts, max_post_text_len)
    target_descriptions = np.array(target_descriptions)
    target_description_lens = np.array(target_description_lens)
    target_descriptions = target_descriptions[shuffle_indices]
    target_description_lens = target_description_lens[shuffle_indices]
    max_target_description_len = max(target_description_lens)
    print max_target_description_len
    target_descriptions = pad_sequences(target_descriptions, max_target_description_len)
    image_features = np.array(image_features)
    # Zip the parallel arrays into one record per example for KFold slicing.
    data = np.array(list(zip(post_texts, truth_classes, post_text_lens, truth_means, target_descriptions, target_description_lens, image_features)))
    kf = KFold(n_splits=5)
    round = 1  # fold counter (NOTE(review): shadows the builtin round())
    val_scores = []
    val_accs = []
    for train, validation in kf.split(data):
        train_data, validation_data = data[train], data[validation]
        # Fresh graph + session per fold so variables never leak across folds.
        g = tf.Graph()
        with g.as_default() as g:
            tf.set_random_seed(81)
            with tf.Session(graph=g) as sess:
                if FLAGS.model == "DAN":
                    model = DAN(x1_maxlen=max_post_text_len, y_len=len(truth_classes[0]), x2_maxlen=max_target_description_len, embedding=embedding, filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))), num_filters=FLAGS.num_filters, hidden_size=FLAGS.hidden_size, state_size=FLAGS.state_size, x3_size=len(image_features[0]))
                if FLAGS.model == "CNN":
                    model = CNN(x1_maxlen=max_post_text_len, y_len=len(truth_classes[0]), x2_maxlen=max_target_description_len, embedding=embedding, filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))), num_filters=FLAGS.num_filters, hidden_size=FLAGS.hidden_size, state_size=FLAGS.state_size, x3_size=len(image_features[0]))
                if FLAGS.model == "BiRNN":
                    model = BiRNN(x1_maxlen=max_post_text_len, y_len=len(truth_classes[0]), x2_maxlen=max_target_description_len, embedding=embedding, filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))), num_filters=FLAGS.num_filters, hidden_size=FLAGS.hidden_size, state_size=FLAGS.state_size, x3_size=len(image_features[0]))
                if FLAGS.model == "SAN":
                    model = SAN(x1_maxlen=max_post_text_len, y_len=len(truth_classes[0]), x2_maxlen=max_target_description_len, embedding=embedding, filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))), num_filters=FLAGS.num_filters, hidden_size=FLAGS.hidden_size, state_size=FLAGS.state_size, x3_size=len(image_features[0]), attention_size=2*FLAGS.state_size)
                # Adam with optional per-element gradient clipping.
                global_step = tf.Variable(0, name="global_step", trainable=False)
                optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
                grads_and_vars = optimizer.compute_gradients(model.loss)
                if FLAGS.gradient_clipping_value:
                    grads_and_vars = [(tf.clip_by_value(grad, -FLAGS.gradient_clipping_value, FLAGS.gradient_clipping_value), var) for grad, var in grads_and_vars]
                train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
                out_dir = os.path.join(FLAGS.dir, "runs", FLAGS.timestamp)
                # loss_summary = tf.summary.scalar("loss", model.loss)
                # acc_summary = tf.summary.scalar("accuracy", model.accuracy)
                # train_summary_op = tf.summary.merge([loss_summary, acc_summary])
                # train_summary_dir = os.path.join(out_dir, "summaries", "train")
                # train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
                # val_summary_op = tf.summary.merge([loss_summary, acc_summary])
                # val_summary_dir = os.path.join(out_dir, "summaries", "validation")
                # val_summary_writer = tf.summary.FileWriter(val_summary_dir, sess.graph)
                # One checkpoint prefix per fold (model name + fold number).
                checkpoint_dir = os.path.join(out_dir, "checkpoints")
                checkpoint_prefix = os.path.join(checkpoint_dir, FLAGS.model+str(round))
                if not os.path.exists(checkpoint_dir):
                    os.makedirs(checkpoint_dir)
                saver = tf.train.Saver()
                sess.run(tf.global_variables_initializer())
                def train_step(input_x1, input_y, input_x1_len, input_z, input_x2, input_x2_len, input_x3):
                    # One optimisation step on a mini-batch; dropout active.
                    feed_dict = {model.input_x1: input_x1,
                                 model.input_y: input_y,
                                 model.input_x1_len: input_x1_len,
                                 model.input_z: input_z,
                                 model.dropout_rate_hidden: FLAGS.dropout_rate_hidden,
                                 model.dropout_rate_cell: FLAGS.dropout_rate_cell,
                                 model.dropout_rate_embedding: FLAGS.dropout_rate_embedding,
                                 model.batch_size: len(input_x1),
                                 model.input_x2: input_x2,
                                 model.input_x2_len: input_x2_len,
                                 model.input_x3: input_x3}
                    _, step, loss, mse, accuracy = sess.run([train_op, global_step, model.loss, model.mse, model.accuracy], feed_dict)
                    time_str = datetime.datetime.now().isoformat()
                    print("{}: step {}, loss {:g}, mse {:g}, acc {:g}".format(time_str, step, loss, mse, accuracy))
                    # train_summary_writer.add_summary(summaries, step)
                def validation_step(input_x1, input_y, input_x1_len, input_z, input_x2, input_x2_len, input_x3, writer=None):
                    # Evaluation only: dropout rates fed as 0, no train_op.
                    feed_dict = {model.input_x1: input_x1,
                                 model.input_y: input_y,
                                 model.input_x1_len: input_x1_len,
                                 model.input_z: input_z,
                                 model.dropout_rate_hidden: 0,
                                 model.dropout_rate_cell: 0,
                                 model.dropout_rate_embedding: 0,
                                 model.batch_size: len(input_x1),
                                 model.input_x2: input_x2,
                                 model.input_x2_len: input_x2_len,
                                 model.input_x3: input_x3}
                    step, loss, mse, accuracy = sess.run([global_step, model.loss, model.mse, model.accuracy], feed_dict)
                    time_str = datetime.datetime.now().isoformat()
                    print("{}: step {}, loss {:g}, mse {:g}, acc {:g}".format(time_str, step, loss, mse, accuracy))
                    # if writer:
                    #     writer.add_summary(summaries, step)
                    return mse, accuracy
                # Baseline validation pass before any training.
                print("\nValidation: ")
                post_text_val, truth_class_val, post_text_len_val, truth_mean_val, target_description_val, target_description_len_val, image_feature_val= zip(*validation_data)
                validation_step(post_text_val, truth_class_val, post_text_len_val, truth_mean_val, target_description_val, target_description_len_val, image_feature_val)
                print("\n")
                min_mse_val = np.inf
                acc = np.inf
                for i in range(FLAGS.epochs):
                    batches = get_batch(train_data, FLAGS.batch_size)
                    for batch in batches:
                        post_text_batch, truth_class_batch, post_text_len_batch, truth_mean_batch, target_description_batch, target_description_len_batch, image_feature_batch = zip(*batch)
                        train_step(post_text_batch, truth_class_batch, post_text_len_batch, truth_mean_batch, target_description_batch, target_description_len_batch, image_feature_batch)
                    print("\nValidation: ")
                    mse_val, acc_val = validation_step(post_text_val, truth_class_val, post_text_len_val, truth_mean_val, target_description_val, target_description_len_val, image_feature_val)
                    print("\n")
                    # Track the epoch with the lowest validation MSE.
                    if mse_val < min_mse_val:
                        min_mse_val = mse_val
                        acc = acc_val
                        # saver.save(sess, checkpoint_prefix)
                round += 1
                val_scores.append(min_mse_val)
                val_accs.append(acc)
    # Cross-validated averages over the 5 folds.
    print np.mean(val_scores)
    print np.mean(val_accs)
if __name__ == "__main__":
    tf.app.run()  # parses the flags above, then invokes main()
| 11,319
| 65.19883
| 366
|
py
|
cc
|
cc-master/test_final.py
|
import tensorflow as tf
from utils import *
from sklearn.model_selection import KFold
# from models import *
import time
import datetime
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import accuracy_score as acc
import argparse
# ---- Command-line configuration (tf.app.flags) ----
# Padding lengths must match the lengths the checkpoints were trained with.
tf.app.flags.DEFINE_string("dir", "/data", "folder directory")
tf.app.flags.DEFINE_string("test_file", "clickbait17-train-170331", "Test data file")
tf.app.flags.DEFINE_string("timestamp", "0715", "Timestamp")
tf.app.flags.DEFINE_integer("max_post_text_len", 39, "Max length of the post text")
tf.app.flags.DEFINE_integer("max_target_description_len", 0, "Max length of the target description")
tf.app.flags.DEFINE_integer("if_annotated", 0, ">=1 if the Test data come with the annotations, 0 otherwise")
tf.app.flags.DEFINE_string("model", "SAN", "which model to use")
tf.app.flags.DEFINE_boolean("use_target_description", False, "whether to use the target description as input")
tf.app.flags.DEFINE_boolean("use_image", False, "whether to use the image as input")
FLAGS = tf.app.flags.FLAGS
def distribution2label(ar):
    """Collapse 4-way annotation distributions into binary labels.

    The first two of the four columns are summed into "class 0" mass and
    the last two into "class 1" mass; the argmax of the two sums gives the
    per-row label (0 or 1).
    """
    dist = np.asarray(ar)
    collapse = np.array([[1, 0], [1, 0], [0, 1], [0, 1]])
    return np.argmax(dist.dot(collapse), axis=1)
def main(argv=None):
    """Ensemble inference over the five per-fold checkpoints (Python 2).

    argv[1] is the input data directory/file and argv[2] the output
    directory (wired in from argparse in the __main__ block).  Each saved
    fold graph is restored by tensor name, run on the whole test set, and
    the per-fold predictions/distributions are averaged before writing
    predictions.jsonl.
    """
    # Fail fast if any artefact from training is missing.
    if not os.path.exists(os.path.join(FLAGS.dir, 'word2id.json')):
        print "Error: no word2id file!"
        return
    if not os.path.exists(os.path.join(FLAGS.dir, "runs", FLAGS.timestamp, "checkpoints")):
        print "Error: no saved model!"
        return
    if FLAGS.use_image and not os.path.exists(os.path.join(FLAGS.dir, FLAGS.test_file, "id2imageidx.json")):
        print "Error: no processed image features!"
        return
    with open(os.path.join(FLAGS.dir, 'word2id.json'), 'r') as fin:
        word2id = json.load(fin)
    ids, post_texts, truth_classes, post_text_lens, truth_means, target_descriptions, target_description_lens, image_features = read_data(word2id=word2id, fps=[argv[1]], y_len=FLAGS.if_annotated, use_target_description=FLAGS.use_target_description, use_image=FLAGS.use_image)
    post_texts = np.array(post_texts)
    truth_classes = np.array(truth_classes)
    # Clamp lengths to the training-time maximum so padding shapes match
    # the restored graph's placeholders.
    post_text_lens = [each_len if each_len <= FLAGS.max_post_text_len else FLAGS.max_post_text_len for each_len in post_text_lens]
    post_text_lens = np.array(post_text_lens)
    truth_means = np.array(truth_means)
    truth_means = np.ravel(truth_means).astype(np.float32)
    post_texts = pad_sequences(post_texts, FLAGS.max_post_text_len)
    if not FLAGS.use_target_description:
        FLAGS.max_target_description_len = 0
    target_descriptions = np.array(target_descriptions)
    target_description_lens = [each_len if each_len <= FLAGS.max_target_description_len else FLAGS.max_target_description_len for each_len in target_description_lens]
    target_description_lens = np.array(target_description_lens)
    target_descriptions = pad_sequences(target_descriptions, FLAGS.max_target_description_len)
    image_features = np.array(image_features)
    all_prediction = []
    all_distribution = []
    # One checkpoint per cross-validation fold (train.py saved folds 1..5).
    for i in range(1, 6):
        tf.reset_default_graph()
        saver = tf.train.import_meta_graph(os.path.join(FLAGS.dir, "runs", FLAGS.timestamp, "checkpoints", FLAGS.model+str(i)+".meta"), clear_devices=True)
        with tf.Session() as sess:
            saver.restore(sess, os.path.join(FLAGS.dir, "runs", FLAGS.timestamp, "checkpoints", FLAGS.model+str(i)))
            # Recover the feed/fetch tensors by the names given at build time.
            g = tf.get_default_graph()
            input_x1 = g.get_tensor_by_name("post_text:0")
            input_x1_len = g.get_tensor_by_name("post_text_len:0")
            dropout_rate_hidden = g.get_tensor_by_name("dropout_rate_hidden:0")
            dropout_rate_cell = g.get_tensor_by_name("dropout_rate_cell:0")
            dropout_rate_embedding = g.get_tensor_by_name("dropout_rate_embedding:0")
            batch_size = g.get_tensor_by_name("batch_size:0")
            input_x2 = g.get_tensor_by_name("target_description:0")
            input_x2_len = g.get_tensor_by_name("target_description_len:0")
            input_x3 = g.get_tensor_by_name("image_feature:0")
            output_prediction = g.get_tensor_by_name("prediction:0")
            output_distribution = g.get_tensor_by_name("distribution:0")
            # Inference: all dropout rates fed as 0.
            feed_dict = {input_x1: post_texts,
                         input_x1_len: post_text_lens,
                         dropout_rate_hidden: 0,
                         dropout_rate_cell: 0,
                         dropout_rate_embedding: 0,
                         batch_size: len(post_texts),
                         input_x2: target_descriptions,
                         input_x2_len: target_description_lens,
                         input_x3: image_features}
            prediction, distribution = sess.run([output_prediction, output_distribution], feed_dict)
            prediction = np.ravel(prediction).astype(np.float32)
            all_prediction.append(prediction)
            all_distribution.append(distribution)
            if FLAGS.if_annotated:
                print mse(prediction, truth_means)
                print acc(distribution2label(truth_classes), distribution2label(distribution))
    # Ensemble by simple averaging across folds.
    avg_prediction = np.mean(all_prediction, axis=0)
    avg_distribution = np.mean(all_distribution, axis=0)
    if FLAGS.if_annotated:
        print mse(avg_prediction, truth_means)
        print acc(distribution2label(truth_classes), distribution2label(avg_distribution))
    if not os.path.exists(argv[2]):
        os.makedirs(argv[2])
    # One JSON object per line: {"id": ..., "clickbaitScore": ...}.
    with open(os.path.join(argv[2], "predictions.jsonl"), 'w') as output:
        for i in range(len(ids)):
            output.write(json.dumps({"id": ids[i], "clickbaitScore": float(avg_prediction[i])})+'\n')
if __name__ == "__main__":
    # Parse -i/-o here and pass them to main() through tf.app.run's argv;
    # argv[0] is unused by main(), hence the None placeholder.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', dest="input_directory")
    parser.add_argument('-o', dest="output_directory")
    argv = parser.parse_args()
    tf.app.run(argv=[None, argv.input_directory, argv.output_directory])
| 6,002
| 53.081081
| 275
|
py
|
cl4ctr
|
cl4ctr-main/main_ml_base.py
|
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from model.FM import FactorizationMachineModel, FM_CL4CTR
from model.DeepFM import DeepFM, DeepFM_CL4CTR
import numpy as np
import random
import sys
import tqdm
import time
import argparse
import torch
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
from sklearn.metrics import log_loss, roc_auc_score
sys.path.append("../..")
from dataloader.frappe.dataloader import getdataloader_ml, getdataloader_frappe
from utils.utils_de import *
from utils.earlystoping import EarlyStopping
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_model(
        name,
        field_dims,
        batch_size=1024,
        pratio=0.5,
        embed_dim=20,
        mlp_layers=(400, 400, 400)):
    """Factory for CL4CTR model variants.

    Args:
        name: "fm_cl4ctr" or "dfm_cl4ctr".
        field_dims: per-field vocabulary sizes for the embedding layer.
        batch_size, pratio, embed_dim, mlp_layers: forwarded hyper-parameters.

    Returns:
        The constructed model instance.

    Raises:
        ValueError: if *name* is not a known model.
    """
    if name == "fm_cl4ctr":
        return FM_CL4CTR(field_dims, embed_dim, batch_size=batch_size, pratio=pratio, fi_type="att")
    if name == "dfm_cl4ctr":
        return DeepFM_CL4CTR(field_dims, embed_dim, mlp_layers=mlp_layers,
                             batch_size=batch_size, pratio=pratio, fi_type="att")
    raise ValueError('unknown model name: ' + name)
def count_params(model):
    """Return the total number of scalar parameters in *model*."""
    total = 0
    for p in model.parameters():
        total += p.numel()
    return total
def train(model,
          optimizer,
          data_loader,
          criterion,
          alpha=1.0,
          beta=1e-2):
    """Run one training epoch with the CL4CTR auxiliary loss.

    Args:
        model: CTR model exposing ``compute_cl_loss`` (CL4CTR variants).
        optimizer: torch optimizer over ``model.parameters()``.
        data_loader: iterable yielding ``(features, label)`` batches.
        criterion: pointwise loss on sigmoid outputs (e.g. ``BCELoss``).
        alpha: weight of the contrastive term (forwarded to the model).
        beta: weight of the alignment/uniformity term (forwarded).

    Returns:
        float: average total loss per batch over the epoch.

    Raises:
        ValueError: if ``data_loader`` yields no batches.  (The original
        code hit a NameError on ``i`` in that case.)
    """
    model.train()
    pred = list()    # kept for debugging/inspection, not returned
    target = list()  # kept for debugging/inspection, not returned
    total_loss = 0
    num_batches = 0
    for user_item, label in tqdm.tqdm(data_loader):
        label = label.float().cuda()
        user_item = user_item.long().cuda()
        model.zero_grad()
        pred_y = torch.sigmoid(model(user_item).squeeze(1))
        loss_y = criterion(pred_y, label)
        # 1. Utilize simplified method to compute feature alignment and field uniformity
        loss = loss_y + model.compute_cl_loss(user_item, alpha=alpha, beta=beta)
        # 2. Utilize completely method to compute feature alignment and field uniformity
        # loss = loss_y + model.compute_cl_loss_all(user_item, alpha=alpha, beta=beta)
        loss.backward()
        optimizer.step()
        pred.extend(pred_y.tolist())
        target.extend(label.tolist())
        total_loss += loss.item()
        num_batches += 1
        # if (i + 1) % log_interval == 0:
        #     print('train_loss:', total_loss / (i + 1))
        #     print(f'loss_y:{loss_y.item()};loss_cl:{loss_cl.item()}')
        # print("logloss",log_loss(target,pred))
    if num_batches == 0:
        raise ValueError("data_loader produced no batches")
    return total_loss / num_batches
def test_roc(model, data_loader):
    """Evaluate *model* on *data_loader*.

    Returns:
        tuple: (ROC-AUC, logloss) of the sigmoid scores against the labels.
    """
    model.eval()
    all_labels = []
    all_scores = []
    with torch.no_grad():
        for fields, target in tqdm.tqdm(
                data_loader, smoothing=0, mininterval=1.0):
            fields = fields.long().cuda()
            target = target.float().cuda()
            scores = torch.sigmoid(model(fields).squeeze(1))
            all_labels.extend(target.tolist())
            all_scores.extend(scores.tolist())
    return roc_auc_score(all_labels, all_scores), log_loss(all_labels, all_scores)
def main(dataset_name, model_name, epoch, embed_dim, learning_rate,
         batch_size, weight_decay, save_dir, path,
         pratio, alpha, beta):
    """Train and evaluate one CL4CTR model on the ml-tag dataset, logging to disk.

    Writes a per-run log file under save_dir/dataset_name/model_name/<embed_dim>/
    and saves the model whenever validation loss improves.

    NOTE(review): the `path` parameter is immediately overwritten with "./data/",
    so the CLI --path value is ignored here — confirm whether that is intentional.
    """
    path = "./data/"
    field_dims, trainLoader, validLoader, testLoader = \
        getdataloader_ml(path=path, batch_size=batch_size)
    print(field_dims)
    # Timestamp baked into the log/checkpoint filenames to keep runs distinct.
    time_fix = time.strftime("%m%d%H%M%S", time.localtime())
    for K in [embed_dim]:
        paths = os.path.join(save_dir, dataset_name, model_name, str(K))
        if not os.path.exists(paths):
            os.makedirs(paths)
        with open(paths + f"/{model_name}_{K}_{batch_size}_{alpha}_{beta}_{pratio}_{time_fix}.p",
                  "a+") as fout:
            # Header line recording the hyper-parameters of this run.
            fout.write("Batch_size:{}\tembed_dim:{}\tlearning_rate:{}\tStartTime:{}\tweight_decay:{}\tpratio:{}\t"
                       "\talpha:{}\tbeta:{}\t\n"
                       .format(batch_size, K, learning_rate, time.strftime("%d%H%M%S", time.localtime()), weight_decay,
                               pratio, alpha, beta))
            print("Start train -- K : {}".format(K))
            criterion = torch.nn.BCELoss()
            model = get_model(
                name=model_name,
                field_dims=field_dims,
                batch_size=batch_size,
                embed_dim=K,
                pratio=pratio).cuda()
            params = count_params(model)
            fout.write("count_params:{}\n".format(params))
            print(params)
            optimizer = torch.optim.Adam(
                params=model.parameters(),
                lr=learning_rate,
                weight_decay=weight_decay)
            # Initial EarlyStopping
            early_stopping = EarlyStopping(patience=8, verbose=True, prefix=path)
            # 'max' mode: the monitored quantity (validation AUC) should increase.
            scheduler = ReduceLROnPlateau(optimizer, 'max', verbose=True, patience=4)
            val_auc_best = 0
            auc_index_record = ""
            val_loss_best = 1000
            loss_index_record = ""
            for epoch_i in range(epoch):
                print(__file__, model_name, K, epoch_i, "/", epoch)
                print("Batch_size:{}\tembed_dim:{}\tlearning_rate:{}\tStartTime:{}\tweight_decay:{}\tpratio:{}\t"
                      "\talpha:{}\tbeta:{}\t"
                      .format(batch_size, K, learning_rate, time.strftime("%d%H%M%S", time.localtime()), weight_decay,
                              pratio, alpha, beta))
                start = time.time()
                train_loss = train(model, optimizer, trainLoader, criterion, alpha=alpha, beta=beta)
                val_auc, val_loss = test_roc(model, validLoader)
                test_auc, test_loss = test_roc(model, testLoader)
                scheduler.step(val_auc)
                end = time.time()
                # Save whenever validation loss improves. NOTE(review): the file
                # name says "best_auc" but the trigger is validation LOSS — confirm.
                if val_loss < val_loss_best:
                    # torch.save({"state_dict": model.state_dict(), "best_auc": val_auc_best},
                    #            paths + f"/{model_name}_final_{K}_{time_fix}.pt")
                    torch.save(model, paths + f"/{model_name}_best_auc_{K}_{pratio}_{time_fix}.pkl")
                if val_auc > val_auc_best:
                    val_auc_best = val_auc
                    auc_index_record = "epoch_i:{}\t{:.6f}\t{:.6f}".format(epoch_i, test_auc, test_loss)
                if val_loss < val_loss_best:
                    val_loss_best = val_loss
                    loss_index_record = "epoch_i:{}\t{:.6f}\t{:.6f}".format(epoch_i, test_auc, test_loss)
                print(
                    "Train K:{}\tEpoch:{}\ttrain_loss:{:.6f}\tval_loss:{:.6f}\tval_auc:{:.6f}\ttime:{:.6f}\ttest_loss:{:.6f}\ttest_auc:{:.6f}\n"
                    .format(K, epoch_i, train_loss, val_loss, val_auc, end - start, test_loss, test_auc))
                fout.write(
                    "Train K:{}\tEpoch:{}\ttrain_loss:{:.6f}\tval_loss:{:.6f}\tval_auc:{:.6f}\ttime:{:.6f}\ttest_loss:{:.6f}\ttest_auc:{:.6f}\n"
                    .format(K, epoch_i, train_loss, val_loss, val_auc, end - start, test_loss, test_auc))
                # Early stopping is driven by validation AUC (higher is better).
                early_stopping(val_auc)
                if early_stopping.early_stop:
                    print("Early stopping")
                    break
            print("Test:{}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\n"
                  .format(K, val_auc, val_auc_best, val_loss, val_loss_best, test_loss, test_auc))
            fout.write("Test:{}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\t{:.6f}\n"
                       .format(K, val_auc, val_auc_best, val_loss, val_loss_best, test_loss, test_auc))
            fout.write("auc_best:\t{}\nloss_best:\t{}".format(auc_index_record, loss_index_record))
def setup_seed(seed):
    """Seed `random`, NumPy and torch (CPU + all GPUs) and force deterministic cuDNN."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
    # CUDA_VISIBLE_DEVICES=1 python main_ml_base.py --choice 0
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_name', default='ml_tag', help="")
    parser.add_argument('--save_dir', default='chkpt_ml_tag', help="")
    parser.add_argument('--path', default="../data/", help="")
    parser.add_argument('--model_name', default='fm', help="")
    parser.add_argument('--epoch', type=int, default=5, help="")
    parser.add_argument('--learning_rate', type=float, default=0.01, help="learning rate")
    parser.add_argument('--batch_size', type=int, default=1024, help="batch_size")
    parser.add_argument('--weight_decay', type=float, default=1e-5, help="")
    parser.add_argument('--device', default='cuda:0', help="cuda:0")
    parser.add_argument('--choice', default=0, type=int, help="choice")
    parser.add_argument('--hint', default="CL4CTR", help="")
    parser.add_argument('--embed_dim', default=5, type=int, help="the size of feature dimension")
    parser.add_argument('--pratio', default=0.5, type=float, help="pratio")
    parser.add_argument('--alpha', default=1e-0, type=float, help="alpha")
    parser.add_argument('--beta', default=1e-2, type=float, help="beta")
    args = parser.parse_args()
    # Map --choice to the model(s) to run.
    if args.choice == 0:
        model_names = ["fm_cl4ctr"] * 1
    elif args.choice == 1:
        model_names = ["dfm_cl4ctr"] * 1
    else:
        # BUG FIX: previously model_names stayed undefined for any other
        # --choice value and the script crashed later with NameError;
        # fail fast with a clear message instead.
        raise ValueError('unsupported --choice value: {}'.format(args.choice))
    print(model_names)
    for name in model_names:
        # Fresh random seed per run: repeated launches differ, each run is reproducible.
        seed = np.random.randint(0, 100000)
        setup_seed(seed)
        main(dataset_name=args.dataset_name,
             model_name=name,
             epoch=args.epoch,
             learning_rate=args.learning_rate,
             batch_size=args.batch_size,
             weight_decay=args.weight_decay,
             save_dir=args.save_dir,
             path=args.path,
             pratio=args.pratio,
             embed_dim=args.embed_dim,
             alpha=args.alpha,
             beta=args.beta
             )
| 9,982
| 37.693798
| 145
|
py
|
cl4ctr
|
cl4ctr-main/utils/earlystoping.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
import torch
class EarlyStopping:
    """Early-stop helper driven by validation AUC (higher is better).

    Call the instance with the latest validation AUC after each epoch; once the
    AUC has failed to improve by more than `delta` for `patience` consecutive
    calls, `early_stop` flips to True.
    """

    def __init__(self, patience=7, verbose=False, delta=0, prefix=None):
        """
        Args:
            patience (int): How long to wait after last time the metric improved.
                            Default: 7
            verbose (bool): If True, prints a message for each checkpoint save.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
            prefix (str): Directory prefix used by save_checkpoint.
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # BUG FIX: np.Inf was removed in NumPy 2.0; np.inf is the supported spelling.
        self.val_loss_min = np.inf
        self.delta = delta
        self.prefix_path = prefix

    def __call__(self, val_auc):
        score = val_auc
        if self.best_score is None:
            self.best_score = score
        elif score < self.best_score + self.delta:
            # No sufficient improvement: count towards patience.
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            print("Now auc:{}\tBest_auc:{}".format(val_auc, self.best_score))
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # Improvement: remember the new best and reset the counter.
            self.best_score = score
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        '''Saves model when validation loss decrease.'''
        # NOTE(review): not invoked by __call__; callers must call it explicitly.
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        torch.save(model.state_dict(), self.prefix_path + '/es_checkpoint.pt')  # persist the best parameters so far
        self.val_loss_min = val_loss
class EarlyStoppingLoss:
    """Early stops the training if validation loss doesn't improve after a given patience.

    Loss-monitoring twin of EarlyStopping: lower is better, so the counter
    advances when the new loss EXCEEDS the best seen so far (plus `delta`).
    """

    def __init__(self, patience=7, verbose=False, delta=0, prefix=None):
        """
        Args:
            patience (int): How long to wait after last time validation loss improved.
                            Default: 7
            verbose (bool): If True, prints a message for each validation loss improvement.
                            Default: False
            delta (float): Minimum change in the monitored quantity to qualify as an improvement.
                            Default: 0
            prefix (str): Directory prefix used by save_checkpoint.
        """
        self.patience = patience
        self.verbose = verbose
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # BUG FIX: np.Inf was removed in NumPy 2.0; np.inf is the supported spelling.
        self.val_loss_min = np.inf
        self.delta = delta
        self.prefix_path = prefix

    def __call__(self, val_loss):
        score = val_loss
        if self.best_score is None:
            self.best_score = score
        elif score > self.best_score + self.delta:
            # Loss got worse (or did not improve enough): count towards patience.
            self.counter += 1
            print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            print("Now loss:{}\tBest_loss:{}".format(val_loss, self.best_score))
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            # self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        '''Saves model when validation loss decrease.'''
        # NOTE(review): not invoked by __call__; callers must call it explicitly.
        if self.verbose:
            print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
        torch.save(model.state_dict(), self.prefix_path + '/es_checkpoint.pt')  # persist the best parameters so far
        self.val_loss_min = val_loss
| 3,748
| 38.052083
| 111
|
py
|
cl4ctr
|
cl4ctr-main/utils/__init__.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
@Author:wangfy
@project:PNNConvModel
@Time:2020/6/17 4:49 下午
'''
| 115
| 15.571429
| 23
|
py
|
cl4ctr
|
cl4ctr-main/utils/utils_de.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
def load_trained_embedding(from_model, to_model):
    """Copy every parameter of `from_model` whose name also exists in `to_model`.

    :param from_model: source module holding trained parameters
    :param to_model: destination module, updated in place
    :return: `to_model` with the shared parameters overwritten
    """
    target_state = to_model.state_dict()
    shared = {name: param for name, param in from_model.named_parameters()
              if name in target_state.keys()}
    target_state.update(shared)
    to_model.load_state_dict(target_state)
    return to_model
def count_params(model):
    """Total element count across all parameter tensors of `model`."""
    total = 0
    for p in model.parameters():
        total += p.numel()
    return total
| 562
| 27.15
| 116
|
py
|
cl4ctr
|
cl4ctr-main/model/DeepFM.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from model.BasiclLayer import BasicCTR, BasicCL4CTR, FactorizationMachine, MultiLayerPerceptron
class DeepFM(BasicCTR):
    """DeepFM for CTR: linear term + FM second-order term + deep MLP over flattened embeddings."""

    def __init__(self, field_dims, embed_dim, mlp_layers=(400, 400, 400), dropout=0.5):
        super(DeepFM, self).__init__(field_dims, embed_dim)
        # BUG FIX: forward() uses self.lr, but neither BasicCTR.__init__ nor this
        # class defined it, so every forward pass raised AttributeError.
        # Local import because FeaturesLinear is not in this module's import list.
        from model.BasiclLayer import FeaturesLinear
        self.lr = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)
        self.embed_output_size = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_size, mlp_layers, dropout, output_layer=True)

    def forward(self, x):
        """
        :param x: B,F integer feature ids
        :return: B,1 logits (sum of linear, FM and MLP components)
        """
        x_embed = self.embedding(x)  # B,F,E
        x_out = self.lr(x) + self.fm(x_embed) + self.mlp(x_embed.view(x.size(0), -1))
        return x_out
class DeepFM_CL4CTR(BasicCL4CTR):
    """DeepFM backbone wired into the CL4CTR self-supervised base class."""

    def __init__(self, field_dims, embed_dim, mlp_layers=(400, 400, 400), dropout=0.5, batch_size=1024, pratio=0.5,
                 fi_type="att"):
        super(DeepFM_CL4CTR, self).__init__(field_dims, embed_dim, batch_size, pratio=pratio, fi_type=fi_type)
        # BUG FIX: forward() uses self.lr, but BasicCL4CTR does not define it,
        # so every forward pass raised AttributeError. Local import because
        # FeaturesLinear is not in this module's import list.
        from model.BasiclLayer import FeaturesLinear
        self.lr = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)
        self.embed_output_size = len(field_dims) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_size, mlp_layers, dropout, output_layer=True)

    def forward(self, x):
        """
        :param x: B,F integer feature ids
        :return: B,1 logits (sum of linear, FM and MLP components)
        """
        x_embed = self.embedding(x)  # B,F,E
        x_out = self.lr(x) + self.fm(x_embed) + self.mlp(x_embed.view(x.size(0), -1))
        return x_out
| 1,538
| 37.475
| 115
|
py
|
cl4ctr
|
cl4ctr-main/model/data_aug.py
|
import torch
def maskrandom(x_emb, mask_ratio):
    """Return two independent element-wise masked views of x_emb (B,F,E).

    Each element is kept with probability `mask_ratio`.
    GENERALIZATION: the mask is created on x_emb's own device instead of a
    hard-coded .cuda(), so this also works on CPU and non-default GPUs.
    """
    B, F, E = x_emb.size()
    keep = torch.full((B, F, E), mask_ratio, device=x_emb.device)
    mask1 = torch.bernoulli(keep)
    mask2 = torch.bernoulli(keep)
    return x_emb * mask1, x_emb * mask2
def maskdimension(x_emb, mask_ratio):
    """Return two views of x_emb (B,F,E) with whole embedding DIMENSIONS masked.

    The (B,1,E) mask broadcasts over the field axis, so a dropped dimension is
    dropped for every field of that sample.
    GENERALIZATION: mask created on x_emb's device instead of hard-coded .cuda().
    """
    B, F, E = x_emb.size()
    keep = torch.full((B, 1, E), mask_ratio, device=x_emb.device)
    mask1 = torch.bernoulli(keep)
    mask2 = torch.bernoulli(keep)
    return x_emb * mask1, x_emb * mask2
def maskfeature(x_emb, mask_ratio):
    """Return two views of x_emb (B,F,E) with whole FEATURES (fields) masked.

    The (B,F,1) mask broadcasts over the embedding axis, so a dropped field is
    zeroed across its entire embedding vector.
    GENERALIZATION: mask created on x_emb's device instead of hard-coded .cuda().
    """
    B, F, E = x_emb.size()
    keep = torch.full((B, F, 1), mask_ratio, device=x_emb.device)
    mask1 = torch.bernoulli(keep)
    mask2 = torch.bernoulli(keep)
    return x_emb * mask1, x_emb * mask2
| 863
| 28.793103
| 68
|
py
|
cl4ctr
|
cl4ctr-main/model/__init__.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
| 45
| 14.333333
| 22
|
py
|
cl4ctr
|
cl4ctr-main/model/BasiclLayer.py
|
import torch.nn as nn
import numpy as np
from .data_aug import *
class BasicCTR(nn.Module):
    """Minimal CTR base class: owns the shared feature-embedding table.

    Subclasses must implement forward(x) where x is a (B, F) tensor of
    integer feature ids.
    """

    def __init__(self, field_dims, embed_dim):
        super(BasicCTR, self).__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)

    def forward(self, x):
        # BUG FIX: `raise NotImplemented` raises a TypeError (NotImplemented is a
        # sentinel value, not an exception); NotImplementedError is the correct
        # abstract-method signal.
        raise NotImplementedError
class BasicCL4CTR(nn.Module):
    """
    The core implement of CL4CTR, in which three SSL losses(L_cl, L_ali and L_uni) are computed to regularize
    feature representation.
    """

    def __init__(self, field_dims, embed_dim, batch_size=1024, pratio=0.5, fi_type="att"):
        """
        :param field_dims: list of vocabulary sizes, one per feature field
        :param embed_dim: embedding dimension E
        :param batch_size: training batch size; pair indices below assume every
            batch has exactly this size (loaders use drop_last=True)
        :param pratio: dropout probability used as the random-mask ratio
        :param fi_type: NOTE(review): accepted but never stored or used here — confirm intent
        """
        super(BasicCL4CTR, self).__init__()
        # 1. embedding layer
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.field_dims = field_dims
        self.num_field = len(field_dims)
        self.input_dim = self.num_field * embed_dim
        self.batch_size = batch_size
        # Pre-compute all distinct (row < col) index pairs within one batch;
        # used by compute_alignment_loss.
        self.row, self.col = list(), list()
        for i in range(batch_size - 1):
            for j in range(i + 1, batch_size):
                self.row.append(i), self.col.append(j)
        # 2.1 Random mask.
        self.pratio = pratio
        self.dp1 = nn.Dropout(p=pratio)
        self.dp2 = nn.Dropout(p=pratio)
        # 2.2 FI_encoder. In most cases, we utilize three layer transformer layers.
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=embed_dim, nhead=1, dim_feedforward=128,
                                                        dropout=0.2)
        self.fi_cl = nn.TransformerEncoder(self.encoder_layer, num_layers=3)
        # 2.3 Projection
        self.projector1 = nn.Linear(self.input_dim, embed_dim)
        self.projector2 = nn.Linear(self.input_dim, embed_dim)

    def forward(self, x):
        # BUG FIX: `raise NotImplemented` raises a TypeError (NotImplemented is
        # not an exception); NotImplementedError is the correct signal.
        raise NotImplementedError

    def compute_cl_loss(self, x, alpha=1.0, beta=0.01):
        """
        :param x: embedding
        :param alpha:
        :param beta: beta = gamma
        :return: L_cl * alpha + (L_ali+L_uni) * beta
        # This is a simplified computation based only on the embedding of each batch,
        # which can accelerate the training process.
        """
        x_emb = self.embedding(x)
        # 1. Compute feature alignment loss (L_ali) and feature uniformity loss (L_uni).
        cl_align_loss = self.compute_alignment_loss(x_emb)
        cl_uniform_loss = self.compute_uniformity_loss(x_emb)
        if alpha == 0.0:
            # Skip the contrastive term (and the transformer pass) entirely.
            return (cl_align_loss + cl_uniform_loss) * beta
        # 2. Compute contrastive loss.
        x_emb1, x_emb2 = self.dp1(x_emb), self.dp2(x_emb)
        # NOTE(review): transpose(0,1) gives (F,B,E) — the seq-first layout the
        # transformer expects — but the subsequent .view(-1, F*E) flattens in
        # (F,B,E) order, which mixes elements across the batch axis. Confirm
        # against the reference CL4CTR implementation before changing.
        x_h1 = self.fi_cl(x_emb1.transpose(0, 1)).view(-1, self.input_dim)  # B,E
        x_h2 = self.fi_cl(x_emb2.transpose(0, 1)).view(-1, self.input_dim)  # B,E
        x_h1 = self.projector1(x_h1)
        x_h2 = self.projector2(x_h2)
        cl_loss = torch.norm(x_h1.sub(x_h2), dim=1).pow_(2).mean()
        # 3. Combine L_cl and (L_ali + L_uni) with two loss weights (alpha and beta)
        loss = cl_loss * alpha + (cl_align_loss + cl_uniform_loss) * beta
        return loss

    def compute_cl_loss_all(self, x, alpha=1.0, beta=0.01):
        """
        :param x: embedding
        :param alpha:
        :param beta: beta
        :return: L_cl * alpha + (L_ali+L_uni) * beta
        This is the full version of Cl4CTR, which computes L_ali and L_uni with full feature representations.
        """
        x_emb = self.embedding(x)
        # 1. Compute feature alignment loss (L_ali) and feature uniformity loss (L_uni)
        #    over the WHOLE embedding table, not just this batch.
        cl_align_loss = self.compute_all_alignment_loss()
        cl_uniform_loss = self.compute_all_uniformity_loss()
        if alpha == 0.0:
            return (cl_align_loss + cl_uniform_loss) * beta
        # 2. Compute contrastive loss (L_cl).
        x_emb1, x_emb2 = self.dp1(x_emb), self.dp2(x_emb)
        x_h1 = self.fi_cl(x_emb1.transpose(0, 1)).view(-1, self.input_dim)  # B,E
        x_h2 = self.fi_cl(x_emb2.transpose(0, 1)).view(-1, self.input_dim)  # B,E
        x_h1 = self.projector1(x_h1)
        x_h2 = self.projector2(x_h2)
        cl_loss = torch.norm(x_h1.sub(x_h2), dim=1).pow_(2).mean()
        # 3. Combine L_cl and (L_ali + L_uni) with two loss weights (alpha and beta)
        loss = cl_loss * alpha + (cl_align_loss + cl_uniform_loss) * beta
        return loss

    def compute_alignment_loss(self, x_emb):
        # Mean squared distance over all precomputed within-batch pairs.
        # Assumes x_emb's batch dimension equals self.batch_size.
        alignment_loss = torch.norm(x_emb[self.row].sub(x_emb[self.col]), dim=2).pow(2).mean()
        return alignment_loss

    def compute_uniformity_loss(self, x_emb):
        # Mean pairwise cosine similarity between field embeddings of each sample.
        frac = torch.matmul(x_emb, x_emb.transpose(2, 1))  # B,F,F
        denom = torch.matmul(torch.norm(x_emb, dim=2).unsqueeze(2), torch.norm(x_emb, dim=2).unsqueeze(1))  # B,F,F
        res = torch.div(frac, denom + 1e-4)  # +1e-4 guards against zero norms
        uniformity_loss = res.mean()
        return uniformity_loss

    def compute_all_uniformity_loss(self):
        """
        Calculate field uniformity loss based on all feature representation.
        """
        embedds = self.embedding.embedding.weight
        field_dims = self.field_dims
        field_dims_cum = np.array((0, *np.cumsum(field_dims)))
        field_len = embedds.size()[0]
        field_index = np.array(range(field_len))
        uniformity_loss = 0.0
        pairs = 0
        for i, (start, end) in enumerate(zip(field_dims_cum[:-1], field_dims_cum[1:])):
            # Half-open range [start, end): rows of field i vs. rows of all other fields.
            index_f = np.logical_and(field_index >= start, field_index < end)
            embed_f = embedds[index_f, :]
            embed_not_f = embedds[~index_f, :]
            frac = torch.matmul(embed_f, embed_not_f.transpose(1, 0))  # f1,f2
            denom = torch.matmul(torch.norm(embed_f, dim=1).unsqueeze(1),
                                 torch.norm(embed_not_f, dim=1).unsqueeze(0))  # f1,f2
            res = torch.div(frac, denom + 1e-4)
            uniformity_loss += res.sum()
            pairs += (field_len - field_dims[i]) * field_dims[i]
        uniformity_loss /= pairs
        return uniformity_loss

    def compute_all_alignment_loss(self):
        """
        Calculate feature alignment loss based on all feature representation.
        """
        embedds = self.embedding.embedding.weight
        field_dims = self.field_dims
        field_dims_cum = np.array((0, *np.cumsum(field_dims)))
        alignment_loss = 0.0
        pairs = 0
        for i, (start, end) in enumerate(zip(field_dims_cum[:-1], field_dims_cum[1:])):
            embed_f = embedds[start:end, :]
            loss_f = 0.0
            for j in range(field_dims[i]):
                loss_f += torch.norm(embed_f[j, :].sub(embed_f), dim=1).pow(2).sum()
            pairs += field_dims[i] * field_dims[i]
            alignment_loss += loss_f
        alignment_loss /= pairs
        return alignment_loss
class FeaturesLinear(torch.nn.Module):
    """
    Linear regression layer for CTR prediction.

    Per-feature scalar weights (stored as a 1-dim embedding) plus a global bias.
    """

    def __init__(self, field_dims, output_dim=1):
        super().__init__()
        self.fc = torch.nn.Embedding(sum(field_dims), output_dim)
        self.bias = torch.nn.Parameter(torch.zeros((output_dim,)))
        # BUG FIX: np.long was removed in NumPy 1.24; int64 matches torch's index dtype.
        self.offsets = np.array(
            (0, *np.cumsum(field_dims)[:-1]), dtype=np.int64)

    def forward(self, x):
        """
        :param x: B,F per-field feature ids
        :return: B,output_dim
        """
        # Shift per-field ids into the shared (concatenated) vocabulary space.
        x = x + x.new_tensor(self.offsets).unsqueeze(0)
        return torch.sum(self.fc(x), dim=1) + self.bias
class FactorizationMachine(torch.nn.Module):
    """Second-order FM interaction: 0.5 * ((sum_f v_f)^2 - sum_f v_f^2)."""

    def __init__(self, reduce_sum=True):
        super().__init__()
        self.reduce_sum = reduce_sum

    def forward(self, x):
        """
        :param x: B,F,E
        :return: B,1 if reduce_sum else B,E
        """
        summed = torch.sum(x, dim=1)                       # B,E
        interaction = summed * summed - torch.sum(x * x, dim=1)  # B,E
        if self.reduce_sum:
            interaction = torch.sum(interaction, dim=1, keepdim=True)
        return 0.5 * interaction
class FeaturesEmbedding(torch.nn.Module):
    """Shared embedding table over the concatenated vocabularies of all fields."""

    def __init__(self, field_dims, embed_dim):
        """
        :param field_dims: list of per-field vocabulary sizes
        :param embed_dim: embedding dimension E
        """
        super().__init__()
        self.embedding = torch.nn.Embedding(sum(field_dims), embed_dim)
        # BUG FIX: np.long was removed in NumPy 1.24; int64 matches torch's index dtype.
        self.offsets = np.array((0, *np.cumsum(field_dims)[:-1]), dtype=np.int64)
        self._init_weight_()

    def _init_weight_(self):
        nn.init.normal_(self.embedding.weight, std=0.01)
        # nn.init.xavier_normal_nn.init.xavier_normal_(self.embedding.weight)

    def forward(self, x):
        """
        :param x: B,F per-field feature ids
        :return: B,F,E
        """
        # Shift per-field ids into the shared vocabulary space before lookup.
        x = x + x.new_tensor(self.offsets).unsqueeze(0)
        return self.embedding(x)
class MultiLayerPerceptron(torch.nn.Module):
    """Stack of Linear -> BatchNorm1d -> ReLU -> Dropout blocks, with an optional 1-unit head."""

    def __init__(self, input_dim, embed_dims, dropout=0.5, output_layer=False):
        super().__init__()
        blocks = []
        in_features = input_dim
        for out_features in embed_dims:
            blocks.append(torch.nn.Linear(in_features, out_features))
            blocks.append(torch.nn.BatchNorm1d(out_features))
            blocks.append(torch.nn.ReLU())
            blocks.append(torch.nn.Dropout(p=dropout))
            in_features = out_features
        if output_layer:
            blocks.append(torch.nn.Linear(in_features, 1))
        self.mlp = torch.nn.Sequential(*blocks)
        self._init_weight_()

    def _init_weight_(self):
        # Xavier-initialize every linear layer (BatchNorm keeps its defaults).
        for layer in self.mlp:
            if isinstance(layer, nn.Linear):
                nn.init.xavier_uniform_(layer.weight)

    def forward(self, x):
        return self.mlp(x)
| 9,456
| 35.513514
| 118
|
py
|
cl4ctr
|
cl4ctr-main/model/FM.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from model.BasiclLayer import BasicCTR, BasicCL4CTR, FactorizationMachine, FeaturesLinear
class FactorizationMachineModel(BasicCTR):
    """Plain FM for CTR: first-order linear term plus second-order FM interaction."""

    def __init__(self, field_dims, embed_dim):
        super(FactorizationMachineModel, self).__init__(field_dims, embed_dim)
        self.lr = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)

    def forward(self, x):
        """
        :param x: B,F integer feature ids
        :return: B,1 logits
        """
        embedded = self.embedding(x)
        return self.lr(x) + self.fm(embedded)
class FM_CL4CTR(BasicCL4CTR):
    """FM backbone wired into the CL4CTR self-supervised base class."""

    def __init__(self, field_dims, embed_dim, batch_size=1024, pratio=0.5, fi_type="att"):
        super(FM_CL4CTR, self).__init__(field_dims, embed_dim, batch_size, pratio=pratio, fi_type=fi_type)
        self.lr = FeaturesLinear(field_dims)
        self.fm = FactorizationMachine(reduce_sum=True)

    def forward(self, x):
        """
        :param x: B,F integer feature ids
        :return: B,1 logits
        """
        embedded = self.embedding(x)
        return self.lr(x) + self.fm(embedded)
| 1,190
| 30.342105
| 106
|
py
|
cl4ctr
|
cl4ctr-main/dataloader/frappe/dataloader.py
|
import numpy as np
import pandas as pd
import torch
import os
import tqdm
import pickle
class LoadData():
    """Load the libfm-formatted train/valid/test splits of a dataset into DataFrames.

    After construct_df() runs: data_train / data_valid / data_test hold one
    row per sample (column 0 = label, remaining columns = dense-remapped
    feature ids), field_dims lists the vocabulary size per field, and
    features_M maps each column to its raw-id -> dense-id dictionary.
    """

    def __init__(self, path="./data/", dataset="frappe"):
        self.dataset = dataset
        self.path = path + dataset + "/"
        self.trainfile = self.path + dataset + ".train.libfm"
        self.testfile = self.path + dataset + ".test.libfm"
        self.validationfile = self.path + dataset + ".validation.libfm"
        # per-column mapping: raw feature id -> dense index
        self.features_M = {}
        self.construct_df()
        # self.Train_data, self.Validation_data, self.Test_data = self.construct_data( loss_type )

    def construct_df(self):
        """Read the three libfm files and densify feature ids in place."""
        self.data_train = pd.read_table(self.trainfile, sep=" ", header=None, engine='python')
        self.data_test = pd.read_table(self.testfile, sep=" ", header=None, engine="python")
        self.data_valid = pd.read_table(self.validationfile, sep=" ", header=None, engine="python")
        # libfm cells look like "id:value"; keep only the id part.
        for i in self.data_test.columns[1:]:
            self.data_test[i] = self.data_test[i].apply(lambda x: int(x.split(":")[0]))
            self.data_train[i] = self.data_train[i].apply(lambda x: int(x.split(":")[0]))
            self.data_valid[i] = self.data_valid[i].apply(lambda x: int(x.split(":")[0]))
        # Build each field's vocabulary from the union of all three splits so
        # the dense remapping is consistent everywhere.
        self.all_data = pd.concat([self.data_train, self.data_test, self.data_valid])
        self.field_dims = []
        for i in self.all_data.columns[1:]:
            maps = {val: k for k, val in enumerate(set(self.all_data[i]))}
            self.data_test[i] = self.data_test[i].map(maps)
            self.data_train[i] = self.data_train[i].map(maps)
            self.data_valid[i] = self.data_valid[i].map(maps)
            self.features_M[i] = maps
            self.field_dims.append(len(set(self.all_data[i])))
        # map the -1 label to 0 (binary labels expected downstream)
        self.data_test[0] = self.data_test[0].apply(lambda x: max(x, 0))
        self.data_train[0] = self.data_train[0].apply(lambda x: max(x, 0))
        self.data_valid[0] = self.data_valid[0].apply(lambda x: max(x, 0))
class RecData():
    """Dataset over a DataFrame whose column 0 is the label and the rest are feature ids."""

    def __init__(self, all_data):
        self.data_df = all_data

    def __len__(self):
        return len(self.data_df)

    def __getitem__(self, idx):
        row = self.data_df.iloc[idx].values
        # (features, label)
        return row[1:], row[0]
def getdataloader_frappe(path="../data/", dataset="frappe", batch_size=256):
    """Build (field_dims, train, valid, test) DataLoaders for the frappe dataset."""
    print("Load frappe dataset.")
    raw = LoadData(path=path, dataset=dataset)
    train_set = RecData(raw.data_train)
    valid_set = RecData(raw.data_valid)
    test_set = RecData(raw.data_test)
    print("datatrain", len(train_set))
    print("datavalid", len(valid_set))
    print("datatest", len(test_set))
    # drop_last=True on train/valid keeps every batch exactly batch_size,
    # which the CL4CTR pair indices rely on.
    trainLoader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last=True,
                                              num_workers=8, pin_memory=True)
    validLoader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False, drop_last=True,
                                              num_workers=4, pin_memory=True)
    testLoader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4,
                                             pin_memory=True)
    return raw.field_dims, trainLoader, validLoader, testLoader
def getdataloader_ml(path="../.././data/", dataset="ml-tag", batch_size=256):
    """Build (field_dims, train, valid, test) loaders for ml-tag, caching parsed frames via pickle."""
    # we delete one parameter num_ng, as we do not utilize it.
    cache_file = path + 'preprocess-ml.p'
    if not os.path.exists(cache_file):
        raw = LoadData(path=path, dataset=dataset)
        pickle.dump((raw.data_test, raw.data_train, raw.data_valid, raw.field_dims), open(cache_file, 'wb'))
        print("success")
    print("start load ml_tag data")
    # NOTE: pickle.load on a local cache file — only safe if the cache is trusted.
    data_test, data_train, data_valid, field_dims = pickle.load(open(cache_file, mode='rb'))
    test_set = RecData(data_test)
    train_set = RecData(data_train)
    valid_set = RecData(data_valid)
    print("ml-datatrain", len(train_set))
    print("ml-datavalid", len(valid_set))
    print("ml-datatest", len(test_set))
    # drop_last=True keeps train/valid batches at exactly batch_size.
    trainLoader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last=True,
                                              num_workers=8, pin_memory=True)
    validLoader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=False, drop_last=True,
                                              num_workers=4, pin_memory=True)
    testLoader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4,
                                             pin_memory=True)
    return field_dims, trainLoader, validLoader, testLoader
if __name__ == '__main__':
    # Smoke test: iterate the training loader once and peek at one batch.
    field_dims, trainLoader, validLoader, testLoader = getdataloader_ml(batch_size=256)
    for _ in tqdm.tqdm(trainLoader):
        pass
    batch_iter = iter(trainLoader)
    print(next(batch_iter)[0])
    print(field_dims)
| 4,921
| 44.155963
| 113
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/__init__.py
|
# coding=utf-8
"""A package for Knowledge Graph Matching and Entity Alignment."""
| 82
| 26.666667
| 66
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/modules/losses.py
|
# coding=utf-8
"""Loss functions for entity alignment and link prediction."""
import enum
import logging
from typing import Any, Callable, Mapping, Optional
import torch
from torch import nn
from torch.nn import functional
from .similarity import Similarity
from ..data import MatchSideEnum, SIDES
from ..utils.common import get_subclass_by_name
from ..utils.types import IDAlignment, NodeIDs
logger = logging.getLogger(name=__name__)
__all__ = [
'BaseLoss',
'ContrastiveLoss',
'FullMatchingLoss',
'MarginLoss',
'MatchingLoss',
'OrderPreservationLoss',
'SampledLinkPredictionLoss',
'SampledMatchingLoss',
'get_matching_loss',
'get_pairwise_loss',
]
# pylint: disable=abstract-method
# pylint: disable=abstract-method
class BaseLoss(nn.Module):
    """Abstract class for losses on similarity matrices.

    Subclasses implement forward() to reduce an (n, m) similarity matrix and
    the per-row true-column indices to a scalar loss tensor.
    """

    # pylint: disable=arguments-differ
    def forward(self, similarities: torch.FloatTensor, true_indices: torch.LongTensor) -> torch.FloatTensor:
        r"""
        Efficiently compute loss values from a similarity matrix.

        .. math::
            \frac{1}{n(m-1))} \sum_{b=1}^{n} \sum_{j \neq true[b]} pairloss(sim[b, true[b]], sim[b, j])

        :param similarities: shape: (n, m)
            A batch of similarity values.
        :param true_indices: shape (n,)
            The index of the unique true choice in each batch.

        :return: a scalar loss tensor.
        """
        raise NotImplementedError
class MarginLoss(BaseLoss):
    r"""Margin-based pairwise loss on a similarity matrix.

    For each row, every entry is pushed below the true entry by at least
    ``margin``:

    .. math::
        baseloss(pos\_sim, neg\_sim) = g(neg\_sim + margin - pos\_sim)

    where g is an activation, e.g. ReLU for the classical hard margin.
    """

    def __init__(
        self,
        margin: float = 1.0,
        exact_loss_value: bool = False,
        activation: Callable[[torch.FloatTensor], torch.FloatTensor] = functional.relu,
    ):
        """
        Initialize the loss.

        :param margin: >0
            The margin which should be between positive and negative similarity values.
        :param exact_loss_value:
            Can be disabled to compute the loss up to a constant additive term for improved performance.
        :param activation:
            The activation function to use. Typical examples:

            - hard margin: torch.functional.relu
            - soft margin: torch.functional.softplus
        """
        super().__init__()
        self.margin = margin
        self.exact_loss_value = exact_loss_value
        self.activation = activation

    def forward(self, similarities: torch.FloatTensor, true_indices: torch.LongTensor) -> torch.FloatTensor:  # noqa: D102
        n_rows, n_cols = similarities.shape
        row_idx = torch.arange(n_rows, device=similarities.device)
        positive = similarities[row_idx, true_indices].unsqueeze(dim=1)
        # The positive column contributes activation(margin), a constant with
        # zero gradient, so averaging over the full matrix is safe.
        loss_value = self.activation(similarities + self.margin - positive).mean()
        if self.exact_loss_value:
            # Subtract the constant contributed by the positive column
            # (one positive among n_cols entries per row).
            margin_tensor = torch.as_tensor(data=self.margin, dtype=torch.float, device=loss_value.device)
            loss_value = loss_value - (self.activation(margin_tensor) / n_cols)
        return loss_value
@enum.unique
class LossDirectionEnum(str, enum.Enum):
    """An enum for specification of the direction of a matching loss.

    Values are plain strings (str mixin) so they serialize cleanly in configs.
    """

    #: Loss is matching entities from a left graph to a right one
    left_to_right = 'left_to_right'

    #: Loss is matching entities from a right graph to a left one
    right_to_left = 'right_to_left'

    #: Loss is averaging loss of matching entities from a left to a right graph and from the right to the left one
    symmetrical = 'symmetrical'
# pylint: disable=abstract-method
class MatchingLoss(nn.Module):
"""An API for graph matching losses."""
#: The similarity
similarity: Similarity
#: The direction in which to compute the loss
loss_direction: LossDirectionEnum
def __init__(
self,
similarity: Similarity,
loss_direction: LossDirectionEnum = LossDirectionEnum.symmetrical,
):
"""
Initialize the loss.
:param similarity:
The similarity to use for comparing node representations.
:param loss_direction:
Defines a direction of matching, which loss is optimized during training
"""
super().__init__()
self.similarity = similarity
self.loss_direction = loss_direction
# pylint: disable=arguments-differ
def forward(
self,
alignment: IDAlignment,
representations: Mapping[MatchSideEnum, torch.FloatTensor],
negatives: Optional[torch.LongTensor] = None,
) -> torch.FloatTensor:
"""
Compute the loss.
:param alignment: shape: (2, num_aligned)
The aligned nodes in form of node ID pairs.
:param representations:
side -> repr, where repr is a tensor of shape (num_nodes_side, dim)
:param negatives: shape: (2, num_aligned, num_negatives)
Negative samples. negatives[0] has to be combined with alignment[1] for a valid pair.
"""
partial_losses = []
# left-to-right loss
if self.loss_direction in {LossDirectionEnum.left_to_right, LossDirectionEnum.symmetrical}:
source_side, target_side = SIDES
partial_losses.append(
self._one_side_matching_loss(
source=representations[source_side],
target=representations[target_side],
alignment=alignment,
negatives=None if negatives is None else negatives[1],
)
)
# right-to-left loss
if self.loss_direction in {LossDirectionEnum.right_to_left, LossDirectionEnum.symmetrical}:
target_side, source_side = SIDES
partial_losses.append(
self._one_side_matching_loss(
source=representations[source_side],
target=representations[target_side],
alignment=alignment.flip(0),
negatives=None if negatives is None else negatives[0],
)
)
assert len(partial_losses) > 0
return sum(partial_losses) / len(partial_losses)
def _one_side_matching_loss(
self,
source: torch.FloatTensor,
target: torch.FloatTensor,
alignment: IDAlignment,
negatives: Optional[NodeIDs]
) -> torch.FloatTensor:
"""
Compute the loss from selected nodes in source graph to the other graph.
:param source: shape: (num_source, dim)
Source node representations.
:param target: shape: (num_target, dim)
Target node representations.
:param alignment: shape: (2, num_aligned)
The alignment.
:param negatives: shape: (num_aligned, num_negatives)
The negative examples from target side.
"""
raise NotImplementedError
class SampledMatchingLoss(MatchingLoss):
    """Apply a base loss to a similarity matrix where negative samples are used to reduce memory footprint."""

    #: The base loss
    base_loss: BaseLoss

    #: The number of negative samples
    num_negatives: int

    #: Whether to use self-adversarial weighting
    self_adversarial_weighting: bool

    def __init__(
        self,
        similarity: Similarity,
        base_loss: BaseLoss,
        loss_direction: LossDirectionEnum = LossDirectionEnum.symmetrical,
        num_negatives: int = 1,
        self_adversarial_weighting: bool = False,
    ):
        """
        Initialize the loss.

        :param similarity:
            The similarity to use for computing the similarity matrix.
        :param base_loss:
            The base loss to apply to the similarity matrix.
        :param loss_direction:
            Which matching direction(s) to optimize (see MatchingLoss).
        :param num_negatives:
            The number of negative samples for each positive pair.
        :param self_adversarial_weighting:
            Whether to apply self-adversarial weighting.
        """
        super().__init__(
            similarity=similarity,
            loss_direction=loss_direction
        )
        self.base_loss = base_loss
        self.num_negatives = num_negatives
        self.self_adversarial_weighting = self_adversarial_weighting

    def _one_side_matching_loss(
        self,
        source: torch.FloatTensor,
        target: torch.FloatTensor,
        alignment: IDAlignment,
        negatives: Optional[NodeIDs],
    ) -> torch.FloatTensor:  # noqa: D102
        # Split mapping
        source_ind, target_ind_pos = alignment

        # Extract representations, shape: (batch_size, dim)
        anchor = source[source_ind]

        # Positive scores
        pos_scores = self.similarity.one_to_one(left=anchor, right=target[target_ind_pos])

        # Negative samples in target graph, shape: (batch_size, num_negatives)
        # Sampled uniformly when the caller supplies none; may by chance hit the positive.
        if negatives is None:
            negatives = torch.randint(
                target.shape[0],
                size=(target_ind_pos.shape[0], self.num_negatives),
                device=target.device,
            )

        # Negative scores, shape: (batch_size, num_negatives, dim)
        neg_scores = self.similarity.one_to_one(left=anchor.unsqueeze(1), right=target[negatives])

        # self-adversarial weighting as described in RotatE paper: https://arxiv.org/abs/1902.10197
        # (softmax weights are detached so only the scores carry gradient)
        if self.self_adversarial_weighting:
            neg_scores = functional.softmax(neg_scores, dim=1).detach() * neg_scores

        # Evaluate base loss; column 0 holds the positive, hence true_indices = 0.
        return self.base_loss(
            similarities=torch.cat([pos_scores.unsqueeze(dim=-1), neg_scores], dim=-1),
            true_indices=torch.zeros_like(target_ind_pos),
        ).mean()
def matching_loss_name_normalizer(name: str) -> str:
    """Normalize a MatchingLoss class name for lookup (lower-case, suffix stripped)."""
    lowered = name.lower()
    return lowered.replace('matchingloss', '')
def base_loss_name_normalizer(name: str) -> str:
    """Normalize a BaseLoss class name for lookup (lower-case, suffix stripped)."""
    lowered = name.lower()
    return lowered.replace('loss', '')
def get_pairwise_loss(name: str, **kwargs: Any) -> BaseLoss:
    """
    Instantiate a pairwise loss by class name.

    :param name:
        The (normalized) name of the class.
    :param kwargs:
        Additional key-word based constructor arguments.
    :return:
        The base loss instance.
    """
    cls = get_subclass_by_name(base_class=BaseLoss, name=name, normalizer=base_loss_name_normalizer)
    return cls(**kwargs)
def get_matching_loss(name: str, similarity: Similarity, **kwargs) -> MatchingLoss:
    """
    Instantiate a matching loss by class name.

    :param name:
        The (normalized) name of the class.
    :param similarity:
        The similarity function the loss should use.
    :param kwargs:
        Additional key-word based constructor arguments.
    :return:
        The matching loss instance.
    """
    cls = get_subclass_by_name(base_class=MatchingLoss, name=name, normalizer=matching_loss_name_normalizer)
    return cls(similarity=similarity, **kwargs)
| 11,575
| 33.97281
| 149
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/modules/sampler.py
|
"""Sampling methods for negative samples."""
from abc import abstractmethod
from typing import Optional, Tuple
import torch
from kgm.utils.types import NodeIDs
class NegativeSampler:
    """Abstract class encapsulating a logic of choosing negative examples."""

    @abstractmethod
    def sample(
        self,
        size: Tuple[int, ...],
        device: torch.device,
        max_id: Optional[int] = None,
        candidates: Optional[NodeIDs] = None,
    ) -> NodeIDs:
        """Choose negative samples.

        When ``candidates`` is given, samples are drawn from it; otherwise IDs are
        drawn from ``[0, max_id - 1]``.

        :param size:
            Expected shape of the output tensor of indices.
        :param device:
            Device of the output tensor.
        :param max_id: >0
            The maximum ID (exclusive).
        :param candidates: shape: (num_of_candidates,)
            Tensor containing candidates for negative examples to choose from.
        """
        raise NotImplementedError
class UniformRandomSampler(NegativeSampler):
    """NegativeSampler implementation drawing IDs from a uniform random distribution."""

    def sample(
        self,
        size: Tuple[int, ...],
        device: torch.device,
        max_id: Optional[int] = None,
        candidates: Optional[NodeIDs] = None,
    ) -> NodeIDs:  # noqa: D102
        if candidates is None:
            # draw directly from the full ID range [0, max_id)
            return torch.randint(max_id, size=size, dtype=torch.long, device=device)
        # draw random positions into the candidate tensor, then gather
        positions = torch.randint(candidates.shape[0], size=size, device=candidates.device)
        return candidates[positions]
| 1,654
| 31.45098
| 117
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/modules/graph.py
|
# coding=utf-8
"""
Module for message passing modules.
The message passing is split into three phases:
1) Message Creation
Calculate messages. Potentially takes the source and target node representations, as well as the relation-type of
the considered edge into account, i.e. for a triple (e_i, r, e_j): m_{i->j} = f(x_i, x_j, r)
2) Message Passing
The message are exchanged, i.e. m_{i->j} moves from i to j. This is done in parallel for all messages.
3) Message Aggregation
All incoming messages are aggregated into a single vector, i.e. a_j = agg({m_{i->j} for all i})
4) Node Update
The new node representations are calculated given the aggregated messages, as well as the old node representation,
i.e. x_j := update(x_j, a_j)
"""
import logging
from typing import Optional
import torch
from torch import nn
from ..utils.torch_utils import _guess_num_nodes
from ..utils.types import NodeIDs, RelationIDs
logger = logging.getLogger(name=__name__)
__all__ = [
'AliGAT',
'AliGate',
'BasesLinearRelationSpecificMessageCreator',
'BlockLinearRelationSpecificMessageCreator',
'GAT',
'GCNBlock',
'IdentityMessageCreator',
'LinearMessageCreator',
'MeanAggregator',
'MessagePassingBlock',
'MessagePassingBlock',
'OnlyUpdate',
'SumAggregator',
]
class MissingEdgeTypesException(Exception):
    """Raised when a component requires edge-type information but none was passed."""

    def __init__(self, cls):
        """
        Initialize the exception.

        :param cls:
            The class requiring edge types; its name is embedded in the message.
        """
        # Derive from Exception (not BaseException) so generic ``except Exception``
        # handlers catch it; BaseException is reserved for exits like KeyboardInterrupt.
        super().__init__(f'{cls.__name__} requires passing edge types.')
# pylint: disable=abstract-method
class MessageCreator(nn.Module):
    """Abstract base class for the different ways of creating messages to send."""

    def reset_parameters(self) -> None:
        """Reset the module's parameters (no-op by default)."""
        # TODO: Subclass from ExtendedModule

    # pylint: disable=arguments-differ
    def forward(
        self,
        x: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
    ) -> torch.FloatTensor:
        """
        Create one message per edge.

        :param x: shape: (num_nodes, node_embedding_dim)
            The node representations.
        :param source: (num_edges,)
            The source index of each edge.
        :param target: shape: (num_edges,)
            The target index of each edge.
        :param edge_type: shape: (num_edges,)
            The edge type of each edge.
        :return: shape: (num_edges, message_dim)
            The messages source -> target.
        """
        raise NotImplementedError
class IdentityMessageCreator(MessageCreator):
    """Send the source node embeddings unchanged."""

    def forward(
        self,
        x: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
    ) -> torch.FloatTensor:  # noqa: D102
        # the message of each edge is exactly its source node's representation
        return torch.index_select(x, 0, source)
class LinearMessageCreator(MessageCreator):
    """Transform source embeddings by a learned linear transformation."""

    def __init__(
        self,
        input_dim: int,
        message_dim: int,
        use_bias: bool = False,
    ):
        """
        Initialize the message creator.

        :param input_dim: >0
            The number of input features, i.e. the dimension of the embedding vector.
        :param message_dim: > 0
            The number of output features, i.e. the dimension of the message vector.
        :param use_bias:
            Whether the linear transformation uses a bias term.
        """
        super().__init__()
        self.linear = nn.Linear(in_features=input_dim, out_features=message_dim, bias=use_bias)

    def reset_parameters(self) -> None:  # noqa: D102
        # TODO: Subclass from ExtendedModule
        self.linear.reset_parameters()

    def forward(
        self,
        x: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
    ) -> torch.FloatTensor:  # noqa: D102
        # transform all node representations once, then gather per-edge
        transformed = self.linear(x)
        return transformed.index_select(dim=0, index=source)
# pylint: disable=abstract-method
class MessageAggregator(nn.Module):
    """
    Aggregation method for incoming messages.

    Implementations must be permutation-invariant and able to reduce an arbitrary
    number of messages per node into a single vector.
    """

    def reset_parameters(self) -> None:
        """Reset the module's parameters (no-op by default)."""
        # TODO: Subclass from ExtendedModule

    # pylint: disable=arguments-differ
    def forward(
        self,
        msg: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
        num_nodes: Optional[int] = None,
    ) -> torch.FloatTensor:
        """
        Aggregate incoming messages per node.

        :param msg: shape: (num_edges, message_dim)
            The messages source -> target.
        :param source: (num_edges,)
            The source index of each edge.
        :param target: shape: (num_edges,)
            The target index of each edge.
        :param edge_type: shape: (num_edges,)
            The edge type of each edge.
        :param num_nodes: >0
            The number of nodes. If None, guessed as max(source.max(), target.max()) + 1.
        :return: shape: (num_nodes, update_dim)
            The per-node aggregated updates.
        """
        raise NotImplementedError
class SumAggregator(MessageAggregator):
    """Sum over incoming messages."""

    def forward(
        self,
        msg: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
        num_nodes: Optional[int] = None,
    ) -> torch.FloatTensor:  # noqa: D102
        num_nodes = _guess_num_nodes(num_nodes=num_nodes, source=source, target=target)
        # scatter-add every message into its target node's row
        out = msg.new_zeros(num_nodes, msg.shape[1])
        return out.index_add_(dim=0, index=target, source=msg)
class MeanAggregator(MessageAggregator):
    """Average over incoming messages."""

    def forward(
        self,
        msg: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
        num_nodes: Optional[int] = None,
    ) -> torch.FloatTensor:  # noqa: D102
        num_nodes = _guess_num_nodes(num_nodes=num_nodes, source=source, target=target)
        # sum of incoming messages per node
        totals = msg.new_zeros(num_nodes, msg.shape[1]).index_add_(dim=0, index=target, source=msg)
        # in-degree per node; clamped to >= 1 so isolated nodes do not divide by zero
        receivers, in_degree = torch.unique(target, sorted=False, return_counts=True)
        counts = torch.zeros(num_nodes, dtype=torch.long, device=msg.device)
        counts = counts.scatter_(dim=0, index=receivers, src=in_degree)
        inv_degree = counts.clamp_min(min=1).float().reciprocal().unsqueeze(dim=-1)
        return totals * inv_degree
# pylint: disable=abstract-method
class NodeUpdater(nn.Module):
    """Compute the new node representation from the old one and the aggregated messages."""

    def reset_parameters(self) -> None:
        """Reset the module's parameters (no-op by default)."""
        # TODO: Merge with AbstractKGMatchingModel's reset_parameters

    # pylint: disable=arguments-differ
    def forward(
        self,
        x: torch.FloatTensor,
        delta: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """
        Update node representations.

        :param x: shape: (num_nodes, node_embedding_dim)
            The old node representations.
        :param delta: (num_nodes, update_dim)
            The aggregated node updates.
        :return: shape: (num_nodes, new_node_embedding_dim)
            The new node representations.
        """
        raise NotImplementedError
class OnlyUpdate(NodeUpdater):
    """Discard the old node representation and keep only the aggregated messages."""

    def forward(
        self,
        x: torch.FloatTensor,
        delta: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        # the previous representation x is intentionally ignored
        return delta
# pylint: disable=abstract-method
class MessagePassingBlock(nn.Module):
    """A message passing block composed of message creation, aggregation, and node update."""

    def __init__(
        self,
        message_creator: MessageCreator,
        message_aggregator: MessageAggregator,
        node_updater: NodeUpdater,
    ):
        """
        Initialize the block.

        :param message_creator:
            Creates a message per edge, potentially from the source/target representations
            and the edge type.
        :param message_aggregator:
            Reduces all incoming messages of a node to one fixed-size vector.
        :param node_updater:
            Computes the new node representation from the old one and the aggregate.
        """
        super().__init__()
        self.message_creator = message_creator
        self.message_aggregator = message_aggregator
        self.node_updater = node_updater

    def reset_parameters(self) -> None:
        """Reset parameters by delegating to all three sub-modules."""
        for component in (self.message_creator, self.message_aggregator, self.node_updater):
            component.reset_parameters()

    # pylint: disable=arguments-differ
    def forward(
        self,
        x: torch.FloatTensor,
        source: NodeIDs,
        target: NodeIDs,
        edge_type: Optional[RelationIDs] = None,
        edge_weights: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        """
        Calculate new node representations via one round of message passing.

        :param x: shape: (num_nodes, node_embedding_dim)
            The node representations.
        :param source: (num_edges,)
            The source index of each edge.
        :param target: shape: (num_edges,)
            The target index of each edge.
        :param edge_type: shape: (num_edges,)
            The edge type of each edge.
        :param edge_weights: shape (num_edges,)
            Optional per-edge weights applied to the messages.
        :return: shape: (num_nodes, new_node_embedding_dim)
            The new node representations.
        """
        # 1) create one message per edge
        msg = self.message_creator(x=x, source=source, target=target, edge_type=edge_type)
        # 2) optionally scale each message by its edge weight
        if edge_weights is not None:
            msg = msg * edge_weights.unsqueeze(dim=-1)
        # 3) aggregate incoming messages per node
        aggregated = self.message_aggregator(msg=msg, source=source, target=target, edge_type=edge_type, num_nodes=x.shape[0])
        # 4) combine with the old representations
        return self.node_updater(x=x, delta=aggregated)
class GCNBlock(MessagePassingBlock):
    """
    GCN model roughly following https://arxiv.org/abs/1609.02907.

    Notice that this module only performs the message passing part and does **not**
    apply a non-linearity.
    """

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        use_bias: bool,
    ):
        """
        Initialize the block.

        :param input_dim: >0
            The number of input features, i.e. the dimension of the embedding vector.
        :param output_dim: > 0
            The number of output features.
        :param use_bias:
            Whether the linear transformation uses a bias term.
        """
        # GCN = linear messages, summed per node, old representation discarded
        creator = LinearMessageCreator(
            input_dim=input_dim,
            message_dim=output_dim,
            use_bias=use_bias,
        )
        super().__init__(
            message_creator=creator,
            message_aggregator=SumAggregator(),
            node_updater=OnlyUpdate(),
        )
| 11,533
| 30.172973
| 169
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/modules/similarity.py
|
# coding=utf-8
"""Modules for computing similarities between vectors."""
import enum
from abc import abstractmethod
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import functional
from ..utils.common import get_subclass_by_name, value_to_enum
# pylint: disable=abstract-method
class DistanceToSimilarity(nn.Module):
    """A method to convert distances to similarities."""

    # pylint: disable=arguments-differ
    @abstractmethod
    def forward(self, distances: torch.FloatTensor) -> torch.FloatTensor:
        """
        Transform distance values into similarity values.

        :param distances: The distances.
        :return: The similarities.
        """
        raise NotImplementedError
class BoundInverseTransformation(DistanceToSimilarity):
    r"""
    Transform distances into similarities within (0, 1].

    .. math::
        sim = \frac{1}{1 + dist}
    """

    def forward(self, distances: torch.FloatTensor) -> torch.FloatTensor:  # noqa: D102
        # a distance of 0 maps to 1; large distances approach 0
        shifted = distances + 1
        return shifted.reciprocal()
class NegativeTransformation(DistanceToSimilarity):
    r"""
    Use the negated distance as similarity.

    .. math::
        sim = -dist
    """

    def forward(self, distances: torch.FloatTensor) -> torch.FloatTensor:  # noqa: D102
        return distances.neg()
class SimilarityEnum(str, enum.Enum):
    """Selects how node/relation similarity is computed."""

    #: plain dot product
    dot = 'dot'
    #: based on the L2 (Euclidean) distance
    l2 = 'l2'
    #: based on the L1 (Manhattan) distance
    l1 = 'l1'
    #: cosine similarity
    cos = 'cos'
# pylint: disable=abstract-method
class Similarity(nn.Module):
    """Base class for similarity functions."""

    # pylint: disable=arguments-differ
    def forward(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """
        Compute pairwise similarity scores; delegates to :meth:`all_to_all`.

        :param left: shape: (n, d)
            The left vectors.
        :param right: shape: (m, d)
            The right vectors.
        :return: shape: (n, m)
            The similarity matrix.
        """
        return self.all_to_all(left=left, right=right)

    @abstractmethod
    def all_to_all(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """
        Compute all pairwise similarity scores.

        .. math::
            out[i, j] = sim(left[i], right[j])

        :param left: shape: (n, d)
            The left vectors.
        :param right: shape: (m, d)
            The right vectors.
        :return: shape: (n, m)
            sim_ij = sim(left_i, right_j)
        """
        raise NotImplementedError

    @abstractmethod
    def one_to_one(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Compute element-wise similarity scores.

        .. math::
            out[i] = sim(left[i], right[i])

        :param left: shape: (n, d)
        :param right: shape: (n, d)
        :return: shape: (n,)
        """
        raise NotImplementedError
class DotProductSimilarity(Similarity):
    """Dot product as similarity."""

    def all_to_all(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        return left.matmul(right.t())

    def one_to_one(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        return (left * right).sum(dim=-1)
class LpSimilarity(Similarity):
    """Similarity derived from the Lp distance."""

    def __init__(
        self,
        p: int = 2,
        transformation: DistanceToSimilarity = None,
    ):
        """
        Initialize the similarity.

        :param p:
            The p of the L_p distance.
        :param transformation:
            The distance-to-similarity transformation. Defaults to 1 / (1 + dist).
        """
        super().__init__()
        self.p = p
        self.transformation = BoundInverseTransformation() if transformation is None else transformation

    def all_to_all(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        if self.p == 1:
            # custom autograd function avoids the cdist memory blow-up
            dist = l1c(left, right)
        elif self.p == 2:
            # expand || x - y ||**2 = <x,x> + <y,y> - 2<x,y> to avoid the memory issue in the
            # backward pass, cf. https://github.com/pytorch/pytorch/issues/31599
            sq_left = (left ** 2).sum(dim=-1).unsqueeze(dim=1)
            sq_right = (right ** 2).sum(dim=-1).unsqueeze(dim=0)
            dist = (sq_left + sq_right - 2. * left @ right.t()).relu().sqrt()
        else:
            dist = torch.cdist(left, right, p=self.p)
        return self.transformation(dist)

    def one_to_one(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        dist = torch.norm(left - right, dim=-1, p=self.p)
        return self.transformation(dist)

    def extra_repr(self) -> str:  # noqa: D102
        return f'p={self.p}, transformation={self.transformation}'
class CosineSimilarity(Similarity):
    """Cosine similarity."""

    def all_to_all(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        # normalize both sides to unit L2 norm, then take dot products
        return functional.normalize(left, p=2, dim=-1) @ functional.normalize(right, p=2, dim=-1).t()

    def one_to_one(
        self,
        left: torch.FloatTensor,
        right: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        prod = functional.normalize(left, p=2, dim=-1) * functional.normalize(right, p=2, dim=-1)
        return prod.sum(dim=-1)
def transformation_normalizer(name: str) -> str:
    """Normalize a transformation class name for lookup."""
    stripped = name.lower().replace('_', '')
    return stripped.replace('transformation', '')
def get_similarity(
    similarity: Union[SimilarityEnum, str],
    transformation: Optional[Union[DistanceToSimilarity, str]] = None,
) -> Similarity:
    """
    Instantiate a similarity instance.

    :param similarity:
        The chosen similarity, either as enum member or its string value.
    :param transformation:
        The distance-to-similarity transformation, either as instance or by name.
        Only used for the distance-based similarities.
    :return:
        The similarity function.
    """
    if not isinstance(similarity, SimilarityEnum):
        similarity = value_to_enum(enum_cls=SimilarityEnum, value=similarity)
    if isinstance(transformation, str):
        transformation_cls = get_subclass_by_name(base_class=DistanceToSimilarity, name=transformation, normalizer=transformation_normalizer)
        transformation = transformation_cls()
    if similarity == SimilarityEnum.dot:
        return DotProductSimilarity()
    if similarity == SimilarityEnum.cos:
        return CosineSimilarity()
    if similarity == SimilarityEnum.l2:
        return LpSimilarity(p=2, transformation=transformation)
    if similarity == SimilarityEnum.l1:
        return LpSimilarity(p=1, transformation=transformation)
    raise KeyError(f'Unknown similarity: {similarity}')
# Inherit from Function
class L1CDist(torch.autograd.Function):
    """
    Compute the L1 distance between all pairs of vectors.

    .. note ::
        This is a workaround for torch.cdist, until the memory problem is fixed:
        https://github.com/pytorch/pytorch/issues/24345
    """

    # pylint: disable=arguments-differ
    @staticmethod
    def forward(ctx, x1, x2):  # noqa: D102
        # cdist.forward does not suffer from the memory problem; only backward does
        ctx.save_for_backward(x1, x2)
        return torch.cdist(x1, x2, p=1)

    # pylint: disable=arguments-differ
    @staticmethod
    def backward(ctx, grad_dist):  # noqa: D102
        x1, x2 = ctx.saved_tensors
        need_g1, need_g2 = ctx.needs_input_grad[0], ctx.needs_input_grad[1]
        # allocate gradients only for the inputs that require them
        g1 = torch.empty_like(x1) if need_g1 else None
        g2 = torch.empty_like(x2) if need_g2 else None
        if need_g1 or need_g2:
            # process one feature dimension at a time to keep memory bounded;
            # d|a - b|/da = sign(a - b)
            for d in range(x1.shape[1]):
                #: sign: shape: (n1, n2)
                sign = torch.sign(x1[:, None, d] - x2[None, :, d])
                if need_g1:
                    g1[:, d] = (grad_dist * sign).sum(dim=1)
                if need_g2:
                    g2[:, d] = -(grad_dist * sign).sum(dim=0)
        return g1, g2


l1c = L1CDist.apply
| 9,200
| 27.933962
| 148
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/modules/__init__.py
|
# coding=utf-8
"""Components for building and training models."""
from .losses import BaseLoss, MarginLoss, MatchingLoss, SampledMatchingLoss, get_matching_loss, get_pairwise_loss
from .similarity import BoundInverseTransformation, CosineSimilarity, DistanceToSimilarity, DotProductSimilarity, LpSimilarity, NegativeTransformation, Similarity, SimilarityEnum, get_similarity
__all__ = [
'BoundInverseTransformation',
'CosineSimilarity',
'DistanceToSimilarity',
'DotProductSimilarity',
'get_matching_loss',
'get_pairwise_loss',
'get_similarity',
'LpSimilarity',
'MarginLoss',
'MatchingLoss',
'NegativeTransformation',
'BaseLoss',
'SampledMatchingLoss',
'Similarity',
'SimilarityEnum',
]
| 747
| 31.521739
| 194
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/modules/embeddings/base.py
|
"""Basic node embedding modules."""
import enum
import math
import pathlib
from typing import Any, Mapping, Optional, Type, Union
import torch
from torch import nn
from .init.base import ConstantNodeEmbeddingInitializer, NodeEmbeddingInitializer, RandomNodeEmbeddingInitializer
from .norm import EmbeddingNormalizationMethod, EmbeddingNormalizer, NoneEmbeddingNormalizer, get_normalizer_by_name
from ...data import KnowledgeGraph, KnowledgeGraphAlignmentDataset, MatchSideEnum
from ...utils.common import reduce_kwargs_for_method
from ...utils.torch_utils import ExtendedModule
from ...utils.types import NodeIDs
class EmbeddingNormalizationMode(str, enum.Enum):
    """When embedding normalization is applied."""

    #: never normalize
    none = "none"
    #: normalize exactly once, right after initialization
    initial = "initial"
    #: normalize in every forward pass
    every_forward = "every_forward"
    #: normalize after every parameter update (non-differentiable)
    after_update = "after_update"
# pylint: disable=abstract-method
class Embedding(ExtendedModule):
    """An embedding with additional initialization and normalization logic."""

    #: The actual data
    _embedding: nn.Embedding
    # The initializer
    initializer: NodeEmbeddingInitializer
    #: The normalizer
    normalizer: EmbeddingNormalizer
    #: additionally associated KnowledgeGraph
    graph: Optional[KnowledgeGraph]

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: Optional[int] = None,
        initializer: Optional[NodeEmbeddingInitializer] = None,
        trainable: bool = True,
        normalizer: Optional[EmbeddingNormalizer] = None,
        normalization_mode: EmbeddingNormalizationMode = EmbeddingNormalizationMode.none,
        dropout: Optional[float] = None,
        shared: bool = False,
    ):
        """
        Initialize the module.

        :param num_embeddings:
            The number of embeddings.
        :param embedding_dim:
            The embedding dimension. If not provided, the initializer must provide one.
        :param initializer:
            The node embedding initializer (random by default).
        :param trainable:
            Whether the embedding weights receive gradients.
        :param normalizer:
            The node embedding normalizer.
        :param normalization_mode:
            When normalization is applied; must be ``none`` iff no (real) normalizer is given.
        :param dropout:
            An optional node embedding dropout rate.
        :param shared:
            Whether all nodes share one single embedding vector.
        """
        super().__init__()
        # Keep the initializer so reset_parameters() can re-initialize later.
        self.initializer = RandomNodeEmbeddingInitializer() if initializer is None else initializer
        if embedding_dim is None:
            embedding_dim = self.initializer.embedding_dim
        if embedding_dim is None:
            raise ValueError('Either embedding_dim must be provided, or the initializer must provide a dimension.')
        self.embedding_dim = embedding_dim
        # mode "none" must coincide with an absent (or no-op) normalizer
        no_normalizer = normalizer is None or isinstance(normalizer, NoneEmbeddingNormalizer)
        if (normalization_mode == EmbeddingNormalizationMode.none) != no_normalizer:
            raise ValueError("normalization_mode == none if and only if normalize is None.")
        if normalization_mode == EmbeddingNormalizationMode.after_update:
            raise NotImplementedError(normalization_mode)
        self.normalizer = normalizer
        self.normalization_mode = normalization_mode
        # optional node embedding dropout
        self.dropout = None if dropout is None else nn.Dropout(p=dropout)
        self.shared = shared
        self.num_embeddings = num_embeddings
        # with sharing enabled, only a single row is actually stored
        table_rows = 1 if shared else num_embeddings
        self._embedding = nn.Embedding(
            num_embeddings=table_rows,
            embedding_dim=embedding_dim,
        )
        self._embedding.weight.requires_grad_(trainable)
        self.reset_parameters()

    @property
    def weight(self) -> nn.Parameter:
        """Return the embedding weights."""
        return self._embedding.weight

    # pylint: disable=arguments-differ
    def forward(
        self,
        indices: Optional[NodeIDs] = None,
    ) -> torch.FloatTensor:
        """
        Look up embeddings, optionally applying dropout and normalization.

        :param indices:
            The indices to look up. May be None to retrieve all embeddings.
        :return: shape: (batch_size, embedding_dim)
            The embeddings. If indices=None, batch_size=num_embeddings.
        """
        if self.shared:
            if indices is None:
                # expand the single stored row to all logical nodes
                x = self._embedding.weight.repeat(self.num_embeddings, 1)
            else:
                # every index maps to the one shared row
                x = self._embedding(torch.zeros_like(indices))
        elif indices is None:
            x = self._embedding.weight
        else:
            x = self._embedding(indices)
        if self.dropout is not None:
            x = self.dropout(x)
        if self.normalization_mode == EmbeddingNormalizationMode.every_forward:
            x = self.normalizer.normalize(x=x)
        return x

    @torch.no_grad()
    def reset_parameters(self) -> None:
        """Re-initialize (and, depending on the mode, re-normalize) the weights."""
        self.initializer.init_one_(embedding=self._embedding.weight)
        if self.normalization_mode in {
            EmbeddingNormalizationMode.initial,
            EmbeddingNormalizationMode.after_update
        }:
            self._embedding.weight.data = self.normalizer.normalize(x=self._embedding.weight.data)
class NodeEmbeddingInitMethod(str, enum.Enum):
    """Selects how node embeddings are initialized."""

    #: constant ones
    ones = 'ones'
    #: standard normal distribution
    std_one = 'std_one'
    #: normal with std = 1 / sqrt(sum_i n_nodes_i)
    sqrt_total = 'sqrt_total'
    #: normal with std = 1 / sqrt(n_nodes_i)
    sqrt_individual = 'sqrt_individual'

    def __str__(self):  # noqa: D105
        return str(self.name)
def get_embedding_pair(
    init: Union[NodeEmbeddingInitMethod, Type[NodeEmbeddingInitializer], NodeEmbeddingInitializer],
    dataset: KnowledgeGraphAlignmentDataset,
    embedding_dim: Optional[int] = None,
    dropout: Optional[float] = None,
    trainable: bool = True,
    init_config: Optional[Mapping[str, Any]] = None,
    norm: EmbeddingNormalizationMethod = EmbeddingNormalizationMethod.none,
    normalization_mode: EmbeddingNormalizationMode = EmbeddingNormalizationMode.none,
    shared: bool = False,
) -> Mapping[MatchSideEnum, Embedding]:
    """
    Create node embeddings for each graph side.

    :param init:
        The initializer: an enum value, a class, or an already instantiated initializer.
    :param dataset:
        The dataset.
    :param embedding_dim:
        The embedding dimension. If not provided, the initializer must provide one.
    :param dropout:
        A node embedding dropout value.
    :param trainable:
        Whether the embeddings should be trainable.
    :param init_config:
        Key-value configuration for instantiating the initializer (only relevant when
        it is not already instantiated).
    :param norm:
        The embedding normalization method.
    :param normalization_mode:
        When normalization is applied; must match ``norm`` (none iff none).
    :param shared:
        Whether to use one shared embedding for all nodes.
    :return:
        A mapping side -> node embedding.
    """
    # one shared normalizer instance for both sides
    normalizer = get_normalizer_by_name(name=norm)
    embeddings = {}
    for side, side_num_nodes in dataset.num_nodes.items():
        embeddings[side] = Embedding(
            num_embeddings=side_num_nodes,
            embedding_dim=embedding_dim,
            initializer=resolve_initializer(
                init=init,
                dataset=dataset,
                side=side,
                init_config=init_config,
            ),
            trainable=trainable,
            normalizer=normalizer,
            normalization_mode=normalization_mode,
            dropout=dropout,
            shared=shared,
        )
    return nn.ModuleDict(embeddings)
def init_method_normalizer(name: str):
    """Normalize an initialization method name for lookup."""
    stripped = name.lower().replace('_', '')
    return stripped.replace('nodeembeddinginitializer', '')
def resolve_initializer(
    init: Union[NodeEmbeddingInitMethod, Type[NodeEmbeddingInitializer], NodeEmbeddingInitializer, Mapping[MatchSideEnum, NodeEmbeddingInitializer]],
    dataset: KnowledgeGraphAlignmentDataset,
    side: MatchSideEnum,
    init_config: Optional[Mapping[str, Any]] = None,
    cache_root: pathlib.Path = None,
) -> NodeEmbeddingInitializer:
    """
    Resolve a node embedding initializer from a config.

    :param init:
        The chosen init. Can be
        * an enum value
        * a class
        * an instance
        * a mapping from side to instance.
    :param dataset:
        The dataset.
    :param side:
        The side for which the initializer should be created.
    :param init_config:
        Additional configuration for the initializer.
    :param cache_root:
        The cache root directory used for storing datasets. Defaults to ~/.kgm
    :return:
        An initializer instance.
    :raises ValueError:
        If ``init`` cannot be resolved.
    """
    # Accept any mapping here, matching the declared parameter type
    # (previously only plain ``dict`` instances were recognized).
    if isinstance(init, Mapping):
        init = init[side]
    if cache_root is None:
        cache_root = pathlib.Path("~", ".kgm")
    cache_root = cache_root.expanduser()
    # NOTE(review): ``cache_root`` is resolved but never used below — presumably meant to be
    # forwarded to initializers that load pretrained data; confirm against callers.
    # already instantiated
    if isinstance(init, NodeEmbeddingInitializer):
        return init
    # a class: instantiate with the applicable subset of init_config
    if isinstance(init, type) and issubclass(init, NodeEmbeddingInitializer):
        return init(**(reduce_kwargs_for_method(method=init.__init__, kwargs=init_config)))
    if init == NodeEmbeddingInitMethod.sqrt_total:
        total = sum(dataset.num_nodes.values())
        return RandomNodeEmbeddingInitializer(std=1. / math.sqrt(total))
    if init == NodeEmbeddingInitMethod.sqrt_individual:
        return RandomNodeEmbeddingInitializer(std=1. / math.sqrt(dataset.num_nodes[side]))
    if init == NodeEmbeddingInitMethod.std_one:
        return RandomNodeEmbeddingInitializer(std=1.)
    if init == NodeEmbeddingInitMethod.ones:
        return ConstantNodeEmbeddingInitializer(value=1.0)
    raise ValueError(init)
| 10,589
| 32.619048
| 149
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/modules/embeddings/norm.py
|
# coding=utf-8
"""Embedding normalization."""
import enum
from abc import abstractmethod
from typing import Union
import torch
from torch.nn import functional
from ...utils.common import get_subclass_by_name
class EmbeddingNormalizer:
    """Embedding normalization."""

    @abstractmethod
    def normalize(
        self,
        x: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Normalize a batch of embeddings, e.g. during the forward pass.

        :param x: shape: (batch_size, dim)
            The tensor of embeddings.
        """
        raise NotImplementedError
class LpNormalization(EmbeddingNormalizer):
    """Normalize to unit L_p norm."""

    def __init__(self, p: int):
        """
        Initialize the normalizer.

        :param p: >0
            The parameter p of the Lp distance.
        """
        self.p = p

    def normalize(
        self,
        x: torch.FloatTensor,
    ) -> torch.FloatTensor:  # noqa: D102
        # project every row onto the unit L_p sphere
        return functional.normalize(x, p=self.p, dim=-1)
def norm_method_normalizer(name: str):
    """Normalize a normalization method name for lookup."""
    stripped = name.lower().replace('_', '')
    return stripped.replace('embeddingnormalizer', '')
class L2EmbeddingNormalizer(LpNormalization):
    """L2 normalization."""

    def __init__(self):
        """Initialize with p=2."""
        super().__init__(p=2)
class L1EmbeddingNormalizer(LpNormalization):
    """Normalization to unit Manhattan (L1) norm."""

    def __init__(self):
        """Initialize the normalizer with p=1."""
        super().__init__(p=1)
class NoneEmbeddingNormalizer(EmbeddingNormalizer):
    """No-op normalization: returns the input unchanged."""

    def normalize(self, x: torch.FloatTensor) -> torch.FloatTensor:  # noqa: D102
        # Identity: pass the embeddings through untouched.
        return x
@enum.unique
class EmbeddingNormalizationMethod(str, enum.Enum):
    """An enum of the supported embedding normalization methods."""

    #: No normalization.
    none = 'none'
    #: Normalize to unit L2 norm.
    l2 = 'l2'
    #: Normalize to unit L1 norm.
    l1 = 'l1'
def get_normalizer_by_name(name: Union[EmbeddingNormalizationMethod, str]) -> EmbeddingNormalizer:
    """Get an embedding normalizer by name.

    :param name:
        The method name, either as a string or an enum member.

    :return:
        An instance of the matching normalizer class.
    """
    # Unwrap enum members to their string value.
    if isinstance(name, EmbeddingNormalizationMethod):
        name = name.value
    # Resolve the concrete subclass via the shared name-based lookup and instantiate it.
    normalizer_cls = get_subclass_by_name(
        base_class=EmbeddingNormalizer,
        name=name,
        normalizer=norm_method_normalizer,
    )
    return normalizer_cls()
| 2,340
| 22.887755
| 98
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/modules/embeddings/__init__.py
|
# coding=utf-8
"""Modules for embeddings."""
from .base import get_embedding_pair
from .init.base import ConstantNodeEmbeddingInitializer, PretrainedNodeEmbeddingInitializer, RandomNodeEmbeddingInitializer
# Public API of the embeddings package (kept sorted alphabetically).
__all__ = [
    'ConstantNodeEmbeddingInitializer',
    'PretrainedNodeEmbeddingInitializer',
    'RandomNodeEmbeddingInitializer',
    'get_embedding_pair',
]
| 367
| 29.666667
| 123
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/modules/embeddings/init/base.py
|
# coding=utf-8
"""Node embedding initialization."""
import pathlib
from typing import Any, Optional, Sequence, Union
import torch
from torch import nn
from ....data import KnowledgeGraph, MatchSideEnum
class NodeEmbeddingInitializer:
    """Base class for node embedding initialization methods."""

    def init_one_(
        self,
        embedding: torch.FloatTensor,
        graph: Optional[KnowledgeGraph] = None,
    ) -> None:
        """
        Initialize an embedding tensor in-place.

        :param embedding:
            The embedding tensor to fill.
        :param graph:
            The corresponding knowledge graph. TODO: DEPRECATED.
        """
        raise NotImplementedError

    @property
    def embedding_dim(self) -> Optional[int]:
        """Return the embedding dimension, or None when the initializer does not fix one."""
        return None
class RandomNodeEmbeddingInitializer(NodeEmbeddingInitializer):
    """Initialize nodes i.i.d. with random vectors drawn from the given distribution."""

    def __init__(
        self,
        random_distribution=nn.init.normal_,
        **kwargs: Any,
    ):
        """
        Initialize the initializer.

        :param random_distribution:
            The in-place random initialization routine to use.
        :param kwargs:
            Additional keyword arguments forwarded to the routine on each call
            (e.g. ``mean``/``std`` for ``nn.init.normal_``).
        """
        self.random_dist_ = random_distribution
        self.kwargs = kwargs

    def init_one_(
        self,
        embedding: torch.FloatTensor,
        graph: Optional[KnowledgeGraph] = None,
    ) -> None:  # noqa: D102
        # Fill the tensor in-place by sampling from the configured distribution.
        self.random_dist_(embedding, **self.kwargs)
class ConstantNodeEmbeddingInitializer(NodeEmbeddingInitializer):
    """Fill embeddings with a single constant value."""

    def __init__(
        self,
        value: float = 1.0,
    ):
        """
        Initialize the initializer.

        :param value:
            The constant value written into every entry.
        """
        self.value = value

    def init_one_(
        self,
        embedding: torch.FloatTensor,
        graph: Optional[KnowledgeGraph] = None,
    ) -> None:  # noqa: D102
        # Overwrite every entry in-place with the configured constant.
        nn.init.constant_(embedding, self.value)
class PretrainedNodeEmbeddingInitializer(NodeEmbeddingInitializer):
    """Initialize node embeddings from a pretrained embedding matrix."""

    def __init__(
        self,
        embeddings: torch.FloatTensor,
    ):
        """
        Initialize the initializer.

        :param embeddings: shape: (n, d)
            The pretrained embeddings.
        """
        super().__init__()
        self.pretrained = embeddings

    @staticmethod
    def output_file_path(directory: Union[pathlib.Path, str], side: MatchSideEnum) -> pathlib.Path:
        """Return the canonical file path for the given side's embeddings."""
        return pathlib.Path(directory) / f'{side.value}.pt'

    @staticmethod
    def from_path(directory: Union[pathlib.Path, str], side: MatchSideEnum) -> 'PretrainedNodeEmbeddingInitializer':
        """Construct an initializer from pretrained embeddings stored under a path."""
        # TODO: Watch out for ID mismatch!
        path = PretrainedNodeEmbeddingInitializer.output_file_path(directory=directory, side=side)
        return PretrainedNodeEmbeddingInitializer(embeddings=torch.load(path))

    def save_to_path(self, directory: Union[pathlib.Path, str], side: MatchSideEnum) -> pathlib.Path:
        """Save the pretrained node embeddings to a file and return its path."""
        output_path = self.output_file_path(directory=directory, side=side)
        torch.save(obj=self.pretrained, f=output_path)
        return output_path

    def init_one_(
        self,
        embedding: torch.FloatTensor,
        graph: Optional[KnowledgeGraph] = None,
    ) -> None:  # noqa: D102
        # Copy the pretrained values into the given tensor in-place.
        embedding.data.copy_(self.pretrained, non_blocking=True)

    @property
    def embedding_dim(self) -> Optional[int]:  # noqa: D102
        # The dimension is fixed by the last axis of the pretrained matrix.
        return self.pretrained.shape[-1]
class CombinedInitializer(NodeEmbeddingInitializer):
    """Combine several initializers, each responsible for a subset of the embeddings."""

    def __init__(
        self,
        initializer_map: torch.LongTensor,
        initializers: Sequence[NodeEmbeddingInitializer],
    ):
        """
        Initialize the initializer.

        :param initializer_map: shape: (num_embeddings,)
            For each embedding ID, the index of the initializer responsible for it.
        :param initializers:
            The base initializers.
        """
        self.initializer_map = initializer_map
        self.base_initializers = initializers

    def init_one_(
        self,
        embedding: torch.FloatTensor,
        graph: Optional[KnowledgeGraph] = None,
    ) -> None:  # noqa: D102
        for index, base in enumerate(self.base_initializers):
            # Select the rows this base initializer is responsible for.
            selected = self.initializer_map == index
            # Advanced indexing returns a copy, so initialize a scratch tensor
            # of the same shape and write it back explicitly.
            scratch = torch.empty_like(embedding[selected])
            base.init_one_(scratch)
            embedding.data[selected] = scratch
| 4,886
| 28.439759
| 116
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/modules/embeddings/init/__init__.py
|
# coding=utf-8
"""Node embedding initialization methods."""
from .base import NodeEmbeddingInitializer
# Public API of the init subpackage.
__all__ = [
    'NodeEmbeddingInitializer',
]
| 150
| 17.875
| 44
|
py
|
rank-based-evaluation
|
rank-based-evaluation-main/src/kgm/training/base.py
|
"""Common training loop parts."""
import logging
from typing import Any, Generic, Iterable, Mapping, Optional, Tuple, Type, TypeVar
import torch
from torch import nn
from torch.optim import Optimizer
from kgm.utils.common import NonFiniteLossError, kwargs_or_empty, last
from kgm.utils.torch_utils import construct_optimizer_from_config, get_device
# Module-level logger for this file.
logger = logging.getLogger(name=__name__)
# Generic placeholder for the batch type yielded by a trainer's `_iter_batches`.
BatchType = TypeVar('BatchType')
class BaseTrainer(Generic[BatchType]):
    """A base class for training loops.

    Subclasses implement :meth:`_iter_batches` and :meth:`_train_one_batch`;
    this class provides the epoch loop, gradient accumulation, optional
    gradient clipping, and optimizer management.
    """

    #: The model being trained.
    model: nn.Module

    #: The optimizer instance; (re-)created by :meth:`reset_optimizer`.
    optimizer: Optimizer

    def __init__(
        self,
        model: nn.Module,
        train_batch_size: Optional[int] = None,
        optimizer_cls: Optional[Type[Optimizer]] = None,
        optimizer_kwargs: Optional[Mapping[str, Any]] = None,
        clip_grad_norm: Optional[float] = None,
        accumulate_gradients: int = 1,
        device: Optional[torch.device] = None,
    ):
        """
        Initialize a new training loop.

        :param model:
            The model to train.
        :param train_batch_size:
            The batch size to use for training.
        :param optimizer_cls:
            The optimizer class. Defaults to Adam ('adam') when None.
            NOTE(review): a string name also appears to be accepted, since it is
            forwarded to ``construct_optimizer_from_config`` — confirm there.
        :param optimizer_kwargs:
            Keyword-based arguments for the optimizer.
        :param clip_grad_norm:
            Whether to apply gradient clipping (norm-based).
        :param accumulate_gradients:
            Accumulate gradients over batches. This can be used to simulate a larger batch size, while keeping the
            memory footprint small.
        :param device:
            The device on which to train.
        """
        device = get_device(device=device)

        # Bind parameters (deduplicated: accumulate_gradients/device were previously assigned twice)
        self.train_batch_size = train_batch_size
        self.model = model.to(device=device)
        self.epoch = 0
        self.clip_grad_norm = clip_grad_norm
        self.accumulate_gradients = accumulate_gradients
        self.device = device

        # Create the optimizer configuration; default to Adam when unspecified.
        if optimizer_cls is None:
            optimizer_cls = 'adam'
        optimizer_config = dict(cls=optimizer_cls)
        optimizer_config.update(kwargs_or_empty(optimizer_kwargs))
        self.optimizer_config = optimizer_config
        self.reset_optimizer()

    def reset_optimizer(self) -> None:
        """Reset the optimizer, re-creating it (and thus its state) from the stored configuration."""
        self.optimizer = construct_optimizer_from_config(
            model=self.model,
            optimizer_config=self.optimizer_config,
        )

    def _train_one_epoch(self) -> Mapping[str, Any]:
        """
        Train the model for one epoch on the given device.

        :return:
            A dictionary of training results. Contains at least `loss` with the epoch loss value.

        :raises NonFiniteLossError:
            If a batch produces a non-finite loss value.
        """
        epoch_loss, counter = 0., 0
        # Iterate over batches; i stays -1 if there are no batches at all.
        i = -1
        for i, batch in enumerate(self._iter_batches()):
            # Compute batch loss
            batch_loss, real_batch_size = self._train_one_batch(batch=batch)

            # Break on non-finite loss values
            if not torch.isfinite(batch_loss).item():
                raise NonFiniteLossError

            # Update epoch loss; weight by the actual batch size so the final mean is exact.
            epoch_loss += batch_loss.item() * real_batch_size
            counter += real_batch_size

            # compute gradients
            batch_loss.backward()

            # Apply gradient updates every `accumulate_gradients` batches.
            # NOTE(review): with accumulate_gradients=k this fires at i=0, k, 2k, ...,
            # i.e. the very first batch triggers an update on its own;
            # `(i + 1) % k == 0` may be the intent — confirm before changing.
            if i % self.accumulate_gradients == 0:
                self._parameter_update()

        # For the last (partial) accumulation window, we definitely do an update.
        if self.accumulate_gradients > 1 and (i % self.accumulate_gradients) != 0:
            self._parameter_update()

        # NOTE(review): raises ZeroDivisionError when _iter_batches yields nothing.
        return dict(
            loss=epoch_loss / counter
        )

    def _parameter_update(self):
        """Update the parameters using the optimizer, with optional gradient clipping."""
        # Gradient clipping (norm-based) over trainable parameters only.
        if self.clip_grad_norm is not None:
            nn.utils.clip_grad_norm_(
                parameters=(p for p in self.model.parameters() if p.requires_grad),
                max_norm=self.clip_grad_norm,
            )
        # update parameters
        self.optimizer.step()
        # clear gradients afterwards
        self.optimizer.zero_grad()

    def _iter_batches(self) -> Iterable[BatchType]:
        """Iterate over batches. Must be implemented by subclasses."""
        raise NotImplementedError

    def _train_one_batch(self, batch: BatchType) -> Tuple[torch.Tensor, int]:
        """
        Train on a single batch. Must be implemented by subclasses.

        :param batch: shape: (batch_size,)
            The sample IDs.

        :return:
            A tuple (batch_loss, real_batch_size) of the batch loss (a scalar tensor), and the actual batch size.
        """
        raise NotImplementedError

    def train_iter(
        self,
        num_epochs: int = 1,
    ) -> Iterable[Mapping[str, Any]]:
        """
        Train the model, and yield intermediate results.

        :param num_epochs:
            The number of epochs.

        :return:
            One result dictionary per epoch, containing the epoch number and the training results.
        """
        epoch_result = dict()
        for _ in range(self.epoch, self.epoch + num_epochs):
            self.model.train()
            # training step
            self.epoch += 1
            epoch_result = dict(
                epoch=self.epoch,
                train=self._train_one_epoch(),
            )
            yield epoch_result
        # A generator's return value is only visible via StopIteration;
        # `train` retrieves the final result via `last(...)` instead.
        return epoch_result

    def train(
        self,
        num_epochs: int = 1,
        final_eval: bool = True,
    ) -> Mapping[str, Any]:
        """
        Train the model, and return the last epoch's results.

        :param num_epochs:
            The number of epochs.
        :param final_eval:
            Whether to perform an evaluation after the last training epoch.
            NOTE(review): currently unused in this base implementation.

        :return:
            A dictionary containing the result.
        """
        return last(self.train_iter(num_epochs=num_epochs))
| 6,203
| 30.175879
| 114
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.