repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
XFL | XFL-master/test/common/crypto/one_time_pad/test_one_time_add.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from functools import reduce
from random import randint
from secrets import token_hex
import numpy as np
import pytest
import torch
from common.crypto.one_time_pad.one_time_add import (OneTimeAdd,
OneTimePadCiphertext,
OneTimePadContext,
OneTimeKey)
def almost_equal(a, b):
    """Return True if ``a`` and ``b`` are element-wise equal within 1e-4.

    Works for numpy arrays, torch tensors, and plain scalars. Uses the
    absolute difference: the original one-sided check ``a - b < 1e-4``
    wrongly reported equality whenever ``a`` was much smaller than ``b``
    (e.g. almost_equal(0, 100) was True).
    """
    if isinstance(a, np.ndarray):
        return np.all(np.abs(a - b) < 1e-4)
    elif isinstance(a, torch.Tensor):
        return torch.all(torch.abs(a - b) < 1e-4)
    else:
        return abs(a - b) < 1e-4
# def correctness_scalar(modulus_exp, data_type, num_keys):
# # random keys
# key1 = [np.array(int(token_hex(modulus_exp//8), 16))]
# for i in range(num_keys - 1):
# key = int(token_hex(modulus_exp//8), 16)
# key = np.array(key)
# key1.append(key)
# is_addition = randint(0, 1)
# # random input
# if "numpy" in data_type:
# data = np.random.random(())
# elif "torch" in data_type:
# data = torch.rand(())
# # context
# context_ = OneTimePadContext(modulus_exp, data_type)
# # encrypt
# ciphertext = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
# ciphertext2 = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=True)
# assert pickle.dumps(ciphertext.data) == ciphertext2
# # decrypt
# plaintext = OneTimeAdd.decrypt(context_, ciphertext, key1, is_addition)
# assert almost_equal(data, plaintext)
def correctness(data_shape, modulus_exp, data_type, num_keys):
    """End-to-end correctness check for the one-time-pad scheme.

    Builds random keys matching ``data_shape``, encrypts random data,
    verifies decryption recovers the plaintext, then exercises the
    homomorphic add/subtract behaviour of ciphertexts.

    Args:
        data_shape: shape of the random plaintext; () means scalar.
        modulus_exp: modulus bit length (e.g. 64 or 128).
        data_type: plaintext container type, a "numpy*" or "torch*" string.
        num_keys: number of key arrays bundled into one OneTimeKey.
    """
    if data_shape == ():
        # scalar input: flag with 0 so the key is built as a 0-d array below
        flatten_shape = 0
    else:
        flatten_shape = reduce(lambda x, y: x*y, data_shape)
    # random keys
    if flatten_shape == 0:
        key1 = [np.array(int(token_hex(modulus_exp//8), 16))]
        for i in range(num_keys - 1):
            key = int(token_hex(modulus_exp//8), 16)
            key = np.array(key)
            key1.append(key)
    else:
        # one random modulus_exp-bit integer per flattened element
        key1 = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
        key1 = [np.array(key1).reshape(*data_shape)]
        for i in range(num_keys - 1):
            key = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
            key = np.array(key).reshape(*data_shape)
            key1.append(key)
    key1 = OneTimeKey(key1, modulus_exp)
    # one random add/subtract flag per key component
    is_addition = [randint(0, 1) for i in range(len(key1))]
    # random input
    if "numpy" in data_type:
        data = np.random.random(data_shape)
    elif "torch" in data_type:
        data = torch.rand(data_shape)
    # context
    context_ = OneTimePadContext(modulus_exp, data_type)
    # encrypt
    ciphertext = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
    ciphertext2 = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=True)
    # serialized=True output must equal the pickled raw ciphertext data
    assert pickle.dumps(ciphertext.data) == ciphertext2
    # decrypt
    plaintext = OneTimeAdd.decrypt(context_, ciphertext, key1, is_addition)
    assert almost_equal(data, plaintext)
    # addition and subtraction
    # random input
    if "numpy" in data_type:
        data3 = np.random.random(data_shape)
    elif "torch" in data_type:
        data3 = torch.rand(data_shape)
    # key3 = -key1, so ciphertext + ciphertext3 cancels the pads directly
    key3 = list(map(lambda x: np.array(-x), key1.value))
    key3 = OneTimeKey(key3, modulus_exp)
    ciphertext3 = OneTimeAdd.encrypt(context_, data3, key3, is_addition, serialized=False)
    ciphertext4 = OneTimeAdd.encrypt(context_, data3, key1, is_addition, serialized=False)
    c = ciphertext + ciphertext3
    plaintext = c.decode()
    assert almost_equal(data + data3, plaintext)
    # same-key ciphertexts cancel under subtraction
    c = ciphertext - ciphertext4
    plaintext = c.decode()
    assert almost_equal(data - data3, plaintext)
    # build a second independent key set (key2), same construction as key1
    if flatten_shape == 0:
        key2 = [np.array(int(token_hex(modulus_exp//8), 16))]
        for i in range(num_keys - 1):
            key = int(token_hex(modulus_exp//8), 16)
            key = np.array(key)
            key2.append(key)
    else:
        key2 = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
        key2 = [np.array(key2).reshape(*data_shape)]
        for i in range(num_keys - 1):
            key = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
            key = np.array(key).reshape(*data_shape)
            key2.append(key)
    key2 = OneTimeKey(key2, modulus_exp)
    # sum of two ciphertexts decrypts under the element-wise sum of their keys
    ciphertext1 = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
    ciphertext2 = OneTimeAdd.encrypt(context_, data3, key2, is_addition, serialized=False)
    c = ciphertext1 + ciphertext2
    key3 = [key1.value[i] + key2.value[i] for i in range(len(key1))]
    key3 = OneTimeKey(key3, modulus_exp)
    p = OneTimeAdd.decrypt(context_, c, key3, is_addition)
    assert almost_equal(data + data3, p)
    # encrypting data3 with -key2 makes the sum decrypt under key1 - key2
    ciphertext1 = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
    ciphertext2 = OneTimeAdd.encrypt(context_, data3, OneTimeKey([-i for i in key2.value], modulus_exp), is_addition, serialized=False)
    c = ciphertext1 + ciphertext2
    key3 = [key1.value[i] - key2.value[i] for i in range(len(key1))]
    key3 = OneTimeKey(key3, modulus_exp)
    p = OneTimeAdd.decrypt(context_, c, key3, is_addition)
    assert almost_equal(data + data3, p)
def test_correctness():
    """Run the one-time-pad round-trip check over every supported
    combination of data shape, modulus size, container type and key
    count (key counts above 1 are currently disabled)."""
    shapes = [(), (11,), (3, 5), (7, 10, 24)]
    exponents = [64, 128]
    dtypes = ["numpy.ndarray", "numpy", "torch.Tensor", "torch"]
    key_counts = [1]  # [1, 3, 5]
    combos = [
        (shape, exponent, dtype, n_keys)
        for shape in shapes
        for exponent in exponents
        for dtype in dtypes
        for n_keys in key_counts
    ]
    for shape, exponent, dtype, n_keys in combos:
        correctness(shape, exponent, dtype, n_keys)
def test_exception():
    """Exercise the error paths of the one-time-pad API: bad context
    arguments, mismatched key/data shapes, and incompatible ciphertexts."""
    # NOTE(review): the trailing comma makes modulus_exp a tuple (128,) —
    # presumably an intentional invalid argument for the ValueError check,
    # but worth confirming.
    modulus_exp = 128,
    data_type = "pandas"
    with pytest.raises(ValueError):
        OneTimePadContext(modulus_exp, data_type)
    with pytest.raises(ValueError):
        OneTimePadContext(modulus_exp, data_type)
    # ------------------------------------------------------------------------------
    # ciphertext construction: wrong data type and wrong context type
    modulus_exp = 128
    data_type = "numpy.ndarray"
    context_ = OneTimePadContext(modulus_exp, data_type)
    data = 'fdfdsfd'
    with pytest.raises(TypeError):
        OneTimePadCiphertext(data, context_)
    context_ = 54645654634
    data = np.array([2, 4])
    with pytest.raises(TypeError):
        OneTimePadCiphertext(data, context_)
    # ------------------------------------------------------------------------------
    # encrypt with a (3, 4) key against (4, 5) data -> shape mismatch
    key_shape = (3, 4)
    flatten_shape = 12
    key = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
    key = np.array(key).reshape(*key_shape)
    key = OneTimeKey(key, modulus_exp)
    is_addition = [randint(0, 1) for i in range(len(key))]
    data_type = "torch.Tensor"
    context_ = OneTimePadContext(modulus_exp, data_type)
    data_shape = (4, 5)
    data = torch.rand(data_shape)
    with pytest.raises(ValueError):
        OneTimeAdd.encrypt(context_, data, key, is_addition, serialized=True)
    key_shape = (4, 5)
    flatten_shape = 20
    key = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
    key = np.array(key).reshape(*key_shape)
    key = OneTimeKey(key, modulus_exp)
    # ------------------------------------------------------------------------------
    # decrypt with a key whose shape differs from the encryption key
    modulus_exp = 128
    data_type = 'numpy'
    key_shape = (3, 4)
    flatten_shape = 12
    key = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
    key = np.array(key).reshape(*key_shape)
    key = OneTimeKey(key, modulus_exp)
    key_shape = (4, 5)
    flatten_shape = 20
    key1 = [int(token_hex(modulus_exp//8), 16) for i in range(flatten_shape)]
    key1 = np.array(key1).reshape(*key_shape)
    key1 = OneTimeKey(key1, modulus_exp)
    is_addition = randint(0, 1)
    data_shape = (4, 5)
    data = np.random.random(data_shape)
    context_ = OneTimePadContext(modulus_exp, data_type)
    ciphertext = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
    with pytest.raises(ValueError):
        OneTimeAdd.decrypt(context_, ciphertext, key, is_addition)
    # decrypt with a key that has more components than used for encryption
    key = [key.value, key.value]
    key = OneTimeKey(key, modulus_exp)
    ciphertext = OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
    with pytest.raises(ValueError):
        OneTimeAdd.decrypt(context_, ciphertext, key, is_addition)
    # ------------------------------------------------------------------------------
    # adding ciphertexts from contexts with different modulus sizes must fail
    modulus_exp = 64
    context2 = OneTimePadContext(modulus_exp, data_type)
    key2 = OneTimeKey(key1.value, modulus_exp)
    ciphertext2 = OneTimeAdd.encrypt(context2, data, key2, is_addition, serialized=False)
    with pytest.raises(ValueError):
        ciphertext + ciphertext2
    # ------------------------------------------------------------------------------
    # is_addition list longer than the key -> length mismatch on encrypt
    is_addition = [randint(0, 1) for i in range(len(key) + 1)]
    with pytest.raises(ValueError):
        OneTimeAdd.encrypt(context_, data, key1, is_addition, serialized=False)
| 10,297 | 34.388316 | 135 | py |
XFL | XFL-master/test/common/fedavg/otp/test_trainer3.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import json
# import time
# from concurrent import futures
# import grpc
# from common.utils.grpc_channel_options import options
# from common.communication.gRPC.python import trainer_pb2_grpc
# from service.trainer import TrainerService
# from fed_api import Commu
# from fed_api import FedNode
# from fed_api import DataPool
# from fed_api import get_fedavg_trainer_inst
# from random_input import param_torch, param_numpy, weight_factors, sec_conf
# def do_fedavg(id, sec_conf):
# fedavg_trainer = get_fedavg_trainer_inst(sec_conf)
# if 'torch' in sec_conf['data_type']:
# local_weight = param_torch[id-1]
# elif 'numpy' in sec_conf['data_type']:
# local_weight = param_numpy[id-1]
# weight_factor = weight_factors[id-1]
# fedavg_trainer.aggregate(local_weight, weight_factor)
# if __name__ == "__main__":
# id = 'node-3'
# FedNode.init_fednode()
# FedNode.config["node_id"] = str(id)
# FedNode.node_id = str(id)
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options)
# trainer_pb2_grpc.add_TrainerServicer_to_server(TrainerService(), server)
# FedNode.add_server(server, "trainer")
# server.start()
# with open("./config/data_pool_config.json") as f:
# data_pool_config = json.load(f)
# DataPool(data_pool_config)
# Commu(FedNode.config)
# time.sleep(5)
# for conf in sec_conf:
# do_fedavg(3, conf)
| 2,072 | 29.485294 | 87 | py |
XFL | XFL-master/test/common/fedavg/otp/test_trainer1.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import json
# import time
# from concurrent import futures
# import grpc
# from common.utils.grpc_channel_options import options
# from common.communication.gRPC.python import trainer_pb2_grpc
# from service.trainer import TrainerService
# from fed_api import Commu
# from fed_api import FedNode
# from fed_api import DataPool
# from fed_api import get_fedavg_trainer_inst
# from random_input import param_torch, param_numpy, weight_factors, sec_conf
# def do_fedavg(id, sec_conf):
# fedavg_trainer = get_fedavg_trainer_inst(sec_conf)
# if 'torch' in sec_conf['data_type']:
# local_weight = param_torch[id-1]
# elif 'numpy' in sec_conf['data_type']:
# local_weight = param_numpy[id-1]
# weight_factor = weight_factors[id-1]
# fedavg_trainer.aggregate(local_weight, weight_factor)
# if __name__ == "__main__":
# id = 'node-1'
# FedNode.init_fednode()
# FedNode.config["node_id"] = str(id)
# FedNode.node_id = str(id)
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options)
# trainer_pb2_grpc.add_TrainerServicer_to_server(TrainerService(), server)
# FedNode.add_server(server, "trainer")
# server.start()
# with open("./config/data_pool_config.json") as f:
# data_pool_config = json.load(f)
# DataPool(data_pool_config)
# Commu(FedNode.config)
# time.sleep(5)
# for conf in sec_conf:
# do_fedavg(1, conf)
| 2,075 | 29.985075 | 87 | py |
XFL | XFL-master/test/common/fedavg/otp/random_input.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import numpy as np
# import torch
# import random
# from collections import OrderedDict
# seed = 0
# torch.manual_seed(seed)
# # torch.cuda.manual_seed_all(seed)
# np.random.seed(seed)
# random.seed(seed)
# # torch.backends.cudnn.deterministic = True
# def gen_params(dtype: str):
# shape_dict = OrderedDict(
# {
# 'a': (2, 3, 4),
# 'b': (5),
# 'c': ()
# }
# )
# w = OrderedDict()
# for k, v in shape_dict.items():
# if dtype == 'numpy':
# w[k] = np.random.random(v).astype(np.float32) * 2 - 1
# elif dtype == 'torch':
# w[k] = torch.rand(v) * 2 - 1
# return w
# num_trainer = 3
# param_torch = [gen_params('torch') for i in range(num_trainer)]
# param_numpy = [gen_params('numpy') for i in range(num_trainer)]
# weight_factors = [random.random() for i in range(num_trainer)]
# sec_conf = [
# {
# "method": "otp",
# "key_bitlength": 128,
# "data_type": "torch.Tensor",
# "key_exchange": {
# "key_bitlength": 3072,
# "optimized": True
# },
# "csprng": {
# "name": "hmac_drbg",
# "method": "sha512",
# }
# },
# {
# "method": "otp",
# "key_bitlength": 128,
# "data_type": "numpy.ndarray",
# "key_exchange": {
# "key_bitlength": 3072,
# "optimized": True
# },
# "csprng": {
# "name": "hmac_drbg",
# "method": "sha512",
# }
# }
# ]
| 2,199 | 24.882353 | 74 | py |
XFL | XFL-master/test/common/fedavg/otp/test_trainer2.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import json
# import time
# from concurrent import futures
# import grpc
# from common.utils.grpc_channel_options import options
# from common.communication.gRPC.python import trainer_pb2_grpc
# from service.trainer import TrainerService
# from fed_api import Commu
# from fed_api import FedNode
# from fed_api import DataPool
# from fed_api import get_fedavg_trainer_inst
# from random_input import param_torch, param_numpy, weight_factors, sec_conf
# def do_fedavg(id, sec_conf):
# fedavg_trainer = get_fedavg_trainer_inst(sec_conf)
# if 'torch' in sec_conf['data_type']:
# local_weight = param_torch[id-1]
# elif 'numpy' in sec_conf['data_type']:
# local_weight = param_numpy[id-1]
# weight_factor = weight_factors[id-1]
# fedavg_trainer.aggregate(local_weight, weight_factor)
# if __name__ == "__main__":
# id = 'node-2'
# FedNode.init_fednode()
# FedNode.config["node_id"] = str(id)
# FedNode.node_id = str(id)
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options)
# trainer_pb2_grpc.add_TrainerServicer_to_server(TrainerService(), server)
# FedNode.add_server(server, "trainer")
# server.start()
# with open("./config/data_pool_config.json") as f:
# data_pool_config = json.load(f)
# DataPool(data_pool_config)
# Commu(FedNode.config)
# time.sleep(5)
# for conf in sec_conf:
# do_fedavg(2, conf)
| 2,072 | 29.485294 | 87 | py |
XFL | XFL-master/test/common/fedavg/otp/test_scheduler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import json
# import time
# from concurrent import futures
# from typing import OrderedDict
# from functools import reduce
# import grpc
# import numpy as np
# import torch
# from common.utils.grpc_channel_options import options
# from common.communication.gRPC.python import scheduler_pb2_grpc
# from service.scheduler import SchedulerService
# from fed_api import Commu
# from fed_api import FedNode
# from fed_api import DataPool
# from fed_api import get_fedavg_scheduler_inst
# from random_input import param_torch, param_numpy, weight_factors, sec_conf
# def almost_equal(a, b):
# for k in a:
# if isinstance(a[k], np.ndarray):
# return np.all(a[k] - b[k] < 1e-4)
# else:
# return torch.all(a[k] - b[k] < 1e-4)
# def do_fedavg(sec_conf):
# fedavg_trainer = get_fedavg_scheduler_inst(sec_conf)
# result = fedavg_trainer.aggregate(weight_factors)
# def f(x, y):
# for k in x:
# x[k] += y[k]
# return x
# if 'torch' in sec_conf["data_type"]:
# param = param_torch
# elif 'numpy' in sec_conf["data_type"]:
# param = param_numpy
# for i, item in enumerate(param):
# for k in item:
# param[i][k] *= weight_factors[i]
# expected_result = reduce(f, param)
# sum_weight_factors = sum(weight_factors)
# for k in expected_result:
# expected_result[k] /= sum_weight_factors
# assert almost_equal(result, expected_result)
# if __name__ == "__main__":
# FedNode.init_fednode(is_scheduler=True)
# FedNode.config["node_id"] = 'scheduler'
# FedNode.node_id = 'scheduler'
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options=options)
# scheduler_pb2_grpc.add_SchedulerServicer_to_server(SchedulerService(), server)
# FedNode.add_server(server, "scheduler")
# server.start()
# with open("./config/data_pool_config.json") as f:
# data_pool_config = json.load(f)
# DataPool(data_pool_config)
# Commu(FedNode.config)
# time.sleep(5)
# for conf in sec_conf:
# do_fedavg(conf)
| 2,756 | 28.967391 | 87 | py |
XFL | XFL-master/test/common/utils/test_model_preserver.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from common.utils.model_preserver import ModelPreserver, os, torch
class TestModelPreserver():
    """Unit tests for ModelPreserver with filesystem and torch I/O mocked out."""

    @pytest.mark.parametrize('final,model_path', [(True, "test_save_dir/test.model.pth"), (False, "test_save_dir/test.model_epoch_10.pth")])
    def test_save(self, mocker, final, model_path):
        # final=True saves under the plain model name; final=False is
        # expected to append the _epoch_<n> suffix to the filename.
        mocker.patch("os.makedirs")
        mocker.patch("torch.save")
        ModelPreserver.save("test_save_dir","test.model.pth", {}, epoch=10, final=final, suggest_threshold=0.1)
        # the save directory is created and the state dict plus the
        # suggest_threshold are persisted to the parametrized path
        os.makedirs.assert_called_once_with("test_save_dir")
        torch.save.assert_called_once_with({"state_dict":{},"suggest_threshold":0.1}, model_path)

    def test_load(self, mocker):
        # load should delegate straight to torch.load on the given path
        mocker.patch("torch.load")
        ModelPreserver.load("test_path")
        torch.load.assert_called_once_with("test_path")
| 1,425 | 35.564103 | 140 | py |
XFL | XFL-master/test/common/utils/test_config_parser.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.utils.config_parser import replace_variable
def test_replace_variable():
    """[JOB_ID], [NODE_ID], [STAGE_ID] and [STAGE_ID-1] placeholders are
    substituted recursively through nested dicts and lists."""
    template = {
        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
        "model": {
            "name": "vertical_xgboost_[STAGE_ID].json"
        },
        "x": ["x_[STAGE_ID-1]"]
    }
    expected = {
        "path": "/opt/checkpoints/001/a",
        "model": {
            "name": "vertical_xgboost_2.json"
        },
        "x": ["x_1"]
    }
    # argument order is (output, stage_id, job_id, node_id)
    assert replace_variable(template, "2", "001", "a") == expected
| 1,168 | 28.225 | 74 | py |
XFL | XFL-master/test/algorithm/core/test_optimizer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from algorithm.core.optimizer.torch_optimizer import get_optimizer
def test_get_optimizer():
    """get_optimizer('ASGD') must resolve to torch's ASGD optimizer class."""
    optimizer_cls = get_optimizer('ASGD')
    assert issubclass(optimizer_cls, torch.optim.ASGD)
| 801 | 31.08 | 74 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_poisson_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from random import SystemRandom
import pickle
import numpy as np
import pandas as pd
import pytest
import torch
from gmpy2 import powmod
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.framework.horizontal.poisson_regression.assist_trainer import HorizontalPoissonRegressionAssistTrainer
from algorithm.framework.horizontal.poisson_regression.label_trainer import HorizontalPoissonRegressionLabelTrainer
from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.contants import primes_hex
MOV = b"@" # middle of value
EOV = b"&" # end of value
def prepare_data():
    """Generate a synthetic Poisson-regression dataset (5 features, integer
    response) and split it 800/200 into train/test CSVs under
    /opt/dataset/unit_test. Diagnostic prints report the linear predictor
    range before the response is generated."""
    np.random.seed(42)
    # column order matters: it fixes the RNG draw sequence
    df = pd.DataFrame({
        'x0': np.random.random(1000) + 0.5,
        'x1': [0] * 1000,
        'x2': np.random.random(1000) + 1.0,
        'x3': np.random.random(1000),
        'x4': np.random.random(1000) + 1.0
    })
    features = df.values
    lin_theo = np.dot(features, np.array([1, 0, 1, 3, 0]))
    print(f"Max of lin_theo: {lin_theo.max()}")
    print(f"Min of lin_theo: {lin_theo.min()}")
    theore_pred = np.exp(np.dot(features, np.array([1, 0, 1, 3, 0])))
    print(f"Theoretical pred: {theore_pred}")
    print(f"Min theoretical pred: {theore_pred.min()}")
    print(f"Min of case_df: {df.min(axis=0)}")
    # integer response from a (different, milder) log-linear model
    df['y'] = np.rint(
        np.exp(df['x0'] + 1*df['x2'] + 2*df['x3'])
    )
    df = df[['y', 'x0', 'x1', 'x2', 'x3', 'x4']]
    df.head(800).to_csv(
        "/opt/dataset/unit_test/train_data.csv", index=False
    )
    df.tail(200).to_csv(
        "/opt/dataset/unit_test/test_data.csv", index=False
    )
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the stock assist-trainer config for horizontal poisson regression."""
    conf_path = "python/algorithm/config/horizontal_poisson_regression/assist_trainer.json"
    with open(conf_path) as conf_file:
        conf = json.load(conf_file)
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Yield the stock trainer config for horizontal poisson regression."""
    conf_path = "python/algorithm/config/horizontal_poisson_regression/trainer.json"
    with open(conf_path) as conf_file:
        conf = json.load(conf_file)
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create the dataset/checkpoint scratch directories and test data before
    the module's tests, and remove them afterwards."""
    scratch_dirs = ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test")
    for path in scratch_dirs:
        if not os.path.exists(path):
            os.makedirs(path)
    prepare_data()
    yield
    for path in scratch_dirs:
        if os.path.exists(path):
            shutil.rmtree(path)
class TestPoissonRegression:
    """Integration-style test of the horizontal Poisson regression trainers
    with all gRPC communication mocked out."""

    @pytest.mark.parametrize("encryption_method", ['plain', 'otp'])
    def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
        """Run one label trainer and one assist trainer end to end under the
        given aggregation encryption method ('plain' or 'otp')."""
        fed_method = None
        fed_assist_method = None
        # fake a two-trainer federation coordinated by 'assist_trainer'
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        # select the encryption section of the assist config for this run
        if encryption_method == "plain":
            assist_conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        else:
            sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["otp"]
        if encryption_method == "plain":
            fed_method = AggregationPlainLeaf(sec_conf)
            fed_assist_method = AggregationPlainRoot(sec_conf)
        elif encryption_method == "otp":
            # fake the Diffie-Hellman style key exchange of the OTP channel:
            # pick a random exponent and precompute g^a mod p for swap()
            mocker.patch.object(DualChannel, "__init__", return_value=None)
            DualChannel.remote_id = "node-2"
            supported_shortest_exponents = [225, 275, 325, 375, 400]
            shorest_exponent = supported_shortest_exponents[1]
            lower_bound = 1 << (supported_shortest_exponents[1] - 1)
            upper_bound = 1 << shorest_exponent
            primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
            rand_num_generator = SystemRandom()
            a = rand_num_generator.randint(lower_bound, upper_bound)
            g_power_a = powmod(2, a, primes[1])
            mocker.patch.object(DualChannel, "swap",
                                return_value=(1, g_power_a))
            mocker.patch.object(Commu, "node_id", "node-1")
            fed_method = AggregationOTPLeaf(sec_conf)
            fed_assist_method = AggregationOTPRoot(sec_conf)
        print(f"trainer conf: {json.dumps(conf)}")
        # neutralize the dual channel; recv first returns the assist config
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        recv_mocker = mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        # label trainer with fixed, known initial weights
        prt = HorizontalPoissonRegressionLabelTrainer(conf)
        prt.model.linear.weight = torch.nn.parameter.Parameter(
            torch.tensor([[1.0, 0.0, 1.0, 1.0, 0.0]]))
        prt.model.linear.bias = torch.nn.parameter.Parameter(
            torch.tensor([0.0]))
        # assist trainer with different fixed initial weights
        prt_a = HorizontalPoissonRegressionAssistTrainer(assist_conf)
        prt_a.model.linear.weight = torch.nn.parameter.Parameter(
            torch.tensor([[1.0, 0.0, 0.0, 0.0, 0.0]]))
        print(prt_a.model.linear.weight)
        prt_a.model.linear.bias = torch.nn.parameter.Parameter(
            torch.tensor([0.0]))
        # canned wire payloads: early-stop flag and aggregated model params,
        # both pickled with the end-of-value marker appended
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(prt_a.model.state_dict()) + EOV
        print("param plain received")
        print(params_plain_recv)
        print("param plain received loaded")
        print(pickle.loads(params_plain_recv))
        params_send = fed_method._calc_upload_value(
            prt.model.state_dict(), len(prt.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        print(f"Params collect: {params_collect}")
        print(f"Loaded params: {pickle.loads(params_collect)}")
        print()
        # agg_otp = fed_assist_method._calc_aggregated_params(
        # list(map(lambda x: pickle.loads(x), [params_collect, params_collect])))
        # print(f"agg otp: {agg_otp}")
        agg_otp = prt_a.model.state_dict()
        # recv alternates: odd calls -> early-stop flag, even -> parameters
        # (recv_mocker is rebound below before this closure is ever called)
        def mock_recv(*args, **kwargs):
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationOTPRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        print(prt.model)
        # re-seed the label trainer weights and run a full fit
        prt.model.linear.weight = torch.nn.parameter.Parameter(
            torch.tensor([[1.0, 0.0, 1.0, 1.0, 0.0]]))
        prt.model.linear.bias = torch.nn.parameter.Parameter(
            torch.tensor([0.0]))
        prt.fit()
        print("Successfully tested label trainer")
        # re-seed the assist trainer weights and run its fit as well
        prt_a.model.linear.weight = torch.nn.parameter.Parameter(
            torch.tensor([[1.0, 0.0, 0.0, 0.0, 0.0]]))
        print(prt_a.model.linear.weight)
        prt_a.model.linear.bias = torch.nn.parameter.Parameter(
            torch.tensor([0.0]))
        print(prt_a.model.linear.bias)
        prt_a.fit()
| 8,853 | 39.063348 | 117 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_vgg_jax.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from random import SystemRandom
import pickle
import numpy as np
import pytest
import service.fed_config
from algorithm.core.horizontal.aggregation.aggregation_otp import AggregationOTPRoot, AggregationOTPLeaf
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.contants import primes_hex
from gmpy2 import powmod
MOV = b"@" # middle of value
EOV = b"&" # end of value
def prepare_data():
    """Generate a deterministic synthetic image dataset and persist .npz splits.

    Seeds NumPy so every run produces identical data, then writes the first
    32 samples as the test split and the last 32 as the train split.
    """
    np.random.seed(0)
    images = np.random.randint(256, size=(64, 32, 32, 3))
    labels = np.random.randint(10, size=64)
    np.savez("/opt/dataset/unit_test/test_data.npz",
             data=images[:32], labels=labels[:32])
    np.savez("/opt/dataset/unit_test/train_data.npz",
             data=images[32:64], labels=labels[32:64])
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the VGG-JAX assist-trainer config, redirected at unit-test paths.

    Shrinks batch size and epoch count so the test finishes quickly.
    """
    with open("python/algorithm/config/horizontal_vgg_jax/assist_trainer.json") as conf_file:
        config = json.load(conf_file)
    valset = config["input"]["valset"][0]
    valset["path"] = "/opt/dataset/unit_test"
    valset["name"] = "test_data.npz"
    for section in ("model", "metrics", "evaluation"):
        config["output"][section]["path"] = "/opt/checkpoints/unit_test"
    config["model_info"]["config"]["layers"] = "unit_test"
    config["train_info"]["params"]["batch_size"] = 8
    config["train_info"]["params"]["global_epoch"] = 2
    yield config
@pytest.fixture()
def get_trainer_conf():
    """Yield the VGG-JAX trainer config, redirected at unit-test paths.

    Shrinks batch size and epoch count so the test finishes quickly.
    """
    with open("python/algorithm/config/horizontal_vgg_jax/trainer.json") as conf_file:
        config = json.load(conf_file)
    trainset = config["input"]["trainset"][0]
    trainset["path"] = "/opt/dataset/unit_test"
    trainset["name"] = "train_data.npz"
    for section in ("metrics", "evaluation"):
        config["output"][section]["path"] = "/opt/checkpoints/unit_test"
    config["model_info"]["config"]["layers"] = "unit_test"
    config["train_info"]["params"]["batch_size"] = 8
    config["train_info"]["params"]["global_epoch"] = 2
    yield config
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create scratch dataset/checkpoint dirs and test data; remove them afterwards."""
    for workdir in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        os.makedirs(workdir, exist_ok=True)
    prepare_data()
    yield
    for workdir in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if os.path.exists(workdir):
            shutil.rmtree(workdir)
class TestVgg:
    """End-to-end flow test for the horizontal VGG (JAX) trainers.

    All gRPC channels and aggregation roots are mocked so both the label
    trainer and the assist trainer run inside a single process.
    """
    @pytest.mark.parametrize("encryption_method", ['plain'])  # ['otp', 'plain'] — otp is too slow for CI
    def test_trainer(self, get_trainer_conf, get_assist_trainer_conf, encryption_method, mocker):
        """Run fit() on label trainer and assist trainer with mocked communication."""
        fed_method = None
        fed_assist_method = None
        # Make this process look like the assist-trainer node to the comm layer.
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "node_id", 'node-1'
        )
        if encryption_method == "plain":
            conf["train_info"]["params"]["aggregation_config"]["encryption"] = {"plain": {}}
            assist_conf["train_info"]["params"]["aggregation_config"]["encryption"] = {"plain": {}}
        sec_conf = conf["train_info"]["params"]["aggregation_config"]["encryption"]
        # NOTE(review): this mock_recv is shadowed by the redefinition below;
        # only the later definition is ever installed.
        def mock_recv(*args, **kwargs):
            return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        if encryption_method == "plain":
            fed_method = AggregationPlainLeaf(sec_conf)
            fed_assist_method = AggregationPlainRoot(sec_conf)
        elif encryption_method == "otp":
            mocker.patch.object(DualChannel, "__init__", return_value=None)
            # dc = DualChannel(name="otp_diffie_hellman", ids=['node-1', 'node-2'])
            DualChannel.remote_id = "node-2"
            # Simulate one leg of the Diffie-Hellman exchange so the OTP
            # aggregator can derive a shared key without real networking.
            supported_shortest_exponents = [225, 275, 325, 375, 400]
            shorest_exponent = supported_shortest_exponents[1]
            lower_bound = 1 << (supported_shortest_exponents[1] - 1)
            upper_bound = 1 << shorest_exponent
            primes = [int(p.replace(' ', ''), 16) for p in primes_hex]
            rand_num_generator = SystemRandom()
            a = rand_num_generator.randint(lower_bound, upper_bound)
            g_power_a = powmod(2, a, primes[1])
            mocker.patch.object(DualChannel, "swap", return_value=(1, g_power_a))
            fed_method = AggregationOTPLeaf(sec_conf)
            fed_assist_method = AggregationOTPRoot(sec_conf)
        service.fed_config.FedConfig.stage_config = conf
        # Imported here, after stage_config is assigned, rather than at module top.
        from algorithm.framework.horizontal.vgg_jax.assist_trainer import HorizontalVggJaxAssistTrainer
        from algorithm.framework.horizontal.vgg_jax.label_trainer import HorizontalVggJaxLabelTrainer
        rest = HorizontalVggJaxLabelTrainer(conf)
        rest_a = HorizontalVggJaxAssistTrainer(assist_conf)
        # Pre-compute the payloads that mocked channel recv()s will hand back.
        params_plain_recv = pickle.dumps(rest_a.state_dict) + EOV
        params_send = fed_method._calc_upload_value(rest.state_dict, len(rest.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), [params_collect,params_collect])))
        # Alternate replies based on how many times recv() has been called:
        # calls 1,2 (mod 4) return broadcast params, calls 0,3 return uploads.
        def mock_recv(*args, **kwargs):
            if recv_mocker.call_count % 4 in [1,2]:
                return params_plain_recv
            elif recv_mocker.call_count % 4 in [0,3] :
                return params_collect
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        mocker.patch.object(
            AggregationOTPRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        mocker.patch("service.fed_control._send_progress")
        rest.fit()
        rest_a.fit()
| 7,515 | 41.948571 | 130 | py |
XFL | XFL-master/test/algorithm/framework/horizontal/test_h_nbafl.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import numpy as np
from scipy.stats import normaltest
import pickle
import pandas as pd
import torch
import pytest
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.core.horizontal.aggregation.aggregation_plain import AggregationPlainRoot, AggregationPlainLeaf
from algorithm.framework.horizontal.nbafl.assist_trainer import HorizontalNbaflAssistTrainer
from algorithm.framework.horizontal.nbafl.label_trainer import HorizontalNbaflLabelTrainer
from common.communication.gRPC.python.channel import DualChannel
from common.utils.logger import logger
from common.utils.config_sync import ConfigSynchronizer
from common.communication.gRPC.python.commu import Commu
# Wire-protocol delimiters used when framing pickled payloads over gRPC.
MOV = b"@"  # separator between fields inside one value
EOV = b"&"  # terminator marking the end of a value
def prepare_data():
    """Build a synthetic binary-classification table and write CSV splits.

    y = 1 when x0 + x2 + x3 > 2.5; the first 800 rows become the train
    split, the last 200 the test split.
    """
    features = {
        'x0': np.random.random(1000),
        'x1': [0] * 1000,
        'x2': 2 * np.random.random(1000) + 1.0,
        'x3': 3 * np.random.random(1000) - 1.0,
        'x4': np.random.random(1000),
    }
    frame = pd.DataFrame(features)
    frame['y'] = np.where(
        frame['x0'] + frame['x2'] + frame['x3'] > 2.5, 1, 0)
    frame = frame[['y', 'x0', 'x1', 'x2', 'x3', 'x4']]
    frame.head(800).to_csv(
        "/opt/dataset/unit_test/train_data.csv", index=True
    )
    frame.tail(200).to_csv(
        "/opt/dataset/unit_test/test_data.csv", index=True
    )
@pytest.fixture()
def get_assist_trainer_conf():
    """Yield the stock NbAFL assist-trainer config shipped with the repo."""
    config_path = "python/algorithm/config/horizontal_nbafl/assist_trainer.json"
    with open(config_path) as handle:
        config = json.load(handle)
    yield config
@pytest.fixture()
def get_trainer_conf():
    """Yield the stock NbAFL trainer config shipped with the repo."""
    config_path = "python/algorithm/config/horizontal_nbafl/trainer.json"
    with open(config_path) as handle:
        config = json.load(handle)
    yield config
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create scratch dataset/checkpoint dirs and test data; remove them afterwards."""
    for workdir in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        os.makedirs(workdir, exist_ok=True)
    prepare_data()
    yield
    for workdir in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if os.path.exists(workdir):
            shutil.rmtree(workdir)
class TestNbafl:
    """Tests for the NbAFL (noising-before-aggregation FL) trainers.

    Communication channels are mocked throughout; each test exercises one
    piece of the differential-privacy machinery or the full fit() loop.
    """
    def test_uplink_sigma(self, get_trainer_conf, get_assist_trainer_conf, mocker):
        """Uplink noise sigma computed by the label trainer matches the closed form."""
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        # Config sync over the channel is replaced by the assist config itself.
        recv_mocker = mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        nbafl_t = HorizontalNbaflLabelTrainer(conf)
        logger.info(f"{len(nbafl_t.train_dataloader.dataset)} of data")
        nbafl_t._calc_uplink_sigma({})
        sigma_u = nbafl_t.sigma_u
        # Closed-form value; presumably derived from the configured
        # epsilon/delta and the 800-sample train split — TODO confirm constants.
        expected_sigma_u = np.sqrt(2 * np.log(12.5)) / 80
        logger.info(f"expected uplink sigma: {expected_sigma_u}")
        assert np.abs(sigma_u - expected_sigma_u) < 0.0001
    def test_uplink_add_noise(self, get_trainer_conf, get_assist_trainer_conf, mocker):
        """Noise added to model parameters is Gaussian with std == sigma_u."""
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        nbafl_t = HorizontalNbaflLabelTrainer(conf)
        nbafl_t.sigma_u = 0.1
        diff_list = []
        # Snapshot the original parameters so per-call deltas can be measured.
        orig_params = [
            param.data.detach().clone() for param in nbafl_t.model.parameters()
        ]
        np.random.seed(42)
        torch.manual_seed(42)
        # Add noise three times and pool all parameter deltas.
        for _ in range(3):
            iter_diff_list = []
            nbafl_t._add_noise({})
            for orig_param, new_param in zip(orig_params, nbafl_t.model.parameters()):
                iter_diff_list.extend(torch.flatten(
                    orig_param - new_param.data.detach()
                ).numpy().tolist())
            diff_list.extend(iter_diff_list)
        # D'Agostino normality test: noise should look Gaussian.
        _, pval = normaltest(diff_list)
        logger.info("Normal test p-value: {}".format(pval))
        assert pval > 0.1
        diff_sigma = np.std(diff_list)
        logger.info("Diff std: {}".format(diff_sigma))
        assert np.abs(diff_sigma - nbafl_t.sigma_u) < 0.05
    def test_downlink_sigma(self, get_trainer_conf, get_assist_trainer_conf, mocker):
        """Downlink noise sigma computed by the assist trainer matches the closed form."""
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        # NOTE(review): constructed from the trainer conf, not assist_conf —
        # works because recv() above supplies the assist config; verify intent.
        nbafl_at = HorizontalNbaflAssistTrainer(conf)
        nbafl_at.min_sample_num = 10
        # Closed-form downlink sigma — TODO confirm constants against config.
        expected_sigma_d = 10 * \
            np.sqrt(2 * np.log(12.5)) * np.sqrt((25-8) / 20)
        nbafl_at._calc_downlink_sigma({})
        assert (nbafl_at.sigma_d - expected_sigma_d) < 0.0001
    def test_label_trainer(self, get_trainer_conf, get_assist_trainer_conf, mocker):
        """Full fit() flow for label trainer and assist trainer with plain aggregation."""
        conf = get_trainer_conf
        assist_conf = get_assist_trainer_conf
        mocker.patch.object(Commu, "node_id", "assist_trainer")
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'assist_trainer'
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "get_assist_trainer", return_value='assist_trainer'
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":assist_conf["model_info"], "train_info": assist_conf["train_info"]
            }
        )
        nbafl_t = HorizontalNbaflLabelTrainer(conf)
        nbafl_t.sigma_u = 0.1
        mocker.patch.object(
            ConfigSynchronizer, "__init__", return_value=None
        )
        mocker.patch.object(
            ConfigSynchronizer, "sync", return_value=assist_conf
        )
        mocker.patch("service.fed_control._send_progress")
        nbafl_at = HorizontalNbaflAssistTrainer(assist_conf)
        sec_conf = assist_conf["train_info"]["train_params"]["encryption"]["plain"]
        fed_method = AggregationPlainLeaf(sec_conf)
        fed_assist_method = AggregationPlainRoot(sec_conf)
        # Pre-compute the payloads the mocked channel recv()s will hand back.
        esflag_recv = pickle.dumps(False) + EOV
        params_plain_recv = pickle.dumps(nbafl_at.model.state_dict()) + EOV
        params_send = fed_method._calc_upload_value(nbafl_t.model.state_dict(), len(nbafl_t.train_dataloader.dataset))
        params_collect = pickle.dumps(params_send)
        agg_otp = fed_assist_method._calc_aggregated_params(list(map(lambda x: pickle.loads(x), [params_collect,params_collect])))
        # Odd calls deliver the early-stop flag, even calls the model params.
        def mock_recv(*args, **kwargs):
            if recv_mocker.call_count % 2 == 1:
                return esflag_recv
            else:
                return params_plain_recv
        def mock_agg(*args, **kwargs):
            return agg_otp
        recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            AggregationPlainRoot, "aggregate", side_effect=mock_agg
        )
        nbafl_t.fit()
        nbafl_at.min_sample_num = 10
        mocker.patch.object(
            DualChannel, "recv", return_value=10
        )
        nbafl_at.fit()
| 10,433 | 37.360294 | 130 | py |
XFL | XFL-master/test/algorithm/framework/transfer/test_transfer_logistic_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import torch
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.framework.transfer.logistic_regression.label_trainer import \
TransferLogisticRegressionLabelTrainer
from algorithm.framework.transfer.logistic_regression.trainer import \
TransferLogisticRegressionTrainer
from common.communication.gRPC.python.channel import DualChannel
from common.communication.gRPC.python.commu import Commu
from common.utils.config_sync import ConfigSynchronizer
def prepare_data():
    """Create a 30-feature synthetic dataset split across two parties.

    The labeled party gets y plus x00..x14; the other party gets x15..x29.
    An overlap index (rows 20..59) is saved for the transfer-learning join.
    """
    constant_values = {0: 0, 10: 1, 20: 2}  # a few columns hold fixed values
    features = {}
    for idx in range(30):
        name = f"x{idx:0>2d}"
        if idx in constant_values:
            features[name] = [constant_values[idx]] * 100
        else:
            features[name] = np.random.random(100)
    frame = pd.DataFrame(features)
    frame["y"] = np.where(frame["x00"] + frame["x10"] + frame["x20"] + frame["x29"] > 3.5, 1, 0)
    labeled_cols = ["y"] + [f"x{i:0>2d}" for i in range(15)]
    unlabeled_cols = [f"x{i:0>2d}" for i in range(15, 30)]
    frame[labeled_cols].head(60).to_csv(
        "/opt/dataset/unit_test/train_labeled.csv", index=True
    )
    frame[labeled_cols].tail(20).to_csv(
        "/opt/dataset/unit_test/test_labeled.csv", index=True
    )
    frame[unlabeled_cols].head(80).tail(60).to_csv(
        "/opt/dataset/unit_test/train_1.csv", index=True
    )
    frame[unlabeled_cols].tail(20).to_csv(
        "/opt/dataset/unit_test/test_1.csv", index=True
    )
    overlap_index = np.linspace(20, 59, 40, dtype=np.int16)
    np.save("/opt/dataset/unit_test/overlap_index.npy", overlap_index)
@pytest.fixture()
def get_label_trainer_conf():
    """Yield the stock transfer-LR label-trainer config shipped with the repo."""
    config_path = "python/algorithm/config/transfer_logistic_regression/label_trainer.json"
    with open(config_path) as handle:
        config = json.load(handle)
    yield config
@pytest.fixture()
def get_trainer_conf():
    """Yield the stock transfer-LR trainer config shipped with the repo."""
    config_path = "python/algorithm/config/transfer_logistic_regression/trainer.json"
    with open(config_path) as handle:
        config = json.load(handle)
    yield config
@pytest.fixture(scope="module", autouse=True)
def env():
    """Create scratch dataset/checkpoint dirs and test data; remove them afterwards."""
    for workdir in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        os.makedirs(workdir, exist_ok=True)
    prepare_data()
    yield
    for workdir in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if os.path.exists(workdir):
            shutil.rmtree(workdir)
class TestTransferLogisticRegression:
    """Flow tests for the transfer logistic-regression label trainer and trainer.

    All channel traffic is mocked; peer replies are random tensors with the
    shapes the protocol expects.
    """
    @pytest.mark.parametrize("encryption_method", ["plain"])
    def test_label_trainer(self, get_label_trainer_conf, get_trainer_conf, encryption_method, mocker):
        """End-to-end flow test for the label trainer."""
        conf = get_label_trainer_conf
        Commu.node_id = "node-1"
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'node-1'
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            FedConfig, "get_trainer", return_value=["node-2"]
        )
        mocker.patch.object(FedConfig, "node_id", 'node-1')
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(DualChannel, "send", return_value=0)
        # Training-phase recv()s return batch tensors; later calls return
        # prediction-phase tensors. Relies on lrlt being bound before use.
        def mock_recv():
            if mock_channel_recv.call_count <= lrlt.global_epoch * lrlt.local_epoch:
                return (torch.rand(40, 5), torch.rand(40, 5, 5), torch.rand(40, 5))
            else:
                return torch.rand(20, 5)
        mock_channel_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_recv
        )
        mocker.patch.object(
            ConfigSynchronizer, "__init__", return_value=None
        )
        mocker.patch.object(
            ConfigSynchronizer, "sync", return_value=conf
        )
        lrlt = TransferLogisticRegressionLabelTrainer(conf)
        lrlt.fit()
        # load pretrained model
        lrlt.pretrain_model_path = "/opt/checkpoints/unit_test"
        lrlt.pretrain_model_name = "transfer_logitstic_regression_0.model"
        lrlt._set_model()
    @pytest.mark.parametrize("encryption_method", ["plain"])
    def test_trainer(self, get_trainer_conf, get_label_trainer_conf, encryption_method, mocker):
        """End-to-end flow test for the (unlabeled) trainer."""
        conf = get_trainer_conf
        conf_l = get_label_trainer_conf
        Commu.trainer_ids = ['node-1', 'node-2']
        Commu.scheduler_id = 'node-1'
        mocker.patch.object(Commu, "node_id", "node-1")
        mocker.patch.object(
            FedConfig, "get_label_trainer", return_value=['node-1', 'node-2']
        )
        mocker.patch.object(
            FedConfig, "node_id", 'node-1'
        )
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        # Config sync over the channel returns the label-trainer config.
        recv_mocker = mocker.patch.object(
            DualChannel, "recv",
            return_value = {
                "model_info":conf_l["model_info"], "train_info": conf_l["train_info"]
            }
        )
        lrt = TransferLogisticRegressionTrainer(conf)
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        mocker.patch.object(
            DualChannel, "recv", return_value=(torch.rand(40, 5, 5), torch.rand(40, 5), torch.rand(40, 1))
        )
        lrt.fit()
        # load pretrained model
        lrt.pretrain_model_path = "/opt/checkpoints/unit_test"
        lrt.pretrain_model_name = "transfer_logitstic_regression_0.model"
        lrt._set_model()
| 6,386 | 36.133721 | 106 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_kmeans.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import pickle
import shutil
import numpy as np
import pandas as pd
import pytest
import torch
from sklearn.metrics import davies_bouldin_score
import service.fed_config
from algorithm.framework.vertical.kmeans.assist_trainer import \
VerticalKmeansAssistTrainer
from algorithm.framework.vertical.kmeans.table_agg_base import (
TableAggregatorAbstractAssistTrainer, TableAggregatorAbstractTrainer)
from algorithm.framework.vertical.kmeans.table_agg_otp import (
TableAggregatorOTPAssistTrainer, TableAggregatorOTPTrainer)
from algorithm.framework.vertical.kmeans.table_agg_plain import (
TableAggregatorPlainAssistTrainer, TableAggregatorPlainTrainer)
from algorithm.framework.vertical.kmeans.trainer import VerticalKmeansTrainer
from algorithm.framework.vertical.kmeans.label_trainer import VerticalKmeansLabelTrainer
from common.communication.gRPC.python.channel import (
BroadcastChannel, DualChannel)
from common.communication.gRPC.python.commu import Commu
from common.crypto.key_agreement.diffie_hellman import DiffieHellman
def prepare_data():
    """Write synthetic 5-cluster data for guest (labeled) and host parties.

    x0 is tightly grouped around the cluster label; x1 is pure noise.
    Variants with and without an id column are written for both parties.
    """
    labels = [0, 1, 2, 3, 4] * 200
    np.random.shuffle(labels)
    frame = pd.DataFrame({
        "y": labels,
        "x0": np.random.random(1000) * 0.2 + np.array(labels),
        "x1": np.random.random(1000)
    })
    frame[['y', 'x0', 'x1']].to_csv(
        "/opt/dataset/unit_test/train_guest.csv", index=True, index_label='id'
    )
    frame[['x0', 'x1']].to_csv(
        "/opt/dataset/unit_test/train_host.csv", index=True, index_label='id'
    )
    frame[['x0', 'x1']].to_csv(
        "/opt/dataset/unit_test/train_guest_without_id.csv", index=False
    )
    frame[['x0', 'x1']].to_csv(
        "/opt/dataset/unit_test/train_host_without_id.csv", index=False
    )
# Config payload that the assist trainer would normally push to each trainer
# over the config-sync channel; mocked recv()s in the tests return this.
mock_config = {
    "train_info": {
        "train_params": {
            "encryption": {
                "otp": {
                    "key_bitlength": 128,
                    "data_type": "torch.Tensor",
                    "key_exchange": {"key_bitlength": 3072, "optimized": True},
                    "csprng": {"name": "hmac_drbg", "method": "sha512"},
                }
            },
            "k": 5,
            "max_iter": 50,
            "tol": 1e-6,
            "random_seed": 50,
        }
    }
}
@pytest.fixture(scope="module", autouse=True)
def env():
    """Install fake federation identity and scratch dirs; tear them down afterwards."""
    Commu.node_id = "node-1"
    Commu.trainer_ids = ['node-1', 'node-2']
    Commu.scheduler_id = 'assist_trainer'
    for workdir in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        os.makedirs(workdir, exist_ok=True)
    prepare_data()
    yield
    for workdir in ("/opt/dataset/unit_test", "/opt/checkpoints/unit_test"):
        if os.path.exists(workdir):
            shutil.rmtree(workdir)
@pytest.fixture()
def get_label_trainer_conf():
    """Yield the kmeans label-trainer config, redirected to unit-test paths."""
    with open("python/algorithm/config/vertical_kmeans/label_trainer.json") as conf_file:
        config = json.load(conf_file)
    trainset_entry = config["input"]["trainset"][0]
    trainset_entry["path"] = "/opt/dataset/unit_test"
    trainset_entry["name"] = "train_guest.csv"
    config["output"]["path"] = "/opt/checkpoints/unit_test"
    yield config
@pytest.fixture()
def get_trainer_conf():
    """Yield the kmeans trainer config, redirected to unit-test paths."""
    with open("python/algorithm/config/vertical_kmeans/trainer.json") as conf_file:
        config = json.load(conf_file)
    trainset_entry = config["input"]["trainset"][0]
    trainset_entry["path"] = "/opt/dataset/unit_test"
    trainset_entry["name"] = "train_host.csv"
    config["output"]["path"] = "/opt/checkpoints/unit_test"
    yield config
@pytest.fixture()
def get_scheduler_conf():
    """Yield the kmeans assist-trainer (scheduler) config unchanged."""
    with open("python/algorithm/config/vertical_kmeans/assist_trainer.json") as conf_file:
        config = json.load(conf_file)
    yield config
class TestVerticalKmeansTrainer:
    def test_init_method(self, mocker, get_label_trainer_conf, get_trainer_conf, get_scheduler_conf):
        """Exercise kmeans++ center initialization for all three roles."""
        # Mock out channel/aggregator construction so no real comms happen.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "send", return_value=None
        )
        conf = copy.deepcopy(get_label_trainer_conf)
        # First recv() delivers the synced config; later calls return None.
        def mock_func(*args, **kwargs):
            if mock_dual_recv.call_count == 1:
                return mock_config
        mock_dual_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        label_trainer = VerticalKmeansLabelTrainer(conf)
        mocker.patch.object(
            DualChannel, "recv", return_value=np.array([1.0] * 1000)
        )
        label_trainer.init = "kmeans++"
        label_trainer.init_centers()
        # Assist trainer: scheduler conf with the mocked train_info spliced in.
        conf = copy.deepcopy(get_scheduler_conf)
        conf["train_info"] = mock_config["train_info"]
        assist_trainer = VerticalKmeansAssistTrainer(conf)
        mocker.patch.object(
            TableAggregatorOTPAssistTrainer, "aggregate", return_value=torch.Tensor(list(range(100))).reshape(20, 5)
        )
        assist_trainer.init = "kmeans++"
        assist_trainer.init_centers()
        # Plain (feature-only) trainer.
        conf = copy.deepcopy(get_trainer_conf)
        trainer = VerticalKmeansTrainer(conf)
        mocker.patch.object(
            DualChannel, "recv", return_value=[1, 2, 3, 4, 5]
        )
        trainer.init = "kmeans++"
        trainer.init_centers()
    def test_table_agg_otp(self, mocker):
        """Drive OTP table aggregation with a mocked Diffie-Hellman shared secret."""
        pd_table = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
        table = torch.tensor(pd_table.values)
        # Fixed byte string standing in for the DH shared-secret material.
        def mock_diffiehellman():
            ret = b"\xf0F\xc6\x1dJ(\xb0\x19\xc3j6$bw\xcb\xad\xe1\xdd?\x1c\xd728\xa9\x0eD\xf4\x95\xd4)*," \
                  b"@Sd\x897\xb4N7GG\x17\x01\xa6#-$]w3\xc2x\x97\x045\xb4\xd8c\xa9\xa4\x9f\xdb\x1a?\xd0\x80\xd7=\x02" \
                  b"\x07\xb0A\xeaQ\x17\x89W:\x1a\x85.\xea\x19O\x8b\xe8\x83\x04\xf4\xb4\\S~\xff1\x8cT\xeb\x99x9;\xb9" \
                  b"\x90\x00\x00\x96\x91A\x1d\xe8\xa0l6\xf1\xc1P\xf4\x14\xf2\xd5\xceg}\xc04e/l3^o\xd4\xe0\tC7\xd7\xaa" \
                  b"&\xfa4\x1378`\xb9\xd5\t\x0ez\xe3\x80\xde\r;\x8dI\x80\\\xea\xdf\xce\xe3a\xd2\xe3\x88\nm`\xce7" \
                  b"\xf14CUe\xac]\x93\xc5\x86\xed\x19K{" \
                  b"x\x93\x98\xdd\xb2\x1aS\xb5q\x071\xb0\x0b'x\x16\xfcE\xccw\x11U@\x9aB\xa7\x1a\xbb\x80\xd3tn@\xc6\x1a" \
                  b"\xc31Y\xe4\xe0\x07\x83\xca\xecW\xa0\x08\x12\x93\xc3g\xad\xadF\x8c\xcd\x105\xe6\x07\x0f\xc9\xa1\xe9" \
                  b"\xee\xf9M\x16\xf8b\xb5]x\x0b3\x11\xafn\xa2w\xb4]1\x9f\xb3\xa5\xba/\xd9R\xa8*\xddi\x83\x1bg\xde\xf2" \
                  b"\xcd\xc7\xb7 m\xb28`\xe5UH;\x1b\xc8Mq\xa8\x03\xa78x\x01\xb3\x95\x81r.\x07\\]\xc1\x1d\xa5\xff\x99" \
                  b"\x8b\xd0\xab\\\\<\x03\x1co\x08+\x964*\t\x80v\xd6m2:es\x0f\xa2\x1at\x0b-\x8aN\xa3\x0bu\xa9XoN\xcd" \
                  b"\xd3{\x10\x8dO\x7f\xba\x99\n\x99jHqL\xa7aV\r\xf7\x1d\xde\xe8\x18 "
            return ret
        mocker.patch.object(
            BroadcastChannel, "send", return_value=0
        )
        mocker.patch.object(
            BroadcastChannel, "collect", return_value=[pd_table, pd_table]
        )
        encryption = {
            "otp": {
                "key_bitlength": 128,
                "data_type": "torch.Tensor",
                "key_exchange": {
                    "key_bitlength": 3072,
                    "optimized": True
                },
                "csprng": {
                    "name": "hmac_drbg",
                    "method": "sha512"
                }
            }
        }
        mocker.patch.object(
            DiffieHellman, "exchange", return_value=mock_diffiehellman()
        )
        table_trainer = TableAggregatorOTPTrainer(
            sec_conf=encryption["otp"], trainer_ids=['node-1', 'node-2'])
        table_trainer.send(table)
        # Sending a None table is a no-op.
        assert table_trainer.send(None) is None
        mocker.patch.object(
            BroadcastChannel, "collect", return_value=[pd_table.to_numpy(), pd_table.to_numpy()]
        )
        table_scheduler = TableAggregatorOTPAssistTrainer(
            sec_conf=encryption["otp"], trainer_ids=['node-1', 'node-2'])
        table_scheduler.aggregate()
    @pytest.mark.parametrize("computing_engine", ["local", "spark"])
    def test_label_trainer(self, get_label_trainer_conf, computing_engine, mocker):
        """Full fit() flow for the label trainer; scheduler replies computed locally."""
        conf = get_label_trainer_conf
        conf["computing_engine"] = computing_engine
        # Mock functions needed during class init so channel setup cannot fail.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        # First recv() delivers the synced config; later calls return None.
        def mock_func(*args, **kwargs):
            if mock_dual_recv.call_count == 1:
                return mock_config
        mock_dual_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "send"
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["trainer-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["trainer-2"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value="scheduler"
        )
        # Instantiate the trainer under test.
        vkt = VerticalKmeansLabelTrainer(conf)
        # Emulate the scheduler-side replies using vkt's own local state.
        def mock_get_cluster():
            return VerticalKmeansAssistTrainer.get_cluster(vkt.dist_table)
        def mock_converged_flag():
            return bool(vkt.local_tol < vkt.tol)
        mocker.patch.object(
            vkt.channels.get("init_center", DualChannel), "recv",
            return_value=np.random.choice(1000, vkt.k, replace=False)
        )
        mocker.patch.object(
            vkt.channels["cluster_result"], "recv", side_effect=mock_get_cluster
        )
        mocker.patch.object(
            vkt.channels["converged_flag"], "recv", side_effect=mock_converged_flag
        )
        vkt.fit()
        # Training should converge normally and persist a model file.
        assert vkt.is_converged
        assert os.path.exists(
            "/opt/checkpoints/unit_test/vertical_kmeans_[STAGE_ID].model")
        with open("/opt/checkpoints/unit_test/vertical_kmeans_[STAGE_ID].model", "rb") as f:
            model = json.load(f)
        assert model["k"] == vkt.k
        assert model["iter"] <= vkt.max_iter
        assert model["is_converged"]
        assert model["tol"] == vkt.tol
        assert len(model["cluster_centers"]) == vkt.k
    def test_label_trainer_only_features(self, get_label_trainer_conf, mocker):
        """Same flow as test_label_trainer but the input has no id/label columns."""
        conf = get_label_trainer_conf
        conf["input"]["trainset"][0]["has_id"] = False
        conf["input"]["trainset"][0]["has_label"] = False
        # Mock functions needed during class init so channel setup cannot fail.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        # First recv() delivers the synced config; later calls return None.
        def mock_func(*args, **kwargs):
            if mock_dual_recv.call_count == 1:
                return mock_config
        mock_dual_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "send"
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["trainer-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["trainer-2"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value="scheduler"
        )
        # Instantiate the trainer under test.
        vkt = VerticalKmeansLabelTrainer(conf)
        # Emulate the scheduler-side replies using vkt's own local state.
        def mock_get_cluster():
            return VerticalKmeansAssistTrainer.get_cluster(vkt.dist_table)
        def mock_converged_flag():
            return bool(vkt.local_tol < vkt.tol)
        mocker.patch.object(
            vkt.channels.get("init_center", DualChannel), "recv", return_value=np.random.choice(1000, vkt.k, replace=False)
        )
        mocker.patch.object(
            vkt.channels["cluster_result"], "recv", side_effect=mock_get_cluster
        )
        mocker.patch.object(
            vkt.channels["converged_flag"], "recv", side_effect=mock_converged_flag
        )
        vkt.fit()
        # Training should converge normally and persist a model file.
        assert vkt.is_converged
        assert os.path.exists(
            "/opt/checkpoints/unit_test/vertical_kmeans_[STAGE_ID].model")
        with open("/opt/checkpoints/unit_test/vertical_kmeans_[STAGE_ID].model", "rb") as f:
            model = json.load(f)
        assert model["k"] == vkt.k
        assert model["iter"] <= vkt.max_iter
        assert model["is_converged"]
        assert model["tol"] == vkt.tol
        assert len(model["cluster_centers"]) == vkt.k
    @pytest.mark.parametrize("computing_engine", ["local", "spark"])
    def test_trainer(self, get_trainer_conf, computing_engine, mocker):
        """Full fit() flow for a feature-only trainer, plus output-file checks."""
        conf = get_trainer_conf
        conf["computing_engine"] = computing_engine
        # Mock functions needed during class init so channel setup cannot fail.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        # First recv() delivers the synced config; later calls return None.
        def mock_func(*args, **kwargs):
            if mock_dual_recv.call_count == 1:
                return mock_config
        mock_dual_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "send"
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["trainer-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["trainer-2"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value="scheduler"
        )
        # Instantiate the trainer under test.
        vkt = VerticalKmeansTrainer(get_trainer_conf)
        # Provide the initial cluster centers the scheduler would broadcast.
        init_centers = np.random.choice(1000, vkt.k, replace=False)
        mocker.patch.object(
            vkt.channels.get("init_center", DualChannel), "recv", return_value=init_centers
        )
        # Emulate the scheduler-side replies using vkt's own local state.
        def mock_get_cluster():
            return VerticalKmeansAssistTrainer.get_cluster(vkt.dist_table)
        def mock_converged_flag():
            return bool(vkt.local_tol < vkt.tol)
        mocker.patch.object(
            vkt.channels["cluster_result"], "recv", side_effect=mock_get_cluster
        )
        mocker.patch.object(
            vkt.channels["converged_flag"], "recv", side_effect=mock_converged_flag
        )
        vkt.fit()
        # Training should converge normally and persist a model file.
        assert vkt.is_converged
        assert os.path.exists(
            "/opt/checkpoints/unit_test/vertical_kmeans_[STAGE_ID].model")
        with open("/opt/checkpoints/unit_test/vertical_kmeans_[STAGE_ID].model", "rb") as f:
            model = json.load(f)
        assert model["k"] == vkt.k
        assert model["iter"] <= vkt.max_iter
        assert model["is_converged"]
        assert model["tol"] == vkt.tol
        assert len(model["cluster_centers"]) == vkt.k
        # Check the cluster-assignment output file.
        assert os.path.exists(
            "/opt/checkpoints/unit_test/cluster_result_[STAGE_ID].csv")
        if computing_engine == "local":
            df = pd.read_csv(
                "/opt/checkpoints/unit_test/cluster_result_[STAGE_ID].csv")
            assert (df["id"] == vkt.train_ids).all()
            # assert (df["cluster_label"] == vkt.cluster_result).all()
    @pytest.mark.parametrize("computing_engine", ["local", "spark"])
    def test_scheduler(self, get_scheduler_conf, computing_engine, mocker):
        """End-to-end fit() flow for the assist-trainer (scheduler) side of vertical k-means."""
        conf = get_scheduler_conf
        conf["computing_engine"] = computing_engine
        # Mock the functions used during class initialization so that no real
        # communication channel is established.
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        def mock_func(*args, **kwargs):
            # First recv delivers the synchronized config; later calls return None.
            if mock_dual_recv.call_count == 1:
                return mock_config
        mock_dual_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            TableAggregatorOTPAssistTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["trainer-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["trainer-2"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value="scheduler"
        )
        # Instantiate the assist trainer under test.
        vks = VerticalKmeansAssistTrainer(conf)
        def mock_dual_recv():
            # First two calls deliver the (n_samples, n_features) shape;
            # afterwards deliver the tolerance value.
            if mock_recv.call_count > 2:
                return 1.0
            else:
                return 1000, 2
        # Mock the values sent by the trainers:
        # tolerance
        mock_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_dual_recv
        )
        # distance table
        dist_table = torch.tensor(np.random.random((1000, vks.k)))
        # center dist
        center_dist = torch.tensor(np.random.random(vks.k * (vks.k - 1)))
        def mock_aggregate():
            # Alternate between the sample-to-center distance table and the
            # pairwise center-distance vector, matching the fit() call order.
            if mock_agg.call_count > 1 and mock_agg.call_count % 2 == 1:
                return center_dist
            else:
                return dist_table
        mock_agg = mocker.patch.object(
            vks.dist_table_agg_executor, "aggregate", side_effect=mock_aggregate
        )
        vks.fit()
    def test_calc_dbi(self, get_scheduler_conf, get_label_trainer_conf, mocker):
        """Check the Davies-Bouldin index computed by the assist trainer against sklearn's
        davies_bouldin_score, including the case where one cluster is empty."""
        # Mock the functions used during class initialization so that no real
        # communication channel is established.
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=0
        )
        def mock_func(*args, **kwargs):
            return mock_config
        mocker.patch.object(
            DualChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPAssistTrainer, "__init__", return_value=None
        )
        mocker.patch.object(
            TableAggregatorOTPTrainer, "send"
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["trainer-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["trainer-2"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_assist_trainer", return_value="scheduler"
        )
        vkt = VerticalKmeansTrainer(get_label_trainer_conf)
        vks = VerticalKmeansAssistTrainer(get_scheduler_conf)
        init_centers = np.random.choice(1000, vkt.k, replace=False)
        mocker.patch.object(
            vkt.channels.get("init_center", DualChannel), "recv", return_value=init_centers
        )
        # Check the metric: run one clustering round locally and compare DBI.
        center_ids = vkt.init_centers()
        cluster_centers = vkt.train_features.iloc[center_ids]
        dist_table = vkt.distance_table(cluster_centers)
        cluster_result = vks.get_cluster(dist_table)
        centers = vkt.calc_centers(cluster_centers, cluster_result)
        center_dist = vkt.distance_between_centers(centers)
        mocker.patch.object(
            vks.dist_table_agg_executor, "aggregate", return_value=center_dist
        )
        vks.cluster_count_list = vks.calc_cluster_count(cluster_result)
        dist_table = vkt.distance_table(centers)
        vks.calc_dbi(dist_table, cluster_result, 0)
        dbi_score = davies_bouldin_score(
            vkt.train_features.to_numpy(), cluster_result)
        np.testing.assert_almost_equal(vks.DBI, dbi_score, 3)
        # Verify the DBI computation when one cluster ends up empty:
        # relabel every sample from cluster 1 into cluster 0.
        cluster_result_missing = []
        for _ in cluster_result:
            if _ != 1:
                cluster_result_missing.append(_)
            else:
                cluster_result_missing.append(0)
        # Recompute the cluster center coordinates.
        centers = vkt.calc_centers(cluster_centers, cluster_result_missing)
        center_dist = vkt.distance_between_centers(centers)
        mocker.patch.object(
            vks.dist_table_agg_executor, "aggregate", return_value=center_dist
        )
        vks.cluster_count_list = vks.calc_cluster_count(cluster_result_missing)
        dist_table = vkt.distance_table(centers)
        vks.calc_dbi(dist_table, cluster_result_missing, 1)
        dbi_score = davies_bouldin_score(
            vkt.train_features.to_numpy(), cluster_result_missing)
        np.testing.assert_almost_equal(vks.DBI, dbi_score, 3)
def test_table_agg_base(self):
table_trainer = TableAggregatorAbstractTrainer()
table_trainer.send(pd.DataFrame({"x": [1, 2, 3]}))
table_scheduler = TableAggregatorAbstractAssistTrainer()
table_scheduler.aggregate()
def test_table_agg_plain(self, mocker):
pd_table = pd.DataFrame({"x": [1, 2, 3]})
mocker.patch.object(
BroadcastChannel, "send", return_value=0
)
mocker.patch.object(
BroadcastChannel, "collect", return_value=[pd_table, pd_table]
)
table_trainer = TableAggregatorPlainTrainer(sec_conf={"plain": {}})
table_trainer.send(pd_table)
table_scheduler = TableAggregatorPlainAssistTrainer(sec_conf={
"plain": {}})
aggregated_table = table_scheduler.aggregate()
assert aggregated_table["x"].iloc[2] == 6
| 23,214 | 35.330203 | 123 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_logistic_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import tenseal as ts
import torch
from google.protobuf import json_format
import service.fed_config
import service.fed_node
import service.fed_control
from algorithm.framework.vertical.logistic_regression.label_trainer import \
VerticalLogisticRegressionLabelTrainer
from algorithm.framework.vertical.logistic_regression.trainer import \
VerticalLogisticRegressionTrainer
from common.communication.gRPC.python.channel import BroadcastChannel
from common.crypto.paillier.paillier import Paillier
from common.communication.gRPC.python.commu import Commu
from common.model.python.linear_model_pb2 import LinearModel
def prepare_data():
    """Write synthetic guest/host train and test CSV splits for the unit tests.

    The guest party holds the label plus x0-x2; the host party holds x3-x4.
    The first 80 rows form the train split and the last 20 the test split.
    """
    features = pd.DataFrame({
        'x0': np.random.random(1000),
        'x1': [0] * 1000,
        'x2': 2 * np.random.random(1000) + 1.0,
        'x3': 3 * np.random.random(1000) - 1.0,
        'x4': np.random.random(1000)
    })
    # Binary label determined by x0 + x2 + x3 only (x1, x4 are noise).
    features['y'] = np.where(
        features['x0'] + features['x2'] + features['x3'] > 2.5, 1, 0)
    guest = features[['y', 'x0', 'x1', 'x2']]
    host = features[['x3', 'x4']]
    guest.head(80).to_csv("/opt/dataset/unit_test/train_guest.csv", index=True)
    guest.tail(20).to_csv("/opt/dataset/unit_test/test_guest.csv", index=True)
    host.head(80).to_csv("/opt/dataset/unit_test/train_host.csv", index=True)
    host.tail(20).to_csv("/opt/dataset/unit_test/test_host.csv", index=True)
# Training configuration that the label trainer broadcasts to the trainers;
# the tests feed this dict through mocked BroadcastChannel.recv calls.
config_sync = {
    "train_info": {
        "interaction_params": {
            "save_frequency": -1,
            "write_training_prediction": True,
            "write_validation_prediction": True,
            "echo_training_metrics": True
        },
        "train_params": {
            "global_epoch": 10,
            "batch_size": 2048,
            "encryption": {
                "ckks": {
                    "poly_modulus_degree": 8192,
                    "coeff_mod_bit_sizes": [
                        60,
                        40,
                        40,
                        60
                    ],
                    "global_scale_bit_size": 40
                }
            },
            "optimizer": {
                "lr": 0.01,
                "p": 2,
                "alpha": 1e-4
            },
            "early_stopping": {
                "key": "acc",
                "patience": 10,
                "delta": 0
            },
            "random_seed": None
        }
    }
}
@pytest.fixture()
def get_label_trainer_conf():
    """Load the stock label-trainer config and point its dataset/output paths
    at the unit-test scratch directories."""
    with open("python/algorithm/config/vertical_logistic_regression/label_trainer.json") as f:
        conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = "/opt/dataset/unit_test"
        conf["input"]["trainset"][0]["name"] = "train_guest.csv"
        conf["input"]["valset"][0]["path"] = "/opt/dataset/unit_test"
        conf["input"]["valset"][0]["name"] = "test_guest.csv"
        conf["output"]["path"] = "/opt/checkpoints/unit_test"
        conf["train_info"]["interaction_params"]["save_frequency"] = -1
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Load the stock trainer config and point its dataset/output paths
    at the unit-test scratch directories."""
    with open("python/algorithm/config/vertical_logistic_regression/trainer.json") as f:
        conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = "/opt/dataset/unit_test"
        conf["input"]["trainset"][0]["name"] = "train_host.csv"
        conf["input"]["valset"][0]["path"] = "/opt/dataset/unit_test"
        conf["input"]["valset"][0]["name"] = "test_host.csv"
        conf["output"]["path"] = "/opt/checkpoints/unit_test"
        # conf["train_info"]["interaction_params"]["save_frequency"] = -1
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Module-wide setup/teardown: fake federation identities, scratch
    dataset/checkpoint directories, and the synthetic CSV splits."""
    Commu.node_id = "node-1"
    Commu.trainer_ids = ['node-1', 'node-2']
    Commu.scheduler_id = 'assist_trainer'
    service.fed_node.FedNode.node_name = 'node-1'
    if not os.path.exists("/opt/dataset/unit_test"):
        os.makedirs("/opt/dataset/unit_test")
    if not os.path.exists("/opt/checkpoints/unit_test"):
        os.makedirs("/opt/checkpoints/unit_test")
    # if not os.path.exists("/opt/config/unit_test"):
    # 	os.makedirs("/opt/config/unit_test")
    prepare_data()
    yield
    # Remove the scratch directories after the whole module has run.
    if os.path.exists("/opt/dataset/unit_test"):
        shutil.rmtree("/opt/dataset/unit_test")
    # if os.path.exists("/opt/config/unit_test"):
    # 	shutil.rmtree("/opt/config/unit_test")
    if os.path.exists("/opt/checkpoints/unit_test"):
        shutil.rmtree("/opt/checkpoints/unit_test")
class TestLogisticRegression:
    """End-to-end tests for two-party vertical logistic regression, covering the
    label-trainer and trainer sides under several encryption schemes (ckks /
    paillier / plain) and regularization orders, plus early stopping, model
    save frequency and output-path creation."""
    @pytest.mark.parametrize("encryption_method, p", [
        ("ckks", 1), ("paillier", 1), ("plain", 1), ("other", 1),
        ("ckks", 0), ("paillier", 0), ("plain", 0),
        ("ckks", 2), ("paillier", 2), ("plain", 2), ("ckks", 3)
    ])
    def test_label_trainer(self, get_label_trainer_conf, p, encryption_method, mocker):
        """Full fit() flow for the label trainer; unsupported encryption methods
        and regularization orders must raise."""
        # End-to-end flow test for the label trainer.
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        lrt = VerticalLogisticRegressionLabelTrainer(get_label_trainer_conf)
        mocker.patch.object(
            BroadcastChannel, "scatter", return_value=0
        )
        # Override the encryption config according to the parametrized method
        # (the stock config ships with ckks).
        if encryption_method == "paillier":
            lrt.encryption_config = {
                "paillier": {
                    "key_bit_size": 2048,
                    "precision": 7,
                    "djn_on": True,
                    "parallelize_on": True
                }
            }
        elif encryption_method == "plain":
            lrt.encryption_config = {
                "plain": {}
            }
        elif encryption_method == "ckks":
            pass
        else:
            lrt.encryption_config = {
                encryption_method: {}
            }
        encryption_config = lrt.encryption_config
        if encryption_method == "ckks":
            # Build the CKKS contexts the trainer side would normally create.
            private_context = ts.context(
                ts.SCHEME_TYPE.CKKS,
                poly_modulus_degree=encryption_config[encryption_method]["poly_modulus_degree"],
                coeff_mod_bit_sizes=encryption_config[encryption_method]["coeff_mod_bit_sizes"]
            )
            private_context.generate_galois_keys()
            private_context.generate_relin_keys()
            private_context.global_scale = 1 << encryption_config[
                encryption_method]["global_scale_bit_size"]
            serialized_public_context = private_context.serialize(
                save_public_key=True,
                save_secret_key=False,
                save_galois_keys=True,
                save_relin_keys=True
            )
            public_context = ts.context_from(serialized_public_context)
        elif encryption_method == "paillier":
            private_context = Paillier.context(
                encryption_config[encryption_method]["key_bit_size"],
                djn_on=encryption_config[encryption_method]["djn_on"])
            public_context = private_context.to_public().serialize()
            public_context = Paillier.context_from(public_context)
        def mock_collect(*args, **kwargs):
            # Scripted trainer replies, keyed off the collect() call count:
            # train predictions, encrypted residuals, validation predictions.
            if mock_channel_collect.call_count <= 1:
                return [2]
            if encryption_method == "ckks":
                if mock_channel_collect.call_count > 10:
                    return []
                if mock_channel_collect.call_count % 3 == 2:
                    return [torch.tensor(np.zeros([80, 1]))]
                elif mock_channel_collect.call_count % 3 == 0:
                    pred_residual = torch.tensor(np.random.random(2))
                    enc_pred_residual = ts.ckks_vector(
                        private_context, pred_residual.numpy().flatten())
                    serialized_enc_pred_residual = enc_pred_residual.serialize()
                    pred_residual = ts.ckks_vector_from(
                        public_context, serialized_enc_pred_residual)
                    return [pred_residual.serialize()]
                else:
                    return [torch.tensor(np.zeros([20, 1]))]
            elif encryption_method == "paillier":
                return []
            elif encryption_method == "plain":
                if mock_channel_collect.call_count >= 10:
                    return []
                if mock_channel_collect.call_count % 2 == 0:
                    return [torch.tensor(np.zeros([80, 1]))]
                else:
                    return [torch.tensor(np.zeros([20, 1]))]
            else:
                pass
        mock_channel_collect = mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_collect
        )
        lrt.optimizer_config['p'] = p
        if encryption_method not in ("ckks", "paillier", "plain"):
            msg = f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', 'ckks', 'plain'."
            with pytest.raises(ValueError) as e:
                lrt.fit()
                exec_msg = e.value.args[0]
                assert exec_msg == msg
        elif p not in (0, 1, 2):
            with pytest.raises(NotImplementedError) as e:
                lrt.fit()
                exec_msg = e.value.args[0]
                assert exec_msg == "Regular P={} not implement.".format(p)
        else:
            lrt.fit()
            self.check_model_output()
    @pytest.mark.parametrize("encryption_method, p", [
        ("ckks", 1), ("paillier", 1), ("plain", 1), ("other", 1),
        ("ckks", 0), ("paillier", 0), ("plain", 0),
        ("ckks", 2), ("paillier", 2), ("plain", 2), ("ckks", 3)
    ])
    def test_trainer(self, get_trainer_conf, encryption_method, p, mocker):
        """Full fit() flow for the (unlabeled) trainer; unsupported encryption
        methods and regularization orders must raise."""
        # End-to-end flow test for the trainer.
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        def mock_func(*args, **kwargs):
            # recv order: synced config, batch size, then zeros.
            if mock_broadcast_recv.call_count == 1:
                return copy.deepcopy(config_sync)
            elif mock_broadcast_recv.call_count == 2:
                return 50
            else:
                return 0
        mock_broadcast_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_func
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        lrt = VerticalLogisticRegressionTrainer(get_trainer_conf)
        # Override the encryption config according to the parametrized method.
        if encryption_method == "paillier":
            lrt.encryption_config = {
                "paillier": {
                    "key_bit_size": 2048,
                    "precision": 7,
                    "djn_on": True,
                    "parallelize_on": True
                }
            }
        elif encryption_method == "plain":
            lrt.encryption_config = {
                "plain": {}
            }
        elif encryption_method == "ckks":
            pass
        else:
            lrt.encryption_config = {
                encryption_method: {}
            }
        encryption_config = lrt.encryption_config
        if encryption_method == "ckks":
            private_context = ts.context(
                ts.SCHEME_TYPE.CKKS,
                poly_modulus_degree=encryption_config[encryption_method]["poly_modulus_degree"],
                coeff_mod_bit_sizes=encryption_config[encryption_method]["coeff_mod_bit_sizes"]
            )
            private_context.generate_galois_keys()
            private_context.generate_relin_keys()
            private_context.global_scale = 1 << encryption_config[
                encryption_method]["global_scale_bit_size"]
        elif encryption_method == "paillier":
            num_cores = - \
                1 if encryption_config[encryption_method]["parallelize_on"] else 1
            private_context = Paillier.context(
                encryption_config[encryption_method]["key_bit_size"],
                djn_on=encryption_config[encryption_method]["djn_on"])
        def mock_predict_residual(*args, **kwargs):
            # Scripted label-trainer replies: public key first, then encrypted
            # residuals / gradients, then the continue/stop flags.
            if encryption_method == "ckks":
                if mock_channel_recv.call_count <= 1:
                    serialized_public_context = private_context.serialize(
                        save_public_key=True,
                        save_secret_key=False,
                        save_galois_keys=True,
                        save_relin_keys=True
                    )
                    return serialized_public_context
                elif mock_channel_recv.call_count % 3 == 2:
                    pred_residual = torch.tensor(np.random.random(80))
                    enc_pred_residual = ts.ckks_vector(
                        private_context, pred_residual.numpy().flatten())
                    serialized_enc_pred_residual = enc_pred_residual.serialize()
                    return serialized_enc_pred_residual
                elif mock_channel_recv.call_count % 3 == 0:
                    return np.random.random(2)
                else:
                    return False, False, -1
            elif encryption_method == "paillier":
                if mock_channel_recv.call_count <= 1:
                    return private_context.to_public().serialize()
                elif mock_channel_recv.call_count % 3 == 2:
                    pred_residual = torch.tensor(np.random.random(80))
                    enc_pred_residual = Paillier.encrypt(
                        private_context,
                        pred_residual.numpy().astype(np.float32).flatten(),
                        precision=encryption_config["paillier"]["precision"],
                        obfuscation=True,
                        num_cores=num_cores
                    )
                    return Paillier.serialize(enc_pred_residual)
                elif mock_channel_recv.call_count % 3 == 0:
                    return np.random.random(2)
                else:
                    return False, False, -1
            elif encryption_method == "plain":
                if mock_channel_recv.call_count % 2 == 1:
                    return torch.tensor(np.random.random((80, 1)), dtype=torch.float)
                else:
                    return False, False, -1
        mock_channel_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_predict_residual
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=0
        )
        lrt.optimizer_config['p'] = p
        if encryption_method not in ("ckks", "paillier", "plain"):
            msg = f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', 'ckks', 'plain'."
            with pytest.raises(ValueError) as e:
                lrt.fit()
                exec_msg = e.value.args[0]
                assert exec_msg == msg
        elif p not in (0, 1, 2):
            with pytest.raises(NotImplementedError) as e:
                lrt.fit()
                exec_msg = e.value.args[0]
                assert exec_msg == "Regular P={} not implement.".format(p)
        else:
            lrt.fit()
            self.check_model_output()
    @pytest.mark.parametrize("encryption_method", ["ckks"])
    def test_early_stopping(self, get_label_trainer_conf, get_trainer_conf, encryption_method, mocker):
        """With patience=1 and a small delta, both sides should stop early."""
        # Early-stopping test.
        mocker.patch("service.fed_control._send_progress")
        get_label_trainer_conf["train_info"]["train_params"]["early_stopping"]["patience"] = 1
        get_label_trainer_conf["train_info"]["train_params"]["early_stopping"]["delta"] = 1e-3
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        mocker.patch.object(
            BroadcastChannel, "scatter", return_value=0
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        lrt = VerticalLogisticRegressionLabelTrainer(get_label_trainer_conf)
        encryption_config = lrt.encryption_config
        private_context = ts.context(
            ts.SCHEME_TYPE.CKKS,
            poly_modulus_degree=encryption_config["ckks"]["poly_modulus_degree"],
            coeff_mod_bit_sizes=encryption_config["ckks"]["coeff_mod_bit_sizes"]
        )
        private_context.generate_galois_keys()
        private_context.generate_relin_keys()
        private_context.global_scale = 1 << encryption_config["ckks"]["global_scale_bit_size"]
        serialized_public_context = private_context.serialize(
            save_public_key=True,
            save_secret_key=False,
            save_galois_keys=True,
            save_relin_keys=True
        )
        public_context = ts.context_from(serialized_public_context)
        def mock_collect(*args, **kwargs):
            # Scripted trainer replies; an empty list after call 8 forces stop.
            if encryption_method == "ckks":
                print(mock_channel_collect.call_count)
                if mock_channel_collect.call_count >= 8:
                    return []
                if mock_channel_collect.call_count % 3 == 1:
                    return [torch.tensor(np.zeros([80, 1]))]
                elif mock_channel_collect.call_count % 3 == 2:
                    pred_residual = torch.tensor(np.random.random(2))
                    enc_pred_residual = ts.ckks_vector(
                        private_context, pred_residual.numpy().flatten())
                    serialized_enc_pred_residual = enc_pred_residual.serialize()
                    pred_residual = ts.ckks_vector_from(
                        public_context, serialized_enc_pred_residual)
                    return [pred_residual.serialize()]
                else:
                    return [torch.tensor(np.zeros([20, 1]))]
            elif encryption_method == "paillier":
                return []
        mock_channel_collect = mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_collect
        )
        mocker.patch.object(
            lrt, "check_data", return_value=None
        )
        lrt.fit()
        def mock_func(*args, **kwargs):
            if mock_broadcast_recv.call_count == 1:
                return copy.deepcopy(config_sync)
            elif mock_broadcast_recv.call_count == 2:
                return 50
            else:
                return 0
        mock_broadcast_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_func
        )
        trainer = VerticalLogisticRegressionTrainer(get_trainer_conf)
        encryption_config = trainer.encryption_config
        private_context = ts.context(
            ts.SCHEME_TYPE.CKKS,
            poly_modulus_degree=encryption_config["ckks"]["poly_modulus_degree"],
            coeff_mod_bit_sizes=encryption_config["ckks"]["coeff_mod_bit_sizes"]
        )
        private_context.generate_galois_keys()
        private_context.generate_relin_keys()
        private_context.global_scale = 1 << encryption_config["ckks"]["global_scale_bit_size"]
        def mock_predict_residual(*args, **kwargs):
            # Final branch returns (True, True, 1): the early-stop signal.
            if mock_channel_recv.call_count <= 1:
                serialized_public_context = private_context.serialize(
                    save_public_key=True,
                    save_secret_key=False,
                    save_galois_keys=True,
                    save_relin_keys=True
                )
                return serialized_public_context
            elif mock_channel_recv.call_count % 3 == 2:
                pred_residual = torch.tensor(np.random.random(80))
                enc_pred_residual = ts.ckks_vector(
                    private_context, pred_residual.numpy().flatten())
                serialized_enc_pred_residual = enc_pred_residual.serialize()
                return serialized_enc_pred_residual
            elif mock_channel_recv.call_count % 3 == 0:
                return np.random.random(2)
            else:
                return True, True, 1
        mock_channel_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_predict_residual
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=0
        )
        trainer.fit()
    @pytest.mark.parametrize("encryption_method", ["ckks"])
    def test_save_frequency(self, get_label_trainer_conf, get_trainer_conf, encryption_method, mocker):
        """Check that the save_frequency interaction parameter takes effect."""
        # Test whether the model save-frequency parameter works.
        mocker.patch("service.fed_control._send_progress")
        get_label_trainer_conf["train_info"]["interaction_params"]["save_frequency"] = 1
        get_trainer_conf["train_info"]["interaction_params"] = {}
        get_trainer_conf["train_info"]["interaction_params"]["save_frequency"] = 1
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        def mock_collect(*args, **kwargs):
            if encryption_method == "ckks":
                if mock_channel_collect.call_count > 9:
                    return []
                if mock_channel_collect.call_count % 3 == 1:
                    return [torch.tensor(np.zeros([80, 1]))]
                elif mock_channel_collect.call_count % 3 == 2:
                    pred_residual = torch.tensor(np.random.random(2))
                    enc_pred_residual = ts.ckks_vector(
                        private_context, pred_residual.numpy().flatten())
                    serialized_enc_pred_residual = enc_pred_residual.serialize()
                    pred_residual = ts.ckks_vector_from(
                        public_context, serialized_enc_pred_residual)
                    return [pred_residual.serialize()]
                else:
                    return [torch.tensor(np.zeros([20, 1]))]
            elif encryption_method == "paillier":
                return []
        mock_channel_collect = mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_collect
        )
        mocker.patch.object(
            BroadcastChannel, "scatter", return_value=0
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        lrt = VerticalLogisticRegressionLabelTrainer(get_label_trainer_conf)
        encryption_config = lrt.encryption_config
        private_context = ts.context(
            ts.SCHEME_TYPE.CKKS,
            poly_modulus_degree=encryption_config["ckks"]["poly_modulus_degree"],
            coeff_mod_bit_sizes=encryption_config["ckks"]["coeff_mod_bit_sizes"]
        )
        private_context.generate_galois_keys()
        private_context.generate_relin_keys()
        private_context.global_scale = 1 << encryption_config["ckks"]["global_scale_bit_size"]
        serialized_public_context = private_context.serialize(
            save_public_key=True,
            save_secret_key=False,
            save_galois_keys=True,
            save_relin_keys=True
        )
        public_context = ts.context_from(serialized_public_context)
        mocker.patch.object(
            lrt, "check_data", return_value=None
        )
        lrt.fit()
        def mock_func(*args, **kwargs):
            if mock_broadcast_recv.call_count == 1:
                return config_sync
            elif mock_broadcast_recv.call_count == 2:
                return 50
            else:
                return 0
        mock_broadcast_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_func
        )
        trainer = VerticalLogisticRegressionTrainer(get_trainer_conf)
        encryption_config = trainer.encryption_config
        private_context = ts.context(
            ts.SCHEME_TYPE.CKKS,
            poly_modulus_degree=encryption_config["ckks"]["poly_modulus_degree"],
            coeff_mod_bit_sizes=encryption_config["ckks"]["coeff_mod_bit_sizes"]
        )
        private_context.generate_galois_keys()
        private_context.generate_relin_keys()
        private_context.global_scale = 1 << encryption_config["ckks"]["global_scale_bit_size"]
        def mock_predict_residual(*args, **kwargs):
            if mock_channel_recv.call_count <= 1:
                serialized_public_context = private_context.serialize(
                    save_public_key=True,
                    save_secret_key=False,
                    save_galois_keys=True,
                    save_relin_keys=True
                )
                return serialized_public_context
            elif mock_channel_recv.call_count % 3 == 2:
                pred_residual = torch.tensor(np.random.random(80))
                enc_pred_residual = ts.ckks_vector(
                    private_context, pred_residual.numpy().flatten())
                serialized_enc_pred_residual = enc_pred_residual.serialize()
                return serialized_enc_pred_residual
            elif mock_channel_recv.call_count % 3 == 0:
                return np.random.random(2)
            else:
                return False, False, -1
        mock_channel_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_predict_residual
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=0
        )
        trainer.fit()
    @pytest.mark.parametrize("encryption_method", ["ckks"])
    def test_save_path(self, get_label_trainer_conf, encryption_method, mocker):
        """If the output directory does not exist it should be created
        automatically and the run should complete."""
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=0
        )
        mocker.patch.object(
            BroadcastChannel, "scatter", return_value=0
        )
        get_label_trainer_conf["output"]["path"] = "/opt/checkpoints/unit_test_2"
        def mock_collect(*args, **kwargs):
            if encryption_method == "ckks":
                if mock_channel_collect.call_count > 9:
                    return []
                if mock_channel_collect.call_count % 3 == 1:
                    return [torch.tensor(np.zeros([80, 1]))]
                elif mock_channel_collect.call_count % 3 == 2:
                    pred_residual = torch.tensor(np.random.random(2))
                    enc_pred_residual = ts.ckks_vector(
                        private_context, pred_residual.numpy().flatten())
                    serialized_enc_pred_residual = enc_pred_residual.serialize()
                    pred_residual = ts.ckks_vector_from(
                        public_context, serialized_enc_pred_residual)
                    return [pred_residual.serialize()]
                else:
                    return [torch.tensor(np.zeros([20, 1]))]
            elif encryption_method == "paillier":
                return []
        mock_channel_collect = mocker.patch.object(
            BroadcastChannel, "collect", side_effect=mock_collect
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        lrt = VerticalLogisticRegressionLabelTrainer(get_label_trainer_conf)
        encryption_config = lrt.encryption_config
        private_context = ts.context(
            ts.SCHEME_TYPE.CKKS,
            poly_modulus_degree=encryption_config["ckks"]["poly_modulus_degree"],
            coeff_mod_bit_sizes=encryption_config["ckks"]["coeff_mod_bit_sizes"]
        )
        private_context.generate_galois_keys()
        private_context.generate_relin_keys()
        private_context.global_scale = 1 << encryption_config["ckks"]["global_scale_bit_size"]
        serialized_public_context = private_context.serialize(
            save_public_key=True,
            save_secret_key=False,
            save_galois_keys=True,
            save_relin_keys=True
        )
        public_context = ts.context_from(serialized_public_context)
        mocker.patch.object(
            lrt, "check_data", return_value=None
        )
        lrt.fit()
        shutil.rmtree("/opt/checkpoints/unit_test_2")
    @staticmethod
    def check_model_output():
        """Validate the persisted model_config.json and the protobuf model file."""
        # Check that model_config.json was written.
        assert os.path.exists("/opt/checkpoints/unit_test/model_config.json")
        with open("/opt/checkpoints/unit_test/model_config.json") as f:
            model_config = json.load(f)
        # Check that the stage recorded in model_config.json is as expected.
        assert model_config[-1]["class_name"] == "VerticalLogisticRegression"
        filename = "/opt/checkpoints/unit_test/" + model_config[0]["filename"][:-5] +'.pmodel'
        dim = model_config[-1]["input_dim"]
        bias = model_config[-1]["bias"]
        if bias:
            assert dim == 3
        else:
            assert dim == 2
        # Check that the model file was written and is a valid LinearModel proto.
        assert os.path.exists(filename)
        with open(filename, 'rb') as f:
            byte_str = f.read()
        m = LinearModel()
        m.ParseFromString(byte_str)
        model = json_format.MessageToDict(m,
                                          including_default_value_fields=True,
                                          preserving_proto_field_name=True)
        assert len(model["state_dict"]["weight"]) == dim
        if bias:
            assert "bias" in model["state_dict"]
        else:
            assert model["state_dict"].get("bias", 0.0) == 0.0
| 30,427 | 39.952894 | 120 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_xgboost.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import random
import shutil
import string
from multiprocess.pool import ApplyResult
import numpy as np
import pandas as pd
import pytest
import service.fed_config
from algorithm.core.paillier_acceleration import embed, umbed
from algorithm.core.tree.tree_structure import Node, Tree
from algorithm.framework.vertical.xgboost import (decision_tree_label_trainer,
decision_tree_trainer)
from algorithm.framework.vertical.xgboost.label_trainer import \
VerticalXgboostLabelTrainer
from algorithm.framework.vertical.xgboost.trainer import VerticalXgboostTrainer
from common.communication.gRPC.python.channel import (BroadcastChannel,
DualChannel)
from common.communication.gRPC.python.commu import Commu
from common.crypto.paillier.paillier import Paillier
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.core.tree.tree_structure import Node, SplitInfo
# Fix the stdlib RNG so identifiers generated via `random` are reproducible.
random.seed(1)
# Module-wide 2048-bit Paillier key pair shared by the tests below
# (second argument enables the DJN optimization, matching djn_on usage elsewhere).
private_context = Paillier.context(2048, True)
public_context = private_context.to_public()
def prepare_data():
    """Write synthetic guest/host train and test CSV splits for the xgboost tests.

    NOTE(review): this also mutates FedNode.node_id/node_name as a side effect,
    which is unrelated to data preparation — callers appear to rely on it.
    """
    case_df = pd.DataFrame({
        'x0': np.arange(100),
        'x1': np.arange(100),
        'x2': 2 * np.arange(100) - 40.0,
        'x3': 3 * np.arange(100) + 1.0,
        'x4': np.arange(100)[::-1]
    })
    # Binary label determined by x0 + x2 + x3.
    case_df['y'] = np.where(
        case_df['x0'] + case_df['x2'] + case_df['x3'] > 40, 1, 0)
    case_df[['y', 'x0', 'x1', 'x2']].head(80).to_csv(
        "/opt/dataset/unit_test/train_guest.csv", index=True
    )
    case_df[['y', 'x0', 'x1', 'x2']].tail(20).to_csv(
        "/opt/dataset/unit_test/test_guest.csv", index=True
    )
    case_df[['x3', 'x4']].head(80).to_csv(
        "/opt/dataset/unit_test/train_host.csv", index=True
    )
    case_df[['x3', 'x4']].tail(20).to_csv(
        "/opt/dataset/unit_test/test_host.csv", index=True
    )
    FedNode.node_id = "node-1"
    FedNode.node_name = "node-1"
def prepare_test_data():
    """Write synthetic guest/host inference CSVs (99 rows) for the xgboost tests."""
    case_df = pd.DataFrame({
        'x0': np.arange(99),
        'x1': np.arange(99),
        'x2': 2 * np.arange(99) - 40.0,
        'x3': 3 * np.arange(99) + 1.0,
        'x4': np.arange(99)[::-1]
    })
    # Same labeling rule as prepare_data.
    case_df['y'] = np.where(
        case_df['x0'] + case_df['x2'] + case_df['x3'] > 40, 1, 0)
    case_df[['y', 'x0', 'x1', 'x2']].to_csv(
        "/opt/dataset/unit_test/infer_guest.csv", index=True
    )
    case_df[['x3', 'x4']].to_csv(
        "/opt/dataset/unit_test/infer_host.csv", index=True
    )
# Pre-built single-tree xgboost model fixtures for the inference tests.
# NOTE(review): these run at module import time and write under
# /opt/checkpoints/unit_test/node-{1,2}/ — presumably those directories exist
# before this module is imported; verify against the test environment setup.
xgb_output = {
    "suggest_threshold": 0.6161117553710938,
    "lr": [0.3],
    "max_depth": [2],
    "trees": [
        {
            "party_id": "node-1",
            "tree_index": 0,
            "root_node_id": "0_4lN0P7QTwWq25Eei",
            "nodes": {
                "0_4lN0P7QTwWq25Eei": {
                    "id": "0_4lN0P7QTwWq25Eei", "depth": 0, "left_node_id": "0_gw94EBW5tiD8kCqG",
                    "right_node_id": "0_vpKZWumTxYcojXLq",
                    "split_info": {
                        "owner_id": "node-1", "feature_idx": 0, "is_category": True,
                        "split_point": None, "left_cat": [4, 2, 6, 1]
                    },
                    "is_leaf": False,
                    "weight": None, "linkage": None
                }, "0_gw94EBW5tiD8kCqG": {
                    "id": "0_gw94EBW5tiD8kCqG", "depth": 1, "left_node_id": None, "right_node_id": None,
                    "split_info": None,
                    "is_leaf": True, "weight": 1.5769230769230769, "linkage": "left"
                },
                "0_vpKZWumTxYcojXLq": {
                    "id": "0_vpKZWumTxYcojXLq", "depth": 1, "left_node_id": None,
                    "right_node_id": None,
                    "split_info": None, "is_leaf": True, "weight": -1.5, "linkage": "right"
                }
            }
        }
    ],
    "version": "1.0", "loss_method": "BCEWithLogitsLoss", "num_trees": 1,
    "node_id_group": {
        "0_4lN0P7QTwWq25Eei": ["0_4lN0P7QTwWq25Eei"]
    }
}
with open("/opt/checkpoints/unit_test/node-1/vertical_xgboost_guest.pmodel", 'w') as f:
    json.dump(xgb_output, f)
# Host-side model fixture: only the split node owned by node-2.
xgb_output = {"4_WTqDQjPt39iMc7Ug": {"id": "4_WTqDQjPt39iMc7Ug",
                                     "split_info": {"owner_id": "node-2", "feature_idx": 0, "is_category": True,
                                                    "split_point": None, "left_cat": [1, 0, 2, 5]}}}
with open("/opt/checkpoints/unit_test/node-2/vertical_xgboost_host.pmodel", 'w') as f:
    json.dump(xgb_output, f)
def enc_grad_hess(grad, hess):
    """Paillier-encrypt gradients and/or hessians with the module-level key.

    If both *grad* and *hess* are supplied they are packed ("embedded") into
    one plaintext first; otherwise the single supplied vector is encrypted
    on its own.
    """
    if grad is not None and hess is not None:
        # Pack both statistics into a single plaintext before encrypting.
        packed = embed([grad, hess], interval=(
            1 << 128), precision=64)
        return Paillier.encrypt(context=private_context,
                                data=packed,
                                precision=0,  # must be 0
                                obfuscation=True,
                                num_cores=1)
    # Exactly one of grad/hess was provided — encrypt it directly.
    single = hess if grad is None else grad
    return Paillier.encrypt(context=private_context,
                            data=single,
                            precision=7,
                            obfuscation=True,
                            num_cores=1)
@pytest.fixture()
def get_label_trainer_infer_conf():
    """Inference-mode config for the label trainer (guest side).

    Points at the CSV and pretrained model files written by
    prepare_test_data().
    """
    conf = {
        "identity": "label_trainer",
        "model_info": {
            "name": "vertical_xgboost",
            "config": {}
        },
        "inference": True,
        "input": {
            "testset": [
                {
                    "type": "csv",
                    "path": "/opt/dataset/unit_test",
                    "name": "infer_guest.csv",
                    "has_label": True,
                    "has_id": True
                }
            ],
            "pretrained_model": {
                "path": "/opt/checkpoints/unit_test/node-1",
                "name": "vertical_xgboost_guest.pmodel"
            }
        },
        "output": {
            "path": "/opt/checkpoints/unit_test/node-1",
            "testset": {
                "name": "predicted_probabilities_train.csv"
            }
        },
        "train_info": {
            "interaction_params": {
            },
            "train_params": {
                # 99 rows in infer_guest.csv -> a single validation batch.
                "batch_size_val": 99
            }
        }
    }
    yield conf
@pytest.fixture()
def get_trainer_infer_conf():
    """Inference-mode config for the (host-side) trainer.

    Mirrors get_label_trainer_infer_conf but without labels or outputs.
    """
    conf = {
        "identity": "trainer",
        "model_info": {
            "name": "vertical_xgboost",
            "config": {}
        },
        "inference": True,
        "input": {
            "testset": [
                {
                    "type": "csv",
                    "path": "/opt/dataset/unit_test",
                    "name": "infer_host.csv",
                    "has_label": False,
                    "has_id": True
                }
            ],
            "pretrained_model": {
                "path": "/opt/checkpoints/unit_test/node-2",
                "name": "vertical_xgboost_host.pmodel"
            }
        },
        "output": {
        },
        "train_info": {
            "interaction_params": {
            },
            "train_params": {
                "batch_size_val": 99
            }
        }
    }
    yield conf
@pytest.fixture()
def get_label_trainer_conf():
    """Training config for the label trainer: the repo's default JSON with
    paths and hyper-parameters overridden for a tiny, fast unit-test run.
    """
    with open("algorithm/config/vertical_xgboost/label_trainer.json") as f:
        conf = json.load(f)
    # Wire inputs/outputs to the unit-test directories.
    inputs = conf["input"]
    inputs["trainset"][0]["path"] = "/opt/dataset/unit_test"
    inputs["trainset"][0]["name"] = "train_guest.csv"
    inputs["valset"][0]["path"] = "/opt/dataset/unit_test"
    inputs["valset"][0]["name"] = "test_guest.csv"
    inputs["testset"] = []
    outputs = conf["output"]
    outputs["path"] = "/opt/checkpoints/unit_test"
    outputs["prediction_train"]["name"] = "/opt/checkpoints/unit_test/predicted_probabilities_train.csv"
    outputs["prediction_val"]["name"] = "/opt/checkpoints/unit_test/predicted_probabilities_val.csv"
    outputs["model"]["name"] = "vertical_xgboost_guest.model"
    outputs["proto_model"]["name"] = "vertical_xgboost_guest.pmodel"
    outputs["feature_importance"]["name"] = "/opt/checkpoints/unit_test/feature_importances.csv"
    # Shrink the model so the test finishes quickly.
    params = conf["train_info"]["train_params"]
    params["num_bins"] = 10
    params["max_depth"] = 2
    params["min_sample_split"] = 1
    params["downsampling"]["row"]["top_rate"] = 0.5
    params["downsampling"]["row"]["other_rate"] = 0.5
    params["num_trees"] = 1
    params["max_num_cores"] = 2
    params["metric"] = {
        "acc": {},
        "precision": {},
        "recall": {},
        "f1_score": {},
        "auc": {},
    }
    params["early_stopping"]["key"] = "acc"
    yield conf
@pytest.fixture()
def get_trainer_conf():
    """Training config for the (host-side) trainer: the repo's default JSON
    with paths and hyper-parameters overridden for a tiny unit-test run.
    """
    with open("algorithm/config/vertical_xgboost/trainer.json") as f:
        conf = json.load(f)
    # Wire inputs/outputs to the unit-test directories.
    inputs = conf["input"]
    inputs["trainset"][0]["path"] = "/opt/dataset/unit_test"
    inputs["trainset"][0]["name"] = "train_host.csv"
    inputs["valset"][0]["path"] = "/opt/dataset/unit_test"
    inputs["valset"][0]["name"] = "test_host.csv"
    inputs["testset"] = []
    outputs = conf["output"]
    outputs["path"] = "/opt/checkpoints/unit_test"
    outputs["model"]["name"] = "vertical_xgboost_host.model"
    outputs["proto_model"]["name"] = "vertical_xgboost_host.pmodel"
    # Shrink the model so the test finishes quickly.
    params = conf["train_info"]["train_params"]
    params["num_bins"] = 10
    params["max_depth"] = 2
    params["min_sample_split"] = 1
    params["num_trees"] = 1
    params["max_num_cores"] = 2
    params["advanced"]["row_batch"] = 20
    yield conf
@pytest.fixture(scope="module", autouse=True)
def env():
    """Module-wide environment: set node identity, create the unit-test
    directory tree, write fixture data, and clean everything up afterwards.
    """
    Commu.node_id = "node-1"
    Commu.trainer_ids = ['node-1', 'node-2']
    Commu.scheduler_id = 'assist_trainer'
    os.chdir("python")
    # Create every directory the fixtures and outputs rely on.
    for directory in ("/opt/dataset/unit_test",
                      "/opt/checkpoints/unit_test/node-1",
                      "/opt/checkpoints/unit_test/node-2",
                      "/opt/config/unit_test"):
        if not os.path.exists(directory):
            os.makedirs(directory)
    prepare_data()
    prepare_test_data()
    yield
    # Teardown: remove everything the module created.
    for directory in ("/opt/dataset/unit_test",
                      "/opt/config/unit_test",
                      "/opt/checkpoints/unit_test"):
        if os.path.exists(directory):
            shutil.rmtree(directory)
    os.chdir("..")
class TestVerticalXgboost:
    """End-to-end tests for the vertical-XGBoost label-trainer and trainer
    roles with all cross-party channels mocked.

    The scripted mock replies are keyed on the mock's ``call_count``, i.e.
    on the exact order in which the code under test calls ``recv`` — do not
    reorder communication in the production code without updating these.
    The ``embed`` parameter toggles grad/hess packing (EMBEDING on/off).
    """
    @pytest.mark.filterwarnings('ignore::DeprecationWarning')
    @pytest.mark.parametrize('embed', [(True), (False)])
    def test_label_trainer(self, get_label_trainer_conf, embed, mocker):
        # Deterministic tree-node ids: "1", "2", ... in generation order.
        def mock_generate_id(*args, **kwargs):
            return str(mock_tree_generate_id.call_count)
        # Scripted trainer-side payloads, selected by DualChannel.recv order:
        # histogram lists for split finding, then (bit-packed) boolean masks
        # describing which samples fall to the left of each candidate split.
        def mock_dualchannel_recv(*args, **kwargs):
            if embed:
                # recv summed_grad_hess
                if mock_channel_recv.call_count in [1, 2, 4]:
                    hist_list = [(np.zeros(8), np.array([8] * 10))
                                 for _ in range(2)]
                    return [False, hist_list, [2]]
                elif mock_channel_recv.call_count in [6, 7]:
                    # Bit-packed left-sample masks per candidate node id.
                    return {'1': np.packbits(np.array([True, True, True, True, True, True, True, True, True,
                                                       True, True, True, True, True, True, True, True, True,
                                                       True, True])),
                            '2': np.packbits(np.array([True, True, True, True, True, True, True, True, True,
                                                       True, True, True, True, True, True, True, True, True,
                                                       True, True])),
                            '3': np.packbits(np.array([True, True, True, True, True, True, True, True, True,
                                                       True, True, True, True, True, True, True, True, True,
                                                       True, True])),
                            }
                elif mock_channel_recv.call_count <= 5 or (
                        mock_channel_recv.call_count >= 8 and mock_channel_recv.call_count <= 12):
                    # A realistic payload would be per-feature histograms of
                    # Paillier-encrypted embedded grad/hess sums (see
                    # enc_grad_hess above); all-zero histograms are enough to
                    # drive the control flow exercised by this test.
                    hist_list = [(np.zeros(8), np.array([8] * 10))
                                 for _ in range(2)]
                    return [False, hist_list]
                elif mock_channel_recv.call_count <= 7:
                    return {'1': np.array([True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True,
                                           True, True]),
                            '2': np.array([True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True,
                                           True, True]),
                            '3': np.array([True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True,
                                           True, True])
                            }
                elif mock_channel_recv.call_count <= 14 and mock_channel_recv.call_count >= 13:
                    return {'1': np.array([True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True,
                                           True, True]),
                            '2': np.array([True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True,
                                           True, True]),
                            '3': np.array([True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True,
                                           True, True])
                            }
            elif not embed:
                # Same script, but histograms carry separate grad/hess sums.
                if mock_channel_recv.call_count in [1, 2, 4]:
                    hist_list = [(np.zeros(8), np.zeros(
                        8), np.array([8] * 10)) for _ in range(2)]
                    return [False, hist_list, [2]]
                elif mock_channel_recv.call_count in [6, 7]:
                    return {'1': np.packbits(np.array([True, True, True, True, True, True, True, True, True,
                                                       True, True, True, True, True, True, True, True, True,
                                                       True, True])),
                            '2': np.packbits(np.array([True, True, True, True, True, True, True, True, True,
                                                       True, True, True, True, True, True, True, True, True,
                                                       True, True])),
                            '3': np.packbits(np.array([True, True, True, True, True, True, True, True, True,
                                                       True, True, True, True, True, True, True, True, True,
                                                       True, True])),
                            }
                elif mock_channel_recv.call_count <= 5 or (
                        mock_channel_recv.call_count >= 8 and mock_channel_recv.call_count <= 12):
                    hist_list = [(np.zeros(8), np.zeros(
                        8), np.array([8] * 10)) for _ in range(2)]
                    return [False, hist_list]
                elif mock_channel_recv.call_count <= 7:
                    return {'1': np.array([True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True,
                                           True, True]),
                            '2': np.array([True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True,
                                           True, True]),
                            '3': np.array([True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True,
                                           True, True])
                            }
                elif mock_channel_recv.call_count <= 14 and mock_channel_recv.call_count >= 13:
                    return {'1': np.array([True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True,
                                           True, True]),
                            '2': np.array([True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True,
                                           True, True]),
                            '3': np.array([True, True, True, True, True, True, True, True, True,
                                           True, True, True, True, True, True, True, True, True,
                                           True, True])
                            }
        # NOTE(review): unused stub (and "broadcaset" typo) — kept as-is.
        def mock_broadcasetchannel_recv():
            pass
        if not embed:
            mocker.patch.object(decision_tree_label_trainer, "EMBEDING", False)
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(FedConfig, "get_trainer", return_value=["node-2"])
        mocker.patch.object(FedNode, "node_id", "node-1")
        mocker.patch.object(Commu, "node_id", "node-1")
        mocker.patch.object(Commu, "trainer_ids", ["node-1", "node-2"])
        mocker.patch.object(Commu, "scheduler_id", "scheduler")
        mocker.patch.object(
            BroadcastChannel, "collect", return_value=[{"train": (80, 3), "valid": (20, 3)}]
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast"
        )
        mocker.patch.object(
            DualChannel, "send"
        )
        mock_channel_recv = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_dualchannel_recv
        )
        mock_tree_generate_id = mocker.patch.object(
            Tree, "_generate_id", side_effect=mock_generate_id
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        mocker.patch.object(
            FedNode, "config", return_value={"trainer": {"node-2": []}}
        )
        xgb_label_trainer = VerticalXgboostLabelTrainer(
            get_label_trainer_conf)
        mocker.patch.object(
            xgb_label_trainer.channels["sync"], "collect", return_value=[{"node-2": [1, 2]}, {"node-3": [1, 2]}]
        )
        xgb_label_trainer.fit()
        self.check_label_trainer_output()
    @pytest.mark.parametrize('embed', [(True), (False)])
    def test_trainer(self, get_trainer_conf, embed, mocker):
        # Scripted label-trainer payloads: sync'ed config first, then the
        # public Paillier context, then encrypted grad/hess and tree nodes.
        def mock_broadcastchannel_recv(*args, **kwargs):
            config = {
                "train_info": {
                    "interaction_params": {
                        "save_frequency": -1,
                        "echo_training_metrics": True,
                        "write_training_prediction": True,
                        "write_validation_prediction": True
                    },
                    "train_params": {
                        "lossfunc": {
                            "BCEWithLogitsLoss": {}
                        },
                        "num_trees": 1,
                        "num_bins": 10,
                        "downsampling": {
                            "row": {
                                "run_goss": True
                            }
                        },
                        "encryption": {
                            "paillier": {
                                "key_bit_size": 2048,
                                "precision": 7,
                                "djn_on": True,
                                "parallelize_on": True
                            }
                        },
                        "batch_size_val": 40960
                    }
                }
            }
            if broadchannel_recv_mocker.call_count == 1:
                return config
            # (call_count == 2 — the public Paillier context — is handled
            # inside each embed branch below using the module-level context.)
            if embed:
                # recv embed grad hess
                if broadchannel_recv_mocker.call_count in [3, 6]:
                    grad = np.array(
                        [0.8333333, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, -0.5, -0.5, -0.5,
                         -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -
                         0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5,
                         -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.8333333, -0.8333333, -
                         0.8333333, -0.8333333, -0.8333333, -0.8333333, -
                         0.8333333, -0.8333333, -0.8333333, -0.8333333,
                         -0.8333333, -0.8333333, -0.8333333, -0.8333333, -
                         0.8333333, -0.8333333, -0.8333333, -0.8333333,
                         -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.5, -0.5])
                    hess = np.array(
                        [0.41666666, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                         0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                         0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.41666666, 0.41666666,
                         0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666,
                         0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666,
                         0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.25, 0.25])
                    return Paillier.serialize(enc_grad_hess(grad, hess), compression=False)
                # recv public context for Paillier
                elif broadchannel_recv_mocker.call_count == 2:
                    return public_context.serialize()
                # recv tree node
                elif broadchannel_recv_mocker.call_count == 4:
                    def _generate_id():
                        id = ''.join(random.sample(
                            string.ascii_letters + string.digits, 16))
                        return id
                    return Node(id=_generate_id(), depth=0)
            elif not embed:
                # recv grad and hess
                if broadchannel_recv_mocker.call_count in [3, 6]:
                    grad = np.array(
                        [0.8333333, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, -0.5, -0.5, -0.5,
                         -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -
                         0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5,
                         -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.8333333, -0.8333333, -
                         0.8333333, -0.8333333, -0.8333333, -0.8333333, -
                         0.8333333, -0.8333333, -0.8333333, -0.8333333,
                         -0.8333333, -0.8333333, -0.8333333, -0.8333333, -
                         0.8333333, -0.8333333, -0.8333333, -0.8333333,
                         -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.8333333, -0.5, -0.5])
                    hess = np.array(
                        [0.41666666, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                         0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
                         0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.41666666, 0.41666666,
                         0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666,
                         0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666,
                         0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.41666666, 0.25, 0.25])
                    return Paillier.serialize(enc_grad_hess(grad, None), compression=False), Paillier.serialize(
                        enc_grad_hess(None, hess), compression=False)
                # recv public context for Paillier
                elif broadchannel_recv_mocker.call_count == 2:
                    return public_context.serialize()
                # recv tree node
                elif broadchannel_recv_mocker.call_count == 4:
                    def _generate_id():
                        id = ''.join(random.sample(
                            string.ascii_letters + string.digits, 16))
                        return id
                    return Node(id=_generate_id(), depth=0)
        def mock_dualchannel_recv(*args, **kwargs):
            # recv min split info
            if dualchannel_recv_mocker.call_count == 1:
                return -1, 1, 1
            # recv early stop
            elif dualchannel_recv_mocker.call_count == 2:
                return True
        if not embed:
            mocker.patch.object(decision_tree_trainer, "EMBEDING", False)
        mocker.patch.object(FedConfig, "get_label_trainer",
                            return_value=["node-1"])
        mocker.patch.object(FedNode, "node_id", "node-2")
        mocker.patch.object(FedNode, "create_channel")
        mocker.patch.object(Commu, "node_id", "node-2")
        mocker.patch.object(Commu, "trainer_ids", ["node-1", "node-2"])
        mocker.patch.object(Commu, "scheduler_id", "scheduler")
        broadchannel_recv_mocker = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_broadcastchannel_recv
        )
        dualchannel_recv_mocker = mocker.patch.object(
            DualChannel, "recv", side_effect=mock_dualchannel_recv
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        xgb_trainer = VerticalXgboostTrainer(get_trainer_conf)
        xgb_trainer.fit()
        self.check_trainer_output()
    @staticmethod
    def check_label_trainer_output():
        # Check that the prediction files were written.
        assert os.path.exists(
            "/opt/checkpoints/unit_test/predicted_probabilities_train.csv")
        assert os.path.exists(
            "/opt/checkpoints/unit_test/predicted_probabilities_val.csv")
        # Check that the per-party model files were written.
        assert os.path.exists(
            "/opt/checkpoints/unit_test/node-2/vertical_xgboost_host.pmodel")
        assert os.path.exists(
            "/opt/checkpoints/unit_test/node-1/vertical_xgboost_guest.pmodel")
        # Check that the model config was written.
        assert os.path.exists("/opt/checkpoints/unit_test/model_config.json")
        with open("/opt/checkpoints/unit_test/model_config.json") as f:
            model_config = json.load(f)
        assert model_config[0]["class_name"] == "VerticalXGBooster"
        assert model_config[0]["filename"] == "vertical_xgboost_guest.pmodel"
        # Check that the feature-importance file was written.
        assert os.path.exists(
            "/opt/checkpoints/unit_test/feature_importances.csv")
    @staticmethod
    def check_trainer_output():
        # Check that the model file was written.
        assert os.path.exists(
            "/opt/checkpoints/unit_test/vertical_xgboost_host.pmodel")
        # Check that the model config was written.
        assert os.path.exists("/opt/checkpoints/unit_test/model_config.json")
        with open("/opt/checkpoints/unit_test/model_config.json") as f:
            model_config = json.load(f)
        assert model_config[2]["class_name"] == "VerticalXGBooster"
        assert model_config[2]["filename"] == "vertical_xgboost_host.pmodel"
| 33,748 | 48.053779 | 557 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_xgb.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import pytest
import json
from multiprocess.pool import ApplyResult
import pandas as pd
import numpy as np
from google.protobuf import json_format
import service.fed_config
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.core.paillier_acceleration import embed
from algorithm.core.tree.xgboost_loss import get_xgb_loss_inst
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.paillier.paillier import Paillier
from algorithm.core.tree.tree_structure import Node
from algorithm.framework.vertical.xgboost.label_trainer import VerticalXgboostLabelTrainer
from algorithm.framework.vertical.xgboost.trainer import VerticalXgboostTrainer
from algorithm.framework.vertical.xgboost.decision_tree_label_trainer import VerticalDecisionTreeLabelTrainer
from algorithm.framework.vertical.xgboost.decision_tree_trainer import VerticalDecisionTreeTrainer
from common.model.python.tree_model_pb2 import XGBoostModel, NodeModel
@pytest.fixture(scope='module', autouse=True)
def prepare_data(tmp_factory):
    """Write vertically-split train/test CSVs for the guest and host parties.

    200 random samples are split 120/80 into train/test; the guest holds the
    label plus x0-x2, the host holds x3-x4.  Node-identity globals are set so
    channel construction in the code under test resolves to "node-1".
    """
    df = pd.DataFrame({
        "x0": np.random.random(200),
        "x1": np.round(np.random.random(200) * 10.0),
        # Fix: np.random.uniform(200) draws a SINGLE scalar (with low=200)
        # that pandas broadcasts into a constant column; size=200 makes x2 a
        # genuine per-row random feature like its neighbours.
        "x2": np.random.uniform(size=200) * 2.0,
        "x3": np.random.random(200) * 3.0,
        "x4": np.arange(0, 200, 1),
        'y': np.round(np.random.random(200))
    })
    df[['y', 'x0', 'x1', 'x2']].head(120).to_csv(
        tmp_factory.join("train_guest.csv"), index=True, index_label='id'
    )
    df[['y', 'x0', 'x1', 'x2']].tail(80).to_csv(
        tmp_factory.join("test_guest.csv"), index=True, index_label='id'
    )
    df[['x3', 'x4']].head(120).to_csv(
        tmp_factory.join("train_host.csv"), index=True, index_label='id'
    )
    df[['x3', 'x4']].tail(80).to_csv(
        tmp_factory.join("test_host.csv"), index=True, index_label='id'
    )
    # Make the node identity globally visible to the components under test.
    Commu.node_id = "node-1"
    FedNode.node_id = "node-1"
    Commu.trainer_ids = ['node-1', 'node-2']
    service.fed_node.FedNode.node_name = 'node-1'
@pytest.fixture(scope='module', autouse=True)
def prepare_model(tmp_factory):
    """Serialize pretrained protobuf models for both parties.

    The guest gets a complete single-tree XGBoostModel; the host only needs
    its own split node (NodeModel), mirroring how inference distributes
    model state across parties.
    """
    d = {
        "suggest_threshold": 0.6161117553710938,
        "lr": [0.3],
        "max_depth": [2],
        "trees": [
            {
                "party_id": "node-1",
                "tree_index": 0,
                "root_node_id": "0_4lN0P7QTwWq25Eei",
                "nodes": {
                    "0_4lN0P7QTwWq25Eei": {
                        "id": "0_4lN0P7QTwWq25Eei", "depth": 0, "left_node_id": "0_gw94EBW5tiD8kCqG",
                        "right_node_id": "0_vpKZWumTxYcojXLq",
                        "split_info": {
                            "owner_id": "node-1", "feature_idx": 0, "is_category": True,
                            "split_point": None, "left_cat": [4, 2, 6, 1]
                        },
                        "is_leaf": False,
                        "weight": None, "linkage": None
                    }, "0_gw94EBW5tiD8kCqG": {
                        "id": "0_gw94EBW5tiD8kCqG", "depth": 1, "left_node_id": None, "right_node_id": None,
                        "split_info": None,
                        "is_leaf": True, "weight": 1.5769230769230769, "linkage": "left"
                    },
                    "0_vpKZWumTxYcojXLq": {
                        "id": "0_vpKZWumTxYcojXLq", "depth": 1, "left_node_id": None,
                        "right_node_id": None,
                        "split_info": None, "is_leaf": True, "weight": -1.5, "linkage": "right"
                    }
                }
            }
        ],
        "version": "1.0", "loss_method": "BCEWithLogitsLoss", "num_trees": 1,
        "node_id_group": {
            "0_4lN0P7QTwWq25Eei": {"node_id_list": ["0_4lN0P7QTwWq25Eei"]}
        }
    }
    # Parse the dict into the protobuf message and write the binary form.
    xgb = XGBoostModel()
    json_format.ParseDict(d, xgb)
    xgb_output = xgb.SerializeToString()
    with open(tmp_factory.join("vertical_xgboost_guest.pmodel"), 'wb') as f:
        f.write(xgb_output)
    # Host side: just the split node it owns.
    d = {"nodes": {"4_WTqDQjPt39iMc7Ug": {"id": "4_WTqDQjPt39iMc7Ug",
                                          "split_info": {"owner_id": "node-2", "feature_idx": 0, "is_category": True,
                                                         "split_point": None, "left_cat": [1, 0, 2, 5]}}}}
    xgb = NodeModel()
    json_format.ParseDict(d, xgb)
    xgb_output = xgb.SerializeToString()
    with open(tmp_factory.join("vertical_xgboost_host.pmodel"), 'wb') as f:
        f.write(xgb_output)
@pytest.fixture()
def get_label_trainer_infer_conf(tmp_factory):
    """Inference-mode config for the label trainer, pointing at the CSV and
    protobuf model files written by the module-level fixtures."""
    conf = {
        "identity": "label_trainer",
        "model_info": {
            "name": "vertical_xgboost"
        },
        "inference": True,
        "input": {
            "testset": [
                {
                    "type": "csv",
                    "path": str(tmp_factory),
                    "name": "test_guest.csv",
                    "has_label": True,
                    "has_id": True
                }
            ],
            "pretrained_model": {
                "path": str(tmp_factory),
                "name": "vertical_xgboost_guest.pmodel"
            }
        },
        "output": {
            "path": str(tmp_factory),
            "testset": {
                "name": "predicted_probabilities_train.csv"
            }
        },
        "train_info": {
            "interaction_params": {
            },
            "train_params": {
                # 80 rows in test_guest.csv -> one validation batch.
                "batch_size_val": 80
            }
        }
    }
    yield conf
@pytest.fixture()
def get_trainer_infer_conf(tmp_factory):
    """Inference-mode config for the (host-side) trainer; mirrors the guest
    config but without labels or outputs."""
    conf = {
        "identity": "trainer",
        "model_info": {
            "name": "vertical_xgboost"
        },
        "inference": True,
        "input": {
            "testset": [
                {
                    "type": "csv",
                    "path": str(tmp_factory),
                    "name": "test_host.csv",
                    "has_label": False,
                    "has_id": True
                }
            ],
            "pretrained_model": {
                "path": str(tmp_factory),
                "name": "vertical_xgboost_host.pmodel"
            }
        },
        "output": {
        },
        "train_info": {
            "interaction_params": {
            },
            "train_params": {
                # NOTE(review): guest-side fixture uses 80 (the testset size);
                # 99 here looks copied from another test file — confirm.
                "batch_size_val": 99
            }
        }
    }
    yield conf
class TestVerticalXGBoost:
    @pytest.mark.parametrize("dataset, empty", [("train", False), ("valid", False), ("test", False),
                                                ("train", True), ("valid", True), ("test", True)])
    def test_check_dataset(self, get_label_trainer_infer_conf, mocker, tmp_factory, dataset, empty):
        """check_dataset must raise ValueError both when a dataset is empty
        and when parties report inconsistent sample counts."""
        with open("python/algorithm/config/vertical_xgboost/label_trainer.json") as f:
            conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = str(tmp_factory)
        conf["input"]["trainset"][0]["name"] = "train_guest.csv"
        conf["input"]["valset"][0]["path"] = str(tmp_factory)
        conf["input"]["valset"][0]["name"] = "test_guest.csv"
        if dataset == "test":
            # A testset is only loaded in inference mode.
            conf["inference"] = True
            conf["input"]["testset"][0]["path"] = str(tmp_factory)
            conf["input"]["testset"][0]["name"] = "test_guest.csv"
        else:
            del conf["input"]["testset"]
        conf["output"]["path"] = str(tmp_factory)
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "collect", return_value=[{}]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
        )
        mocker.patch.object(
            service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
        )
        xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
        # Report either a zero-width dataset (empty=True) or a row count that
        # disagrees between parties (empty=False) for the chosen split.
        def mock_dim():
            if empty:
                d = {"train": (120, 2), "valid": (80, 2), "test": (80, 2)}
                d[dataset] = (d[dataset][0], 0)
                dims = [d]
            else:
                dims = [{"train": (120, 2), "valid": (80, 2), "test": (80, 2)}]
                d = {"train": (120, 2), "valid": (80, 2), "test": (80, 2)}
                d[dataset] = (1, 2)
                dims.append(d)
            return dims
        mocker.patch.object(
            xgb_label_trainer.channels["check_dataset_com"], "collect", side_effect=mock_dim
        )
        if empty:
            # Make the local feature frame empty to match the reported dims.
            if dataset == "train":
                xgb_label_trainer.train_features = pd.DataFrame()
            elif dataset == "valid":
                xgb_label_trainer.val_features = pd.DataFrame()
            elif dataset == "test":
                xgb_label_trainer.test_features = pd.DataFrame()
        with pytest.raises(ValueError) as e:
            xgb_label_trainer.check_dataset()
    @pytest.mark.parametrize("num_bins", [2, 8, 128, 1024, 100000])
    def test_cat_label_trainer(self, mocker, tmp_factory, num_bins):
        """
        Parametrized over num_bins to cover both cases — the number of
        categories being greater or smaller than num_bins — and to exercise
        initialization for num_bins falling in each of its three ranges.
        Args:
            tmp_factory:
            num_bins:
        Returns:
        """
        with open("python/algorithm/config/vertical_xgboost/label_trainer.json") as f:
            conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = str(tmp_factory)
        conf["input"]["trainset"][0]["name"] = "train_guest.csv"
        conf["input"]["valset"][0]["path"] = str(tmp_factory)
        conf["input"]["valset"][0]["name"] = "test_guest.csv"
        conf["output"]["path"] = str(tmp_factory)
        # Treat x1 (rounded 0-10) as a categorical feature.
        conf["train_info"]["train_params"]["category"]["cat_features"]["col_names"] = [
            "x1"]
        conf["train_info"]["train_params"]["num_bins"] = num_bins
        del conf["input"]["testset"]
        mocker.patch("service.fed_control._send_progress")
        mocker.patch.object(
            BroadcastChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "collect", return_value=[]
        )
        mocker.patch.object(
            BroadcastChannel, "broadcast", return_value=None
        )
        xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
        mocker.patch.object(
            xgb_label_trainer.channels["check_dataset_com"], "collect", return_value=[]
        )
        mocker.patch.object(
            xgb_label_trainer.channels["sync"], "collect", return_value=[{}]
        )
        mocker.patch.object(
            FedNode, "config", return_value={"trainer": {}}
        )
        xgb_label_trainer.fit()
        self.check_label_trainer_output(tmp_factory)
    def test_trainer(self, mocker, tmp_factory):
        """Run the host-side trainer end to end with the label trainer mocked.

        BroadcastChannel.recv is scripted by call order: first the synced
        training config, then the public Paillier context; grad/hess and
        tree-node traffic is served by dedicated channel mocks below.
        """
        # load default config
        with open("python/algorithm/config/vertical_xgboost/trainer.json") as f:
            conf = json.load(f)
        conf["input"]["trainset"][0]["path"] = str(tmp_factory)
        conf["input"]["trainset"][0]["name"] = "train_host.csv"
        conf["input"]["valset"][0]["path"] = str(tmp_factory)
        conf["input"]["valset"][0]["name"] = "test_host.csv"
        conf["output"]["path"] = str(tmp_factory)
        # if conf["train_info"]["train_params"]["downsampling"]["row"]["run_goss"]:
        #     conf["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] = 0.5
        #     conf["train_info"]["train_params"]["downsampling"]["row"]["other_rate"] = 0.5
        del conf["input"]["testset"]
        # mocker channels in VerticalXgboostTrainer.__init__
        mocker.patch.object(
            DualChannel, "__init__", return_value=None
        )
        mocker.patch.object(
            BroadcastChannel, "send", return_value=None
        )
        mocker.patch.object(
            DualChannel, "send", return_value=None
        )
        def mock_func(*args, **kwargs):
            """
            Scripted BroadcastChannel.recv: the synced config on the first
            call, the serialized public Paillier context on the second.
            Args:
                *args:
                **kwargs:
            Returns:
                the paillier context
            """
            config = {
                "train_info": {
                    "interaction_params": {
                        "save_frequency": -1,
                        "echo_training_metrics": True,
                        "write_training_prediction": True,
                        "write_validation_prediction": True
                    },
                    "train_params": {
                        "lossfunc": {
                            "BCEWithLogitsLoss": {}
                        },
                        "num_trees": 10,
                        "num_bins": 16,
                        "downsampling": {
                            "row": {
                                "run_goss": True
                            }
                        },
                        "encryption": {
                            "paillier": {
                                "key_bit_size": 2048,
                                "precision": 7,
                                "djn_on": True,
                                "parallelize_on": True
                            }
                        },
                        "batch_size_val": 40960
                    }
                }
            }
            if mock_broadcast_recv.call_count == 1:
                return config
            elif mock_broadcast_recv.call_count == 2:
                encryption = config["train_info"]["train_params"]["encryption"]
                if "paillier" in encryption:
                    encryption = encryption["paillier"]
                    private_context = Paillier.context(
                        encryption["key_bit_size"], encryption["djn_on"])
                    return private_context.to_public().serialize()
                else:
                    return None
        mocker.patch.object(
            BroadcastChannel, "__init__", return_value=None
        )
        mock_broadcast_recv = mocker.patch.object(
            BroadcastChannel, "recv", side_effect=mock_func
        )
        xgb_trainer = VerticalXgboostTrainer(conf)
        # mock for iters
        private_context = Paillier.context(2048, True)
        public_context = private_context.to_public()
        xgb_trainer.public_context = public_context
        def mock_grad_hess(*args, **kwargs):
            """
            mock the grad and hess calculation in the label trainer.
            Args:
                *args:
                **kwargs:
            Returns:
                paillier encrypted grad and hess vec
            """
            y = np.array([0, 1] * 60)
            y_pred = np.array([0.5] * 120)
            loss_inst = get_xgb_loss_inst("BCEWithLogitsLoss")
            grad = loss_inst.cal_grad(y, y_pred, after_prediction=True)
            hess = loss_inst.cal_hess(y, y_pred, after_prediction=True)
            grad_hess = embed([grad, hess], interval=(1 << 128), precision=64)
            # NOTE(review): num_cores=999 presumably just requests the
            # maximum available worker pool — confirm against Paillier docs.
            enc_grad_hess = Paillier.encrypt(context=private_context,
                                             data=grad_hess,
                                             precision=0,  # must be 0
                                             obfuscation=True,
                                             num_cores=999)
            return Paillier.serialize(enc_grad_hess, compression=False)
        def mock_node(*args, **kwargs):
            """
            mock the node passing to the trainer
            Args:
                *args:
                **kwargs:
            Returns:
                an empty None
            """
            if node_mocker.call_count <= 1:
                return Node(id="mock_id")
            else:
                return None
        # mock results from the label trainer according to difference channels
        mocker.patch.object(
            xgb_trainer.channels["individual_grad_hess"], "recv", side_effect=mock_grad_hess
        )
        node_mocker = mocker.patch.object(
            xgb_trainer.channels["tree_node"], "recv", side_effect=mock_node
        )
        mocker.patch.object(
            xgb_trainer.channels["min_split_info"], "recv", return_value=[-1, -1, -1]
        )
        mocker.patch.object(
            xgb_trainer.channels["restart_com"], "recv", return_value=0
        )
        mocker.patch.object(
            xgb_trainer.channels["early_stop_com"], "recv", return_value=False
        )
        xgb_trainer.fit()
        self.check_trainer_output(tmp_factory)
def test_label_trainer(self, mocker, tmp_factory):
with open("python/algorithm/config/vertical_xgboost/label_trainer.json") as f:
conf = json.load(f)
conf["input"]["trainset"][0]["path"] = str(tmp_factory)
conf["input"]["trainset"][0]["name"] = "train_guest.csv"
conf["input"]["valset"][0]["path"] = str(tmp_factory)
conf["input"]["valset"][0]["name"] = "test_guest.csv"
conf["output"]["path"] = str(tmp_factory)
del conf["input"]["testset"]
mocker.patch("service.fed_control._send_progress")
mocker.patch.object(
BroadcastChannel, "__init__", return_value=None
)
mocker.patch.object(
BroadcastChannel, "broadcast", return_value=None
)
mocker.patch.object(
BroadcastChannel, "collect", return_value=[{}]
)
xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
mocker.patch.object(
xgb_label_trainer.channels["check_dataset_com"], "collect", return_value=[]
)
mocker.patch.object(
FedNode, "config", return_value={"trainer": {}}
)
xgb_label_trainer.fit()
self.check_label_trainer_output(tmp_factory)
# cover dual channel created in: VerticalXgboostLabelTrainer.__init__
mocker.patch.object(
FedConfig, "get_trainer", return_value=["node_id"]
)
mocker.patch.object(
DualChannel, "__init__", return_value=None
)
VerticalXgboostLabelTrainer(conf)
@staticmethod
def check_label_trainer_output(tmp_factory):
# 检查是否正确输出了预测值文件
assert os.path.exists(tmp_factory.join(
"xgb_prediction_train_[STAGE_ID].csv"))
assert os.path.exists(tmp_factory.join(
"xgb_prediction_val_[STAGE_ID].csv"))
# 检查是否正确输出了模型文件
assert os.path.exists(tmp_factory.join(
"vertical_xgboost_[STAGE_ID].model"))
# 检查是否正确输出了model config
assert os.path.exists(tmp_factory.join("model_config.json"))
with open(tmp_factory.join("model_config.json")) as f:
model_config = json.load(f)
assert model_config[0]["class_name"] == "VerticalXGBooster"
assert model_config[0]["filename"] == "vertical_xgboost_[STAGE_ID].pmodel"
# 检查是否正确输出了feature importance文件
assert os.path.exists(tmp_factory.join(
"xgb_feature_importance_[STAGE_ID].csv"))
@staticmethod
def check_trainer_output(tmp_factory):
# 检查是否正确输出了模型文件
assert os.path.exists(tmp_factory.join(
"vertical_xgboost_[STAGE_ID].pmodel"))
# 检查是否正确输出了model config
assert os.path.exists(tmp_factory.join("model_config.json"))
with open(tmp_factory.join("model_config.json")) as f:
model_config = json.load(f)
assert model_config[0]["class_name"] == "VerticalXGBooster"
assert model_config[0]["filename"] == "vertical_xgboost_[STAGE_ID].pmodel"
def test_predict_label_trainer(self, get_label_trainer_infer_conf, mocker, tmp_factory):
mocker.patch.object(
DualChannel, "__init__", return_value=None
)
def mock_collect(*args, **kwargs):
if collect_mocker.call_count == 2:
return [{"test": (80, 2)}]
else:
return {}
mocker.patch.object(
ApplyResult, "get", return_value={"0_4lN0P7QTwWq25Eei": np.array([1] * 50 + [0] * 30),
"0_gw94EBW5tiD8kCqG": np.array([1] * 25 + [0] * 55),
"0_vpKZWumTxYcojXLq": np.array([1] * 75 + [0] * 5)}
)
mocker.patch.object(
BroadcastChannel, "broadcast", return_value=None
)
collect_mocker = mocker.patch.object(
BroadcastChannel, "collect", side_effect=mock_collect
)
mocker.patch.object(
BroadcastChannel, "scatter", return_value=None
)
mocker.patch.object(
service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
)
mocker.patch.object(
service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
)
xgb_label_trainer = VerticalXgboostLabelTrainer(
get_label_trainer_infer_conf)
xgb_label_trainer.predict()
df = pd.read_csv(tmp_factory.join("predicted_probabilities_train.csv"))
assert (df["pred"] > 0.5).sum() == 50
def test_predict_empty_testset(self, get_label_trainer_infer_conf, mocker, tmp_factory):
conf = copy.deepcopy(get_label_trainer_infer_conf)
del conf["input"]["testset"]
mocker.patch.object(
DualChannel, "__init__", return_value=None
)
def mock_collect(*args, **kwargs):
if collect_mocker.call_count == 2:
return [{"test": (80, 2)}]
else:
return {}
mocker.patch.object(
ApplyResult, "get", return_value={"0_4lN0P7QTwWq25Eei": np.array([1] * 50 + [0] * 30),
"0_gw94EBW5tiD8kCqG": np.array([1] * 25 + [0] * 55),
"0_vpKZWumTxYcojXLq": np.array([1] * 75 + [0] * 5)}
)
mocker.patch.object(
BroadcastChannel, "broadcast", return_value=None
)
collect_mocker = mocker.patch.object(
BroadcastChannel, "collect", side_effect=mock_collect
)
mocker.patch.object(
BroadcastChannel, "scatter", return_value=None
)
mocker.patch.object(
service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
)
mocker.patch.object(
service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
)
xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
xgb_label_trainer.predict()
df = pd.read_csv(tmp_factory.join("predicted_probabilities_train.csv"))
assert df.shape == (80, 2)
def test_predict_trainer(self, get_trainer_infer_conf, mocker, tmp_factory):
mocker.patch.object(
DualChannel, "__init__", return_value=None
)
mocker.patch.object(
DualChannel, "send", return_value=0
)
mocker.patch.object(
BroadcastChannel, "send", return_value=0
)
def mock_func(*args, **kwargs):
config = {
"train_info": {
"train_params": {
"lossfunc": {
"BCEWithLogitsLoss": {}
},
"batch_size_val": 40960
}
}
}
return config
mocker.patch.object(
BroadcastChannel, "recv", side_effect=mock_func
)
mocker.patch.object(
service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
)
mocker.patch.object(
service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
)
xgb_trainer = VerticalXgboostTrainer(get_trainer_infer_conf)
xgb_trainer.predict()
| 24,834 | 36.402108 | 117 | py |
XFL | XFL-master/test/algorithm/framework/vertical/test_xgb2.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import pytest
import json
from multiprocess.pool import ApplyResult
import pandas as pd
import numpy as np
import service.fed_config
from service.fed_config import FedConfig
from service.fed_node import FedNode
from algorithm.core.paillier_acceleration import embed
from algorithm.core.tree.xgboost_loss import get_xgb_loss_inst
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.communication.gRPC.python.commu import Commu
from common.crypto.paillier.paillier import Paillier
from algorithm.core.tree.tree_structure import Node
from algorithm.framework.vertical.xgboost.label_trainer import VerticalXgboostLabelTrainer
from algorithm.framework.vertical.xgboost.trainer import VerticalXgboostTrainer
from algorithm.framework.vertical.xgboost.decision_tree_label_trainer import VerticalDecisionTreeLabelTrainer
from algorithm.framework.vertical.xgboost.decision_tree_trainer import VerticalDecisionTreeTrainer
@pytest.fixture(scope='module', autouse=True)
def prepare_data(tmp_factory):
df = pd.DataFrame({
"x0": np.random.random(200),
# np.round(np.random.random(200) * 10.0),
"x1": np.random.randint(0, 10, 200),
"x2": np.random.uniform(200) * 2.0,
"x3": np.random.random(200) * 3.0,
"x4": np.random.randint(0, 10, 200), # np.arange(0, 200, 1),
'y': np.round(np.random.random(200))
})
df[['y', 'x0', 'x1', 'x2']].head(120).to_csv(
tmp_factory.join("train_guest.csv"), index=True, index_label='id'
)
df[['y', 'x0', 'x1', 'x2']].tail(80).to_csv(
tmp_factory.join("test_guest.csv"), index=True, index_label='id'
)
df[['x3', 'x4']].head(120).to_csv(
tmp_factory.join("train_host.csv"), index=True, index_label='id'
)
df[['x3', 'x4']].tail(80).to_csv(
tmp_factory.join("test_host.csv"), index=True, index_label='id'
)
Commu.node_id = "node-1"
FedNode.node_id = "node-1"
FedNode.config = {"trainer": []}
Commu.trainer_ids = ['node-1', 'node-2']
class TestVerticalXGBoost:
@pytest.mark.parametrize('feature_index', [(1), (0)])
def test_decision_tree_trainer(self, mocker, tmp_factory, feature_index):
with open("python/algorithm/config/vertical_xgboost/trainer.json") as f:
conf = json.load(f)
conf["input"]["trainset"][0]["path"] = str(tmp_factory)
conf["input"]["trainset"][0]["name"] = "train_host.csv"
conf["input"]["valset"][0]["path"] = str(tmp_factory)
conf["input"]["valset"][0]["name"] = "test_host.csv"
del conf["input"]["testset"]
conf["output"]["path"] = str(tmp_factory)
# if conf["train_info"]["train_params"]["downsampling"]["row"]["run_goss"]:
# conf["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] = 0.5
# conf["train_info"]["train_params"]["downsampling"]["row"]["other_rate"] = 0.5
conf["train_info"]["train_params"]["category"]["cat_features"]["col_index"] = "1"
conf["train_info"]["train_params"]["advanced"]["col_batch"] = 1
conf["train_info"]["train_params"]["advanced"]["row_batch"] = 1
# mocker channels in VerticalXgboostTrainer.__init__
mocker.patch.object(
DualChannel, "__init__", return_value=None
)
mocker.patch.object(
BroadcastChannel, "send", return_value=None
)
mocker.patch.object(
DualChannel, "send", return_value=None
)
def mock_func(*args, **kwargs):
"""
mock encryption keys
Args:
*args:
**kwargs:
Returns:
the paillier context
"""
config = {
"train_info": {
"interaction_params": {
"save_frequency": -1,
"echo_training_metrics": True,
"write_training_prediction": True,
"write_validation_prediction": True
},
"train_params": {
"lossfunc": {
"BCEWithLogitsLoss": {}
},
"num_trees": 10,
"num_bins": 16,
"downsampling": {
"row": {
"run_goss": True
}
},
"encryption": {
"paillier": {
"key_bit_size": 2048,
"precision": 7,
"djn_on": True,
"parallelize_on": True
}
},
"batch_size_val": 40960
}
}
}
if mock_broadcast_recv.call_count == 1:
return config
elif mock_broadcast_recv.call_count == 2:
encryption = config["train_info"]["train_params"]["encryption"]
if "paillier" in encryption:
encryption = encryption["paillier"]
private_context = Paillier.context(
encryption["key_bit_size"], encryption["djn_on"])
return private_context.to_public().serialize()
else:
return None
mock_broadcast_recv = mocker.patch.object(
BroadcastChannel, "recv", side_effect=mock_func
)
mocker.patch.object(
service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
)
mocker.patch.object(
service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
)
xgb_trainer = VerticalXgboostTrainer(conf)
sampled_features, feature_id_mapping = xgb_trainer.col_sample()
cat_columns_after_sampling = list(filter(
lambda x: feature_id_mapping[x] in xgb_trainer.cat_columns, list(feature_id_mapping.keys())))
split_points_after_sampling = [
xgb_trainer.split_points[feature_id_mapping[k]] for k in feature_id_mapping.keys()]
sample_index = [2, 4, 6, 7, 8, 10]
def mock_grad_hess(*args, **kwargs):
private_context = Paillier.context(xgb_trainer.xgb_config.encryption_param.key_bit_size,
xgb_trainer.xgb_config.encryption_param.djn_on)
# grad = np.random.random(xgb_trainer.xgb_config.num_bins)
# hess = np.random.random(xgb_trainer.xgb_config.num_bins)
grad = np.random.random(len(sample_index))
hess = np.random.random(len(sample_index))
grad_hess = embed([grad, hess], interval=(1 << 128), precision=64)
enc_grad_hess = Paillier.encrypt(private_context,
data=grad_hess,
precision=0, # must be 0
obfuscation=True,
num_cores=999)
return Paillier.serialize(enc_grad_hess, compression=False)
mocker.patch.object(
BroadcastChannel, "recv", side_effect=mock_grad_hess
)
decision_tree = VerticalDecisionTreeTrainer(tree_param=xgb_trainer.xgb_config,
features=sampled_features,
cat_columns=cat_columns_after_sampling,
split_points=split_points_after_sampling,
channels=xgb_trainer.channels,
encryption_context=xgb_trainer.public_context,
feature_id_mapping=feature_id_mapping,
tree_index=0)
def mock_node(*args, **kwargs):
"""
mock the node passing to the trainer
Args:
*args:
**kwargs:
Returns:
an empty None
"""
if node_mocker.call_count == 1:
return Node(id="mock_id",
depth=1,
sample_index=sample_index,
)
elif node_mocker.call_count == 2:
return Node(id="mock_id_2",
depth=1,
sample_index=sample_index,
)
else:
return None
node_mocker = mocker.patch.object(
decision_tree.tree_node_chann, "recv", side_effect=mock_node
)
mocker.patch.object(
decision_tree.min_split_info_chann, "recv", return_value=[feature_index, 0, [0]]
)
decision_tree.fit()
@pytest.mark.parametrize('feature_index', [(1), (0)])
def test_decision_tree_trainer_plain(self, mocker, tmp_factory, feature_index):
with open("python/algorithm/config/vertical_xgboost/trainer.json") as f:
conf = json.load(f)
conf["input"]["trainset"][0]["path"] = str(tmp_factory)
conf["input"]["trainset"][0]["name"] = "train_host.csv"
conf["input"]["valset"][0]["path"] = str(tmp_factory)
conf["input"]["valset"][0]["name"] = "test_host.csv"
del conf["input"]["testset"]
conf["output"]["path"] = str(tmp_factory)
# if conf["train_info"]["train_params"]["downsampling"]["row"]["run_goss"]:
# conf["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] = 0.5
# conf["train_info"]["train_params"]["downsampling"]["row"]["other_rate"] = 0.5
conf["train_info"]["train_params"]["category"]["cat_features"]["col_index"] = "1"
# mocker channels in VerticalXgboostTrainer.__init__
mocker.patch.object(
DualChannel, "__init__", return_value=None
)
mocker.patch.object(
BroadcastChannel, "send", return_value=None
)
mocker.patch.object(
DualChannel, "send", return_value=None
)
def mock_func(*args, **kwargs):
"""
mock encryption keys
Args:
*args:
**kwargs:
Returns:
the paillier context
"""
config = {
"train_info": {
"interaction_params": {
"save_frequency": -1,
"echo_training_metrics": True,
"write_training_prediction": True,
"write_validation_prediction": True
},
"train_params": {
"lossfunc": {
"BCEWithLogitsLoss": {}
},
"num_trees": 10,
"num_bins": 16,
"downsampling": {
"row": {
"run_goss": True
}
},
"encryption": {
"plain": {
}
},
"batch_size_val": 40960
}
}
}
if mock_broadcast_recv.call_count == 1:
return config
elif mock_broadcast_recv.call_count == 2:
encryption = config["train_info"]["train_params"]["encryption"]
if "paillier" in encryption:
encryption = encryption["paillier"]
private_context = Paillier.context(
encryption["key_bit_size"], encryption["djn_on"])
return private_context.to_public().serialize()
else:
return None
mock_broadcast_recv = mocker.patch.object(
BroadcastChannel, "recv", side_effect=mock_func
)
mocker.patch.object(
service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
)
mocker.patch.object(
service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
)
xgb_trainer = VerticalXgboostTrainer(conf)
sampled_features, feature_id_mapping = xgb_trainer.col_sample()
cat_columns_after_sampling = list(filter(
lambda x: feature_id_mapping[x] in xgb_trainer.cat_columns, list(feature_id_mapping.keys())))
split_points_after_sampling = [
xgb_trainer.split_points[feature_id_mapping[k]] for k in feature_id_mapping.keys()]
sample_index = [2, 4, 6, 7, 8, 10]
def mock_grad_hess(*args, **kwargs):
grad = np.random.random(len(sample_index))
hess = np.random.random(len(sample_index))
return [grad, hess]
mocker.patch.object(
BroadcastChannel, "recv", side_effect=mock_grad_hess
)
decision_tree = VerticalDecisionTreeTrainer(tree_param=xgb_trainer.xgb_config,
features=sampled_features,
cat_columns=cat_columns_after_sampling,
split_points=split_points_after_sampling,
channels=xgb_trainer.channels,
encryption_context=xgb_trainer.public_context,
feature_id_mapping=feature_id_mapping,
tree_index=0)
def mock_node(*args, **kwargs):
"""
mock the node passing to the trainer
Args:
*args:
**kwargs:
Returns:
an empty None
"""
if node_mocker.call_count == 1:
return Node(id="mock_id",
depth=1,
sample_index=sample_index,
)
elif node_mocker.call_count == 2:
return Node(id="mock_id_2",
depth=1,
sample_index=sample_index,
)
else:
return None
node_mocker = mocker.patch.object(
decision_tree.tree_node_chann, "recv", side_effect=mock_node
)
mocker.patch.object(
decision_tree.min_split_info_chann, "recv", return_value=[feature_index, 0, [0]]
)
decision_tree.fit()
def test_decision_tree_trainer_exception(self, mocker, tmp_factory):
with open("python/algorithm/config/vertical_xgboost/trainer.json") as f:
conf = json.load(f)
conf["input"]["trainset"][0]["path"] = str(tmp_factory)
conf["input"]["trainset"][0]["name"] = "train_host.csv"
conf["input"]["valset"][0]["path"] = str(tmp_factory)
conf["input"]["valset"][0]["name"] = "test_host.csv"
del conf["input"]["testset"]
conf["output"]["path"] = str(tmp_factory)
# if conf["train_info"]["train_params"]["downsampling"]["row"]["run_goss"]:
# conf["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] = 0.5
# conf["train_info"]["train_params"]["downsampling"]["row"]["other_rate"] = 0.5
conf["train_info"]["train_params"]["category"]["cat_features"]["col_index"] = "1"
# mocker channels in VerticalXgboostTrainer.__init__
mocker.patch.object(
DualChannel, "__init__", return_value=None
)
mocker.patch.object(
BroadcastChannel, "send", return_value=None
)
mocker.patch.object(
DualChannel, "send", return_value=None
)
def mock_func(*args, **kwargs):
"""
mock encryption keys
Args:
*args:
**kwargs:
Returns:
the paillier context
"""
config = {
"train_info": {
"interaction_params": {
"save_frequency": -1,
"echo_training_metrics": True,
"write_training_prediction": True,
"write_validation_prediction": True
},
"train_params": {
"lossfunc": {
"BCEWithLogitsLoss": {}
},
"num_trees": 10,
"num_bins": 16,
"downsampling": {
"row": {
"run_goss": True
}
},
"encryption": {
"plain": {
}
},
"batch_size_val": 40960
}
}
}
if mock_broadcast_recv.call_count == 1:
return config
elif mock_broadcast_recv.call_count == 2:
encryption = config["train_info"]["train_params"]["encryption"]
if "paillier" in encryption:
encryption = encryption["paillier"]
private_context = Paillier.context(
encryption["key_bit_size"], encryption["djn_on"])
return private_context.to_public().serialize()
else:
return None
mock_broadcast_recv = mocker.patch.object(
BroadcastChannel, "recv", side_effect=mock_func
)
mocker.patch.object(
service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
)
mocker.patch.object(
service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
)
xgb_trainer = VerticalXgboostTrainer(conf)
sampled_features, feature_id_mapping = xgb_trainer.col_sample()
cat_columns_after_sampling = list(filter(
lambda x: feature_id_mapping[x] in xgb_trainer.cat_columns, list(feature_id_mapping.keys())))
split_points_after_sampling = [
xgb_trainer.split_points[feature_id_mapping[k]] for k in feature_id_mapping.keys()]
with pytest.raises(ValueError):
xgb_trainer.xgb_config.encryption_param.method = 'palin'
decision_tree = VerticalDecisionTreeTrainer(tree_param=xgb_trainer.xgb_config,
features=sampled_features,
cat_columns=cat_columns_after_sampling,
split_points=split_points_after_sampling,
channels=xgb_trainer.channels,
encryption_context=xgb_trainer.public_context,
feature_id_mapping=feature_id_mapping,
tree_index=0)
@pytest.mark.parametrize('run_goss, encryption_method', [(True, "paillier"), (False, "plain")])
def test_decision_tree_label_trainer(self, mocker, tmp_factory, run_goss, encryption_method):
with open("python/algorithm/config/vertical_xgboost/label_trainer.json") as f:
conf = json.load(f)
conf["input"]["trainset"][0]["path"] = str(tmp_factory)
conf["input"]["trainset"][0]["name"] = "train_guest.csv"
conf["input"]["valset"][0]["path"] = str(tmp_factory)
conf["input"]["valset"][0]["name"] = "test_guest.csv"
conf["output"]["path"] = str(tmp_factory)
conf["train_info"]["train_params"]["downsampling"]["row"]["run_goss"] = run_goss
if encryption_method == "plain":
conf["train_info"]["train_params"]["encryption"] = {"plain": {}}
del conf["input"]["testset"]
mocker.patch("service.fed_control._send_progress")
mocker.patch.object(
BroadcastChannel, "__init__", return_value=None
)
mocker.patch.object(
BroadcastChannel, "broadcast", return_value=None
)
xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
train_y_pred = np.zeros_like(xgb_label_trainer.train_label) + 0.5
sampled_features, feature_id_mapping = xgb_label_trainer.col_sample()
cat_columns_after_sampling = list(filter(
lambda x: feature_id_mapping[x] in xgb_label_trainer.cat_columns, list(feature_id_mapping.keys())))
split_points_after_sampling = [
xgb_label_trainer.split_points[feature_id_mapping[k]] for k in feature_id_mapping.keys()]
decision_tree = VerticalDecisionTreeLabelTrainer(tree_param=xgb_label_trainer.xgb_config,
y=xgb_label_trainer.train_label,
y_pred=train_y_pred,
features=sampled_features,
cat_columns=cat_columns_after_sampling,
split_points=split_points_after_sampling,
channels=xgb_label_trainer.channels,
encryption_context=xgb_label_trainer.private_context,
feature_id_mapping=feature_id_mapping,
tree_index=0)
mocker_grad_hess = mocker.patch.object(
DualChannel, "__init__", return_value=None
)
mocker_grad_hess = mocker.patch.object(
DualChannel, "send", return_value=None
)
decision_tree.summed_grad_hess_channs = {
"node-2": DualChannel(name="summed_grad_hess_node-2")}
decision_tree.sample_index_after_split_channs = {
"node-2": DualChannel(name="sample_index_after_split_node-2")}
def mock_grad_hess(*args, **kwargs):
grad = np.random.random(xgb_label_trainer.xgb_config.num_bins)
hess = np.random.random(xgb_label_trainer.xgb_config.num_bins)
if encryption_method == "plain":
if mocker_grad_hess.call_count > 1:
return False, [(grad, hess, np.random.randint(1, 10, xgb_label_trainer.xgb_config.num_bins))], [0]
else:
return True, [(grad, hess, np.random.randint(1, 10, xgb_label_trainer.xgb_config.num_bins))], [0]
grad_hess = embed([grad, hess], interval=(1 << 128), precision=64)
grad_hess_enc = Paillier.encrypt(xgb_label_trainer.private_context,
data=grad_hess,
precision=0, # must be 0
obfuscation=True,
num_cores=999)
grad_hess_hist_list = []
remote_cat_index = []
grad_hess_hist_list.append(
(grad_hess_enc, [xgb_label_trainer.xgb_config.num_bins]))
if mocker_grad_hess.call_count > 1:
return False, grad_hess_hist_list, remote_cat_index
else:
return True, grad_hess_hist_list, remote_cat_index
mocker_grad_hess = mocker.patch.object(
decision_tree.summed_grad_hess_channs["node-2"], "recv", side_effect=mock_grad_hess
)
mocker.patch.object(
decision_tree.sample_index_after_split_channs["node-2"],
"recv",
return_value=[range(len(decision_tree.y) // 2), [range(len(decision_tree.y) // 2, len(decision_tree.y))]]
)
decision_tree.fit()
with pytest.raises(ValueError):
xgb_label_trainer.xgb_config.encryption_param.method = 'palin'
decision_tree = VerticalDecisionTreeLabelTrainer(tree_param=xgb_label_trainer.xgb_config,
y=xgb_label_trainer.train_label,
y_pred=train_y_pred,
features=sampled_features,
cat_columns=cat_columns_after_sampling,
split_points=split_points_after_sampling,
channels=xgb_label_trainer.channels,
encryption_context=xgb_label_trainer.private_context,
feature_id_mapping=feature_id_mapping,
tree_index=0)
# def test_decision_tree_label_trainer(self, mocker, tmp_factory):
# with open("python/algorithm/config/vertical_xgboost/label_trainer.json") as f:
# conf = json.load(f)
# conf["input"]["trainset"][0]["path"] = str(tmp_factory)
# conf["input"]["trainset"][0]["name"] = "train_guest.csv"
# conf["input"]["valset"][0]["path"] = str(tmp_factory)
# conf["input"]["valset"][0]["name"] = "test_guest.csv"
# conf["output"]["path"] = str(tmp_factory)
# del conf["input"]["testset"]
# mocker.patch.object(
# BroadcastChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# BroadcastChannel, "broadcast", return_value=None
# )
# xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
# train_y_pred = np.zeros_like(xgb_label_trainer.train_label) + 0.5
# sampled_features, feature_id_mapping = xgb_label_trainer.col_sample()
# cat_columns_after_sampling = list(filter(
# lambda x: feature_id_mapping[x] in xgb_label_trainer.cat_columns, list(feature_id_mapping.keys())))
# split_points_after_sampling = [
# xgb_label_trainer.split_points[feature_id_mapping[k]] for k in feature_id_mapping.keys()]
# decision_tree = VerticalDecisionTreeLabelTrainer(tree_param=xgb_label_trainer.xgb_config,
# y=xgb_label_trainer.train_label,
# y_pred=train_y_pred,
# features=sampled_features,
# cat_columns=cat_columns_after_sampling,
# split_points=split_points_after_sampling,
# channels=xgb_label_trainer.channels,
# encryption_context=xgb_label_trainer.private_context,
# feature_id_mapping=feature_id_mapping,
# tree_index=0)
# mocker_grad_hess = mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# decision_tree.summed_grad_hess_channs = {
# "node-2": DualChannel(name="summed_grad_hess_node-2")}
# def mock_grad_hess(*args, **kwargs):
# grad = np.random.random(xgb_label_trainer.xgb_config.num_bins)
# hess = np.random.random(xgb_label_trainer.xgb_config.num_bins)
# grad_hess = embed([grad, hess], interval=(1 << 128), precision=64)
# grad_hess_enc = Paillier.encrypt(xgb_label_trainer.private_context,
# data=grad_hess,
# precision=0, # must be 0
# obfuscation=True,
# num_cores=999)
# grad_hess_hist_list = []
# remote_cat_index = []
# grad_hess_hist_list.append(
# (grad_hess_enc, [xgb_label_trainer.xgb_config.num_bins]))
# if mocker_grad_hess.call_count > 1:
# return False, grad_hess_hist_list, remote_cat_index
# else:
# return True, grad_hess_hist_list, remote_cat_index
# mocker_grad_hess = mocker.patch.object(
# decision_tree.summed_grad_hess_channs["node-2"], "recv", side_effect=mock_grad_hess
# )
# decision_tree.fit()
# def test_trainer(self, mocker, tmp_factory):
# # load default config
# with open("python/algorithm/config/vertical_xgboost/trainer.json") as f:
# conf = json.load(f)
# conf["input"]["trainset"][0]["path"] = str(tmp_factory)
# conf["input"]["trainset"][0]["name"] = "train_host.csv"
# conf["input"]["valset"][0]["path"] = str(tmp_factory)
# conf["input"]["valset"][0]["name"] = "test_host.csv"
# conf["output"]["path"] = str(tmp_factory)
# # if conf["train_info"]["train_params"]["downsampling"]["row"]["run_goss"]:
# # conf["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] = 0.5
# # conf["train_info"]["train_params"]["downsampling"]["row"]["other_rate"] = 0.5
# del conf["input"]["testset"]
# # mocker channels in VerticalXgboostTrainer.__init__
# mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# BroadcastChannel, "send", return_value=None
# )
# mocker.patch.object(
# DualChannel, "send", return_value=None
# )
# def mock_func(*args, **kwargs):
# """
# mock encryption keys
# Args:
# *args:
# **kwargs:
# Returns:
# the paillier context
# """
# config = {
# "train_info": {
# "interaction_params": {
# "save_frequency": -1,
# "echo_training_metrics": True,
# "write_training_prediction": True,
# "write_validation_prediction": True
# },
# "train_params": {
# "lossfunc": {
# "BCEWithLogitsLoss": {}
# },
# "num_trees": 10,
# "num_bins": 16,
# "downsampling": {
# "row": {
# "run_goss": True
# }
# },
# "encryption": {
# "paillier": {
# "key_bit_size": 2048,
# "precision": 7,
# "djn_on": True,
# "parallelize_on": True
# }
# },
# "batch_size_val": 40960
# }
# }
# }
# if mock_broadcast_recv.call_count == 1:
# return config
# elif mock_broadcast_recv.call_count == 2:
# encryption = config["train_info"]["train_params"]["encryption"]
# if "paillier" in encryption:
# encryption = encryption["paillier"]
# private_context = Paillier.context(
# encryption["key_bit_size"], encryption["djn_on"])
# return private_context.to_public().serialize()
# else:
# return None
# mocker.patch.object(
# BroadcastChannel, "__init__", return_value=None
# )
# mock_broadcast_recv = mocker.patch.object(
# BroadcastChannel, "recv", side_effect=mock_func
# )
# xgb_trainer = VerticalXgboostTrainer(conf)
# # mock for iters
# private_context = Paillier.context(2048, True)
# public_context = private_context.to_public()
# xgb_trainer.public_context = public_context
# def mock_grad_hess(*args, **kwargs):
# """
# mock the grad and hess calculation in the label trainer.
# Args:
# *args:
# **kwargs:
# Returns:
# paillier encrypted grad and hess vec
# """
# y = np.array([0, 1] * 60)
# y_pred = np.array([0.5] * 120)
# loss_inst = get_xgb_loss_inst("BCEWithLogitsLoss")
# grad = loss_inst.cal_grad(y, y_pred, after_prediction=True)
# hess = loss_inst.cal_hess(y, y_pred, after_prediction=True)
# grad_hess = embed([grad, hess], interval=(1 << 128), precision=64)
# enc_grad_hess = Paillier.encrypt(context=private_context,
# data=grad_hess,
# precision=0, # must be 0
# obfuscation=True,
# num_cores=999)
# return Paillier.serialize(enc_grad_hess, compression=False)
# def mock_node(*args, **kwargs):
# """
# mock the node passing to the trainer
# Args:
# *args:
# **kwargs:
# Returns:
# an empty None
# """
# if node_mocker.call_count <= 1:
# return Node(id="mock_id")
# else:
# return None
# # mock results from the label trainer according to difference channels
# mocker.patch.object(
# xgb_trainer.channels["individual_grad_hess"], "recv", side_effect=mock_grad_hess
# )
# node_mocker = mocker.patch.object(
# xgb_trainer.channels["tree_node"], "recv", side_effect=mock_node
# )
# mocker.patch.object(
# xgb_trainer.channels["min_split_info"], "recv", return_value=[-1, -1, -1]
# )
# mocker.patch.object(
# xgb_trainer.channels["restart_com"], "recv", return_value=0
# )
# mocker.patch.object(
# xgb_trainer.channels["early_stop_com"], "recv", return_value=False
# )
# xgb_trainer.fit()
# self.check_trainer_output(tmp_factory)
# def test_label_trainer(self, mocker, tmp_factory):
# with open("python/algorithm/config/vertical_xgboost/label_trainer.json") as f:
# conf = json.load(f)
# conf["input"]["trainset"][0]["path"] = str(tmp_factory)
# conf["input"]["trainset"][0]["name"] = "train_guest.csv"
# conf["input"]["valset"][0]["path"] = str(tmp_factory)
# conf["input"]["valset"][0]["name"] = "test_guest.csv"
# conf["output"]["path"] = str(tmp_factory)
# del conf["input"]["testset"]
# mocker.patch.object(
# BroadcastChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# BroadcastChannel, "broadcast", return_value=None
# )
# xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
# mocker.patch.object(
# xgb_label_trainer.channels["check_dataset_com"], "collect", return_value=[]
# )
# xgb_label_trainer.fit()
# self.check_label_trainer_output(tmp_factory)
# # cover dual channel created in: VerticalXgboostLabelTrainer.__init__
# mocker.patch.object(
# FedConfig, "get_trainer", return_value=["node_id"]
# )
# mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# VerticalXgboostLabelTrainer(conf)
# @staticmethod
# def check_label_trainer_output(tmp_factory):
# # 检查是否正确输出了预测值文件
# assert os.path.exists(tmp_factory.join(
# "xgb_prediction_train_[STAGE_ID].csv"))
# assert os.path.exists(tmp_factory.join(
# "xgb_prediction_val_[STAGE_ID].csv"))
# # 检查是否正确输出了模型文件
# assert os.path.exists(tmp_factory.join(
# "vertical_xgboost_[STAGE_ID].model"))
# # 检查是否正确输出了model config
# assert os.path.exists(tmp_factory.join("model_config.json"))
# with open(tmp_factory.join("model_config.json")) as f:
# model_config = json.load(f)
# assert model_config[0]["class_name"] == "VerticalXGBooster"
# assert model_config[0]["filename"] == "vertical_xgboost_[STAGE_ID].model"
# # 检查是否正确输出了feature importance文件
# assert os.path.exists(tmp_factory.join(
# "xgb_feature_importance_[STAGE_ID].csv"))
# @staticmethod
# def check_trainer_output(tmp_factory):
# # 检查是否正确输出了模型文件
# assert os.path.exists(tmp_factory.join(
# "vertical_xgboost_[STAGE_ID].model"))
# # 检查是否正确输出了model config
# assert os.path.exists(tmp_factory.join("model_config.json"))
# with open(tmp_factory.join("model_config.json")) as f:
# model_config = json.load(f)
# assert model_config[0]["class_name"] == "VerticalXGBooster"
# assert model_config[0]["filename"] == "vertical_xgboost_[STAGE_ID].model"
# def test_predict_label_trainer(self, get_label_trainer_infer_conf, mocker, tmp_factory):
# mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# ApplyResult, "get", return_value={"0_4lN0P7QTwWq25Eei": np.array([1] * 50 + [0] * 30),
# "0_gw94EBW5tiD8kCqG": np.array([1] * 25 + [0] * 55),
# "0_vpKZWumTxYcojXLq": np.array([1] * 75 + [0] * 5)}
# )
# mocker.patch.object(
# BroadcastChannel, "broadcast", return_value=None
# )
# mocker.patch.object(
# BroadcastChannel, "collect", return_value=[{"test": (80, 2)}]
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
# )
# xgb_label_trainer = VerticalXgboostLabelTrainer(
# get_label_trainer_infer_conf)
# xgb_label_trainer.predict()
# df = pd.read_csv(tmp_factory.join("predicted_probabilities_train.csv"))
# assert (df["pred"] > 0.5).sum() == 50
# def test_predict_empty_testset(self, get_label_trainer_infer_conf, mocker, tmp_factory):
# conf = copy.deepcopy(get_label_trainer_infer_conf)
# del conf["input"]["testset"]
# mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# ApplyResult, "get", return_value={"0_4lN0P7QTwWq25Eei": np.array([1] * 50 + [0] * 30),
# "0_gw94EBW5tiD8kCqG": np.array([1] * 25 + [0] * 55),
# "0_vpKZWumTxYcojXLq": np.array([1] * 75 + [0] * 5)}
# )
# mocker.patch.object(
# BroadcastChannel, "broadcast", return_value=None
# )
# mocker.patch.object(
# BroadcastChannel, "collect", return_value=[{"test": (80, 2)}]
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
# )
# xgb_label_trainer = VerticalXgboostLabelTrainer(conf)
# xgb_label_trainer.predict()
# df = pd.read_csv(tmp_factory.join("predicted_probabilities_train.csv"))
# assert df.shape == (80, 2)
# def test_predict_trainer(self, get_trainer_infer_conf, mocker, tmp_factory):
# mocker.patch.object(
# DualChannel, "__init__", return_value=None
# )
# mocker.patch.object(
# DualChannel, "send", return_value=0
# )
# mocker.patch.object(
# BroadcastChannel, "send", return_value=0
# )
# def mock_func(*args, **kwargs):
# config = {
# "train_info": {
# "train_params": {
# "lossfunc": {
# "BCEWithLogitsLoss": {}
# },
# "batch_size_val": 40960
# }
# }
# }
# return config
# mocker.patch.object(
# BroadcastChannel, "recv", side_effect=mock_func
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_label_trainer", return_value=["node-1"]
# )
# mocker.patch.object(
# service.fed_config.FedConfig, "get_trainer", return_value=["node-2"]
# )
# xgb_trainer = VerticalXgboostTrainer(get_trainer_infer_conf)
# xgb_trainer.predict()
| 43,089 | 43.560496 | 118 | py |
XFL | XFL-master/demo/horizontal/chatglm/chatglm-demo/quantization.py | from torch.nn import Linear
from torch.nn.parameter import Parameter
import bz2
import torch
import base64
import ctypes
from transformers.utils import logging
from typing import List
from functools import partial
logger = logging.get_logger(__name__)
try:
from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up
    class Kernel:
        """Wrapper around a lazily-compiled CUDA kernel module.

        Each name in ``function_names`` is exposed as an attribute of the
        instance (e.g. ``kernels.int4WeightCompression``) bound to the
        corresponding compiled kernel function.
        """

        def __init__(self, code: bytes, function_names: List[str]):
            self.code = code
            self._function_names = function_names
            # LazyKernelCModule defers actual compilation until first launch.
            self._cmodule = LazyKernelCModule(self.code)
            for name in self._function_names:
                # Expose every kernel entry point as an attribute.
                setattr(self, name, KernelFunction(self._cmodule, name))
quantization_code = "$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrd
NtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+te+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Ru
dm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYm
RhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyHpix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ"
kernels = Kernel(
bz2.decompress(base64.b64decode(quantization_code)),
[
"int4WeightCompression",
"int4WeightExtractionFloat",
"int4WeightExtractionHalf",
"int8WeightExtractionFloat",
"int8WeightExtractionHalf",
],
)
except Exception as exception:
kernels = None
logger.warning("Failed to load cpm_kernels:" + str(exception))
class W8A16Linear(torch.autograd.Function):
    """Matmul with a quantized (int4/int8) weight and half-precision activations.

    ``forward`` dequantizes the weight to fp16 and computes ``inp @ W.T``;
    ``backward`` dequantizes again instead of caching the fp16 copy, trading
    compute for memory. Gradients are returned for the input and for the
    dequantized weight; the scale and bit-width slots get ``None``.
    """

    @staticmethod
    def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):
        # Remember original shapes / bit-width so backward can restore them.
        ctx.inp_shape = inp.size()
        ctx.weight_bit_width = weight_bit_width
        out_features = quant_w.size(0)
        # Collapse all leading dims into one batch dim for the matmul.
        inp = inp.contiguous().view(-1, inp.size(-1))
        weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)
        ctx.weight_shape = weight.size()
        output = inp.mm(weight.t())
        # Save the flattened input and the *quantized* weight, not the fp16 copy.
        ctx.save_for_backward(inp, quant_w, scale_w)
        return output.view(*(ctx.inp_shape[:-1] + (out_features,)))

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        inp, quant_w, scale_w = ctx.saved_tensors
        # Re-dequantize rather than holding the fp16 weight across the graph.
        weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width)
        grad_output = grad_output.contiguous().view(-1, weight.size(0))
        grad_input = grad_output.mm(weight)
        grad_weight = grad_output.t().mm(inp)
        # grad_weight is w.r.t. the dequantized weight, returned in quant_w's slot.
        return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None
def compress_int4_weight(weight: torch.Tensor): # (n, m)
    """Pack an int8 tensor holding int4 values into half as many bytes.

    ``weight`` is (n, m) int8 where each element fits in 4 bits; the CUDA
    kernel packs two values per byte, returning an (n, m // 2) int8 tensor
    on the current CUDA device. Requires an even ``m``.
    """
    with torch.cuda.device(weight.device):
        n, m = weight.size(0), weight.size(1)
        assert m % 2 == 0
        m = m // 2
        out = torch.empty(n, m, dtype=torch.int8, device="cuda")
        stream = torch.cuda.current_stream()

        # One block row per output row; threads cover the packed columns.
        gridDim = (n, 1, 1)
        blockDim = (min(round_up(m, 32), 1024), 1, 1)

        kernels.int4WeightCompression(
            gridDim,
            blockDim,
            0,
            stream,
            [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)],
        )
        return out
def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int):
    """Dequantize an int8/int4 weight matrix into a half-precision tensor.

    ``weight`` is the quantized (n, m) int8 storage, ``scale_list`` the
    per-row scales; the result has ``m * (8 // source_bit_width)`` columns
    on the current CUDA device. Only bit widths 8 and 4 are supported.
    """
    if source_bit_width == 8:
        kernel = kernels.int8WeightExtractionHalf
    elif source_bit_width == 4:
        kernel = kernels.int4WeightExtractionHalf
    else:
        assert False, "Unsupported bit-width"

    with torch.cuda.device(weight.device):
        rows, cols = weight.size(0), weight.size(1)
        # Each stored element expands to (8 // bit_width) half values.
        result = torch.empty(rows, cols * (8 // source_bit_width), dtype=torch.half, device="cuda")
        stream = torch.cuda.current_stream()
        grid = (rows, 1, 1)
        block = (min(round_up(cols, 32), 1024), 1, 1)
        kernel(
            grid,
            block,
            0,
            stream,
            [
                ctypes.c_void_p(weight.data_ptr()),
                ctypes.c_void_p(scale_list.data_ptr()),
                ctypes.c_void_p(result.data_ptr()),
                ctypes.c_int32(rows),
                ctypes.c_int32(cols),
            ],
        )
        return result
class QuantizedLinear(Linear):
    """``nn.Linear`` drop-in that stores its weight quantized to int8 or int4.

    The weight is kept as a frozen int8 Parameter plus a per-row fp16 scale;
    the forward pass dequantizes on the fly via ``W8A16Linear``.
    Requires ``device`` and ``dtype`` in ``kwargs``.
    """

    def __init__(self, weight_bit_width: int, weight_tensor=None, bias_tensor=None, empty_init=False, *args, **kwargs):
        super(QuantizedLinear, self).__init__(*args, **kwargs)
        self.weight_bit_width = weight_bit_width

        shape = self.weight.shape
        # Discard the full-precision weight allocated by Linear.__init__;
        # it is replaced below by the quantized storage.
        del self.weight

        if weight_tensor is None or empty_init:
            # Placeholder buffers, e.g. when a quantized checkpoint will be
            # loaded afterwards; contents are uninitialized.
            self.weight = torch.empty(
                shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=kwargs["device"]
            )
            self.weight_scale = torch.empty(shape[0], dtype=kwargs["dtype"], device=kwargs["device"])
        else:
            # Symmetric per-row quantization: scale = max|w| / (2^(b-1) - 1).
            self.weight_scale = (weight_tensor.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)).half()
            self.weight = torch.round(weight_tensor / self.weight_scale[:, None]).to(torch.int8)
            if weight_bit_width == 4:
                # Pack two int4 values per int8 byte.
                self.weight = compress_int4_weight(self.weight)

        self.weight = Parameter(self.weight.to(kwargs["device"]), requires_grad=False)
        self.weight_scale = Parameter(self.weight_scale.to(kwargs["device"]), requires_grad=False)
        if bias_tensor is not None:
            self.bias = Parameter(bias_tensor.to(kwargs["device"]), requires_grad=False)
        else:
            # NOTE(review): overwrites the bias Parameter created by
            # Linear.__init__ with plain None — presumably intentional; confirm.
            self.bias = None

    def forward(self, input):
        # Dequantize-and-matmul through the custom autograd function.
        output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)
        if self.bias is not None:
            output = output + self.bias
        return output
def quantize(model, weight_bit_width, empty_init=False, **kwargs):
    """Replace fp16 linear with quantized linear"""
    # The four fp16 linears replaced in every transformer layer, addressed as
    # (sub-module attribute on the layer, linear attribute on the sub-module).
    targets = (
        ("attention", "query_key_value"),
        ("attention", "dense"),
        ("mlp", "dense_h_to_4h"),
        ("mlp", "dense_4h_to_h"),
    )
    for layer in model.layers:
        for owner_name, linear_name in targets:
            owner = getattr(layer, owner_name)
            fp16_linear = getattr(owner, linear_name)
            replacement = QuantizedLinear(
                weight_bit_width=weight_bit_width,
                weight_tensor=fp16_linear.weight.to(torch.cuda.current_device()),
                bias_tensor=fp16_linear.bias,
                in_features=fp16_linear.in_features,
                out_features=fp16_linear.out_features,
                bias=True,
                dtype=torch.half,
                device=fp16_linear.weight.device,
                empty_init=empty_init
            )
            setattr(owner, linear_name, replacement)
    return model
| 15,054 | 73.529703 | 7,375 | py |
XFL | XFL-master/demo/horizontal/chatglm/chatglm-demo/modeling_chatglm.py | """ PyTorch ChatGLM model. """
import math
import copy
import os
import warnings
import re
import sys
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss, LayerNorm
from torch.nn.utils import skip_init
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
from transformers.utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
BaseModelOutputWithPastAndCrossAttentions,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.generation.logits_process import LogitsProcessor
from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput
from .configuration_chatglm import ChatGLMConfig
# flags required to enable jit fusion kernels
# (skipped on macOS; presumably the legacy fusers are not usable there — confirm)
if sys.platform != 'darwin':
    torch._C._jit_set_profiling_mode(False)
    torch._C._jit_set_profiling_executor(False)
    torch._C._jit_override_can_fuse_on_cpu(True)
    torch._C._jit_override_can_fuse_on_gpu(True)

logger = logging.get_logger(__name__)

# Names consumed by transformers' docstring decorators.
_CHECKPOINT_FOR_DOC = "THUDM/ChatGLM-6B"
_CONFIG_FOR_DOC = "ChatGLM6BConfig"

CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "THUDM/chatglm-6b",
    # See all ChatGLM-6B models at https://huggingface.co/models?filter=chatglm
]
class InvalidScoreLogitsProcessor(LogitsProcessor):
    """Recover from degenerate logits during generation.

    If any score is NaN or infinite, all scores are zeroed in place and
    token id 5 is given a large positive score so sampling stays valid.
    """

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        contains_nan = torch.isnan(scores).any()
        contains_inf = torch.isinf(scores).any()
        if contains_nan or contains_inf:
            scores.zero_()
            scores[..., 5] = 5e4
        return scores
def load_tf_weights_in_chatglm_6b(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Walks every variable in the TF checkpoint, maps its slash-separated name
    onto attributes of ``model`` and copies the array into the matching
    parameter. Optimizer slot variables are skipped. Requires TensorFlow.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        # Follow each path component down from the model root.
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                # e.g. "layer_11" -> ["layer", "11"]: attribute then index.
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    # NOTE(review): this `continue` targets the inner loop over
                    # path components, not the outer variable loop — later
                    # components are still applied to the stale `pointer`.
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to PyTorch.
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
class PrefixEncoder(torch.nn.Module):
    """Encode the learnable prefix tokens into past key/value activations.

    Input shape: (batch-size, prefix-length)
    Output shape: (batch-size, prefix-length, 2*layers*hidden)
    """

    def __init__(self, config):
        super().__init__()
        self.prefix_projection = config.prefix_projection
        kv_size = config.num_layers * config.hidden_size * 2
        if self.prefix_projection:
            # Two-layer MLP: embed at hidden_size, then project up to kv_size.
            self.embedding = torch.nn.Embedding(config.pre_seq_len, config.hidden_size)
            self.trans = torch.nn.Sequential(
                torch.nn.Linear(config.hidden_size, config.hidden_size),
                torch.nn.Tanh(),
                torch.nn.Linear(config.hidden_size, kv_size)
            )
        else:
            # Learn the full-size prefix table directly.
            self.embedding = torch.nn.Embedding(config.pre_seq_len, kv_size)

    def forward(self, prefix: torch.Tensor):
        embedded = self.embedding(prefix)
        return self.trans(embedded) if self.prefix_projection else embedded
@torch.jit.script
def gelu_impl(x):
    """OpenAI's gelu implementation."""
    # tanh approximation of GELU; 0.7978845608028654 ~= sqrt(2/pi).
    return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *
                                       (1.0 + 0.044715 * x * x)))
def gelu(x):
    """Plain-function wrapper around the jit-scripted ``gelu_impl``."""
    return gelu_impl(x)
class RotaryEmbedding(torch.nn.Module):
    """Rotary position embedding with cached cos/sin tables.

    Produces per-position cos/sin tensors of shape [seq_len, 1, dim].
    When ``learnable`` is True the inverse frequencies are a trainable
    Parameter and the tables are recomputed every call; otherwise they are
    cached and only regrown when a longer sequence is seen.
    """

    def __init__(self, dim, base=10000, precision=torch.half, learnable=False):
        super().__init__()
        # Standard rotary inverse-frequency schedule over even channel indices.
        inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
        inv_freq = inv_freq.half()
        self.learnable = learnable
        if learnable:
            self.inv_freq = torch.nn.Parameter(inv_freq)
            self.max_seq_len_cached = None
        else:
            self.register_buffer('inv_freq', inv_freq)
            self.max_seq_len_cached = None
            self.cos_cached = None
            self.sin_cached = None
        self.precision = precision

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
                              error_msgs):
        # inv_freq is recomputed in __init__; nothing to load from checkpoints.
        pass

    def forward(self, x, seq_dim=1, seq_len=None):
        if seq_len is None:
            seq_len = x.shape[seq_dim]
        # Rebuild the tables when uncached or when a longer sequence appears.
        if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached):
            # learnable mode never caches (max_seq_len_cached stays None).
            self.max_seq_len_cached = None if self.learnable else seq_len
            t = torch.arange(seq_len, device=x.device, dtype=self.inv_freq.dtype)
            freqs = torch.einsum('i,j->ij', t, self.inv_freq)
            # Different from paper, but it uses a different permutation in order to obtain the same calculation
            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            if self.precision == torch.bfloat16:
                # bf16 has no cos/sin kernels for all cases; compute in fp32.
                emb = emb.float()

            # [sx, 1 (b * np), hn]
            cos_cached = emb.cos()[:, None, :]
            sin_cached = emb.sin()[:, None, :]
            if self.precision == torch.bfloat16:
                cos_cached = cos_cached.bfloat16()
                sin_cached = sin_cached.bfloat16()
            if self.learnable:
                return cos_cached, sin_cached
            self.cos_cached, self.sin_cached = cos_cached, sin_cached
        return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...]

    def _apply(self, fn):
        # Keep the caches on the right device/dtype when .to()/.cuda() is called.
        if self.cos_cached is not None:
            self.cos_cached = fn(self.cos_cached)
        if self.sin_cached is not None:
            self.sin_cached = fn(self.sin_cached)
        return super()._apply(fn)
def rotate_half(x):
    """Swap the two halves of the last dim, negating the second: (a, b) -> (-b, a)."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    # dim written positively: dim=-1 triggers a bug in earlier torch versions
    return torch.cat((-second, first), dim=first.ndim - 1)
@torch.jit.script
def apply_rotary_pos_emb_index(q, k, cos, sin, position_id):
    """Apply rotary embedding to q/k, gathering cos/sin rows by position id."""
    # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn]
    # F.embedding performs the per-position row lookup into the cached tables.
    cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \
        F.embedding(position_id, sin.squeeze(1)).unsqueeze(2)
    q, k = (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
    return q, k
def attention_fn(
        self,
        query_layer,
        key_layer,
        value_layer,
        attention_mask,
        hidden_size_per_partition,
        layer_id,
        layer_past=None,
        scaling_attention_score=True,
        use_cache=False,
):
    """Core scaled-dot-product attention used by ``SelfAttention.forward``.

    Layouts are seq-first: q/k/v are [sq, b, np, hn]. Returns
    (context_layer [sq, b, hp], present kv-cache tuple or None,
    attention_probs). ``self`` is the SelfAttention module (passed
    explicitly because this is a free function).
    """
    # Prepend cached keys/values along the sequence dim for incremental decoding.
    if layer_past is not None:
        past_key, past_value = layer_past[0], layer_past[1]
        key_layer = torch.cat((past_key, key_layer), dim=0)
        value_layer = torch.cat((past_value, value_layer), dim=0)

    # seqlen, batch, num_attention_heads, hidden_size_per_attention_head
    seq_len, b, nh, hidden_size = key_layer.shape

    if use_cache:
        present = (key_layer, value_layer)
    else:
        present = None

    # Per-layer coefficient: q is divided by it here and the scores are
    # multiplied by it again below — a numerical-range trick, net no-op.
    query_key_layer_scaling_coeff = float(layer_id + 1)
    if scaling_attention_score:
        query_layer = query_layer / (math.sqrt(hidden_size) * query_key_layer_scaling_coeff)

    # ===================================
    # Raw attention scores. [b, np, s, s]
    # ===================================

    # [b, np, sq, sk]
    output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))

    # [sq, b, np, hn] -> [sq, b * np, hn]
    query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
    # [sk, b, np, hn] -> [sk, b * np, hn]
    key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)

    # Dummy input for baddbmm; beta=0.0 below means it is never read.
    matmul_result = torch.zeros(
        1, 1, 1,
        dtype=query_layer.dtype,
        device=query_layer.device,
    )

    matmul_result = torch.baddbmm(
        matmul_result,
        query_layer.transpose(0, 1),  # [b * np, sq, hn]
        key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
        beta=0.0,
        alpha=1.0,
    )

    # change view to [b, np, sq, sk]
    attention_scores = matmul_result.view(*output_size)

    if self.scale_mask_softmax:
        self.scale_mask_softmax.scale = query_key_layer_scaling_coeff
        attention_probs = self.scale_mask_softmax(attention_scores, attention_mask.contiguous())
    else:
        if not (attention_mask == 0).all():
            # if auto-regressive, skip
            attention_scores.masked_fill_(attention_mask, -10000.0)
        dtype = attention_scores.dtype
        # Softmax in fp32 for stability, then cast back.
        attention_scores = attention_scores.float()
        attention_scores = attention_scores * query_key_layer_scaling_coeff

        attention_probs = F.softmax(attention_scores, dim=-1)

        attention_probs = attention_probs.type(dtype)

    # =========================
    # Context layer. [sq, b, hp]
    # =========================

    # value_layer -> context layer.
    # [sk, b, np, hn] --> [b, np, sq, hn]

    # context layer shape: [b, np, sq, hn]
    output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))

    # change view [sk, b * np, hn]
    value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)

    # change view [b * np, sq, sk]
    attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)

    # matmul: [b * np, sq, hn]
    context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))

    # change view [b, np, sq, hn]
    context_layer = context_layer.view(*output_size)

    # [b, np, sq, hn] --> [sq, b, np, hn]
    context_layer = context_layer.permute(2, 0, 1, 3).contiguous()

    # [sq, b, np, hn] --> [sq, b, hp]
    new_context_layer_shape = context_layer.size()[:-2] + (hidden_size_per_partition,)
    context_layer = context_layer.view(*new_context_layer_shape)

    outputs = (context_layer, present, attention_probs)

    return outputs
def default_init(cls, *args, **kwargs):
    """Instantiate ``cls`` normally; the counterpart of ``torch.nn.utils.skip_init``."""
    instance = cls(*args, **kwargs)
    return instance
class SelfAttention(torch.nn.Module):
    """GLM self-attention with (optionally 2D) rotary position embedding.

    Holds the fused QKV projection and output projection; the actual
    attention math is delegated to the module-level ``attention_fn``.
    """

    def __init__(self, hidden_size, num_attention_heads,
                 layer_id, hidden_size_per_attention_head=None, bias=True,
                 params_dtype=torch.float, position_encoding_2d=True, empty_init=True):
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        super(SelfAttention, self).__init__()

        self.layer_id = layer_id
        self.hidden_size = hidden_size
        self.hidden_size_per_partition = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_attention_heads_per_partition = num_attention_heads
        self.position_encoding_2d = position_encoding_2d
        # With 2D encoding each head's channels are split in half, so the
        # rotary table only covers half the per-head dim.
        self.rotary_emb = RotaryEmbedding(
            self.hidden_size // (self.num_attention_heads * 2)
            if position_encoding_2d
            else self.hidden_size // self.num_attention_heads,
            base=10000,
            precision=torch.half,
            learnable=False,
        )

        self.scale_mask_softmax = None

        if hidden_size_per_attention_head is None:
            self.hidden_size_per_attention_head = hidden_size // num_attention_heads
        else:
            self.hidden_size_per_attention_head = hidden_size_per_attention_head

        self.inner_hidden_size = num_attention_heads * self.hidden_size_per_attention_head

        # Strided linear layer.
        self.query_key_value = init_method(
            torch.nn.Linear,
            hidden_size,
            3 * self.inner_hidden_size,
            bias=bias,
            dtype=params_dtype,
        )

        self.dense = init_method(
            torch.nn.Linear,
            self.inner_hidden_size,
            hidden_size,
            bias=bias,
            dtype=params_dtype,
        )

    @staticmethod
    def attention_mask_func(attention_scores, attention_mask):
        # In-place large-negative fill on masked positions.
        attention_scores.masked_fill_(attention_mask, -10000.0)
        return attention_scores

    def split_tensor_along_last_dim(self, tensor, num_partitions,
                                    contiguous_split_chunks=False):
        """Split a tensor along its last dimension.
        Arguments:
            tensor: input tensor.
            num_partitions: number of partitions to split the tensor
            contiguous_split_chunks: If True, make each chunk contiguous
                in memory.
        """
        # Get the size and dimension.
        last_dim = tensor.dim() - 1
        last_dim_size = tensor.size()[last_dim] // num_partitions
        # Split.
        tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
        # Note: torch.split does not create contiguous tensors by default.
        if contiguous_split_chunks:
            return tuple(chunk.contiguous() for chunk in tensor_list)

        return tensor_list

    def forward(
            self,
            hidden_states: torch.Tensor,
            position_ids,
            attention_mask: torch.Tensor,
            layer_id,
            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
            use_cache: bool = False,
            output_attentions: bool = False,
    ):
        """
        hidden_states: [seq_len, batch, hidden_size]
        attention_mask: [(1, 1), seq_len, seq_len]
        """

        # [seq_len, batch, 3 * hidden_size]
        mixed_raw_layer = self.query_key_value(hidden_states)

        # [seq_len, batch, 3 * hidden_size] --> [seq_len, batch, num_attention_heads, 3 * hidden_size_per_attention_head]
        new_tensor_shape = mixed_raw_layer.size()[:-1] + (
            self.num_attention_heads_per_partition,
            3 * self.hidden_size_per_attention_head,
        )
        mixed_raw_layer = mixed_raw_layer.view(*new_tensor_shape)

        # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
        (query_layer, key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_raw_layer, 3)

        if self.position_encoding_2d:
            # Halve each head's channels; the first half rotates with the
            # absolute positions, the second with the block positions.
            q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1))
            k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1))
            cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1)
            position_ids, block_position_ids = position_ids[:, 0, :].transpose(0, 1).contiguous(), \
                position_ids[:, 1, :].transpose(0, 1).contiguous()
            q1, k1 = apply_rotary_pos_emb_index(q1, k1, cos, sin, position_ids)
            q2, k2 = apply_rotary_pos_emb_index(q2, k2, cos, sin, block_position_ids)
            query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1))
            key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1))
        else:
            position_ids = position_ids.transpose(0, 1)
            cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1)
            # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
            query_layer, key_layer = apply_rotary_pos_emb_index(query_layer, key_layer, cos, sin, position_ids)

        # [seq_len, batch, hidden_size]
        context_layer, present, attention_probs = attention_fn(
            self=self,
            query_layer=query_layer,
            key_layer=key_layer,
            value_layer=value_layer,
            attention_mask=attention_mask,
            hidden_size_per_partition=self.hidden_size_per_partition,
            layer_id=layer_id,
            layer_past=layer_past,
            use_cache=use_cache
        )

        output = self.dense(context_layer)

        outputs = (output, present)

        if output_attentions:
            outputs += (attention_probs,)

        return outputs  # output, present, attention_probs
class GEGLU(torch.nn.Module):
    """Gated GELU: split the last dim in half, gate one half by GELU of the other."""

    def __init__(self):
        super().__init__()
        self.activation_fn = F.gelu

    def forward(self, x):
        # chunk dim spelled positively — dim=-1 breaks in jit for pt<1.10
        value, gate = x.chunk(2, dim=(x.ndim - 1))
        return value * self.activation_fn(gate)
class GLU(torch.nn.Module):
    """Transformer feed-forward block: h -> inner (default 4h) -> activation -> h."""

    def __init__(self, hidden_size, inner_hidden_size=None,
                 layer_id=None, bias=True, activation_func=gelu, params_dtype=torch.float, empty_init=True):
        super(GLU, self).__init__()
        # skip_init leaves parameters uninitialized (filled later from a checkpoint).
        init_method = skip_init if empty_init else default_init
        self.layer_id = layer_id
        self.activation_func = activation_func

        self.hidden_size = hidden_size
        # Standard 4x expansion unless an explicit inner size is given.
        self.inner_hidden_size = 4 * hidden_size if inner_hidden_size is None else inner_hidden_size
        # Project to 4h.
        self.dense_h_to_4h = init_method(
            torch.nn.Linear,
            self.hidden_size,
            self.inner_hidden_size,
            bias=bias,
            dtype=params_dtype,
        )
        # Project back to h.
        self.dense_4h_to_h = init_method(
            torch.nn.Linear,
            self.inner_hidden_size,
            self.hidden_size,
            bias=bias,
            dtype=params_dtype,
        )

    def forward(self, hidden_states):
        """
        hidden_states: [seq_len, batch, hidden_size]
        """
        # [seq_len, batch, inner_hidden_size]
        expanded = self.dense_h_to_4h(hidden_states)
        activated = self.activation_func(expanded)
        return self.dense_4h_to_h(activated)
class GLMBlock(torch.nn.Module):
    """One GLM transformer layer: LN -> self-attention -> LN -> GLU MLP,
    with residual branches scaled by sqrt(2 * num_layers)."""

    def __init__(
            self,
            hidden_size,
            num_attention_heads,
            layernorm_epsilon,
            layer_id,
            inner_hidden_size=None,
            hidden_size_per_attention_head=None,
            layernorm=LayerNorm,
            use_bias=True,
            params_dtype=torch.float,
            num_layers=28,
            position_encoding_2d=True,
            empty_init=True
    ):
        super(GLMBlock, self).__init__()
        # Set output layer initialization if not provided.
        self.layer_id = layer_id

        # Layernorm on the input data.
        self.input_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)

        self.position_encoding_2d = position_encoding_2d

        # Self attention.
        self.attention = SelfAttention(
            hidden_size,
            num_attention_heads,
            layer_id,
            hidden_size_per_attention_head=hidden_size_per_attention_head,
            bias=use_bias,
            params_dtype=params_dtype,
            position_encoding_2d=self.position_encoding_2d,
            empty_init=empty_init
        )

        # Layernorm on the input data.
        self.post_attention_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)

        self.num_layers = num_layers

        # GLU
        self.mlp = GLU(
            hidden_size,
            inner_hidden_size=inner_hidden_size,
            bias=use_bias,
            layer_id=layer_id,
            params_dtype=params_dtype,
            empty_init=empty_init
        )

    def forward(
            self,
            hidden_states: torch.Tensor,
            position_ids,
            attention_mask: torch.Tensor,
            layer_id,
            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
            use_cache: bool = False,
            output_attentions: bool = False,
    ):
        """
        hidden_states: [seq_len, batch, hidden_size]
        attention_mask: [(1, 1), seq_len, seq_len]
        """

        # Layer norm at the begining of the transformer layer.
        # [seq_len, batch, hidden_size]
        attention_input = self.input_layernorm(hidden_states)

        # Self attention.
        attention_outputs = self.attention(
            attention_input,
            position_ids,
            attention_mask=attention_mask,
            layer_id=layer_id,
            layer_past=layer_past,
            use_cache=use_cache,
            output_attentions=output_attentions
        )

        attention_output = attention_outputs[0]

        outputs = attention_outputs[1:]

        # Residual connection.
        # Note the residual uses the *post-layernorm* input scaled by alpha,
        # not the raw hidden_states.
        alpha = (2 * self.num_layers) ** 0.5
        hidden_states = attention_input * alpha + attention_output

        mlp_input = self.post_attention_layernorm(hidden_states)

        # MLP.
        mlp_output = self.mlp(mlp_input)

        # Second residual connection.
        output = mlp_input * alpha + mlp_output

        if use_cache:
            outputs = (output,) + outputs
        else:
            outputs = (output,) + outputs[1:]

        return outputs  # hidden_states, present, attentions
class ChatGLMPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """

    is_parallelizable = False
    supports_gradient_checkpointing = True
    config_class = ChatGLMConfig
    base_model_prefix = "transformer"
    _no_split_modules = ["GLMBlock"]

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module: nn.Module):
        """Initialize the weights."""
        # Weights are expected to come from a pretrained checkpoint, so no
        # explicit initialization is performed here.
        return

    def get_masks(self, input_ids, device):
        """Build the inverted attention mask for a batch of sequences.

        Tokens before each sequence's bos token (the prompt context) attend
        bidirectionally; tokens from the bos token onwards attend causally.
        Returns a bool tensor of shape [batch, 1, seq_len, seq_len] in which
        True marks positions that are *masked out*.
        """
        batch_size, seq_length = input_ids.shape
        bos = self.config.bos_token_id
        context_lengths = [seq.tolist().index(bos) for seq in input_ids]
        # Start from a lower-triangular (causal) mask of ones.
        mask = torch.ones((batch_size, seq_length, seq_length), device=device).tril_()
        # Allow full attention within each prompt context.
        for i, ctx_len in enumerate(context_lengths):
            mask[i, :, :ctx_len] = 1
        # Add the head dimension and invert: True == masked.
        return (mask.unsqueeze_(1) < 0.5).bool()

    def get_position_ids(self, input_ids, mask_positions, device, use_gmasks=None):
        """Compute position ids (2D block ids when enabled) for generation.

        `mask_positions[i]` is the index of the [MASK]/[gMASK] token in
        sequence i; positions after the context are pinned to it so generated
        tokens share the mask token's position in the first channel.
        """
        batch_size, seq_length = input_ids.shape
        if use_gmasks is None:
            use_gmasks = [False] * batch_size
        bos = self.config.bos_token_id
        context_lengths = [seq.tolist().index(bos) for seq in input_ids]
        # Base positions 0..seq_len-1, replicated per batch row.
        position_ids = torch.arange(
            seq_length, dtype=torch.long, device=device
        ).unsqueeze(0).repeat(batch_size, 1)
        if self.position_encoding_2d:
            for i, ctx_len in enumerate(context_lengths):
                position_ids[i, ctx_len:] = mask_positions[i]
            # Second channel: zeros over the context, then 1..N for the
            # generated block.
            block_position_ids = torch.stack([
                torch.cat((
                    torch.zeros(ctx_len, dtype=torch.long, device=device),
                    torch.arange(seq_length - ctx_len, dtype=torch.long, device=device) + 1
                ))
                for ctx_len in context_lengths
            ], dim=0)
            position_ids = torch.stack((position_ids, block_position_ids), dim=1)
        else:
            # 1D encoding: only pin positions to the mask for non-gMASK rows.
            for i, ctx_len in enumerate(context_lengths):
                if not use_gmasks[i]:
                    position_ids[i, ctx_len:] = mask_positions[i]
        return position_ids

    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle gradient checkpointing on the inner transformer module.
        if isinstance(module, ChatGLMModel):
            module.gradient_checkpointing = value
CHATGLM_6B_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config ([`~ChatGLM6BConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CHATGLM_6B_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`ChatGLM6BTokenizer`].
See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert *input_ids* indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ChatGLM-6B Model transformer outputting raw hidden-states without any specific head on top.",
    CHATGLM_6B_START_DOCSTRING,
)
class ChatGLMModel(ChatGLMPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well
    as a decoder, in which case a layer of cross-attention is added between
    the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani,
    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as an decoder the model needs to be initialized with the
    `is_decoder` argument of the configuration set to `True`.
    To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder`
    argument and `add_cross_attention` set to `True`; an
    `encoder_hidden_states` is then expected as an input to the forward pass.
    """

    def __init__(self, config: ChatGLMConfig, empty_init=True):
        super().__init__(config)
        # `skip_init` constructs modules without materializing/initializing
        # weights — useful when a checkpoint is loaded immediately after.
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        # recording parameters
        self.max_sequence_length = config.max_sequence_length
        self.hidden_size = config.hidden_size
        # Weights are kept in fp16 regardless of config.
        self.params_dtype = torch.half
        self.num_attention_heads = config.num_attention_heads
        self.vocab_size = config.vocab_size
        self.num_layers = config.num_layers
        self.layernorm_epsilon = config.layernorm_epsilon
        self.inner_hidden_size = config.inner_hidden_size
        self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads
        self.position_encoding_2d = config.position_encoding_2d
        # p-tuning v2 settings: a non-None pre_seq_len enables prefix tuning.
        self.pre_seq_len = config.pre_seq_len
        self.prefix_projection = config.prefix_projection
        self.word_embeddings = init_method(
            torch.nn.Embedding,
            num_embeddings=self.vocab_size, embedding_dim=self.hidden_size,
            dtype=self.params_dtype
        )
        self.gradient_checkpointing = False

        # Factory for one transformer block; closes over the config fields.
        def get_layer(layer_id):
            return GLMBlock(
                self.hidden_size,
                self.num_attention_heads,
                self.layernorm_epsilon,
                layer_id,
                inner_hidden_size=self.inner_hidden_size,
                hidden_size_per_attention_head=self.hidden_size_per_attention_head,
                layernorm=LayerNorm,
                use_bias=True,
                params_dtype=self.params_dtype,
                position_encoding_2d=self.position_encoding_2d,
                empty_init=empty_init
            )

        self.layers = torch.nn.ModuleList(
            [get_layer(layer_id) for layer_id in range(self.num_layers)]
        )
        # Final layer norm before output.
        self.final_layernorm = LayerNorm(self.hidden_size, eps=self.layernorm_epsilon)

        if self.pre_seq_len is not None:
            # p-tuning v2: freeze the whole backbone and train only the
            # prefix encoder added below.
            for param in self.parameters():
                param.requires_grad = False
            self.prefix_tokens = torch.arange(self.pre_seq_len).long()
            self.prefix_encoder = PrefixEncoder(config)
            self.dropout = torch.nn.Dropout(0.1)
            # total_params = sum(p.numel() for p in self.parameters())
            # trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
            # print("Using p-tuning v2: # trainable_params = {} / {}".format(trainable_params, total_params))

    def get_input_embeddings(self):
        return self.word_embeddings

    def set_input_embeddings(self, new_embeddings: torch.Tensor):
        self.word_embeddings = new_embeddings

    def get_prompt(self, batch_size, device, dtype=torch.half):
        """Encode the learned prefix into per-layer past key/value tensors.

        Returns `num_layers` chunks, each of shape
        [2, pre_seq_len, batch, num_heads, head_dim] — one (key, value) pair
        per layer, in the same layout as the attention cache.
        """
        prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)
        past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)
        past_key_values = past_key_values.view(
            batch_size,
            self.pre_seq_len,
            self.num_layers * 2,
            self.num_attention_heads,
            self.hidden_size // self.num_attention_heads
        )
        # seq_len, b, nh, hidden_size
        past_key_values = self.dropout(past_key_values)
        # Move the (num_layers*2) axis first, then split into per-layer
        # (key, value) pairs along dim 0.
        past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)
        # past_key_values = [(v[0], v[1]) for v in past_key_values]
        return past_key_values

    @add_start_docstrings_to_model_forward(CHATGLM_6B_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
            self,
            input_ids: Optional[torch.LongTensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
            inputs_embeds: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPast]:
        # Resolve per-call flags against the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if past_key_values is None:
            if self.pre_seq_len is not None:
                # p-tuning v2: seed the cache with the encoded prefix.
                # NOTE(review): uses `input_ids` directly here, so this path
                # assumes input_ids was provided (not only inputs_embeds).
                past_key_values = self.get_prompt(batch_size=input_ids.shape[0], device=input_ids.device,
                                                  dtype=inputs_embeds.dtype)
            else:
                past_key_values = tuple([None] * len(self.layers))

            if attention_mask is None:
                attention_mask = self.get_masks(
                    input_ids,
                    device=input_ids.device
                )

            if position_ids is None:
                MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
                seqs = input_ids.tolist()

                # Locate the [MASK]/[gMASK] token per sequence (gMASK wins
                # when present) to anchor the 2D position ids.
                mask_positions, use_gmasks = [], []
                for seq in seqs:
                    mask_token = gMASK if gMASK in seq else MASK
                    use_gmask = mask_token == gMASK
                    mask_positions.append(seq.index(mask_token))
                    use_gmasks.append(use_gmask)

                position_ids = self.get_position_ids(
                    input_ids,
                    mask_positions=mask_positions,
                    device=input_ids.device,
                    use_gmasks=use_gmasks
                )

        if self.pre_seq_len is not None and attention_mask is not None:
            # Prepend an all-False (i.e. *not* masked, since ones < 0.5 is
            # False under the inverted-mask convention) block so every query
            # can attend to the learned prefix keys.
            prefix_attention_mask = torch.ones(batch_size, 1, input_ids.size(-1), self.pre_seq_len).to(
                attention_mask.device)
            prefix_attention_mask = (prefix_attention_mask < 0.5).bool()
            attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=3)

        # [seq_len, batch, hidden_size]
        hidden_states = inputs_embeds.transpose(0, 1)

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        if attention_mask is None:
            attention_mask = torch.zeros(1, 1, device=input_ids.device).bool()
        else:
            attention_mask = attention_mask.to(hidden_states.device)

        for i, layer in enumerate(self.layers):

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_past = past_key_values[i]

            if self.gradient_checkpointing and self.training:
                # Checkpointed call takes positional args only; argument
                # order must match GLMBlock.forward.
                layer_ret = torch.utils.checkpoint.checkpoint(
                    layer,
                    hidden_states,
                    position_ids,
                    attention_mask,
                    torch.tensor(i),
                    layer_past,
                    use_cache,
                    output_attentions
                )
            else:
                layer_ret = layer(
                    hidden_states,
                    position_ids=position_ids,
                    attention_mask=attention_mask,
                    layer_id=torch.tensor(i),
                    layer_past=layer_past,
                    use_cache=use_cache,
                    output_attentions=output_attentions
                )

            hidden_states = layer_ret[0]

            if use_cache:
                presents = presents + (layer_ret[1],)

            if output_attentions:
                # Attention probs sit after `present` when caching is on.
                all_self_attentions = all_self_attentions + (layer_ret[2 if use_cache else 1],)

        # Final layer norm.
        hidden_states = self.final_layernorm(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
    """ChatGLM-6B with a language-modeling head for conditional generation."""

    def __init__(self, config: ChatGLMConfig, empty_init=True):
        super().__init__(config)
        # `skip_init` avoids allocating/initializing weights up front.
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        # self.hidden_size = config.hidden_size
        # self.params_dtype = torch.half
        # self.vocab_size = config.vocab_size
        self.max_sequence_length = config.max_sequence_length
        self.position_encoding_2d = config.position_encoding_2d
        self.transformer = ChatGLMModel(config, empty_init=empty_init)
        # LM head in fp16, no bias, untied from the input embeddings.
        self.lm_head = init_method(
            nn.Linear,
            config.hidden_size,
            config.vocab_size,
            bias=False,
            dtype=torch.half
        )
        self.config = config
        self.quantized = False
        if self.config.quantization_bit:
            self.quantize(self.config.quantization_bit, empty_init=True)

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def _update_model_kwargs_for_generation(
            self,
            outputs: ModelOutput,
            model_kwargs: Dict[str, Any],
            is_encoder_decoder: bool = False,
            standardize_cache_format: bool = False,
    ) -> Dict[str, Any]:
        """Extend cached kwargs by one generated token between decode steps."""
        # update past_key_values
        model_kwargs["past_key_values"] = self._extract_past_from_model_output(
            outputs, standardize_cache_format=standardize_cache_format
        )

        # update attention mask
        if "attention_mask" in model_kwargs:
            attention_mask = model_kwargs["attention_mask"]
            if attention_mask is not None and attention_mask.dtype == torch.bool:
                # Under the inverted convention (True == masked): append a
                # masked key column for existing queries, then a new query
                # row that may attend only to its own (last) position.
                attention_mask = torch.cat(
                    [attention_mask, attention_mask.new_ones((*attention_mask.shape[:3], 1))], dim=3)
                new_attention_mask = attention_mask[:, :, -1:].clone()
                new_attention_mask[..., -1] = False
                model_kwargs["attention_mask"] = torch.cat(
                    [attention_mask, new_attention_mask], dim=2
                )

        # update position ids
        if "position_ids" in model_kwargs:
            position_ids = model_kwargs["position_ids"]
            # Copy the last 2D position id and advance the block channel
            # (channel 1) by one for the new token.
            new_position_id = position_ids[..., -1:].clone()
            new_position_id[:, 1, :] += 1
            model_kwargs["position_ids"] = torch.cat(
                [position_ids, new_position_id], dim=-1
            )

        return model_kwargs

    def prepare_inputs_for_generation(
            self,
            input_ids: torch.LongTensor,
            past: Optional[torch.Tensor] = None,
            past_key_values: Optional[torch.Tensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.Tensor] = None,
            **kwargs
    ) -> dict:
        """Assemble the model inputs for one generation step.

        First step feeds the full prompt; subsequent steps (when a cache is
        present) feed only the last token with sliced mask/position ids.
        """
        batch_size, seq_length = input_ids.shape
        MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
        seqs = input_ids.tolist()
        # Locate the [MASK]/[gMASK] anchor per sequence (gMASK preferred).
        mask_positions, use_gmasks = [], []
        for seq in seqs:
            mask_token = gMASK if gMASK in seq else MASK
            use_gmask = mask_token == gMASK
            mask_positions.append(seq.index(mask_token))
            use_gmasks.append(use_gmask)

        # only last token for input_ids if past is not None
        if past is not None or past_key_values is not None:
            last_token = input_ids[:, -1].unsqueeze(-1)
            if attention_mask is not None and attention_mask.dtype == torch.bool:
                attention_mask = attention_mask[:, :, -1:]
            else:
                attention_mask = None
            if position_ids is not None:
                position_ids = position_ids[..., -1:]
            else:
                # Rebuild the last position id from scratch.
                context_lengths = [seq.index(self.config.bos_token_id) for seq in seqs]
                if self.position_encoding_2d:
                    # Channel 0: mask position; channel 1: offset past context.
                    position_ids = torch.tensor(
                        [[mask_position, seq_length - context_length] for mask_position, context_length in
                         zip(mask_positions, context_lengths)], dtype=torch.long, device=input_ids.device).unsqueeze(-1)
                else:
                    position_ids = torch.tensor([mask_position for mask_position in mask_positions], dtype=torch.long,
                                                device=input_ids.device).unsqueeze(-1)

            if past is None:
                past = past_key_values
            return {
                "input_ids": last_token,
                "past_key_values": past,
                "position_ids": position_ids,
                "attention_mask": attention_mask
            }
        else:
            # First step: non-bool masks are dropped and rebuilt below.
            if attention_mask is not None and attention_mask.dtype != torch.bool:
                logger.warning_once(f"The dtype of attention mask ({attention_mask.dtype}) is not bool")
                attention_mask = None
            if attention_mask is None:
                attention_mask = self.get_masks(
                    input_ids,
                    device=input_ids.device
                )
            if position_ids is None:
                position_ids = self.get_position_ids(
                    input_ids,
                    device=input_ids.device,
                    mask_positions=mask_positions,
                    use_gmasks=use_gmasks
                )

            return {
                "input_ids": input_ids,
                "past_key_values": past,
                "position_ids": position_ids,
                "attention_mask": attention_mask
            }

    def forward(
            self,
            input_ids: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.Tensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
            inputs_embeds: Optional[torch.Tensor] = None,
            labels: Optional[torch.Tensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ):
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids=input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]

        # Transformer output is [seq, batch, hidden]; permute logits to the
        # conventional [batch, seq, vocab].
        lm_logits = self.lm_head(hidden_states).permute(1, 0, 2).contiguous()

        loss = None
        if labels is not None:
            # Compute the LM loss in fp32 for numerical stability.
            lm_logits = lm_logits.to(torch.float32)

            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

            lm_logits = lm_logits.to(hidden_states.dtype)
            loss = loss.to(hidden_states.dtype)

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(
            past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        Output shares the same memory storage as `past`.
        """
        # Batch dimension of the cache is dim 1 (layout is seq-first).
        return tuple(
            (
                layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),
                layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),
            )
            for layer_past in past
        )

    def process_response(self, response):
        """Post-process decoded text: strip, fix placeholders, and convert
        ASCII punctuation adjacent to CJK characters to full-width forms."""
        response = response.strip()
        response = response.replace("[[训练时间]]", "2023年")
        # [regex-escaped ASCII form, full-width replacement]
        punkts = [
            [",", ","],
            ["!", "!"],
            [":", ":"],
            [";", ";"],
            ["\?", "?"],
        ]
        for item in punkts:
            response = re.sub(r"([\u4e00-\u9fff])%s" % item[0], r"\1%s" % item[1], response)
            response = re.sub(r"%s([\u4e00-\u9fff])" % item[0], r"%s\1" % item[1], response)
        return response

    @torch.no_grad()
    def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1,
             do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):
        """Single-turn chat: build the multi-round prompt, generate, decode.

        Returns (response, updated_history).
        """
        if history is None:
            history = []
        if logits_processor is None:
            logits_processor = LogitsProcessorList()
        logits_processor.append(InvalidScoreLogitsProcessor())
        gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
                      "temperature": temperature, "logits_processor": logits_processor, **kwargs}
        if not history:
            prompt = query
        else:
            # Render history in the fixed "[Round i]\n问:...\n答:..." format.
            prompt = ""
            for i, (old_query, response) in enumerate(history):
                prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
            prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
        inputs = tokenizer([prompt], return_tensors="pt")
        inputs = inputs.to(self.device)
        outputs = self.generate(**inputs, **gen_kwargs)
        # Keep only the newly generated tokens.
        outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
        response = tokenizer.decode(outputs)
        response = self.process_response(response)
        history = history + [(query, response)]
        return response, history

    @torch.no_grad()
    def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048,
                    do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):
        """Streaming variant of `chat`: yields (partial_response, history)
        after each generated token."""
        if history is None:
            history = []
        if logits_processor is None:
            logits_processor = LogitsProcessorList()
        logits_processor.append(InvalidScoreLogitsProcessor())
        gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p,
                      "temperature": temperature, "logits_processor": logits_processor, **kwargs}
        if not history:
            prompt = query
        else:
            prompt = ""
            for i, (old_query, response) in enumerate(history):
                prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
            prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
        inputs = tokenizer([prompt], return_tensors="pt")
        inputs = inputs.to(self.device)
        for outputs in self.stream_generate(**inputs, **gen_kwargs):
            outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
            response = tokenizer.decode(outputs)
            response = self.process_response(response)
            new_history = history + [(query, response)]
            yield response, new_history

    @torch.no_grad()
    def stream_generate(
            self,
            input_ids,
            generation_config: Optional[GenerationConfig] = None,
            logits_processor: Optional[LogitsProcessorList] = None,
            stopping_criteria: Optional[StoppingCriteriaList] = None,
            prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
            **kwargs,
    ):
        """Greedy/sampling generation loop that yields the growing `input_ids`
        after every step (simplified re-implementation of `generate`)."""
        batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]

        if generation_config is None:
            generation_config = self.generation_config
        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)
        bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id

        if isinstance(eos_token_id, int):
            eos_token_id = [eos_token_id]

        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
        if has_default_max_length and generation_config.max_new_tokens is None:
            warnings.warn(
                f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
                "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
                " recommend using `max_new_tokens` to control the maximum length of the generation.",
                UserWarning,
            )
        elif generation_config.max_new_tokens is not None:
            generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
            if not has_default_max_length:
                # NOTE(review): `UserWarning` is passed as a positional
                # %-format argument to `logger.warn`; this was probably meant
                # to be `warnings.warn` — confirm intended behavior.
                logger.warn(
                    f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
                    f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                    "Please refer to the documentation for more information. "
                    "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
                    UserWarning,
                )

        if input_ids_seq_length >= generation_config.max_length:
            input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
            logger.warning(
                f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
                f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
                " increasing `max_new_tokens`."
            )

        # 2. Set generation parameters if not already defined
        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

        logits_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_seq_length,
            encoder_input_ids=input_ids,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            logits_processor=logits_processor,
        )

        stopping_criteria = self._get_stopping_criteria(
            generation_config=generation_config, stopping_criteria=stopping_criteria
        )
        logits_warper = self._get_logits_warper(generation_config)

        # 1 while a sequence is still generating, 0 once it hit an eos token.
        unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
        scores = None
        while True:
            model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
            # forward pass to get next token
            outputs = self(
                **model_inputs,
                return_dict=True,
                output_attentions=False,
                output_hidden_states=False,
            )

            next_token_logits = outputs.logits[:, -1, :]

            # pre-process distribution
            next_token_scores = logits_processor(input_ids, next_token_logits)
            next_token_scores = logits_warper(input_ids, next_token_scores)

            # sample
            probs = nn.functional.softmax(next_token_scores, dim=-1)
            if generation_config.do_sample:
                next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
            else:
                next_tokens = torch.argmax(probs, dim=-1)

            # update generated ids, model inputs, and length for next step
            input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
            model_kwargs = self._update_model_kwargs_for_generation(
                outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
            )
            # Mark sequences finished when the sampled token is any eos id.
            unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())

            # stop when each sentence is finished, or if we exceed the maximum length
            if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
                break
            yield input_ids

    def quantize(self, bits: int, empty_init=False, **kwargs):
        """Quantize the transformer weights to `bits` (no-op when bits == 0
        or already quantized). Returns self for chaining."""
        if bits == 0:
            return

        from .quantization import quantize

        if self.quantized:
            logger.info("Already quantized.")
            return self

        self.quantized = True

        self.config.quantization_bit = bits

        self.transformer = quantize(self.transformer, bits, empty_init=empty_init, **kwargs)
        return self
| 57,568 | 39.089833 | 121 | py |
XFL | XFL-master/docs/en/source/conf.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# Sphinx project metadata (English docs build).
project = 'XFL'
copyright = '2022, The XFL Authors.'
author = 'chi.zhang'

# The full version, including alpha/beta/rc tags
release = '1.2.0'


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx_markdown_tables',
    'recommonmark',
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

master_doc = 'index'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
extensions.append('sphinx_rtd_theme')
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
    'logo_only': True,
    'navigation_depth': 5,
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

from recommonmark.parser import CommonMarkParser

# NOTE(review): `source_parsers` was removed in Sphinx 3.0; the recommonmark
# extension registers its parser itself — confirm the Sphinx version this
# build targets still honors this setting.
source_parsers = {
    '.md': CommonMarkParser,
}
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
} | 3,375 | 32.76 | 79 | py |
XFL | XFL-master/docs/zh_CN/source/conf.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# Sphinx project metadata (Simplified Chinese docs build).
project = 'XFL'
copyright = '2022, The XFL Authors.'
author = 'chi.zhang'

# The full version, including alpha/beta/rc tags
release = '1.2.0'


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx_markdown_tables',
    'recommonmark',
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

master_doc = 'index'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
extensions.append('sphinx_rtd_theme')
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
    'logo_only': True,
    'navigation_depth': 5,
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

from recommonmark.parser import CommonMarkParser

# NOTE(review): `source_parsers` was removed in Sphinx 3.0; the recommonmark
# extension registers its parser itself — confirm the Sphinx version this
# build targets still honors this setting.
source_parsers = {
    '.md': CommonMarkParser,
}
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
} | 3,378 | 32.79 | 79 | py |
ultrasound-nerve-segmentation | ultrasound-nerve-segmentation-master/train.py | from __future__ import print_function
import os
from skimage.transform import resize
from skimage.io import imsave
import numpy as np
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from data import load_train_data, load_test_data
K.set_image_data_format('channels_last')  # TF dimension ordering: (rows, cols, channels)
# Network input resolution; preprocess() resizes every image to this size.
img_rows = 96
img_cols = 96
# Additive smoothing term for the Dice coefficient (avoids division by zero).
smooth = 1.
def dice_coef(y_true, y_pred):
    """Soft Sørensen–Dice coefficient between two tensors, smoothed by `smooth`."""
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    denominator = K.sum(flat_true) + K.sum(flat_pred) + smooth
    return (2. * overlap + smooth) / denominator
def dice_coef_loss(y_true, y_pred):
    """Negated Dice coefficient: minimizing this loss maximizes mask overlap."""
    return -1.0 * dice_coef(y_true, y_pred)
def get_unet():
    """Build and compile the 2D U-Net used for nerve segmentation.

    Input: (img_rows, img_cols, 1) grayscale image.
    Output: (img_rows, img_cols, 1) sigmoid probability mask.
    Compiled with Adam(lr=1e-5) and the Dice-coefficient loss/metric.
    """
    def double_conv(filters, layer):
        # two 3x3 same-padding ReLU convolutions, as in the original U-Net
        layer = Conv2D(filters, (3, 3), activation='relu', padding='same')(layer)
        return Conv2D(filters, (3, 3), activation='relu', padding='same')(layer)

    def up_block(filters, layer, skip):
        # 2x2 transposed-conv upsampling, concatenate the encoder skip, then double conv
        upsampled = Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(layer)
        return double_conv(filters, concatenate([upsampled, skip], axis=3))

    inputs = Input((img_rows, img_cols, 1))

    # Contracting path: feature channels double at each level (32 -> 512).
    conv1 = double_conv(32, inputs)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = double_conv(64, pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = double_conv(128, pool2)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = double_conv(256, pool3)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = double_conv(512, pool4)

    # Expanding path: mirror the encoder and reuse its skip connections.
    conv6 = up_block(256, conv5, conv4)
    conv7 = up_block(128, conv6, conv3)
    conv8 = up_block(64, conv7, conv2)
    conv9 = up_block(32, conv8, conv1)

    conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)

    model = Model(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model
def preprocess(imgs):
    """Resize a stack of single-channel images to the network input size.

    Parameters
    ----------
    imgs : np.ndarray of shape (n, H, W)

    Returns
    -------
    np.ndarray of shape (n, img_rows, img_cols, 1), dtype uint8,
    ready for Keras' channels_last layout.
    """
    imgs_p = np.ndarray((imgs.shape[0], img_rows, img_cols), dtype=np.uint8)
    for i in range(imgs.shape[0]):
        # skimage.transform.resize takes output_shape as (rows, cols); the
        # original passed (img_cols, img_rows), which only worked because the
        # two are equal (96x96). Pass them in the documented order.
        imgs_p[i] = resize(imgs[i], (img_rows, img_cols), preserve_range=True)
    # append trailing channel axis
    imgs_p = imgs_p[..., np.newaxis]
    return imgs_p
def train_and_predict():
    """End-to-end pipeline: load data, train the U-Net, then predict and save test masks.

    Side effects: writes 'weights.h5', 'imgs_mask_test.npy' and per-image
    PNG predictions under './preds'.
    """
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_mask_train = load_train_data()
    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)
    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization
    imgs_train -= mean
    imgs_train /= std
    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]
    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet()
    # keep only the weights with the best validation loss
    model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)
    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    model.fit(imgs_train, imgs_mask_train, batch_size=32, nb_epoch=20, verbose=1, shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint])
    print('-'*30)
    print('Loading and preprocessing test data...')
    print('-'*30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)
    imgs_test = imgs_test.astype('float32')
    # normalize test data with the TRAINING mean/std, not its own statistics
    imgs_test -= mean
    imgs_test /= std
    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    # restore the best (lowest val_loss) checkpoint before predicting
    model.load_weights('weights.h5')
    print('-'*30)
    print('Predicting masks on test data...')
    print('-'*30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)
    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)
    pred_dir = 'preds'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    for image, image_id in zip(imgs_mask_test, imgs_id_test):
        # scale [0, 1] probabilities back to 8-bit grayscale for saving
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(pred_dir, str(image_id) + '_pred.png'), image)
if __name__ == '__main__':
    train_and_predict()
| 5,352 | 33.75974 | 107 | py |
deep-video-mvs | deep-video-mvs-master/dataset/scannet-export/scannet-export.py | import os
import random
import numpy as np
from multiprocessing import Pool
import copy
import os
import struct
import zlib
from itertools import groupby
import cv2
import imageio
import numpy as np
import torch
# Codec ids read from the .sens file header, mapped to human-readable names.
COMPRESSION_TYPE_COLOR = {-1: 'unknown', 0: 'raw', 1: 'png', 2: 'jpeg'}
COMPRESSION_TYPE_DEPTH = {-1: 'unknown', 0: 'raw_ushort', 1: 'zlib_ushort', 2: 'occi_ushort'}
def process_color_image(color, depth, K_color, K_depth):
    """Resample the color image onto the depth image's pixel grid.

    Builds the homography H = K_color * K_depth^-1 (pure intrinsic change,
    assumes color and depth share the same camera center -- TODO confirm),
    then warps `color` with nearest-neighbor grid_sample so the returned
    image has the depth map's resolution.
    """
    old_height, old_width = np.shape(color)[0:2]
    new_height, new_width = np.shape(depth)
    # homogeneous pixel grid (u, v, 1) at the target (depth) resolution
    x = np.linspace(0, new_width - 1, num=new_width)
    y = np.linspace(0, new_height - 1, num=new_height)
    ones = np.ones(shape=(new_height, new_width))
    x_grid, y_grid = np.meshgrid(x, y)
    warp_grid = np.stack((x_grid, y_grid, ones), axis=-1)
    warp_grid = torch.from_numpy(warp_grid).float()
    warp_grid = warp_grid.view(-1, 3).t().unsqueeze(0)
    # homography from depth pixel coordinates to color pixel coordinates
    H = K_color.dot(np.linalg.inv(K_depth))
    H = torch.from_numpy(H).float().unsqueeze(0)
    # grid_sample expects coordinates normalized to [-1, 1] over the SOURCE image
    width_normalizer = old_width / 2.0
    height_normalizer = old_height / 2.0
    warping = H.bmm(warp_grid).transpose(dim0=1, dim1=2)
    # perspective divide (epsilon guards against division by zero)
    warping = warping[:, :, 0:2] / (warping[:, :, 2].unsqueeze(-1) + 1e-8)
    warping = warping.view(1, new_height, new_width, 2)
    warping[:, :, :, 0] = (warping[:, :, :, 0] - width_normalizer) / width_normalizer
    warping[:, :, :, 1] = (warping[:, :, :, 1] - height_normalizer) / height_normalizer
    # HWC uint8 -> 1CHW float for grid_sample, then back
    image = torch.from_numpy(np.transpose(color, axes=(2, 0, 1))).float().unsqueeze(0)
    warped_image = torch.nn.functional.grid_sample(input=image,
                                                   grid=warping,
                                                   mode='nearest',
                                                   padding_mode='zeros',
                                                   align_corners=True)
    warped_image = warped_image.squeeze(0).numpy().astype(np.uint8)
    warped_image = np.transpose(warped_image, axes=(1, 2, 0))
    return warped_image
class RGBDFrame():
    """One RGB-D frame record of a ScanNet .sens stream: camera-to-world pose,
    timestamps, and the compressed color/depth payloads."""
    def load(self, file_handle):
        """Read one frame record from the open binary .sens file handle."""
        self.camera_to_world = np.asarray(struct.unpack('f' * 16, file_handle.read(16 * 4)), dtype=np.float32).reshape(4, 4)
        self.timestamp_color = struct.unpack('Q', file_handle.read(8))[0]
        self.timestamp_depth = struct.unpack('Q', file_handle.read(8))[0]
        self.color_size_bytes = struct.unpack('Q', file_handle.read(8))[0]
        self.depth_size_bytes = struct.unpack('Q', file_handle.read(8))[0]
        # Python 2: join the unpacked chars back into a single byte string payload
        self.color_data = ''.join(struct.unpack('c' * self.color_size_bytes, file_handle.read(self.color_size_bytes)))
        self.depth_data = ''.join(struct.unpack('c' * self.depth_size_bytes, file_handle.read(self.depth_size_bytes)))
    def decompress_depth(self, compression_type):
        """Return the decompressed depth payload; only 'zlib_ushort' is supported."""
        if compression_type == 'zlib_ushort':
            return self.decompress_depth_zlib()
        else:
            # NOTE(review): bare `raise` outside an except block raises a
            # TypeError/RuntimeError itself -- effectively "unsupported codec".
            raise
    def decompress_depth_zlib(self):
        return zlib.decompress(self.depth_data)
    def decompress_color(self, compression_type):
        """Return the decoded color image; only 'jpeg' is supported."""
        if compression_type == 'jpeg':
            return self.decompress_color_jpeg()
        else:
            raise
    def decompress_color_jpeg(self):
        return imageio.imread(self.color_data)
def find_longest_reliable_subsequence(is_ok):
    """Return [start, end) of the longest consecutive run of truthy flags.

    Ties keep the earliest run; returns None when no truthy run exists.
    """
    best_interval = None
    best_length = 0
    position = 0
    # groupby with the default key groups consecutive equal flags
    for flag, run in groupby(is_ok):
        run_length = len(list(run))
        if flag and run_length > best_length:
            best_length = run_length
            best_interval = [position, position + run_length]
        position += run_length
    return best_interval
class SensorData:
    """Parser/exporter for a ScanNet .sens file (version 4).

    Reads the header (sensor name, color/depth intrinsics & extrinsics,
    compression types, resolutions, depth shift) and all RGBDFrame records.
    """
    def __init__(self, filename):
        # only file-format version 4 is supported
        self.version = 4
        with open(filename, 'rb') as f:
            version = struct.unpack('I', f.read(4))[0]
            assert self.version == version
            strlen = struct.unpack('Q', f.read(8))[0]
            self.sensor_name = ''.join(struct.unpack('c' * strlen, f.read(strlen)))
            # 4x4 row-major float32 camera matrices
            self.intrinsic_color = np.asarray(struct.unpack('f' * 16, f.read(16 * 4)), dtype=np.float32).reshape(4, 4)
            self.extrinsic_color = np.asarray(struct.unpack('f' * 16, f.read(16 * 4)), dtype=np.float32).reshape(4, 4)
            self.intrinsic_depth = np.asarray(struct.unpack('f' * 16, f.read(16 * 4)), dtype=np.float32).reshape(4, 4)
            self.extrinsic_depth = np.asarray(struct.unpack('f' * 16, f.read(16 * 4)), dtype=np.float32).reshape(4, 4)
            self.color_compression_type = COMPRESSION_TYPE_COLOR[struct.unpack('i', f.read(4))[0]]
            self.depth_compression_type = COMPRESSION_TYPE_DEPTH[struct.unpack('i', f.read(4))[0]]
            self.color_width = struct.unpack('I', f.read(4))[0]
            self.color_height = struct.unpack('I', f.read(4))[0]
            self.depth_width = struct.unpack('I', f.read(4))[0]
            self.depth_height = struct.unpack('I', f.read(4))[0]
            self.depth_shift = struct.unpack('f', f.read(4))[0]
            self.num_frames = struct.unpack('Q', f.read(8))[0]
            self.frames = []
            for i in range(self.num_frames):
                frame = RGBDFrame()
                frame.load(f)
                self.frames.append(frame)
    def export_train(self, output_path, frame_skip):
        """Export every frame_skip-th frame as a compressed .npz (image + depth),
        plus poses.txt and K.txt; frames with invalid poses are skipped entirely."""
        counter = 0
        poses = []
        for index in range(0, len(self.frames), frame_skip):
            pose = self.frames[index].camera_to_world
            if np.any(np.isnan(pose)) or np.any(np.isinf(pose)) or np.any(np.isneginf(pose)):
                print("Pose NaN, Inf or -Inf encountered!, Skipping...")
                continue
            poses.append(np.ravel(pose).tolist())
            depth = self.frames[index].decompress_depth(self.depth_compression_type)
            depth = np.fromstring(depth, dtype=np.uint16).reshape(self.depth_height, self.depth_width)
            color = self.frames[index].decompress_color(self.color_compression_type)
            # resample color to the depth resolution so image and depth align
            color = process_color_image(color=color,
                                        depth=depth,
                                        K_color=self.intrinsic_color[0:3, 0:3],
                                        K_depth=self.intrinsic_depth[0:3, 0:3])
            output_file = os.path.join(output_path, str(counter).zfill(6))
            np.savez_compressed(output_file, image=color, depth=depth)
            counter += 1
        np.savetxt(fname=os.path.join(output_path, "poses.txt"), X=np.array(poses), fmt='%.8e')
        np.savetxt(fname=os.path.join(output_path, "K.txt"), X=self.intrinsic_depth[0:3, 0:3])
    def export_test(self, output_path, frame_skip):
        """Export poses for ALL frames but images/depths only every frame_skip-th
        frame, as PNGs under images/ and depth/ subfolders."""
        poses = []
        for f in range(0, len(self.frames)):
            pose = self.frames[f].camera_to_world
            poses.append(np.ravel(pose).tolist())
        poses = np.array(poses)
        np.savetxt(fname=os.path.join(output_path, "poses.txt"), X=poses, fmt='%.8e')
        np.savetxt(fname=os.path.join(output_path, "K.txt"), X=self.intrinsic_depth[0:3, 0:3])
        print 'exporting', self.num_frames // frame_skip, ' frames to', output_path
        image_folder = os.path.join(output_path, 'images')
        depth_folder = os.path.join(output_path, 'depth')
        os.mkdir(image_folder)
        os.mkdir(depth_folder)
        for f in range(0, self.num_frames, frame_skip):
            depth = self.frames[f].decompress_depth(self.depth_compression_type)
            depth = np.fromstring(depth, dtype=np.uint16).reshape(self.depth_height, self.depth_width)
            color = self.frames[f].decompress_color(self.color_compression_type)
            color = process_color_image(color=color,
                                        depth=depth,
                                        K_color=self.intrinsic_color[0:3, 0:3],
                                        K_depth=self.intrinsic_depth[0:3, 0:3])
            # cv2.imwrite expects BGR channel order
            color = cv2.cvtColor(color, cv2.COLOR_BGR2RGB)
            cv2.imwrite(os.path.join(image_folder, str(f).zfill(6) + '.png'), color, [cv2.IMWRITE_PNG_COMPRESSION, 3])
            cv2.imwrite(os.path.join(depth_folder, str(f).zfill(6) + '.png'), depth, [cv2.IMWRITE_PNG_COMPRESSION, 3])
def export_samples(scene_path):
    """Export one ScanNet sequence folder (worker function for the process pool).

    Relies on module-level globals `output_path`, `is_train` and `frame_skip`.
    Skips scenes whose output folder already exists.
    """
    scene_name = scene_path.split("/")[-1]
    scene_output_path = os.path.join(output_path, scene_name)
    if not os.path.exists(scene_output_path):
        # load the data
        print 'loading sensor data for %s...' % scene_path
        sd = SensorData(os.path.join(scene_path, scene_name + ".sens"))
        os.mkdir(scene_output_path)
        if is_train:
            sd.export_train(scene_output_path, frame_skip=frame_skip)
        else:
            sd.export_test(scene_output_path, frame_skip=frame_skip)
    else:
        print 'existing scene %s, skipping...' % scene_path
def sanity_check_test():
    """Verify each exported test scene has equal image, depth and pose counts."""
    exported_scenes = sorted(os.listdir(output_path))
    for exported_scene in exported_scenes:
        n_images = len(os.listdir(os.path.join(output_path, exported_scene, "images")))
        n_depths = len(os.listdir(os.path.join(output_path, exported_scene, "depth")))
        n_poses = len(np.loadtxt(os.path.join(output_path, exported_scene, "poses.txt")))
        if n_images != n_poses or n_images != n_depths or n_depths != n_poses:
            print exported_scene, "is problematic"
def sanity_check_train():
    """Verify each exported train scene: sample count must match pose count."""
    exported_scenes = sorted(os.listdir(output_path))
    for exported_scene in exported_scenes:
        if ".txt" not in exported_scene:
            n_poses = len(np.loadtxt(os.path.join(output_path, exported_scene, "poses.txt")))
            # NOTE(review): K is loaded but unused -- presumably only checks
            # that K.txt exists and parses; confirm before removing.
            K = np.loadtxt(os.path.join(output_path, exported_scene, "K.txt"))
            # scene folder holds the .npz samples plus poses.txt and K.txt
            n_files = len(os.listdir(os.path.join(output_path, exported_scene)))
            if n_files - 2 != n_poses:
                print exported_scene, "is problematic"
# ---- Script configuration (edit before running) ----
frame_skip = 1  # export every frame_skip-th frame
is_train = False  # True: .npz train export; False: PNG test export
is_sanity_check = False  # True: only verify an existing export, no conversion
if is_train:
    input_path = "/home/ardaduz/HDD/Downloads/ScanNet/scans"
    output_path = "/media/ardaduz/T5/train"
else:
    input_path = "/home/ardaduz/HDD/Downloads/ScanNet/scans_test"
    output_path = "/media/ardaduz/T5/test/scannet"
if __name__ == '__main__':
    # Sanity-check mode only verifies a previous export, then exits.
    if is_sanity_check:
        if is_train:
            sanity_check_train()
        else:
            sanity_check_test()
        exit(0)
    sequence_names = sorted(os.listdir(input_path))
    if is_train:
        # Group sequence folders ("sceneXXXX_YY") by scene so the
        # train/validation split never puts one scene in both sets.
        scene_names_dict = dict()
        for sequence_name in sequence_names:
            scene_name, idx = sequence_name.split('_')
            if scene_name in scene_names_dict:
                scene_names_dict[scene_name].append(idx)
            else:
                scene_names_dict[scene_name] = [idx]
        # list() keeps this working on Python 3 too, where keys() is a view
        scene_names = list(scene_names_dict.keys())
        # fixed seed -> reproducible 90/10 scene-level split
        random.seed(123)
        random.shuffle(scene_names)
        n_scenes = len(scene_names)
        n_training = int(n_scenes * 0.9)
        training_scenes = scene_names[:n_training]
        validation_scenes = scene_names[n_training:]
        training_sequences = []
        for training_scene in training_scenes:
            idxs = scene_names_dict[training_scene]
            for idx in idxs:
                training_sequences.append(training_scene + "_" + idx)
        validation_sequences = []
        for validation_scene in validation_scenes:
            idxs = scene_names_dict[validation_scene]
            for idx in idxs:
                validation_sequences.append(validation_scene + "_" + idx)
        np.savetxt(os.path.join(output_path, "train.txt"), np.array(training_sequences), fmt='%s')
        np.savetxt(os.path.join(output_path, "validation.txt"), np.array(validation_sequences), fmt='%s')
    sequence_names.sort()
    sequence_paths = []
    for index, sequence_name in enumerate(sequence_names):
        sequence_paths.append(os.path.join(input_path, sequence_name))
    # Export all sequences with 6 worker processes.
    pool = Pool(6)
    pool.map(export_samples, sequence_paths)
    # BUG FIX: Pool.join() must be preceded by close() (or terminate());
    # the original called join() before close(), which raises ValueError.
    pool.close()
    pool.join()
| 12,059 | 40.586207 | 124 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/losses.py | from __future__ import division
import torch
from torch import nn
class LossMeter(object):
    """Running-average tracker for a summed loss over a growing element count."""

    def __init__(self):
        # total elements seen, total loss, running average, and the
        # per-element average of the most recent update
        self.count = 0.0
        self.sum = 0.0
        self.avg = 0.0
        self.item_average = 0.0

    def update(self, loss, count):
        """Fold a summed `loss` over `count` elements into the statistics."""
        self.sum += loss
        self.count += count
        self.avg = self.sum / self.count
        self.item_average = loss / count

    def __repr__(self):
        # "last-batch average (running average)"
        return '{:.4f} ({:.4f})'.format(self.item_average, self.avg)
def update_losses(predictions, weights, groundtruth, is_training, l1_meter, huber_meter, l1_inv_meter, l1_rel_meter, loss_type):
    """Compute per-sample losses, update the meters, and build the optimizer loss.

    In training mode every prediction scale contributes `weights[i]` times its
    loss of kind `loss_type`; in evaluation mode only the final prediction is
    measured. The meters always record the metrics of the LAST evaluated
    prediction. Returns the weighted optimizer loss (0 outside training).
    """
    optimizer_loss = 0
    l1 = huber = l1_inv = l1_rel = valid_count = None
    if is_training:
        for index, prediction in enumerate(predictions):
            l1, huber, l1_inv, l1_rel, valid_count = \
                calculate_loss(groundtruth=groundtruth, prediction=prediction)
            # pick the term matching loss_type; unrecognized types add nothing
            term = {"L1": l1, "L1-inv": l1_inv, "L1-rel": l1_rel, "Huber": huber}.get(loss_type)
            if term is not None:
                optimizer_loss = optimizer_loss + weights[index] * (term / valid_count)
    else:
        l1, huber, l1_inv, l1_rel, valid_count = calculate_loss(groundtruth=groundtruth,
                                                                prediction=predictions[-1])
    l1_meter.update(l1.item(), valid_count)
    huber_meter.update(huber.item(), valid_count)
    l1_inv_meter.update(l1_inv.item(), valid_count)
    l1_rel_meter.update(l1_rel.item(), valid_count)
    return optimizer_loss
def calculate_loss(groundtruth, prediction):
    """Summed depth losses over valid (non-zero groundtruth) pixels.

    The groundtruth (B, H, W) is nearest-resampled to the prediction's
    (B, h, w) resolution. Returns a 5-tuple:
    (L1 sum, smooth-L1/Huber sum, inverse-depth L1 sum, relative L1 sum,
    number of valid pixels).
    """
    # add a channel axis so interpolate sees 4D tensors
    gt = groundtruth.unsqueeze(1)
    pred = prediction.unsqueeze(1)
    gt_resampled = nn.functional.interpolate(gt,
                                             size=tuple(pred.shape[-2:]),
                                             mode='nearest')
    # zero depth marks missing groundtruth
    mask = gt_resampled != 0
    valid_count = int(mask.sum().item())
    gt_valid = gt_resampled[mask]
    pred_valid = pred[mask]
    abs_diff = torch.abs(gt_valid - pred_valid)
    l1_loss = abs_diff.sum()
    huber_loss = torch.nn.functional.smooth_l1_loss(pred_valid, gt_valid, reduction='sum')
    l1_inv_loss = torch.abs(1.0 / gt_valid - 1.0 / pred_valid).sum()
    l1_rel_loss = (abs_diff / gt_valid).sum()
    return l1_loss, huber_loss, l1_inv_loss, l1_rel_loss, valid_count
| 3,529 | 41.53012 | 146 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/utils.py | from __future__ import division
import os
import zipfile
import cv2
import kornia
import numpy as np
import torch
from path import Path
from pytorch3d import structures, renderer
from dvmvs.errors import compute_errors
# GEOMETRIC UTILS
def pose_distance(reference_pose, measurement_pose):
    """
    :param reference_pose: 4x4 numpy array, reference frame camera-to-world pose (not extrinsic matrix!)
    :param measurement_pose: 4x4 numpy array, measurement frame camera-to-world pose (not extrinsic matrix!)
    :return combined_measure: float, combined pose distance measure
    :return R_measure: float, rotation distance measure
    :return t_measure: float, translation distance measure
    """
    relative = np.linalg.inv(reference_pose) @ measurement_pose
    rotation = relative[:3, :3]
    translation = relative[:3, 3]
    # rotation distance from the trace (clamped at 3 to guard numerical drift)
    rotation_measure = np.sqrt(2 * (1 - min(3.0, np.trace(rotation)) / 3))
    translation_measure = np.linalg.norm(translation)
    combined_measure = np.sqrt(rotation_measure ** 2 + translation_measure ** 2)
    return combined_measure, rotation_measure, translation_measure
def get_warp_grid_for_cost_volume_calculation(width, height, device):
    """Build a (3, H*W) tensor of homogeneous pixel coordinates (x, y, 1),
    row-major over the image, on the given device."""
    xs = np.linspace(0, width - 1, num=int(width))
    ys = np.linspace(0, height - 1, num=int(height))
    grid_x, grid_y = np.meshgrid(xs, ys)
    homogeneous = np.stack((grid_x, grid_y, np.ones(shape=(height, width))), axis=-1)
    # flatten to one column per pixel: row 0 = x, row 1 = y, row 2 = 1
    warp_grid = torch.from_numpy(homogeneous).float().view(-1, 3).t().to(device)
    return warp_grid
def calculate_cost_volume_by_warping(image1, image2, pose1, pose2, K, warp_grid, min_depth, max_depth, n_depth_levels, device, dot_product):
    """Plane-sweep cost volume between a reference and a measurement feature map.

    For each of n_depth_levels fronto-parallel planes (uniform in INVERSE depth
    between max_depth and min_depth), warps image2 into image1's view and scores
    similarity per pixel: channel-mean dot product if `dot_product`, otherwise
    sum of absolute differences. Returns (B, n_depth_levels, H, W).
    """
    batch_size, channels, height, width = image1.size()
    warp_grid = torch.cat(batch_size * [warp_grid.unsqueeze(dim=0)])
    cost_volume = torch.empty(size=(batch_size, n_depth_levels, height, width), dtype=torch.float32).to(device)
    # relative transform taking points from frame 1 to frame 2
    extrinsic2 = torch.inverse(pose2).bmm(pose1)
    R = extrinsic2[:, 0:3, 0:3]
    t = extrinsic2[:, 0:3, 3].unsqueeze(-1)
    # depth-independent parts of the planar homography, precomputed once
    Kt = K.bmm(t)
    K_R_Kinv = K.bmm(R).bmm(torch.inverse(K))
    K_R_Kinv_UV = K_R_Kinv.bmm(warp_grid)
    inverse_depth_base = 1.0 / max_depth
    inverse_depth_step = (1.0 / min_depth - 1.0 / max_depth) / (n_depth_levels - 1)
    # grid_sample expects coordinates normalized to [-1, 1]
    width_normalizer = width / 2.0
    height_normalizer = height / 2.0
    for depth_i in range(n_depth_levels):
        this_depth = 1 / (inverse_depth_base + depth_i * inverse_depth_step)
        # homography for this depth plane: K R K^-1 uv + K t / d
        warping = K_R_Kinv_UV + (Kt / this_depth)
        warping = warping.transpose(dim0=1, dim1=2)
        # perspective divide (epsilon guards against division by zero)
        warping = warping[:, :, 0:2] / (warping[:, :, 2].unsqueeze(-1) + 1e-8)
        warping = warping.view(batch_size, height, width, 2)
        warping[:, :, :, 0] = (warping[:, :, :, 0] - width_normalizer) / width_normalizer
        warping[:, :, :, 1] = (warping[:, :, :, 1] - height_normalizer) / height_normalizer
        warped_image2 = torch.nn.functional.grid_sample(input=image2,
                                                        grid=warping,
                                                        mode='bilinear',
                                                        padding_mode='zeros',
                                                        align_corners=True)
        if dot_product:
            # correlation-style matching cost (higher = more similar)
            cost_volume[:, depth_i, :, :] = torch.sum(image1 * warped_image2, dim=1) / channels
        else:
            # SAD matching cost (lower = more similar)
            cost_volume[:, depth_i, :, :] = torch.sum(torch.abs(image1 - warped_image2), dim=1)
    return cost_volume
def cost_volume_fusion(image1, image2s, pose1, pose2s, K, warp_grid, min_depth, max_depth, n_depth_levels, device, dot_product):
    """Average the plane-sweep cost volumes of all measurement frames
    against the reference frame. Returns (B, n_depth_levels, H, W)."""
    batch_size, channels, height, width = image1.size()
    accumulator = torch.zeros(size=(batch_size, n_depth_levels, height, width), dtype=torch.float32).to(device)
    for measurement_pose, measurement_image in zip(pose2s, image2s):
        accumulator = accumulator + calculate_cost_volume_by_warping(image1=image1,
                                                                     image2=measurement_image,
                                                                     pose1=pose1,
                                                                     pose2=measurement_pose,
                                                                     K=K,
                                                                     warp_grid=warp_grid,
                                                                     min_depth=min_depth,
                                                                     max_depth=max_depth,
                                                                     n_depth_levels=n_depth_levels,
                                                                     device=device,
                                                                     dot_product=dot_product)
    return accumulator / len(pose2s)
def get_non_differentiable_rectangle_depth_estimation(reference_pose_torch,
                                                      measurement_pose_torch,
                                                      previous_depth_torch,
                                                      full_K_torch,
                                                      half_K_torch,
                                                      original_width,
                                                      original_height):
    """Forward-splat a previous depth map into the reference view at half resolution.

    Unprojects previous_depth_torch with full_K_torch, transforms the points
    into the reference frame, and scatters their z-values into a
    (B, 1, H/2, W/2) hypothesis map using half_K_torch. Not differentiable
    (uses argsort / np.unique / index_put_). Requires CUDA.
    """
    batch_size, _, _ = reference_pose_torch.shape
    half_width = int(original_width / 2)
    half_height = int(original_height / 2)
    # relative transform: measurement frame -> reference frame
    trans = torch.bmm(torch.inverse(reference_pose_torch), measurement_pose_torch)
    points_3d_src = kornia.depth_to_3d(previous_depth_torch, full_K_torch, normalize_points=False)
    points_3d_src = points_3d_src.permute(0, 2, 3, 1)
    points_3d_dst = kornia.transform_points(trans[:, None], points_3d_src)
    points_3d_dst = points_3d_dst.view(batch_size, -1, 3)
    # clamp points behind the camera to z=0
    z_values = points_3d_dst[:, :, -1]
    z_values = torch.relu(z_values)
    # sort points far-to-near; duplicates per pixel are resolved below by
    # np.unique's first-occurrence index (keeps the first, i.e. largest-z, hit)
    sorting_indices = torch.argsort(z_values, descending=True)
    z_values = torch.gather(z_values, dim=1, index=sorting_indices)
    sorting_indices_for_points = torch.stack([sorting_indices] * 3, dim=-1)
    points_3d_dst = torch.gather(points_3d_dst, dim=1, index=sorting_indices_for_points)
    # project into the half-resolution reference image and keep in-bounds pixels
    projections = torch.round(kornia.project_points(points_3d_dst, half_K_torch.unsqueeze(1))).long()
    is_valid_below = (projections[:, :, 0] >= 0) & (projections[:, :, 1] >= 0)
    is_valid_above = (projections[:, :, 0] < half_width) & (projections[:, :, 1] < half_height)
    is_valid = is_valid_below & is_valid_above
    depth_hypothesis = torch.zeros(size=(batch_size, 1, half_height, half_width)).cuda()
    for projection_index in range(0, batch_size):
        valid_points_zs = z_values[projection_index][is_valid[projection_index]]
        valid_projections = projections[projection_index][is_valid[projection_index]]
        i_s = valid_projections[:, 1]
        j_s = valid_projections[:, 0]
        # collapse (row, col) to a single key to deduplicate per-pixel hits
        ij_combined = i_s * half_width + j_s
        _, ij_combined_unique_indices = np.unique(ij_combined.cpu().numpy(), return_index=True)
        ij_combined_unique_indices = torch.from_numpy(ij_combined_unique_indices).long().cuda()
        i_s = i_s[ij_combined_unique_indices]
        j_s = j_s[ij_combined_unique_indices]
        valid_points_zs = valid_points_zs[ij_combined_unique_indices]
        torch.index_put_(depth_hypothesis[projection_index, 0], (i_s, j_s), valid_points_zs)
    return depth_hypothesis
def get_differentiable_square_depth_estimation(reference_pose_torch,
                                               measurement_pose_torch,
                                               previous_depth_torch,
                                               full_K_torch,
                                               half_K_torch,
                                               original_image_size,
                                               device):
    """Differentiable variant: render the previous depth's point cloud into the
    reference view with the pytorch3d point rasterizer.

    Returns a (B, 1, S/2, S/2) depth hypothesis where S = original_image_size
    (assumes a square image -- TODO confirm). Requires CUDA (hard-coded below).
    """
    batch_size, _, _ = full_K_torch.size()
    # identity render camera; the point cloud itself is transformed instead
    R_render = torch.eye(3, dtype=torch.float, device=device)
    T_render = torch.zeros(3, dtype=torch.float, device=device)
    R_render = torch.stack(batch_size * [R_render], dim=0)
    T_render = torch.stack(batch_size * [T_render], dim=0)
    # NOTE(review): sign flips on the x/y axes -- presumably to convert between
    # the OpenCV and pytorch3d camera conventions; confirm before changing.
    R_render[:, 0, 0] *= -1
    R_render[:, 1, 1] *= -1
    # lift previous depth to 3D and move the points into the reference frame
    trans = torch.bmm(torch.inverse(reference_pose_torch), measurement_pose_torch)
    points_3d_src = kornia.depth_to_3d(previous_depth_torch, full_K_torch, normalize_points=False)
    points_3d_src = points_3d_src.permute(0, 2, 3, 1)
    points_3d_dst = kornia.transform_points(trans[:, None], points_3d_src).view(batch_size, -1, 3)
    point_cloud_p3d = structures.Pointclouds(points=points_3d_dst, features=None)
    # convert the half-resolution intrinsics to pytorch3d's NDC convention
    width_normalizer = original_image_size / 4.0
    height_normalizer = original_image_size / 4.0
    px_ndc = (half_K_torch[:, 0, 2] - width_normalizer) / width_normalizer
    py_ndc = (half_K_torch[:, 1, 2] - height_normalizer) / height_normalizer
    fx_ndc = half_K_torch[:, 0, 0] / width_normalizer
    fy_ndc = half_K_torch[:, 1, 1] / height_normalizer
    principal_point = torch.stack([px_ndc, py_ndc], dim=-1)
    focal_length = torch.stack([fx_ndc, fy_ndc], dim=-1)
    cameras = renderer.SfMPerspectiveCameras(focal_length=focal_length,
                                             principal_point=principal_point,
                                             R=R_render,
                                             T=T_render,
                                             device=torch.device('cuda'))
    raster_settings = renderer.PointsRasterizationSettings(
        image_size=int(original_image_size / 2.0),
        radius=0.02,
        points_per_pixel=3)
    depth_renderer = renderer.PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
    # take the nearest of the rasterized z-buffer candidates per pixel,
    # clamped to non-negative depth
    rendered_depth = torch.min(depth_renderer(point_cloud_p3d).zbuf, dim=-1)[0]
    depth_hypothesis = torch.relu(rendered_depth).unsqueeze(1)
    return depth_hypothesis
def warp_frame_depth(
        image_src: torch.Tensor,
        depth_dst: torch.Tensor,
        src_trans_dst: torch.Tensor,
        camera_matrix: torch.Tensor,
        normalize_points: bool = False,
        sampling_mode='bilinear') -> torch.Tensor:
    # TAKEN FROM KORNIA LIBRARY
    """Warp image_src (B, D, H, W) into the destination view using the
    destination depth map, the dst->src transform and the shared intrinsics.
    Returns the warped image with the same shape as image_src."""
    if not isinstance(image_src, torch.Tensor):
        raise TypeError(f"Input image_src type is not a torch.Tensor. Got {type(image_src)}.")
    if not len(image_src.shape) == 4:
        raise ValueError(f"Input image_src musth have a shape (B, D, H, W). Got: {image_src.shape}")
    if not isinstance(depth_dst, torch.Tensor):
        raise TypeError(f"Input depht_dst type is not a torch.Tensor. Got {type(depth_dst)}.")
    if not len(depth_dst.shape) == 4 and depth_dst.shape[-3] == 1:
        raise ValueError(f"Input depth_dst musth have a shape (B, 1, H, W). Got: {depth_dst.shape}")
    if not isinstance(src_trans_dst, torch.Tensor):
        raise TypeError(f"Input src_trans_dst type is not a torch.Tensor. "
                        f"Got {type(src_trans_dst)}.")
    if not len(src_trans_dst.shape) == 3 and src_trans_dst.shape[-2:] == (3, 3):
        raise ValueError(f"Input src_trans_dst must have a shape (B, 3, 3). "
                         f"Got: {src_trans_dst.shape}.")
    if not isinstance(camera_matrix, torch.Tensor):
        raise TypeError(f"Input camera_matrix type is not a torch.Tensor. "
                        f"Got {type(camera_matrix)}.")
    if not len(camera_matrix.shape) == 3 and camera_matrix.shape[-2:] == (3, 3):
        raise ValueError(f"Input camera_matrix must have a shape (B, 3, 3). "
                         f"Got: {camera_matrix.shape}.")
    # unproject source points to camera frame
    points_3d_dst: torch.Tensor = kornia.depth_to_3d(depth_dst, camera_matrix, normalize_points)  # Bx3xHxW
    # transform points from source to destination
    points_3d_dst = points_3d_dst.permute(0, 2, 3, 1)  # BxHxWx3
    # apply transformation to the 3d points
    points_3d_src = kornia.transform_points(src_trans_dst[:, None], points_3d_dst)  # BxHxWx3
    # local modification vs. upstream kornia: clamp z to non-negative before projecting
    points_3d_src[:, :, :, 2] = torch.relu(points_3d_src[:, :, :, 2])
    # project back to pixels
    camera_matrix_tmp: torch.Tensor = camera_matrix[:, None, None]  # Bx1x1xHxW
    points_2d_src: torch.Tensor = kornia.project_points(points_3d_src, camera_matrix_tmp)  # BxHxWx2
    # normalize points between [-1 / 1]
    height, width = depth_dst.shape[-2:]
    points_2d_src_norm: torch.Tensor = kornia.normalize_pixel_coordinates(points_2d_src, height, width)  # BxHxWx2
    return torch.nn.functional.grid_sample(image_src, points_2d_src_norm, align_corners=True, mode=sampling_mode)
def is_pose_available(pose):
    """Return True when every entry of the pose matrix is finite
    (no NaN, +Inf or -Inf)."""
    return bool(np.isfinite(pose).all())
# TRAINING UTILS
def freeze_batchnorm(module):
    """If `module` is any BatchNorm layer, switch it to eval mode and stop
    gradient updates of its affine parameters; other modules are untouched.
    Intended for use with nn.Module.apply()."""
    batchnorm_types = (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)
    if isinstance(module, batchnorm_types):
        module.eval()
        module.weight.requires_grad = False
        module.bias.requires_grad = False
def zip_code(run_directory):
    """Archive every top-level .py file from the current and parent
    directories into <run_directory>/code.zip (training reproducibility)."""
    archive_path = os.path.join(run_directory, "code.zip")
    with zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED) as archive:
        for directory in ("./", "../"):
            for source_file in Path(directory).files("*.py"):
                archive.write(source_file)
def save_checkpoint(save_path, models, step, loss, filename='checkpoint.pth.tar'):
    """Save each model's state_dict under save_path, tagging the file name
    with the epoch and the (l1, l1-inv, l1-rel, huber) metrics.

    `models` is an iterable of dicts with keys 'name' and 'state_dict'.
    """
    save_path = Path(save_path)
    name_template = '{}_{}_epoch:{}_l1:{:.4f}_l1-inv:{:.4f}_l1-rel:{:.4f}_huber:{:.4f}'
    for model in models:
        target_name = name_template.format(model['name'], filename, step,
                                           loss[0], loss[1], loss[2], loss[3])
        torch.save(model['state_dict'], save_path / target_name)
def save_optimizer(save_path, optimizer, step, loss, filename='checkpoint.pth.tar'):
    """Save the optimizer state_dict with the same epoch/metric-tagged naming
    scheme as save_checkpoint."""
    name_template = 'optimizer_{}_epoch:{}_l1:{:.4f}_l1-inv:{:.4f}_l1-rel:{:.4f}_huber:{:.4f}'
    target_name = name_template.format(filename, step, loss[0], loss[1], loss[2], loss[3])
    torch.save(optimizer.state_dict(), Path(save_path) / target_name)
def print_number_of_trainable_parameters(optimizer):
    """Print the total element count of all parameters with requires_grad=True
    across the optimizer's parameter groups."""
    parameter_counter = sum(parameter.nelement()
                            for param_group in optimizer.param_groups
                            for parameter in param_group['params']
                            if parameter.requires_grad)
    print("Number of trainable parameters:", f"{parameter_counter:,d}")
# TESTING UTILS
def save_results(predictions, groundtruths, system_name, scene_name, save_folder, max_depth=np.inf):
    """Compute per-frame depth error metrics (when groundtruth is given), print
    their means, and save both errors and predictions as compressed .npz files
    named '<system>_errors_<scene>' / '<system>_predictions_<scene>'."""
    if groundtruths is not None:
        errors = []
        for i, prediction in enumerate(predictions):
            errors.append(compute_errors(groundtruths[i], prediction, max_depth))
        error_names = ['abs_error', 'abs_relative_error', 'abs_inverse_error',
                       'squared_relative_error', 'rmse', 'ratio_125', 'ratio_125_2', 'ratio_125_3']
        errors = np.array(errors)
        # nanmean: frames may produce NaN metrics (e.g. empty valid regions)
        mean_errors = np.nanmean(errors, 0)
        print("Metrics of {} for scene {}:".format(system_name, scene_name))
        print("{:>25}, {:>25}, {:>25}, {:>25}, {:>25}, {:>25}, {:>25}, {:>25}".format(*error_names))
        print("{:25.4f}, {:25.4f}, {:25.4f}, {:25.4f}, {:25.4f}, {:25.4f}, {:25.4f}, {:25.4f}".format(*mean_errors))
        np.savez_compressed(Path(save_folder) / system_name + "_errors_" + scene_name, errors)
    predictions = np.array(predictions)
    np.savez_compressed(Path(save_folder) / system_name + "_predictions_" + scene_name, predictions)
def save_predictions(predictions, system_name, scene_name, save_folder):
    """Write *predictions* to '<save_folder>/<system_name>_predictions_<scene_name>.npz'."""
    output_file = Path(save_folder) / system_name + "_predictions_" + scene_name
    np.savez_compressed(output_file, predictions)
def visualize_predictions(numpy_reference_image, numpy_measurement_image, numpy_predicted_depth, normalization_mean, normalization_std, normalization_scale,
                          depth_multiplier_for_visualization=5000):
    """Undo the color normalization and display the reference/measurement
    images and the predicted depth in OpenCV windows (blocks on a key press)."""
    def denormalize(image):
        # invert (x - mean) / std, then rescale to a displayable 8-bit range
        restored = image * np.array(normalization_std) + np.array(normalization_mean)
        return (restored * normalization_scale).astype(np.uint8)
    reference_display = denormalize(numpy_reference_image)
    measurement_display = denormalize(numpy_measurement_image)
    cv2.imshow("Reference Image", cv2.cvtColor(reference_display, cv2.COLOR_RGB2BGR))
    cv2.imshow("A Measurement Image", cv2.cvtColor(measurement_display, cv2.COLOR_RGB2BGR))
    cv2.imshow("Predicted Depth", (depth_multiplier_for_visualization * numpy_predicted_depth).astype(np.uint16))
    cv2.waitKey()
class InferenceTimer:
    """Measure GPU forward-pass latency using CUDA events.

    The first *n_skip* measurements are discarded in the reported statistics
    to exclude warm-up iterations.
    """

    def __init__(self, n_skip=20):
        self.times = []
        self.n_skip = n_skip
        self.forward_pass_start = torch.cuda.Event(enable_timing=True)
        self.forward_pass_end = torch.cuda.Event(enable_timing=True)

    def record_start_time(self):
        # mark the start of a forward pass on the current CUDA stream
        self.forward_pass_start.record()

    def record_end_time_and_elapsed_time(self):
        # mark the end, wait for the GPU to finish, then store milliseconds
        self.forward_pass_end.record()
        torch.cuda.synchronize()
        self.times.append(self.forward_pass_start.elapsed_time(self.forward_pass_end))

    def print_statistics(self):
        measurements = np.array(self.times[self.n_skip:])
        if len(measurements) == 0:
            print("Not enough time measurements are taken!")
            return
        print("Number of Forward Passes:", len(measurements))
        print("--- Mean Inference Time:", np.mean(measurements))
        print("--- Std Inference Time:", np.std(measurements))
        print("--- Median Inference Time:", np.median(measurements))
        print("--- Min Inference Time:", np.min(measurements))
        print("--- Max Inference Time:", np.max(measurements))
| 19,349 | 47.014888 | 156 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/layers.py | import torch
def down_conv_layer(input_channels, output_channels, kernel_size):
    """Two 'same'-padded conv+BN+ReLU stages; the second convolution uses
    stride 2 and halves the spatial resolution."""
    same_padding = (kernel_size - 1) // 2
    stages = [
        torch.nn.Conv2d(input_channels, output_channels, kernel_size,
                        padding=same_padding, stride=1, bias=False),
        torch.nn.BatchNorm2d(output_channels),
        torch.nn.ReLU(),
        torch.nn.Conv2d(output_channels, output_channels, kernel_size,
                        padding=same_padding, stride=2, bias=False),
        torch.nn.BatchNorm2d(output_channels),
        torch.nn.ReLU(),
    ]
    return torch.nn.Sequential(*stages)
def up_conv_layer(input_channels, output_channels, kernel_size):
    """Bilinear 2x upsampling followed by a 'same'-padded conv + BN + ReLU."""
    upsample = torch.nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    convolution = torch.nn.Conv2d(input_channels, output_channels, kernel_size,
                                  padding=(kernel_size - 1) // 2, bias=False)
    return torch.nn.Sequential(upsample,
                               convolution,
                               torch.nn.BatchNorm2d(output_channels),
                               torch.nn.ReLU())
def conv_layer(input_channels, output_channels, kernel_size, stride, apply_bn_relu):
    """'same'-padded strided convolution, optionally followed by BatchNorm and
    an in-place ReLU (controlled by *apply_bn_relu*)."""
    convolution = torch.nn.Conv2d(input_channels, output_channels, kernel_size,
                                  padding=(kernel_size - 1) // 2,
                                  stride=stride, bias=False)
    if not apply_bn_relu:
        return torch.nn.Sequential(convolution)
    return torch.nn.Sequential(convolution,
                               torch.nn.BatchNorm2d(output_channels),
                               torch.nn.ReLU(inplace=True))
def depth_layer_3x3(input_channels):
    """3x3 convolution to a single channel followed by a sigmoid, producing
    per-pixel values in (0, 1)."""
    head = torch.nn.Conv2d(input_channels, 1, 3, padding=1)
    return torch.nn.Sequential(head, torch.nn.Sigmoid())
| 1,984 | 29.075758 | 84 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/dataset_loader.py | import copy
import random
from functools import partial
from multiprocessing import Manager
from multiprocessing.pool import Pool
import cv2
import numpy as np
import torch
from kornia import adjust_brightness, adjust_gamma, adjust_contrast
from path import Path
from torch.utils.data import Dataset, DataLoader
from dvmvs.config import Config
from dvmvs.utils import pose_distance
def is_valid_pair(reference_pose, measurement_pose, pose_dist_min, pose_dist_max, t_norm_threshold=0.05, return_measure=False):
    """Decide whether two camera poses form a usable frame pair.

    A pair is valid when the combined pose distance lies within
    [pose_dist_min, pose_dist_max] and the translation magnitude is at least
    *t_norm_threshold*.  When *return_measure* is set, the combined distance
    is returned alongside the verdict.
    """
    combined_measure, R_measure, t_measure = pose_distance(reference_pose, measurement_pose)
    result = (pose_dist_min <= combined_measure <= pose_dist_max) and t_measure >= t_norm_threshold
    if return_measure:
        return result, combined_measure
    return result
def gather_pairs_train(poses, used_pairs, is_backward, initial_pose_dist_min, initial_pose_dist_max):
    """Greedily pair each frame with a measurement frame for training.

    Walks the trajectory (forward or backward depending on *is_backward*)
    and, for every reference frame i, searches first behind it and then
    ahead of it for a partner j whose pose distance to i falls inside the
    current [pose_dist_min, pose_dist_max] window.  When no partner is
    found, the window is loosened (min / 1.1, max * 1.1); after two
    loosening rounds the frame is skipped.  Accepted pairs are recorded in
    *used_pairs* (both orderings) so later calls do not reuse them, and a
    measurement index is never reused within this call.

    Returns a list of (reference_index, measurement_index) tuples.
    """
    sequence_length = len(poses)
    while_range = range(0, sequence_length)
    # working copies of the distance window; they get loosened on failure
    # and reset to the initial values after every successful match
    pose_dist_min = copy.deepcopy(initial_pose_dist_min)
    pose_dist_max = copy.deepcopy(initial_pose_dist_max)
    used_measurement_indices = set()
    # Gather pairs
    check_future = False
    pairs = []
    if is_backward:
        # traverse from the end; keep a 5-frame margin at both extremes
        i = sequence_length - 1
        step = -1
        first_limit = 5
        second_limit = sequence_length - 5
    else:
        i = 0
        step = 1
        first_limit = sequence_length - 5
        second_limit = 5
    loosening_counter = 0
    while i in while_range:
        # (i, -1) marks "no partner found yet" for this reference frame
        pair = (i, -1)
        if check_future:
            # second attempt: search in the direction of travel
            for j in range(i + step, first_limit, step):
                if j not in used_measurement_indices and (i, j) not in used_pairs:
                    valid = is_valid_pair(poses[i], poses[j], pose_dist_min, pose_dist_max)
                    if valid:
                        pair = (i, j)
                        pairs.append(pair)
                        used_pairs.add(pair)
                        used_pairs.add((pair[1], pair[0]))
                        used_measurement_indices.add(j)
                        # success: reset the window and advance
                        pose_dist_min = copy.deepcopy(initial_pose_dist_min)
                        pose_dist_max = copy.deepcopy(initial_pose_dist_max)
                        i += step
                        check_future = False
                        loosening_counter = 0
                        break
        else:
            # first attempt: search against the direction of travel
            for j in range(i - step, second_limit, -step):
                if j not in used_measurement_indices and (i, j) not in used_pairs:
                    valid = is_valid_pair(poses[i], poses[j], pose_dist_min, pose_dist_max)
                    if valid:
                        pair = (i, j)
                        pairs.append(pair)
                        used_pairs.add(pair)
                        used_pairs.add((pair[1], pair[0]))
                        used_measurement_indices.add(j)
                        pose_dist_min = copy.deepcopy(initial_pose_dist_min)
                        pose_dist_max = copy.deepcopy(initial_pose_dist_max)
                        i += step
                        check_future = False
                        loosening_counter = 0
                        break
        if pair[1] == -1:
            # no partner found on this attempt
            if check_future:
                # both directions failed: loosen the window; give up on this
                # frame after the second loosening round
                pose_dist_min = pose_dist_min / 1.1
                pose_dist_max = pose_dist_max * 1.1
                check_future = False
                loosening_counter += 1
                if loosening_counter > 1:
                    i += step
                    loosening_counter = 0
            else:
                # retry the same frame, this time searching forward
                check_future = True
        else:
            check_future = False
    return pairs
def crawl_subprocess_short(scene, dataset_path, count, progress):
    """Build 2-frame training samples for one scene (process-pool worker).

    Reads the scene's pose file and gathers frame pairs at three pose-distance
    scales (forward at 1.0x and 1.5x, backward at 0.666x of the configured
    window).  *progress* is a shared counter used only for console feedback.
    """
    scene_directory = Path(dataset_path) / scene
    poses = np.reshape(np.loadtxt(scene_directory / "poses.txt"), newshape=(-1, 4, 4))
    used_pairs = set()
    samples = []
    for scale, backward in [(1.0, False), (0.666, True), (1.5, False)]:
        pairs = gather_pairs_train(poses, used_pairs,
                                   is_backward=backward,
                                   initial_pose_dist_min=scale * Config.train_minimum_pose_distance,
                                   initial_pose_dist_max=scale * Config.train_maximum_pose_distance)
        samples.extend({'scene': scene, 'indices': [reference, measurement]}
                       for reference, measurement in pairs)
    progress.value += 1
    print(progress.value, "/", count, end='\r')
    return samples
def crawl_subprocess_long(scene, dataset_path, count, progress, subsequence_length):
    """Build subsequences of *subsequence_length* frames for one scene
    (process-pool worker).

    Starting frames are sampled with several (offset, pose-scale, direction)
    configurations.  From each start, consecutive valid partners are chained
    using the pose-distance criterion until the subsequence is full or the
    trajectory ends.  A per-frame usage counter (capped at *usage_threshold*)
    and the *used_pairs* set limit how often frames and frame pairs are
    reused across configurations.  *progress* is a shared counter used only
    for console feedback.
    """
    scene_path = Path(dataset_path) / scene
    poses = np.reshape(np.loadtxt(scene_path / "poses.txt"), newshape=(-1, 4, 4))
    sequence_length = np.shape(poses)[0]
    used_pairs = set()
    # each frame may appear in at most (usage_threshold + 1) samples
    usage_threshold = 1
    used_nodes = dict()
    for i in range(sequence_length):
        used_nodes[i] = 0
    calculated_step = Config.train_crawl_step
    samples = []
    # (start offset, pose-distance scale, traversal direction) configurations;
    # offsets are taken modulo the crawl step so they stay within one stride
    for offset, multiplier, is_backward in [(0 % calculated_step, 1.0, False),
                                            (1 % calculated_step, 0.666, True),
                                            (2 % calculated_step, 1.5, False),
                                            (3 % calculated_step, 0.8, True),
                                            (4 % calculated_step, 1.25, False),
                                            (5 % calculated_step, 1.0, True),
                                            (6 % calculated_step, 0.666, False),
                                            (7 % calculated_step, 1.5, True),
                                            (8 % calculated_step, 0.8, False),
                                            (9 % calculated_step, 1.25, True)]:
        if is_backward:
            start = sequence_length - 1 - offset
            step = -calculated_step
            limit = subsequence_length
        else:
            start = offset
            step = calculated_step
            limit = sequence_length - subsequence_length + 1
        for i in range(start, limit, step):
            # skip over-used starting frames
            if used_nodes[i] > usage_threshold:
                continue
            sample = {'scene': scene,
                      'indices': [i]}
            previous_index = i
            valid_counter = 1
            any_counter = 1
            reached_sequence_limit = False
            # chain partners one frame at a time until the subsequence is full
            while valid_counter < subsequence_length:
                if is_backward:
                    j = i - any_counter
                    reached_sequence_limit = j < 0
                else:
                    j = i + any_counter
                    reached_sequence_limit = j >= sequence_length
                if not reached_sequence_limit:
                    current_index = j
                    # candidate must be under the usage cap, form a new pair,
                    # and satisfy the (scaled) pose-distance criterion
                    check1 = used_nodes[current_index] <= usage_threshold
                    check2 = (previous_index, current_index) not in used_pairs
                    check3 = is_valid_pair(poses[previous_index],
                                           poses[current_index],
                                           multiplier * Config.train_minimum_pose_distance,
                                           multiplier * Config.train_maximum_pose_distance,
                                           t_norm_threshold=multiplier * Config.train_minimum_pose_distance * 0.5)
                    if check1 and check2 and check3:
                        sample['indices'].append(current_index)
                        previous_index = copy.deepcopy(current_index)
                        valid_counter += 1
                    any_counter += 1
                else:
                    break
            # only keep samples whose chain did not run off the trajectory;
            # book-keep usage counts and consumed pairs for accepted samples
            if not reached_sequence_limit:
                previous_node = sample['indices'][0]
                used_nodes[previous_node] += 1
                for current_node in sample['indices'][1:]:
                    used_nodes[current_node] += 1
                    used_pairs.add((previous_node, current_node))
                    used_pairs.add((current_node, previous_node))
                    previous_node = copy.deepcopy(current_node)
                samples.append(sample)
    progress.value += 1
    print(progress.value, "/", count, end='\r')
    return samples
def crawl(dataset_path, scenes, subsequence_length, num_workers=1):
    """Collect training samples for all *scenes* using a process pool.

    Dispatches to the pair crawler when *subsequence_length* is 2 and to the
    long-subsequence crawler otherwise, then shuffles the combined samples.
    """
    pool = Pool(num_workers)
    manager = Manager()
    total = len(scenes)
    # shared counter for console progress reporting across workers
    progress = manager.Value('i', 0)
    if subsequence_length == 2:
        worker = partial(crawl_subprocess_short,
                         dataset_path=dataset_path,
                         count=total,
                         progress=progress)
    else:
        worker = partial(crawl_subprocess_long,
                         dataset_path=dataset_path,
                         count=total,
                         progress=progress,
                         subsequence_length=subsequence_length)
    samples = []
    for scene_samples in pool.imap_unordered(worker, scenes):
        samples.extend(scene_samples)
    random.shuffle(samples)
    return samples
def read_split(path):
    """Read a dataset split file and return the scene names as a numpy string array.

    Replaces the previous newline-delimited ``np.loadtxt`` call: numpy >= 1.23
    rejects a newline character as the delimiter, and a single-scene file
    produced a 0-d array that broke iteration and ``len``.  Matching
    ``loadtxt`` semantics, blank lines are skipped and '#' starts a comment.
    """
    scenes = []
    with open(path) as split_file:
        for line in split_file:
            # strip inline comments, then surrounding whitespace
            entry = line.split("#", 1)[0].strip()
            if entry:
                scenes.append(entry)
    return np.array(scenes, dtype=str)
def load_image(path):
    """Load the image at *path* and return it as a float32 RGB array."""
    bgr_image = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32)
    return cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
def load_depth(path, scaling=1000.0):
    """Load a ``.npy`` depth map and convert it to metric units by dividing by *scaling*."""
    return np.load(path).astype(np.float32) / scaling
class PreprocessImage:
    """Crop and resize images/depths to a target resolution while keeping the
    camera intrinsics consistent with those operations.

    The constructor decides on a central crop (optionally removing
    *distortion_crop* pixels from every border first) that matches the target
    aspect ratio, then derives the scale factors to the new size.
    ``apply_rgb`` / ``apply_depth`` perform the crop + resize on data, and
    ``get_updated_intrinsics`` returns the adjusted K matrix.
    """

    def __init__(self, K, old_width, old_height, new_width, new_height, distortion_crop=0, perform_crop=True):
        self.fx = K[0, 0]
        self.fy = K[1, 1]
        self.cx = K[0, 2]
        self.cy = K[1, 2]
        self.new_width = new_width
        self.new_height = new_height
        self.perform_crop = perform_crop
        original_height = np.copy(old_height)
        original_width = np.copy(old_width)
        if not self.perform_crop:
            # plain resize: no crop, intrinsics scale directly with the size change
            self.crop_x = 0
            self.crop_y = 0
            factor_x = float(new_width) / float(original_width)
            factor_y = float(new_height) / float(original_height)
        else:
            # discard the (possibly distorted) border first, then center-crop
            # to the target aspect ratio
            old_height -= 2 * distortion_crop
            old_width -= 2 * distortion_crop
            old_aspect_ratio = float(old_width) / float(old_height)
            new_aspect_ratio = float(new_width) / float(new_height)
            if old_aspect_ratio > new_aspect_ratio:
                # crop horizontally to reduce image width
                target_width = old_height * new_aspect_ratio
                self.crop_x = int(np.floor((old_width - target_width) / 2.0)) + distortion_crop
                self.crop_y = distortion_crop
            else:
                # crop vertically to reduce image height
                target_height = old_width / new_aspect_ratio
                self.crop_x = distortion_crop
                self.crop_y = int(np.floor((old_height - target_height) / 2.0)) + distortion_crop
            # shift the principal point by the crop offset before scaling
            self.cx -= self.crop_x
            self.cy -= self.crop_y
            intermediate_height = original_height - 2 * self.crop_y
            intermediate_width = original_width - 2 * self.crop_x
            factor_x = float(new_width) / float(intermediate_width)
            factor_y = float(new_height) / float(intermediate_height)
        self.fx *= factor_x
        self.fy *= factor_y
        self.cx *= factor_x
        self.cy *= factor_y

    def apply_depth(self, depth):
        """Center-crop *depth* and resize it with nearest-neighbor interpolation."""
        height, width = depth.shape
        cropped = depth[self.crop_y:height - self.crop_y, self.crop_x:width - self.crop_x]
        return cv2.resize(cropped, (self.new_width, self.new_height), interpolation=cv2.INTER_NEAREST)

    def apply_rgb(self, image, scale_rgb, mean_rgb, std_rgb, normalize_colors=True):
        """Center-crop and bilinearly resize *image*; optionally normalize each
        channel as (value / scale_rgb - mean) / std."""
        height, width, _ = image.shape
        cropped = image[self.crop_y:height - self.crop_y, self.crop_x:width - self.crop_x, :]
        cropped = cv2.resize(cropped, (self.new_width, self.new_height), interpolation=cv2.INTER_LINEAR)
        if normalize_colors:
            cropped = cropped / scale_rgb
            for channel in range(3):
                cropped[:, :, channel] = (cropped[:, :, channel] - mean_rgb[channel]) / std_rgb[channel]
        return cropped

    def get_updated_intrinsics(self):
        """Return the 3x3 intrinsics matrix matching the cropped + resized images."""
        return np.array([[self.fx, 0, self.cx],
                         [0, self.fy, self.cy],
                         [0, 0, 1]])
class MVSDataset(Dataset):
    """Multi-view-stereo training dataset of fixed-length frame subsequences.

    Scenes are listed in train.txt / validation.txt under *root*; samples
    (scene + frame indices) are pre-crawled offline based on inter-frame pose
    distance.  ``__getitem__`` returns per-frame lists of images, depths and
    poses plus the (resized) intrinsics K, with color and geometric-scale
    augmentation applied for the TRAINING split.
    """

    def __init__(self, root, seed, split, subsequence_length, scale_rgb, mean_rgb, std_rgb, geometric_scale_augmentation=False):
        # seed both numpy and random: crawling and augmentation use both RNGs
        np.random.seed(seed)
        random.seed(seed)
        self.subsequence_length = subsequence_length
        self.geometric_scale_augmentation = geometric_scale_augmentation
        self.root = Path(root)
        self.split = split
        # split selects the scene list file; any other value leaves
        # self.scenes unset and crawl() below would fail
        if split == "TRAINING":
            self.scenes = read_split(self.root / "train.txt")
        elif split == "VALIDATION":
            self.scenes = read_split(self.root / "validation.txt")
            # self.scenes = self.scenes[0:20]
        # pre-compute all (scene, indices) samples once, in parallel
        self.samples = crawl(dataset_path=self.root,
                             scenes=self.scenes,
                             subsequence_length=self.subsequence_length,
                             num_workers=Config.train_data_pipeline_workers)
        # color normalization parameters applied in __getitem__
        self.scale_rgb = scale_rgb
        self.mean_rgb = mean_rgb
        self.std_rgb = std_rgb

    def __getitem__(self, sample_index):
        sample = self.samples[sample_index]
        scene = sample['scene']
        indices = sample['indices']
        scene_path = self.root / scene
        K = np.loadtxt(scene_path / 'K.txt', dtype=np.float32)
        scene_poses = np.reshape(np.loadtxt(scene_path / 'poses.txt', dtype=np.float32), newshape=(-1, 4, 4))
        scene_npzs = sorted(scene_path.files('*.npz'))
        # augmentation: randomly play the subsequence backwards during training
        if self.split == "TRAINING" and np.random.random() > 0.5:
            indices.reverse()
        raw_poses = []
        raw_images = []
        raw_depths = []
        for i in indices:
            data = np.load(scene_npzs[i])
            raw_images.append(data['image'])
            raw_depths.append(data['depth'])
            raw_poses.append(scene_poses[i])
        # NOTE(review): width comes from the image, height from the depth map —
        # presumably both share the same resolution; verify against the data
        preprocessor = PreprocessImage(K=K,
                                       old_width=raw_images[0].shape[1],
                                       old_height=raw_depths[0].shape[0],
                                       new_width=Config.train_image_width,
                                       new_height=Config.train_image_height,
                                       distortion_crop=0)
        output_images = []
        output_depths = []
        output_poses = []
        rgb_sum = 0
        # track the valid depth range over the whole subsequence so that the
        # geometric scale augmentation keeps depths inside the configured range
        min_depth_in_sequence = Config.train_max_depth
        max_depth_in_sequence = Config.train_min_depth
        intermediate_depths = []
        intermediate_images = []
        for i in range(len(raw_images)):
            # stored depths are millimeters; convert to meters
            depth = (raw_depths[i]).astype(np.float32) / 1000.0
            # NOTE(review): `== np.nan` is always False element-wise — np.isnan
            # would be needed for this mask to actually catch NaNs
            depth_nan = depth == np.nan
            depth_inf = depth == np.inf
            depth_nan_or_inf = np.logical_or(depth_inf, depth_nan)
            depth[depth_nan_or_inf] = 0
            depth = preprocessor.apply_depth(depth)
            intermediate_depths.append(depth)
            valid_mask = depth > 0
            valid_depth_values = depth[valid_mask]
            if len(valid_depth_values) > 0:
                current_min_depth = np.min(valid_depth_values)
                current_max_depth = np.max(valid_depth_values)
                min_depth_in_sequence = min(min_depth_in_sequence, current_min_depth)
                max_depth_in_sequence = max(max_depth_in_sequence, current_max_depth)
            image = raw_images[i]
            # crop/resize only here; color normalization happens further below
            image = preprocessor.apply_rgb(image=image,
                                           scale_rgb=1.0,
                                           mean_rgb=[0.0, 0.0, 0.0],
                                           std_rgb=[1.0, 1.0, 1.0],
                                           normalize_colors=False)
            rgb_sum += np.sum(image)
            intermediate_images.append(image)
        # average 8-bit intensity over the subsequence, used to decide whether
        # color augmentation is safe (skip very dark / very bright sequences)
        rgb_average = rgb_sum / (len(raw_images) * Config.train_image_height * Config.train_image_width * 3)
        # GEOMETRIC AUGMENTATION
        geometric_scale_factor = 1.0
        if self.geometric_scale_augmentation:
            # bounds that keep all depths inside [train_min_depth, train_max_depth]
            possible_low_scale_value = Config.train_min_depth / min_depth_in_sequence
            possible_high_scale_value = Config.train_max_depth / max_depth_in_sequence
            if np.random.random() > 0.5:
                low = max(possible_low_scale_value, 0.666)
                high = min(possible_high_scale_value, 1.5)
            else:
                low = max(possible_low_scale_value, 0.8)
                high = min(possible_high_scale_value, 1.25)
            geometric_scale_factor = np.random.uniform(low=low, high=high)
        # COLOR AUGMENTATION
        # gamma/contrast/brightness applied in a random order per sample
        color_transforms = []
        brightness = random.uniform(-0.03, 0.03)
        contrast = random.uniform(0.8, 1.2)
        gamma = random.uniform(0.8, 1.2)
        color_transforms.append((adjust_gamma, gamma))
        color_transforms.append((adjust_contrast, contrast))
        color_transforms.append((adjust_brightness, brightness))
        random.shuffle(color_transforms)
        K = preprocessor.get_updated_intrinsics()
        for i in range(len(raw_images)):
            image = intermediate_images[i]
            depth = intermediate_depths[i] * geometric_scale_factor
            # HWC -> CHW for torch
            image = np.transpose(image, (2, 0, 1))
            image = torch.from_numpy(image.astype(np.float32))
            image = image / 255.0
            # apply color augmentation only to moderately exposed sequences
            if self.split == "TRAINING" and (55.0 < rgb_average < 200.0):
                for (color_transform_function, color_transform_value) in color_transforms:
                    image = color_transform_function(image, color_transform_value)
            # final color normalization with the dataset's parameters
            image = (image * 255.0) / self.scale_rgb
            image[0, :, :] = (image[0, :, :] - self.mean_rgb[0]) / self.std_rgb[0]
            image[1, :, :] = (image[1, :, :] - self.mean_rgb[1]) / self.std_rgb[1]
            image[2, :, :] = (image[2, :, :] - self.mean_rgb[2]) / self.std_rgb[2]
            pose = raw_poses[i].astype(np.float32)
            # scaling depth requires scaling the camera translation consistently
            pose[0:3, 3] = pose[0:3, 3] * geometric_scale_factor
            pose = torch.from_numpy(pose)
            depth = torch.from_numpy(depth.astype(np.float32))
            output_poses.append(pose)
            output_depths.append(depth)
            output_images.append(image)
        K = torch.from_numpy(K.astype(np.float32))
        return output_images, output_depths, output_poses, K

    def __len__(self):
        return len(self.samples)
def main():
    """Smoke-test the training dataset: iterate subsequences and display each
    consecutive frame/depth pair with OpenCV (blocks on a key press per pair)."""
    subsequence_length = 8
    dataset = MVSDataset(
        root=Config.dataset,
        seed=Config.train_seed,
        split="TRAINING",
        subsequence_length=subsequence_length,
        scale_rgb=255.0,
        mean_rgb=[0.0, 0.0, 0.0],
        std_rgb=[1.0, 1.0, 1.0],
        geometric_scale_augmentation=False)
    print("Number of samples:", len(dataset))
    loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=12, pin_memory=True)

    def to_displayable(image_tensor, depth_tensor):
        # tensor -> uint8 RGB image and uint16 depth visualization
        image = (np.transpose(image_tensor.numpy()[0], (1, 2, 0)) * 255).astype(np.uint8)
        depth = (depth_tensor.squeeze(1).numpy()[0] * 5000).astype(np.uint16)
        return image, depth

    for i, (images, depths, poses, K) in enumerate(loader):
        for j in range(1, len(images)):
            current_depth = depths[j].unsqueeze(1)
            previous_depth = depths[j - 1].unsqueeze(1)
            print(np.max(current_depth.squeeze(1).numpy()[0]))
            print(np.min(current_depth.squeeze(1).numpy()[0]))
            reference_image, reference_depth = to_displayable(images[j], current_depth)
            measurement_image, measurement_depth = to_displayable(images[j - 1], previous_depth)
            cv2.imshow("Reference Image", cv2.cvtColor(reference_image, cv2.COLOR_BGR2RGB))
            cv2.imshow("Reference Depth", reference_depth)
            cv2.imshow("Measurement Image", cv2.cvtColor(measurement_image, cv2.COLOR_BGR2RGB))
            cv2.imshow("Measurement Depth", measurement_depth)
            cv2.waitKey()


if __name__ == '__main__':
    main()
| 21,202 | 38.192237 | 128 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/convlstm.py | import torch
import torch.nn as nn
from dvmvs.utils import warp_frame_depth
class MVSLayernormConvLSTMCell(nn.Module):
    """Convolutional LSTM cell with layer normalization whose hidden state is
    warped into the current camera view before each step.

    Before the gates are computed, the previous hidden state is reprojected
    from the previous pose into the current one via ``warp_frame_depth``
    using the currently estimated depth; pixels with invalid depth
    (<= 0.01) are zeroed out.
    """

    def __init__(self, input_dim, hidden_dim, kernel_size, activation_function=None):
        super(MVSLayernormConvLSTMCell, self).__init__()
        # activation used for the cell input (g) and the output transform;
        # supplied by the caller (e.g. tanh or celu)
        self.activation_function = activation_function

        self.input_dim = input_dim
        self.hidden_dim = hidden_dim

        self.kernel_size = kernel_size
        # 'same' padding so the hidden state keeps its spatial size
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2

        # single convolution produces all four gate pre-activations at once
        self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                              out_channels=4 * self.hidden_dim,
                              kernel_size=self.kernel_size,
                              padding=self.padding,
                              bias=False)

    def forward(self, input_tensor, cur_state, previous_pose, current_pose, estimated_current_depth, camera_matrix):
        """One LSTM step; returns the new (hidden, cell) states.

        When *previous_pose* is given, the incoming hidden state is first
        warped from that pose into *current_pose* using the depth estimate.
        """
        h_cur, c_cur = cur_state

        if previous_pose is not None:
            # relative transform taking points from current into previous view
            transformation = torch.bmm(torch.inverse(previous_pose), current_pose)

            non_valid = estimated_current_depth <= 0.01
            h_cur = warp_frame_depth(image_src=h_cur,
                                     depth_dst=estimated_current_depth,
                                     src_trans_dst=transformation,
                                     camera_matrix=camera_matrix,
                                     normalize_points=False,
                                     sampling_mode='bilinear')
            b, c, h, w = h_cur.size()
            # broadcast the single-channel validity mask over all channels and
            # clear hidden state where the depth estimate is invalid
            non_valid = torch.cat([non_valid] * c, dim=1)
            h_cur.data[non_valid] = 0.0

        combined = torch.cat([input_tensor, h_cur], dim=1)  # concatenate along channel axis
        combined_conv = self.conv(combined)
        # split into input, forget, output gates and cell candidate
        cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)

        b, c, h, w = h_cur.size()
        i = torch.sigmoid(cc_i)
        f = torch.sigmoid(cc_f)
        o = torch.sigmoid(cc_o)
        # layer-normalize the cell candidate over the spatial dimensions
        cc_g = torch.layer_norm(cc_g, [h, w])
        g = self.activation_function(cc_g)

        c_next = f * c_cur + i * g
        c_next = torch.layer_norm(c_next, [h, w])
        h_next = o * self.activation_function(c_next)

        return h_next, c_next

    def init_hidden(self, batch_size, image_size):
        """Return zero-initialized (hidden, cell) states on the conv's device."""
        height, width = image_size
        return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device),
                torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv.weight.device))
| 2,554 | 38.307692 | 116 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/train.py | import torch
import torchvision
from tqdm import tqdm
from dvmvs.config import Config
from dvmvs.losses import LossMeter
from dvmvs.utils import save_checkpoint, save_optimizer, freeze_batchnorm
def switch_mode(model, mode):
    """Put every module in *model* (a list of nn.Modules) into 'train' or 'eval' mode.

    In training mode, batch-norm layers are additionally frozen when
    Config.train_freeze_batch_normalization is set.  Any other *mode* value
    is a no-op.
    """
    for module in model:
        if mode == 'train':
            module.train()
            if Config.train_freeze_batch_normalization:
                module.apply(freeze_batchnorm)
        elif mode == 'eval':
            module.eval()
def train(train_loader, val_loader, model, optimizer, summary_writer, epoch, best_loss, run_directory, forward_pass_function):
    """Run one training epoch, log to TensorBoard and checkpoint on improvement.

    *model* is a list of modules trained jointly; *forward_pass_function*
    performs the actual forward/loss computation and returns per-batch loss
    meters, the loss to optimize, and debug predictions.  When
    Config.train_validate is set, a validation pass runs afterwards and
    model + optimizer checkpoints are saved whenever any of the four tracked
    losses improves on *best_loss* (which is updated in place).
    """
    training_l1_meter = LossMeter()
    training_huber_meter = LossMeter()
    training_l1_inv_meter = LossMeter()
    training_l1_rel_meter = LossMeter()
    # extra tqdm line used purely as a live loss display
    info_printer = tqdm(total=0, position=1, bar_format='{desc}')
    info = 'L1 Loss: {} --- L1-inv Loss: {} --- L1-rel Loss: {} --- Huber Loss: {}'
    # switch to train mode
    switch_mode(model=model, mode='train')
    for i, (images, depths, poses, K) in enumerate(tqdm(train_loader)):
        batch_l1_meter, batch_huber_meter, batch_l1_inv_meter, batch_l1_rel_meter, \
        optimizer_loss, predictions, predictions_names = forward_pass_function(images=images,
                                                                              depths=depths,
                                                                              poses=poses,
                                                                              K=K,
                                                                              model=model,
                                                                              is_training=True)
        # record losses
        training_l1_meter.update(loss=batch_l1_meter.sum, count=batch_l1_meter.count)
        training_huber_meter.update(loss=batch_huber_meter.sum, count=batch_huber_meter.count)
        training_l1_inv_meter.update(loss=batch_l1_inv_meter.sum, count=batch_l1_inv_meter.count)
        training_l1_rel_meter.update(loss=batch_l1_rel_meter.sum, count=batch_l1_rel_meter.count)
        # periodically push a grid of input / ground-truth / prediction images
        # (first sample of the batch, last frame) to TensorBoard
        if i > 0 and i % Config.train_print_frequency == 0:
            rgb_debug_image = images[-1][0].cpu().detach()
            depth_debug_image = depths[-1][0].cpu().repeat(3, 1, 1).detach()
            debug_images = [rgb_debug_image, depth_debug_image]
            debug_names = "input_image ground_truth"
            for index, prediction in enumerate(predictions):
                debug_names += " " + predictions_names[index]
                prediction = prediction[0].cpu().repeat(3, 1, 1).detach().unsqueeze(0)
                _, channel, height, width = prediction.size()
                # upsample lower-resolution predictions to the training width
                scale_factor = Config.train_image_width / width
                prediction = torch.nn.functional.interpolate(prediction, scale_factor=scale_factor, mode='bilinear', align_corners=True)
                prediction = prediction.squeeze(0)
                debug_images.append(prediction)
            debug_images_grid = torchvision.utils.make_grid(debug_images,
                                                            nrow=3,
                                                            normalize=True,
                                                            scale_each=True)
            summary_writer.add_image(debug_names, debug_images_grid, epoch * len(train_loader) + i)
        # compute gradient and do Adam step
        optimizer.zero_grad()
        optimizer_loss.backward()
        optimizer.step()
        summary_writer.add_scalar('Batch Loss/L1', training_l1_meter.item_average, epoch * len(train_loader) + i)
        summary_writer.add_scalar('Batch Loss/Huber', training_huber_meter.item_average, epoch * len(train_loader) + i)
        summary_writer.add_scalar('Batch Loss/L1-inv', training_l1_inv_meter.item_average, epoch * len(train_loader) + i)
        summary_writer.add_scalar('Batch Loss/L1-rel', training_l1_rel_meter.item_average, epoch * len(train_loader) + i)
        info_printer.set_description_str(info.format(training_l1_meter, training_l1_inv_meter, training_l1_rel_meter, training_huber_meter))
    # optional end-of-epoch validation + best-model checkpointing
    if Config.train_validate:
        validation_l1_loss, validation_huber_loss, validation_l1_inv_loss, validation_l1_rel_loss = validate(val_loader=val_loader,
                                                                                                             model=model,
                                                                                                             forward_pass_function=forward_pass_function)
        summary_writer.add_scalar('L1 Loss/Training', training_l1_meter.avg, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('L1 Loss/Validation', validation_l1_loss, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('Huber Loss/Training', training_huber_meter.avg, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('Huber Loss/Validation', validation_huber_loss, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('L1-inv Loss/Training', training_l1_inv_meter.avg, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('L1-inv Loss/Validation', validation_l1_inv_loss, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('L1-rel Loss/Training', training_l1_rel_meter.avg, (epoch + 1) * len(train_loader))
        summary_writer.add_scalar('L1-rel Loss/Validation', validation_l1_rel_loss, (epoch + 1) * len(train_loader))
        # checkpoint when ANY of the four tracked losses improves; best_loss
        # is mutated in place so the caller sees the update
        if validation_l1_loss < best_loss[0] or validation_huber_loss < best_loss[1] or \
                validation_l1_inv_loss < best_loss[2] or validation_l1_rel_loss < best_loss[3]:
            best_loss[0] = min(validation_l1_loss, best_loss[0])
            best_loss[1] = min(validation_huber_loss, best_loss[1])
            best_loss[2] = min(validation_l1_inv_loss, best_loss[2])
            best_loss[3] = min(validation_l1_rel_loss, best_loss[3])
            # save best checkpoint
            checkpoint_list = []
            for k, module in enumerate(model):
                entry = {
                    'name': "module_" + str(k),
                    'epoch': epoch + 1,
                    'state_dict': module.state_dict()
                }
                checkpoint_list.append(entry)
            save_checkpoint(run_directory,
                            checkpoint_list,
                            step=(epoch + 1) * len(train_loader),
                            loss=[validation_l1_loss, validation_l1_inv_loss, validation_l1_rel_loss, validation_huber_loss])
            save_optimizer(run_directory,
                           optimizer=optimizer,
                           step=(epoch + 1) * len(train_loader),
                           loss=[validation_l1_loss, validation_l1_inv_loss, validation_l1_rel_loss, validation_huber_loss])
        # switch back to train mode !!!
        switch_mode(model=model, mode='train')
def validate(val_loader, model, forward_pass_function):
    """Run one gradient-free pass over *val_loader* and return the average
    L1, Huber, L1-inverse and L1-relative losses (in that order)."""
    l1_meter = LossMeter()
    huber_meter = LossMeter()
    l1_inv_meter = LossMeter()
    l1_rel_meter = LossMeter()

    # switch to evaluate mode
    switch_mode(model=model, mode='eval')

    with torch.no_grad():
        for i, (images, depths, poses, K) in enumerate(tqdm(val_loader)):
            batch_l1, batch_huber, batch_l1_inv, batch_l1_rel, _, _, _ = \
                forward_pass_function(images=images,
                                      depths=depths,
                                      poses=poses,
                                      K=K,
                                      model=model,
                                      is_training=False)
            # accumulate batch statistics
            l1_meter.update(loss=batch_l1.sum, count=batch_l1.count)
            huber_meter.update(loss=batch_huber.sum, count=batch_huber.count)
            l1_inv_meter.update(loss=batch_l1_inv.sum, count=batch_l1_inv.count)
            l1_rel_meter.update(loss=batch_l1_rel.sum, count=batch_l1_rel.count)

    return l1_meter.avg, huber_meter.avg, l1_inv_meter.avg, l1_rel_meter.avg
| 8,579 | 56.583893 | 153 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/mvdepthnet/encoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from dvmvs.utils import freeze_batchnorm
def down_conv_layer(input_channels, output_channels, kernel_size):
    """Two 'same'-padded conv+BN+ReLU blocks; the second convolution has
    stride 2 and halves the spatial resolution."""
    pad = (kernel_size - 1) // 2
    return nn.Sequential(
        nn.Conv2d(input_channels, output_channels, kernel_size,
                  padding=pad, stride=1, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
        nn.Conv2d(output_channels, output_channels, kernel_size,
                  padding=pad, stride=2, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU())
def conv_layer(input_channels, output_channels, kernel_size):
    """'same'-padded stride-1 convolution followed by BatchNorm and ReLU."""
    convolution = nn.Conv2d(input_channels, output_channels, kernel_size,
                            padding=(kernel_size - 1) // 2, bias=False)
    return nn.Sequential(convolution,
                         nn.BatchNorm2d(output_channels),
                         nn.ReLU())
def depth_layer(input_channels):
    """3x3 convolution to one channel squashed to (0, 1) with a sigmoid."""
    head = nn.Conv2d(input_channels, 1, 3, padding=1)
    return nn.Sequential(head, nn.Sigmoid())
def refine_layer(input_channels):
    """Unbounded single-channel 3x3 convolution (no activation), used for
    depth refinement residuals."""
    return nn.Conv2d(input_channels, 1, 3, padding=1)
def up_conv_layer(input_channels, output_channels, kernel_size):
    """Bilinear 2x upsampling followed by a 'same'-padded conv + BN + ReLU."""
    upsample = nn.Upsample(scale_factor=2, mode='bilinear')
    convolution = nn.Conv2d(input_channels, output_channels, kernel_size,
                            padding=(kernel_size - 1) // 2, bias=False)
    return nn.Sequential(upsample,
                         convolution,
                         nn.BatchNorm2d(output_channels),
                         nn.ReLU())
def get_trainable_number(variable):
    """Return the number of scalar elements in *variable*
    (the product of its shape; 1 for a 0-d tensor)."""
    total = 1
    for dimension in variable.shape:
        total *= dimension
    return total
class Encoder(nn.Module):
    """MVDepthNet encoder: five downsampling conv stages over the
    concatenation of an RGB image (3 ch) and a 64-channel plane-sweep cost
    volume (67 input channels total).

    ``getVolume`` builds that cost volume on the GPU for a fixed 320x256
    input and 64 inverse-depth levels between 0.5 m and 50 m.
    """

    def __init__(self):
        super(Encoder, self).__init__()
        # 67 = 3 image channels + 64 cost-volume channels
        self.conv1 = down_conv_layer(67, 128, 7)
        self.conv2 = down_conv_layer(128, 256, 5)
        self.conv3 = down_conv_layer(256, 512, 3)
        self.conv4 = down_conv_layer(512, 512, 3)
        self.conv5 = down_conv_layer(512, 512, 3)

    def getVolume(self, left_image, right_image, KRKiUV_T, KT_T):
        """Build a 64-level plane-sweep cost volume by warping *right_image*
        onto *left_image* at uniformly spaced inverse depths.

        KRKiUV_T and KT_T encode the homography (K*R*K^-1 applied to pixel
        coordinates, and K*t) for the image pair.  The cost is the
        per-pixel sum of absolute color differences.  NOTE: image size is
        hard-coded to 320x256 and the depth range to [0.5 m, 50 m] here.
        """
        # inverse-depth sampling: uniform between 1/50 and 1/0.5 in 64 steps
        idepth_base = 1.0 / 50.0
        idepth_step = (1.0 / 0.5 - 1.0 / 50.0) / 63.0

        costvolume = Variable(
            torch.cuda.FloatTensor(left_image.shape[0], 64,
                                   left_image.shape[2], left_image.shape[3]))

        image_height = 256
        image_width = 320
        batch_number = left_image.shape[0]

        # used to map pixel coordinates into grid_sample's [-1, 1] range
        normalize_base = torch.cuda.FloatTensor(
            [image_width / 2.0, image_height / 2.0])
        normalize_base = normalize_base.unsqueeze(0).unsqueeze(-1)

        for depth_i in range(64):
            this_depth = 1.0 / (idepth_base + depth_i * idepth_step)
            # project pixels into the right image at this depth plane
            transformed = KRKiUV_T * this_depth + KT_T
            demon = transformed[:, 2, :].unsqueeze(1)
            # perspective divide (epsilon guards against division by zero)
            warp_uv = transformed[:, 0: 2, :] / (demon + 1e-6)
            warp_uv = (warp_uv - normalize_base) / normalize_base
            warp_uv = warp_uv.view(
                batch_number, 2, image_width,
                image_height)
            warp_uv = Variable(warp_uv.permute(
                0, 3, 2, 1))
            warped = F.grid_sample(right_image, warp_uv)
            # photometric matching cost: SAD over color channels
            costvolume[:, depth_i, :, :] = torch.sum(
                torch.abs(warped - left_image), dim=1)
        return costvolume

    def forward(self, image, plane_sweep_volume):
        """Return the five feature maps [conv5 ... conv1], deepest first,
        for the skip connections of the decoder."""
        x = torch.cat((image, plane_sweep_volume), 1)
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)
        return [conv5, conv4, conv3, conv2, conv1]

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(Encoder, self).train(mode)
        self.apply(freeze_batchnorm)
| 3,979 | 28.051095 | 77 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/mvdepthnet/run-testing.py | import cv2
import numpy as np
import torch
from path import Path
from tqdm import tqdm
from dvmvs.baselines.mvdepthnet.decoder import Decoder
from dvmvs.baselines.mvdepthnet.encoder import Encoder
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.utils import cost_volume_fusion, save_results, InferenceTimer, visualize_predictions, get_warp_grid_for_cost_volume_calculation
def predict():
    """Offline MVDepthNet evaluation over the pre-computed keyframe lists.

    Loads encoder/decoder weights (finetuned or the original release), builds
    a 64-level plane-sweep cost volume per keyframe, predicts inverse depth,
    and saves per-scene predictions, ground truths and timing statistics.
    """
    # Toggle between the finetuned weights and the original release.
    predict_with_finetuned = True
    if predict_with_finetuned:
        extension = "finetuned"
    else:
        extension = "without_ft"
    input_image_width = 320
    input_image_height = 256
    print("System: MVDEPTHNET, is_finetuned = ", predict_with_finetuned)
    device = torch.device('cuda')
    encoder = Encoder()
    decoder = Decoder()
    if predict_with_finetuned:
        encoder_weights = torch.load(Path("finetuned-weights").files("*encoder*")[0])
        decoder_weights = torch.load(Path("finetuned-weights").files("*decoder*")[0])
    else:
        # The original checkpoint stores one combined state dict; split it
        # into encoder/decoder parts by key membership.
        mvdepth_weights = torch.load(Path("original-weights") / "pretrained_mvdepthnet_combined")
        pretrained_dict = mvdepth_weights['state_dict']
        encoder_weights = encoder.state_dict()
        pretrained_dict_encoder = {k: v for k, v in pretrained_dict.items() if k in encoder_weights}
        encoder_weights.update(pretrained_dict_encoder)
        decoder_weights = decoder.state_dict()
        pretrained_dict_decoder = {k: v for k, v in pretrained_dict.items() if k in decoder_weights}
        decoder_weights.update(pretrained_dict_decoder)
    encoder.load_state_dict(encoder_weights)
    decoder.load_state_dict(decoder_weights)
    encoder = encoder.to(device)
    decoder = decoder.to(device)
    encoder.eval()
    decoder.eval()
    # Pixel grid reused for every cost-volume warp.
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=input_image_width,
                                                          height=input_image_height,
                                                          device=device)
    min_depth = 0.5
    max_depth = 50.0
    n_depth_levels = 64
    # Input normalization constants used by MVDepthNet preprocessing below.
    scale_rgb = 1.0
    mean_rgb = [81.0, 81.0, 81.0]
    std_rgb = [35.0, 35.0, 35.0]
    data_path = Path(Config.test_offline_data_path)
    if Config.test_dataset_name is None:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files())
    else:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*"))
    for iteration, keyframe_index_file in enumerate(keyframe_index_files):
        # The index filename encodes keyframing type, dataset, scene and the
        # number of measurement frames, joined by '+'.
        keyframing_type, dataset_name, scene_name, _, n_measurement_frames = keyframe_index_file.split("/")[-1].split("+")
        scene_folder = data_path / dataset_name / scene_name
        print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))
        keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")
        K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
        poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
        image_filenames = sorted((scene_folder / 'images').files("*.png"))
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
        input_filenames = []
        for image_filename in image_filenames:
            input_filenames.append(image_filename.split("/")[-1])
        inference_timer = InferenceTimer()
        predictions = []
        reference_depths = []
        with torch.no_grad():
            for i in tqdm(range(0, len(keyframe_index_file_lines))):
                keyframe_index_file_line = keyframe_index_file_lines[i]
                if keyframe_index_file_line == "TRACKING LOST":
                    continue
                else:
                    # First filename is the reference frame; the rest are
                    # measurement frames for the cost volume.
                    current_input_filenames = keyframe_index_file_line.split(" ")
                    current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]
                    reference_index = current_indices[0]
                    measurement_indices = current_indices[1:]
                    reference_pose = poses[reference_index]
                    reference_image = load_image(image_filenames[reference_index])
                    # Depth PNGs are stored in millimeters; convert to meters.
                    reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0
                    preprocessor = PreprocessImage(K=K,
                                                   old_width=reference_image.shape[1],
                                                   old_height=reference_image.shape[0],
                                                   new_width=input_image_width,
                                                   new_height=input_image_height,
                                                   distortion_crop=0,
                                                   perform_crop=False)
                    reference_image = preprocessor.apply_rgb(image=reference_image,
                                                             scale_rgb=scale_rgb,
                                                             mean_rgb=mean_rgb,
                                                             std_rgb=std_rgb)
                    reference_depth = preprocessor.apply_depth(reference_depth)
                    reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                    reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)
                    measurement_poses_torch = []
                    measurement_images_torch = []
                    for measurement_index in measurement_indices:
                        measurement_image = load_image(image_filenames[measurement_index])
                        measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                                   scale_rgb=scale_rgb,
                                                                   mean_rgb=mean_rgb,
                                                                   std_rgb=std_rgb)
                        measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                        measurement_pose_torch = torch.from_numpy(poses[measurement_index]).float().to(device).unsqueeze(0)
                        measurement_images_torch.append(measurement_image_torch)
                        measurement_poses_torch.append(measurement_pose_torch)
                    full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)
                    # Timed section: cost volume + network forward pass only.
                    inference_timer.record_start_time()
                    cost_volume = cost_volume_fusion(image1=reference_image_torch,
                                                     image2s=measurement_images_torch,
                                                     pose1=reference_pose_torch,
                                                     pose2s=measurement_poses_torch,
                                                     K=full_K_torch,
                                                     warp_grid=warp_grid,
                                                     min_depth=min_depth,
                                                     max_depth=max_depth,
                                                     n_depth_levels=n_depth_levels,
                                                     device=device,
                                                     dot_product=False)
                    conv5, conv4, conv3, conv2, conv1 = encoder(reference_image_torch, cost_volume)
                    prediction, _, _, _ = decoder(conv5, conv4, conv3, conv2, conv1)
                    # Network outputs inverse depth in [0.02, 2.0] -> depth in [0.5, 50].
                    prediction = torch.clamp(prediction, min=0.02, max=2.0)
                    prediction = 1 / prediction
                    inference_timer.record_end_time_and_elapsed_time()
                    prediction = prediction.cpu().numpy().squeeze()
                    reference_depths.append(reference_depth)
                    predictions.append(prediction)
                    if Config.test_visualize:
                        visualize_predictions(numpy_reference_image=reference_image,
                                              numpy_measurement_image=measurement_image,
                                              numpy_predicted_depth=prediction,
                                              normalization_mean=mean_rgb,
                                              normalization_std=std_rgb,
                                              normalization_scale=scale_rgb)
        inference_timer.print_statistics()
        system_name = "{}_{}_{}_{}_{}_mvdepthnet_{}".format(keyframing_type,
                                                            dataset_name,
                                                            input_image_width,
                                                            input_image_height,
                                                            n_measurement_frames,
                                                            extension)
        save_results(predictions=predictions,
                     groundtruths=reference_depths,
                     system_name=system_name,
                     scene_name=scene_name,
                     save_folder=Config.test_result_folder)
# Script entry point: run the full offline MVDepthNet evaluation.
if __name__ == '__main__':
    predict()
| 9,324 | 48.078947 | 138 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/mvdepthnet/decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from dvmvs.utils import freeze_batchnorm
def down_conv_layer(input_channels, output_channels, kernel_size):
    """Two conv+BN+ReLU stages; the second halves the spatial resolution."""
    pad = (kernel_size - 1) // 2
    stages = [
        nn.Conv2d(input_channels, output_channels, kernel_size,
                  padding=pad, stride=1, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
        nn.Conv2d(output_channels, output_channels, kernel_size,
                  padding=pad, stride=2, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    ]
    return nn.Sequential(*stages)
def conv_layer(input_channels, output_channels, kernel_size):
    """One conv + BN + ReLU stage; the spatial resolution is preserved."""
    layers = (
        nn.Conv2d(input_channels, output_channels, kernel_size,
                  padding=(kernel_size - 1) // 2, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    )
    return nn.Sequential(*layers)
def depth_layer(input_channels):
    """3x3 conv down to a single channel, squashed to (0, 1) by a sigmoid."""
    head = nn.Conv2d(input_channels, 1, 3, padding=1)
    return nn.Sequential(head, nn.Sigmoid())
def refine_layer(input_channels):
    """Plain 3x3 conv producing a single-channel refinement map."""
    return nn.Conv2d(in_channels=input_channels, out_channels=1,
                     kernel_size=3, padding=1)
def up_conv_layer(input_channels, output_channels, kernel_size):
    """Bilinear 2x upsampling followed by a conv + BN + ReLU stage."""
    layers = [
        nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
        nn.Conv2d(input_channels, output_channels, kernel_size,
                  padding=(kernel_size - 1) // 2, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    ]
    return nn.Sequential(*layers)
def get_trainable_number(variable):
    """Return the total number of scalar elements in *variable*.

    Computed as the product of the entries of ``variable.shape``; a 0-d
    tensor therefore yields 1, matching the previous hand-rolled loop.
    """
    # Local imports keep this idiom fix self-contained.
    from functools import reduce
    from operator import mul
    return reduce(mul, variable.shape, 1)
class Decoder(nn.Module):
    """Coarse-to-fine decoder regressing inverse depth at four scales.

    Attribute names (upconvN / iconvN / dispN) are part of the checkpoint
    contract: pretrained state dicts are matched against them by key.
    """

    def __init__(self):
        super(Decoder, self).__init__()
        self.upconv5 = up_conv_layer(512, 512, 3)
        self.iconv5 = conv_layer(1024, 512, 3)  # upconv5 + conv4
        self.upconv4 = up_conv_layer(512, 512, 3)
        self.iconv4 = conv_layer(1024, 512, 3)  # upconv4 + conv3
        self.disp4 = depth_layer(512)
        self.upconv3 = up_conv_layer(512, 256, 3)
        self.iconv3 = conv_layer(513, 256, 3)  # upconv3 + conv2 + disp4 = 256 + 256 + 1
        self.disp3 = depth_layer(256)
        self.upconv2 = up_conv_layer(256, 128, 3)
        self.iconv2 = conv_layer(257, 128, 3)  # upconv2 + conv1 + disp3 = 128 + 128 + 1
        self.disp2 = depth_layer(128)
        self.upconv1 = up_conv_layer(128, 64, 3)
        self.iconv1 = conv_layer(65, 64, 3)  # upconv1 + disp2 = 64 + 1
        self.disp1 = depth_layer(64)

    def forward(self, conv5, conv4, conv3, conv2, conv1):
        """Fuse encoder skips coarse-to-fine; return [disp1, disp2, disp3, disp4]."""
        x = self.iconv5(torch.cat((self.upconv5(conv5), conv4), 1))
        x = self.iconv4(torch.cat((self.upconv4(x), conv3), 1))
        disp4 = 2.0 * self.disp4(x)
        x = self.iconv3(torch.cat((self.upconv3(x), conv2,
                                   F.interpolate(disp4, scale_factor=2)), 1))
        disp3 = 2.0 * self.disp3(x)
        x = self.iconv2(torch.cat((self.upconv2(x), conv1,
                                   F.interpolate(disp3, scale_factor=2)), 1))
        disp2 = 2.0 * self.disp2(x)
        x = self.iconv1(torch.cat((self.upconv1(x),
                                   F.interpolate(disp2, scale_factor=2)), 1))
        disp1 = 2.0 * self.disp1(x)
        return [disp1, disp2, disp3, disp4]

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(Decoder, self).train(mode)
        self.apply(freeze_batchnorm)
| 3,920 | 28.044444 | 80 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/base_model.py | import collections
from abc import ABCMeta, abstractmethod
from torch import nn
def dict_update(d, u):
    """Improved update for nested dictionaries.

    Arguments:
        d: The dictionary to be updated.
        u: The update dictionary.
    Returns:
        The updated dictionary. *d* itself is left unmodified (it is
        shallow-copied here and nested mappings are merged recursively).
    """
    # Bug fix: `collections.Mapping` was removed in Python 3.10; the ABC has
    # lived in `collections.abc` since Python 3.3.
    from collections import abc
    d = d.copy()
    for k, v in u.items():
        if isinstance(v, abc.Mapping):
            # Recurse so nested mappings are merged instead of replaced.
            d[k] = dict_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
class BaseModel(nn.Module, metaclass=ABCMeta):
    """Base Model"""

    # Configuration shared by every model; subclasses extend via default_config.
    base_config = {
        'name': None,
        'trainable': True,
    }
    default_config = {}
    required_data_keys = []

    def __init__(self, config):
        nn.Module.__init__(self)
        defaults = dict_update(self.base_config, self.default_config)
        # Reject configuration keys that no default declares.
        unknown = set(config.keys()) - set(defaults.keys())
        if unknown:
            raise ValueError(
                'Detected new keys in config: {}'.format(unknown))
        self.config = dict_update(defaults, config)
        self._init()
        if not self.config['trainable']:
            # Freeze every parameter when the model is marked non-trainable.
            for p in self.parameters():
                p.requires_grad = False

    def forward(self, data, **kwarg):
        for required in self.required_data_keys:
            assert required in data, 'Missing key {} in data'.format(required)
        return self._forward(data, **kwarg)

    @abstractmethod
    def _init(self):
        raise NotImplementedError

    @abstractmethod
    def _forward(self, data):
        raise NotImplementedError

    @abstractmethod
    def loss(self, pred, data):
        raise NotImplementedError

    @abstractmethod
    def metrics(self):
        raise NotImplementedError
| 1,752 | 24.042857 | 75 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/utils.py | import torch
def reorder_desc(desc, batch_sz):
    """Regroup stacked descriptors and move the batch axis to the front.

    (b * k, c, h, w) is viewed as (k, batch_sz, c, h, w) and the first two
    axes are swapped, yielding (batch_sz, k, c, h, w).
    """
    _, channels, height, width = desc.shape
    grouped = desc.view(-1, batch_sz, channels, height, width)
    return grouped.transpose(1, 0)
def pose_square(pose):
    """Converts pose matrix of size 3x4 to a square matrix of size 4x4"""
    # Already square (dim 2 is not 3): return the input untouched.
    if pose.shape[2] != 3:
        return pose
    bottom = torch.tensor([0., 0., 0., 1.])
    if pose.is_cuda:
        bottom = bottom.to(pose.device)
    # One homogeneous [0, 0, 0, 1] row per (batch, view) pose.
    bottom = bottom.repeat(pose.shape[0], pose.shape[1], 1, 1)
    return torch.cat((pose, bottom), 2)
def make_symmetric(anc, ref):
    """Makes anchor and reference tensors symmetric"""
    if anc is None or ref is None:
        return None
    stacked = torch.stack(ref, 0)
    # Flatten the view dimension into the batch dimension, keeping the
    # trailing shape of the anchor tensor.
    if anc.dim() == 3:
        stacked = stacked.view(-1, anc.shape[1], anc.shape[2])
    else:
        stacked = stacked.view(-1, anc.shape[1], anc.shape[2], anc.shape[3])
    return torch.cat((anc, stacked), 0)
| 1,006 | 24.175 | 73 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/superpoint.py | import torch
import torchvision
from .base_model import BaseModel
def simple_nms(scores, radius):
"""Performs non maximum suppression on the heatmap using max-pooling.
This method does not suppress contiguous points that have the same score.
Arguments:
scores: the score heatmap, with shape `[B, H, W]`.
size: an interger scalar, the radius of the NMS window.
"""
def max_pool(x):
return torch.nn.functional.max_pool2d(
x, kernel_size=radius * 2 + 1, stride=1, padding=radius)
zeros = torch.zeros_like(scores)
max_mask = scores == max_pool(scores)
for _ in range(2):
supp_mask = max_pool(max_mask.float()) > 0
supp_scores = torch.where(supp_mask, zeros, scores)
new_max_mask = supp_scores == max_pool(supp_scores)
max_mask = max_mask | (new_max_mask & (~supp_mask))
return torch.where(max_mask, scores, zeros)
def remove_borders(keypoints, scores, b, h, w):
    """Drop keypoints lying within `b` pixels of the `h` x `w` image border."""
    rows, cols = keypoints[:, 0], keypoints[:, 1]
    inside = ((rows >= b) & (rows < (h - b))
              & (cols >= b) & (cols < (w - b)))
    return keypoints[inside], scores[inside]
def top_k_keypoints(keypoints, scores, k):
    """Keep the `k` highest-scoring keypoints (no-op when k >= count)."""
    if k >= len(keypoints):
        return keypoints, scores
    top_scores, top_idx = torch.topk(scores, k, dim=0)
    return keypoints[top_idx], top_scores
class Superpoint(BaseModel):
    """SuperPoint-style detector/descriptor on a ResNet-50 backbone.

    Produces a keypoint score map, dense descriptors, optional sparse
    keypoints, and (when `dense_depth` is set) intermediate skip features
    for the downstream sparse-to-dense depth network.
    """
    default_config = {
        'has_detector': True,
        'has_descriptor': True,
        'descriptor_dim': 128,
        # Inference for Anchor
        'sparse_outputs': True,
        'nms_radius': 9,
        'detection_threshold': 0.0005,
        'top_k_keypoints': 128,
        'force_num_keypoints': True,
        'remove_borders': 4,
        'unique_keypoints': True,
        'frac_superpoint': 1.,
        'dense_depth': True,
        'min_depth': 0.5,
        'max_depth': 10.0,
        'model_type': 'resnet50',
        'align_corners': False,
        'height': 240,
        'width': 320,
    }
    def _init(self):
        # Shared activation / pooling modules.
        self.relu = torch.nn.ReLU(inplace=True)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        # ResNet-50 backbone built without torchvision's pretrained weights
        # (weights are expected to be loaded externally).
        pretrained_features = torchvision.models.resnet50(pretrained=False)
        c_out = [2048, 8, 10]
        c_d = 512
        c_k = 64 + 256
        self.conv1 = pretrained_features.conv1
        self.bn1 = pretrained_features.bn1
        self.maxpool = pretrained_features.maxpool
        self.layer1 = pretrained_features.layer1
        self.layer2 = pretrained_features.layer2
        self.layer3 = pretrained_features.layer3
        self.layer4 = pretrained_features.layer4
        # NOTE(review): rgb_to_gray is defined here but never used in _forward.
        self.rgb_to_gray = torch.tensor([0.299, 0.587, 0.114])
        self.rgb_to_gray = self.rgb_to_gray.view(1, -1, 1, 1)
        # Standard ImageNet normalization constants, broadcast as (1, C, 1, 1).
        self.mean_add_rgb = torch.tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)
        self.std_mul_rgb = torch.tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)
        self.mean_add = torch.tensor([0.5, 0.5, 0.5]).view(1, -1, 1, 1)
        self.std_mul = torch.tensor([0.5, 0.5, 0.5]).view(1, -1, 1, 1)
        if self.config['has_detector']:
            # Detector head: backbone features -> 65-channel score logits
            # (8x8 cell layout + 1 dustbin channel).
            c_1, c_2 = 256, 128
            self.convPa = torch.nn.Conv2d(c_out[0], c_1, kernel_size=3, stride=1, padding=1)
            self.bnPa = torch.nn.BatchNorm2d(c_1)
            self.scale_factorPa = 4
            self.convPb = torch.nn.Conv2d(c_1, c_2, kernel_size=3, stride=1, padding=1)
            self.bnPb = torch.nn.BatchNorm2d(c_2)
            self.convPc = torch.nn.Conv2d(c_2, 65, kernel_size=1, stride=1, padding=0)
        if self.config['has_descriptor']:
            # Descriptor head; concatenates skip features before the final 1x1.
            c_3, c_4 = 128, 256
            self.convDa = torch.nn.Conv2d(c_out[0], c_3, kernel_size=3, stride=1, padding=1)
            self.bnDa = torch.nn.BatchNorm2d(c_3)
            self.convDb = torch.nn.Conv2d(c_3 + c_d, c_4, kernel_size=1, stride=1, padding=0)
            self.bnDb = torch.nn.BatchNorm2d(c_4)
            self.convDc = torch.nn.Conv2d(c_4, c_4, kernel_size=3, stride=1, padding=1)
            self.bnDc = torch.nn.BatchNorm2d(c_4)
            self.convDd = torch.nn.Conv2d(c_4 + c_k, self.config['descriptor_dim'], kernel_size=1, stride=1, padding=0)
    def _forward(self, data):
        """Run backbone + detector/descriptor heads.

        `data['process_tsp']` is a flag string: the detector branch runs when
        it contains 't', the descriptor branch when it contains 's'.
        """
        img_rgb = data['img']
        tsp = data['process_tsp']
        img_rgb = (img_rgb - self.mean_add_rgb.to(img_rgb.device)) / self.std_mul_rgb.to(img_rgb.device)
        img = img_rgb
        ##Run superpoint
        pred = {}
        pred['img_rgb'] = img_rgb
        # Backbone forward pass, stashing skip features for the depth network.
        x = self.relu(self.bn1(self.conv1(img)))
        if self.config['dense_depth']:
            pred['skip_half'] = x
        x = self.maxpool(x)
        x = self.layer1(x)
        if self.config['dense_depth']:
            pred['skip_quarter'] = x
        x = self.layer2(x)
        if self.config['dense_depth']:
            pred['skip_eight'] = x
        x = self.layer3(x)
        if self.config['dense_depth']:
            pred['skip_sixteenth'] = x
        x = self.layer4(x)
        if self.config['dense_depth']:
            pred['features'] = x
        # Detector Head.
        if self.config['has_detector'] and ('t' in tsp):
            cPa = self.relu(self.bnPa(self.convPa(x)))
            cPa = torch.nn.functional.interpolate(cPa, size=(self.config['height'] // 8, self.config['width'] // 8), mode='bilinear',
                                                  align_corners=self.config['align_corners'])
            cPa = self.relu(self.bnPb(self.convPb(cPa)))
            pred['scores'] = self.convPc(cPa)
        # Descriptor Head.
        if self.config['has_descriptor'] and ('s' in tsp):
            cDa = self.relu(self.bnDa(self.convDa(x)))
            cDa = torch.nn.functional.interpolate(cDa, size=(self.config['height'] // 8, self.config['width'] // 8), mode='bilinear',
                                                  align_corners=self.config['align_corners'])
            cDa = torch.cat((cDa, pred['skip_eight']), 1)
            cDa = self.relu(self.bnDb(self.convDb(cDa)))
            cDa = self.relu(self.bnDc(self.convDc(cDa)))
            # Bring quarter/half-resolution skips to 1/8 scale and fuse them.
            skip_4 = torch.nn.functional.interpolate(pred['skip_quarter'], scale_factor=0.5, mode='bilinear', align_corners=self.config['align_corners'])
            skip_2 = torch.nn.functional.interpolate(pred['skip_half'], scale_factor=0.25, mode='bilinear', align_corners=self.config['align_corners'])
            cDa = torch.cat((cDa, skip_4, skip_2), 1)
            desc = self.convDd(cDa)
            # L2-normalize descriptors along the channel dimension.
            desc = torch.nn.functional.normalize(desc, p=2, dim=1)
            pred['descriptors'] = desc
        # Sparse Key-Points
        if self.config['sparse_outputs'] and ('t' in tsp):
            st = 8  # encoder stride
            if self.config['has_detector']:
                # Drop the dustbin channel and unfold the 8x8 cell layout back
                # into a full-resolution score map.
                scores = torch.nn.functional.softmax(pred['scores'], 1)[:, :-1]
                b, c, h, w = scores.shape
                scores = scores.permute(0, 2, 3, 1).reshape(b, h, w, st, st)
                scores = scores.permute(0, 1, 3, 2, 4).reshape(b, h * st, w * st)
                dense_scores = scores
                if self.config['nms_radius']:
                    scores = simple_nms(scores, self.config['nms_radius'])
                keypoints = [torch.nonzero(s > self.config['detection_threshold'], as_tuple=False) for s in scores]
                scores = [s[tuple(k.t())] for s, k in zip(scores, keypoints)]
                if self.config['remove_borders']:
                    keypoints, scores = list(zip(*[
                        remove_borders(
                            k, s, self.config['remove_borders'], h * st, w * st)
                        for k, s in zip(keypoints, scores)]))
                if self.config['top_k_keypoints']:
                    keypoints, scores = list(zip(*[
                        top_k_keypoints(k, s, int(self.config['frac_superpoint'] * self.config['top_k_keypoints']))
                        for k, s in zip(keypoints, scores)]))
                if self.config['force_num_keypoints']:
                    # Pad each image up to top_k_keypoints with random
                    # zero-score locations (optionally kept unique).
                    new_keypoints, new_scores = [], []
                    for k, sc in zip(keypoints, scores):
                        num = self.config['top_k_keypoints'] - len(k)
                        new_x = torch.randint_like(k.new_empty(num), w * st)
                        new_y = torch.randint_like(k.new_empty(num), h * st)
                        new_k = torch.stack([new_y, new_x], -1)
                        if self.config['unique_keypoints']:
                            curr_k = torch.cat([k, new_k])
                            not_all_unique = True
                            # Re-sample until no duplicate keypoints remain.
                            # NOTE(review): uniqueness is checked with
                            # torch.unique(..., dim=1); dim=0 (rows) looks
                            # intended -- confirm.
                            while not_all_unique:
                                unique_k = torch.unique(curr_k, dim=1)
                                if unique_k.shape[0] == curr_k.shape[0]:
                                    not_all_unique = False
                                else:
                                    new_x = torch.randint_like(k.new_empty(num), w * st)
                                    new_y = torch.randint_like(k.new_empty(num), h * st)
                                    new_k = torch.stack([new_y, new_x], -1)
                                    curr_k = torch.cat([k, new_k])
                        new_sc = sc.new_zeros(num)
                        new_keypoints.append(torch.cat([k, new_k], 0))
                        new_scores.append(torch.cat([sc, new_sc], 0))
                    keypoints, scores = new_keypoints, new_scores
            # Convert (row, col) to (x, y) ordering.
            keypoints = [torch.flip(k, [1]).float() for k in keypoints]
            keypoints = torch.stack(keypoints, 0)
            pred['keypoints'] = keypoints
            pred['scores_sparse'] = scores
        return pred
    def loss(self, pred, data):
        # Inference-only module: training losses are intentionally undefined.
        raise NotImplementedError
def metrics(self):
raise NotImplementedError
| 9,883 | 38.536 | 153 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/densedepth.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_model import BaseModel
from .resnet_s2d import resnet50
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with 1-pixel padding and no bias term."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=1,
                     bias=False)
class Unpool(nn.Module):
    """Unpool: 2*2 unpooling with zero padding"""

    def __init__(self, num_channels, stride=2):
        super(Unpool, self).__init__()
        self.num_channels = num_channels
        self.stride = stride
        # Per-channel transposed-conv kernel [[1, 0], [0, 0]]: each input
        # value lands in the top-left cell of its stride x stride output tile.
        self.weights = torch.autograd.Variable(
            torch.zeros(num_channels, 1, stride, stride))
        self.weights[:, :, 0, 0] = 1

    def forward(self, x):
        return F.conv_transpose2d(x, self.weights.to(x.device),
                                  stride=self.stride, groups=self.num_channels)
class Gudi_UpProj_Block(nn.Module):
    """UpProjection block from CSPN paper (Cheng et.al.)"""

    def __init__(self, in_channels, out_channels, oheight=0, owidth=0,
                 side_channels=0, do_5x5=True, interp_nearest=True):
        super(Gudi_UpProj_Block, self).__init__()
        # First conv of both the main and shortcut branches is 5x5 or 1x1.
        k, p = (5, 2) if do_5x5 else (1, 0)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=k,
                               stride=1, padding=p, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.sc_conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=k,
                                  stride=1, padding=p, bias=False)
        self.sc_bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.oheight = oheight
        self.owidth = owidth
        self.interp_nearest = interp_nearest

    def _up_pooling(self, x, scale):
        x = nn.Upsample(scale_factor=scale, mode='nearest')(x)
        if self.oheight != 0 and self.owidth != 0:
            x = x[:, :, 0:self.oheight, 0:self.owidth]
            # Keep only the top-left corner of every 2x2 tile.
            mask = torch.zeros_like(x)
            mask[:, :, 0::2, 0::2] = 1
            x = torch.mul(mask, x)
        return x

    def forward(self, x):
        if self.interp_nearest:
            x = self._up_pooling(x, 2)
        else:
            x = F.interpolate(x, scale_factor=2, mode='bilinear')
        main = self.bn2(self.conv2(self.relu(self.bn1(self.conv1(x)))))
        shortcut = self.sc_bn1(self.sc_conv1(x))
        return self.relu(main + shortcut)
class Gudi_UpProj_Block_Cat(nn.Module):
    """UpProjection block with concatenation from CSPN paper (Cheng et.al.)"""

    def __init__(self, in_channels, out_channels, oheight=0, owidth=0,
                 side_channels=0, do_5x5=True, interp_nearest=True):
        super(Gudi_UpProj_Block_Cat, self).__init__()
        # First conv of both the main and shortcut branches is 5x5 or 1x1.
        k, p = (5, 2) if do_5x5 else (1, 0)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=k,
                               stride=1, padding=p, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        # Fuses the main branch with the concatenated side (skip) input.
        self.conv1_1 = nn.Conv2d(out_channels + side_channels, out_channels,
                                 kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.sc_conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=k,
                                  stride=1, padding=p, bias=False)
        self.sc_bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.oheight = oheight
        self.owidth = owidth
        self._up_pool = Unpool(in_channels)
        self.interp_nearest = interp_nearest

    def _up_pooling(self, x, scale):
        # `scale` is unused: the Unpool module is built with a fixed stride of 2.
        x = self._up_pool(x)
        if self.oheight != 0 and self.owidth != 0:
            x = x.narrow(2, 0, self.oheight)
            x = x.narrow(3, 0, self.owidth)
        return x

    def forward(self, x, side_input):
        target = (side_input.shape[2], side_input.shape[3])
        if self.interp_nearest:
            # Use exact unpooling when the side input is an integer multiple
            # of the current resolution; otherwise fall back to interpolation.
            if side_input.shape[2] % x.shape[2] == 0:
                x = self._up_pooling(x, 2)
            else:
                x = F.interpolate(x, size=target, mode='nearest')
        else:
            x = F.interpolate(x, size=target, mode='bilinear')
        out = self.relu(self.bn1(self.conv1(x)))
        out = torch.cat((out, side_input), 1)
        out = self.relu(self.bn1_1(self.conv1_1(out)))
        out = self.bn2(self.conv2(out))
        out = self.relu(out + self.sc_bn1(self.sc_conv1(x)))
        return out
class dilated_conv3x3(nn.Module):
    """Dilated convolutions"""

    def __init__(self, in_channels, out_channels, dilation_rate=1):
        super(dilated_conv3x3, self).__init__()
        # 1x1 bottleneck followed by a dilated 3x3; padding == dilation keeps
        # the spatial resolution unchanged.
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1,
                               stride=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               padding=dilation_rate, dilation=dilation_rate,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu2 = nn.ReLU(inplace=True)

    def forward(self, x):
        hidden = self.relu1(self.bn1(self.conv1(x)))
        return self.relu2(self.bn2(self.conv2(hidden)))
class ASPP(nn.Module):
    """Altrous Spatial Pyramid Pooling block"""

    def __init__(self, in_channels):
        super(ASPP, self).__init__()
        out_channels = in_channels
        # Dense connectivity: each branch sees the input plus every previous
        # branch's output, so its input grows by out_channels // 2 per stage.
        self.daspp_1 = dilated_conv3x3(out_channels, out_channels // 2, dilation_rate=3)
        self.relu = nn.ReLU(inplace=True)
        self.daspp_2 = dilated_conv3x3(int(1.5 * out_channels), out_channels // 2, dilation_rate=6)
        self.daspp_3 = dilated_conv3x3(int(2 * out_channels), out_channels // 2, dilation_rate=12)
        self.daspp_4 = dilated_conv3x3(int(2.5 * out_channels), out_channels // 2, dilation_rate=18)
        self.daspp_5 = dilated_conv3x3(int(3 * out_channels), out_channels // 2, dilation_rate=24)
        self.convf = nn.Conv2d(int(3.5 * out_channels), out_channels, kernel_size=3, padding=1, bias=False)
        self.bnf = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        branches = [x]
        stacked = x
        for stage in (self.daspp_1, self.daspp_2, self.daspp_3, self.daspp_4):
            out = stage(stacked)
            branches.append(out)
            stacked = torch.cat((stacked, out), 1)
        branches.append(self.daspp_5(stacked))
        # Fuse the input and all five branch outputs (3.5 * out_channels).
        fused = torch.cat(branches, 1)
        return self.relu(self.bnf(self.convf(fused)))
class SparsetoDenseNet(BaseModel):
    """Sparse to Dense Network

    Densifies the triangulated sparse depth: the learnt sparse depth values
    are scattered into an image, encoded with a ResNet-50 variant, fused with
    the Superpoint encoder's skip features, and decoded into dense depth
    (optionally with three extra lower-resolution scales).
    """
    default_config = {
        'model_type': 'resnet50',
        'input_shape': (240, 320, 1),
        'min_depth': 0.5,
        'max_depth': 10.0,
        'multiscale': True,
        'do_5x5': True,
        'interp_n': True,
    }

    def _init(self):
        ##Encoder for sparse depth
        self.relu = torch.nn.ReLU(inplace=True)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        in_channel = 1
        # NOTE(review): relu/pool are assigned twice; the repetition is
        # harmless but looks accidental.
        self.relu = torch.nn.ReLU(inplace=True)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        pretrained_features = resnet50()
        # Channel counts of the fused (sparse-depth + Superpoint) skips.
        c_out = [int(1.25 * 2048), int(1.25 * 1024), int(1.25 * 512), int(1.25 * 256), int(1.25 * 64)]
        self.conv1 = pretrained_features.conv1
        self.bn1 = pretrained_features.bn1
        self.maxpool = pretrained_features.maxpool
        self.layer1 = pretrained_features.layer1
        self.layer2 = pretrained_features.layer2
        self.layer3 = pretrained_features.layer3
        self.layer4 = pretrained_features.layer4
        d0, d1, d2, d3, d4 = 512, 256, 128, 64, 32
        # Decoder for sparse to dense
        block = Gudi_UpProj_Block_Cat
        block_simple = Gudi_UpProj_Block
        h = self.config['input_shape'][0]
        w = self.config['input_shape'][1]
        self.gud_up_proj_layer1 = self._make_gud_up_conv_layer(block, c_out[0], d0, math.ceil(h / 16), math.ceil(w / 16), c_out[1], self.config['do_5x5'],
                                                               self.config['interp_n'])
        self.gud_up_proj_layer2 = self._make_gud_up_conv_layer(block, d0, d1, math.ceil(h / 8), math.ceil(w / 8), c_out[2], self.config['do_5x5'],
                                                               self.config['interp_n'])
        self.ASPP = ASPP(d1)
        self.gud_up_proj_layer3 = self._make_gud_up_conv_layer(block, d1, d2, math.ceil(h / 4), math.ceil(w / 4), c_out[3], self.config['do_5x5'],
                                                               self.config['interp_n'])
        self.gud_up_proj_layer4 = self._make_gud_up_conv_layer(block, d2, d3, math.ceil(h / 2), math.ceil(w / 2), c_out[4], self.config['do_5x5'],
                                                               self.config['interp_n'])
        self.gud_up_proj_layer5 = self._make_gud_up_conv_layer(block_simple, d3, d4, h, w, self.config['do_5x5'], self.config['interp_n'])
        self.conv_final = nn.Conv2d(d4, 1, kernel_size=3, stride=1, padding=1, bias=True)
        if self.config['multiscale']:
            # 1x1 heads emitting depth at 1/8, 1/4 and 1/2 resolution.
            self.conv_scale8 = nn.Conv2d(d1, 1, kernel_size=1, stride=1, padding=0, bias=True)
            self.conv_scale4 = nn.Conv2d(d2, 1, kernel_size=1, stride=1, padding=0, bias=True)
            self.conv_scale2 = nn.Conv2d(d3, 1, kernel_size=1, stride=1, padding=0, bias=True)

    def _make_gud_up_conv_layer(self, up_proj_block, in_channels, out_channels, oheight, owidth, side_ch=0, do_5x5=True, interp_nearest=True):
        # Thin factory wrapper around the two UpProjection block classes.
        return up_proj_block(in_channels, out_channels, oheight, owidth, side_ch, do_5x5, interp_nearest)

    def _forward(self, data):
        # Inputs from previous modules
        anchor_keypoints = data['anchor_keypoints']
        keypoints_3d = data['keypoints_3d']
        range_mask = data['range_mask']
        features = data['features']
        skip_half = data['skip_half']
        skip_quarter = data['skip_quarter']
        skip_eight = data['skip_eight']
        skip_sixteenth = data['skip_sixteenth']
        sequence_length = data['sequence_length']
        del data
        # Impute learnt sparse depth into a sparse image
        sparse_depth_learnt = torch.zeros((anchor_keypoints.shape[0], self.config['input_shape'][0], self.config['input_shape'][1])).to(anchor_keypoints.device)
        anchor_keypoints_index = anchor_keypoints.long()
        # Build (batch, row, col) index triplets for the scatter below.
        bselect = torch.arange(anchor_keypoints.shape[0], dtype=torch.long)
        bselect = bselect.unsqueeze(1).unsqueeze(1)
        bselect = bselect.repeat(1, anchor_keypoints_index.shape[1], 1).to(anchor_keypoints.device)
        anchor_keypoints_indexchunk = torch.cat((bselect, anchor_keypoints_index[:, :, [1]], anchor_keypoints_index[:, :, [0]]), 2)
        anchor_keypoints_indexchunk = anchor_keypoints_indexchunk.view(-1, 3).t()
        # Depth values clamped to the valid range; out-of-range or masked-out
        # keypoints are zeroed so they read as "no measurement".
        kp3d_val = keypoints_3d[:, :, 2].view(-1, 1).t()
        kp3d_val = torch.clamp(kp3d_val, min=0.0, max=self.config['max_depth'])
        kp3d_filter = (range_mask > 0).view(-1, 1).t()
        kp3d_filter = (kp3d_filter) & (kp3d_val > self.config['min_depth']) & (kp3d_val < self.config['max_depth'])
        kp3d_val = kp3d_val * kp3d_filter.float()
        sparse_depth_learnt[anchor_keypoints_indexchunk.chunk(chunks=3, dim=0)] = kp3d_val
        sparse_depth_learnt = sparse_depth_learnt.unsqueeze(1)
        pred = {}
        # Forward pass
        # Encode the sparse-depth image, concatenating the Superpoint skips
        # at each resolution on the way down.
        x = self.relu(self.bn1(self.conv1(sparse_depth_learnt)))
        skip_half = torch.cat((x, skip_half), 1)
        x = self.maxpool(x)
        x = self.layer1(x)
        skip_quarter = torch.cat((x, skip_quarter), 1)
        x = self.layer2(x)
        skip_eight = torch.cat((x, skip_eight), 1)
        x = self.layer3(x)
        skip_sixteenth = torch.cat((x, skip_sixteenth), 1)
        x = self.layer4(x)
        x = torch.cat((features, x), 1)  # 160
        # Decode back to full resolution with up-projection + skip fusion.
        x = self.gud_up_proj_layer1(x, skip_sixteenth)
        x = self.gud_up_proj_layer2(x, skip_eight)
        x = self.ASPP(x)
        if self.config['multiscale']:
            x_8 = self.conv_scale8(x)
        x = self.gud_up_proj_layer3(x, skip_quarter)
        if self.config['multiscale']:
            x_4 = self.conv_scale4(x)
        x = self.gud_up_proj_layer4(x, skip_half)
        if self.config['multiscale']:
            x_2 = self.conv_scale2(x)
        x = self.gud_up_proj_layer5(x)
        x = self.conv_final(x)
        if self.config['multiscale']:
            pred['multiscale'] = [x_2, x_4, x_8]
        depth_dense = x
        pred['dense_depth'] = depth_dense
        return pred

    def loss(self, pred, data):
        raise NotImplementedError

    def metrics(self):
        raise NotImplementedError
| 13,604 | 37.109244 | 160 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/run-testing.py | import argparse
import cv2
import numpy as np
import torch.backends.cudnn as cudnn
import torch.utils.data
from path import Path
from tqdm import tqdm
from dvmvs.baselines.deltas import superpoint, triangulation, densedepth
from dvmvs.baselines.deltas.utils import *
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.utils import InferenceTimer, visualize_predictions, save_results
# --- Inference configuration -------------------------------------------------
# Input resolution fed to all three DELTAS stages, and the number of
# measurement (source) frames used per reference frame.
input_image_width = 320
input_image_height = 240
n_measurement_frames = 1

# Command-line options; defaults reproduce the released DELTAS evaluation setup.
parser = argparse.ArgumentParser(description='DELTAS Inference',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset-format', default='sequential', metavar='STR',
                    help='dataset format, stacked: sequential: sequential folders')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers')
parser.add_argument('-b', '--batch-size', default=1, type=int, metavar='N', help='mini-batch size')
parser.add_argument('--print-freq', default=200, type=int, metavar='N', help='print frequency')
parser.add_argument('--seed', default=1, type=int, help='seed for random functions, and network initialization')
# Depth range used both for clamping and for the depth-guided epipolar search.
parser.add_argument('--mindepth', type=float, default=0.5, help='minimum depth')
parser.add_argument('--maxdepth', type=float, default=10., help='maximum depth')
parser.add_argument('--width', type=int, default=input_image_width, help='image width')
parser.add_argument('--height', type=int, default=input_image_height, help='image height')
parser.add_argument('--seq_length', default=n_measurement_frames + 1, type=int, help='length of sequence')
parser.add_argument('--seq_gap', default=1, type=int, help='gap between frames for ScanNet dataset')
parser.add_argument('--resume', type=bool, default=True, help='Use pretrained network')
parser.add_argument('--pretrained', dest='pretrained', default='weights/pretrained_deltas', metavar='PATH', help='path to pre-trained model')
# Triangulation / epipolar sampling options.
parser.add_argument('--do_confidence', type=bool, default=True, help='confidence in triangulation')
parser.add_argument('--dist_orthogonal', type=int, default=1, help='offset distance in pixels')
parser.add_argument('--kernel_size', type=int, default=1, help='kernel size')
parser.add_argument('--out_length', type=int, default=100, help='output length of epipolar patch')
parser.add_argument('--depth_range', type=bool, default=True, help='clamp using range of depth')
# Interest-point detection options.
parser.add_argument('--num_kps', default=512, type=int, help='number of interest keypoints')
parser.add_argument('--model_type', type=str, default='resnet50', help='network backbone')
parser.add_argument('--align_corners', type=bool, default=False, help='align corners')
parser.add_argument('--descriptor_dim', type=int, default=128, help='dimension of descriptor')
parser.add_argument('--detection_threshold', type=float, default=0.0005, help='threshold for interest point detection')
parser.add_argument('--frac_superpoint', type=float, default=.5, help='fraction of interest points')
parser.add_argument('--nms_radius', type=int, default=9, help='radius for nms')

# Global iteration counter forwarded to the triangulation net.
n_iter = 0
def get_model():
    """Build the three DELTAS stages and optionally restore pretrained weights.

    Returns:
        (args, supernet, trinet, depthnet): the parsed CLI options and the
        detection, triangulation and densification networks, moved to GPU
        (and wrapped in DataParallel) when CUDA is available.
    """
    args = parser.parse_args()
    torch.manual_seed(args.seed)

    print("=> creating model")
    use_cuda = torch.cuda.is_available()

    # Stage 1: SuperPoint-style interest-point detector / descriptor.
    superpoint_config = {
        'top_k_keypoints': args.num_kps,
        'height': args.height,
        'width': args.width,
        'align_corners': args.align_corners,
        'detection_threshold': args.detection_threshold,
        'frac_superpoint': args.frac_superpoint,
        'nms_radius': args.nms_radius,
    }
    cudnn.benchmark = True
    supernet = superpoint.Superpoint(superpoint_config)
    if use_cuda:
        supernet = supernet.cuda()

    # Stage 2: differentiable multi-view triangulation.
    triangulation_config = {
        'dist_ortogonal': args.dist_orthogonal,  # (sic) key spelling expected by TriangulationNet
        'kernel_size': args.kernel_size,
        'out_length': args.out_length,
        'depth_range': args.depth_range,
        'has_confidence': args.do_confidence,
        'align_corners': args.align_corners,
    }
    trinet = triangulation.TriangulationNet(triangulation_config)
    if use_cuda:
        trinet = trinet.cuda()

    # Stage 3: sparse-to-dense depth completion.
    depth_config = {
        'min_depth': args.mindepth,
        'max_depth': args.maxdepth,
        'input_shape': (args.height, args.width, 1),
    }
    depthnet = densedepth.SparsetoDenseNet(depth_config)
    if use_cuda:
        depthnet = depthnet.cuda()

    # Restore pretrained weights for all three stages from a single checkpoint.
    if args.resume:
        if use_cuda:
            weights = torch.load(args.pretrained)
        else:
            weights = torch.load(args.pretrained, map_location=torch.device('cpu'))
        supernet.load_state_dict(weights['state_dict'], strict=True)
        trinet.load_state_dict(weights['state_dict_tri'], strict=True)
        depthnet.load_state_dict(weights['state_dict_depth'], strict=True)

    # Wrap in DataParallel only when a GPU is present.
    if use_cuda:
        depthnet = torch.nn.DataParallel(depthnet).cuda()
        supernet = torch.nn.DataParallel(supernet).cuda()
        trinet = torch.nn.DataParallel(trinet).cuda()
    return args, supernet, trinet, depthnet
def predict_for_subsequence(args, supernet, trinet, depthnet, tgt_img, tgt_depth, ref_imgs, ref_depths, poses, intrinsics):
    """Run detection, triangulation and densification for one keyframe subsequence.

    Args:
        args: parsed CLI options (seq_length is read here).
        supernet, trinet, depthnet: the three DELTAS stages from get_model().
        tgt_img: (B, 3, H, W) reference image.
        tgt_depth: reference depth placeholder (dummy at inference time).
        ref_imgs: list of (B, 3, H, W) measurement images.
        ref_depths: list of measurement depth placeholders (dummies at inference).
        poses: list of (B, 1, 4, 4) relative poses (measurement <- reference).
        intrinsics: (B, 3, 3) camera intrinsics.

    Returns:
        Dense depth prediction for the reference image (depthnet's 'dense_depth').

    Note: compared with the original, dead computations were removed
    (view_matches / anchor_keypoints / keypoints3d_gt / d_shp and the
    kp3d_val / kp3d_filter pair, which the densification net recomputes
    internally); behavior is unchanged.
    """
    global n_iter  # read-only here; forwarded to the triangulation net
    batch_sz = tgt_img.shape[0]
    # Stack reference + measurement images so one detector pass serves all views.
    img_var = make_symmetric(tgt_img, ref_imgs)
    # Relative poses as (B, n_views, 4, 4) square matrices.
    pose = pose_square(torch.cat(poses, 1))
    seq_val = args.seq_length - 1
    depth = tgt_depth
    depth_ref = torch.stack(ref_depths, 1)

    # Step 1: detect and describe interest points ('t' detector, 's' descriptor).
    pred_sp = supernet({'img': img_var, 'process_tsp': 'ts'})
    keypoints = pred_sp['keypoints'][:batch_sz]
    features = pred_sp['features'][:batch_sz]
    skip_half = pred_sp['skip_half'][:batch_sz]
    skip_quarter = pred_sp['skip_quarter'][:batch_sz]
    skip_eight = pred_sp['skip_eight'][:batch_sz]
    skip_sixteenth = pred_sp['skip_sixteenth'][:batch_sz]
    scores = pred_sp['scores'][:batch_sz]
    desc = pred_sp['descriptors']
    desc_anc = desc[:batch_sz, :, :, :]  # anchor (reference) descriptors
    desc_view = reorder_desc(desc[batch_sz:, :, :, :], batch_sz)  # measurement views

    # Step 2: match along epipolar lines and triangulate the matches.
    data_sd = {'iter': n_iter, 'intrinsics': intrinsics, 'pose': pose, 'depth': depth,
               'ref_depths': depth_ref, 'scores': scores, 'keypoints': keypoints,
               'descriptors': desc_anc, 'descriptors_views': desc_view,
               'img_shape': tgt_img.shape, 'sequence_length': seq_val}
    pred_sd = trinet(data_sd)
    keypoints_3d = pred_sd['keypoints_3d']
    # Number of views in which each keypoint's reprojection stayed in range.
    range_mask = torch.sum(pred_sd['range_kp'], 1)

    # Step 3: densify the sparse triangulated depth.
    data_dd = {'anchor_keypoints': keypoints, 'keypoints_3d': keypoints_3d,
               'sequence_length': args.seq_length, 'skip_sixteenth': skip_sixteenth,
               'range_mask': range_mask, 'features': features, 'skip_half': skip_half,
               'skip_quarter': skip_quarter, 'skip_eight': skip_eight}
    pred_dd = depthnet(data_dd)
    return pred_dd['dense_depth']
def predict():
    """Run DELTAS over every pre-computed keyframe index file and save results.

    For each scene listed under Config.test_offline_data_path/indices, loads
    calibration, poses and frame lists, runs the three-stage pipeline once per
    reference frame, and writes predictions/groundtruths via save_results().
    """
    print("System: DELTAS")
    device = torch.device('cuda')
    cudnn.benchmark = True
    args, supernet, trinet, depthnet = get_model()
    supernet.eval()
    trinet.eval()
    depthnet.eval()

    # Normalization matching training: scale to [0, 1] then standardize with 0.5.
    scale_rgb = 255.0
    mean_rgb = [0.5, 0.5, 0.5]
    std_rgb = [0.5, 0.5, 0.5]

    # Ground-truth depth inputs are unused at inference; feed a placeholder.
    dummy_input = torch.empty(size=(1, input_image_height, input_image_width), dtype=torch.float).to(device)

    data_path = Path(Config.test_offline_data_path)
    # Keyframe index files are filtered by measurement count (and dataset name, if set).
    if Config.test_dataset_name is None:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*nmeas+{}*".format(n_measurement_frames)))
    else:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*nmeas+{}*".format(n_measurement_frames)))
    for iteration, keyframe_index_file in enumerate(keyframe_index_files):
        # Filename layout: keyframing+dataset+scene+...+nmeas+K
        keyframing_type, dataset_name, scene_name, _, _ = keyframe_index_file.split("/")[-1].split("+")
        scene_folder = data_path / dataset_name / scene_name
        print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))
        keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")
        # Scene calibration and camera trajectory.
        K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
        poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
        image_filenames = sorted((scene_folder / 'images').files("*.png"))
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
        input_filenames = []
        for image_filename in image_filenames:
            input_filenames.append(image_filename.split("/")[-1])
        inference_timer = InferenceTimer()
        predictions = []
        reference_depths = []
        with torch.no_grad():
            for i in tqdm(range(0, len(keyframe_index_file_lines))):
                keyframe_index_file_line = keyframe_index_file_lines[i]
                if keyframe_index_file_line == "TRACKING LOST":
                    continue
                else:
                    # Line format: reference filename followed by measurement filenames.
                    current_input_filenames = keyframe_index_file_line.split(" ")
                    current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]
                    reference_index = current_indices[0]
                    measurement_indices = current_indices[1:]
                    reference_pose = poses[reference_index]
                    reference_image = load_image(image_filenames[reference_index])
                    # Depth PNGs are stored in millimeters; convert to meters.
                    reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0
                    preprocessor = PreprocessImage(K=K,
                                                   old_width=reference_image.shape[1],
                                                   old_height=reference_image.shape[0],
                                                   new_width=input_image_width,
                                                   new_height=input_image_height,
                                                   distortion_crop=0,
                                                   perform_crop=False)
                    reference_image = preprocessor.apply_rgb(image=reference_image,
                                                             scale_rgb=scale_rgb,
                                                             mean_rgb=mean_rgb,
                                                             std_rgb=std_rgb)
                    reference_depth = preprocessor.apply_depth(reference_depth)
                    # HWC -> CHW, add batch dimension.
                    reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                    # DELTAS ALWAYS REQUIRE A PREDETERMINED NUMBER OF MEASUREMENT FRAMES, SO FAKE IT
                    while len(measurement_indices) < n_measurement_frames:
                        measurement_indices.append(measurement_indices[0])
                    measurement_poses_torch = []
                    measurement_images_torch = []
                    for measurement_index in measurement_indices:
                        measurement_image = load_image(image_filenames[measurement_index])
                        measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                                   scale_rgb=scale_rgb,
                                                                   mean_rgb=mean_rgb,
                                                                   std_rgb=std_rgb)
                        measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                        measurement_pose = poses[measurement_index]
                        # Relative pose: measurement <- reference.
                        measurement_pose = (np.linalg.inv(measurement_pose) @ reference_pose)
                        measurement_pose_torch = torch.from_numpy(measurement_pose).float().to(device).unsqueeze(0).unsqueeze(0)
                        measurement_poses_torch.append(measurement_pose_torch)
                        measurement_images_torch.append(measurement_image_torch)
                    K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)
                    # Depth inputs are dummies at test time (not used by the network).
                    tgt_depth = dummy_input
                    ref_depths = [dummy_input for _ in range(n_measurement_frames)]
                    inference_timer.record_start_time()
                    prediction = predict_for_subsequence(args, supernet, trinet, depthnet,
                                                         tgt_img=reference_image_torch,
                                                         tgt_depth=tgt_depth,
                                                         ref_imgs=measurement_images_torch,
                                                         ref_depths=ref_depths,
                                                         poses=measurement_poses_torch,
                                                         intrinsics=K_torch)
                    inference_timer.record_end_time_and_elapsed_time()
                    prediction = prediction.cpu().numpy().squeeze()
                    reference_depths.append(reference_depth)
                    predictions.append(prediction)
                    if Config.test_visualize:
                        visualize_predictions(numpy_reference_image=reference_image,
                                              numpy_measurement_image=measurement_image,
                                              numpy_predicted_depth=prediction,
                                              normalization_mean=mean_rgb,
                                              normalization_std=std_rgb,
                                              normalization_scale=scale_rgb)
        inference_timer.print_statistics()
        system_name = "{}_{}_{}_{}_{}_deltas".format(keyframing_type,
                                                     dataset_name,
                                                     input_image_width,
                                                     input_image_height,
                                                     n_measurement_frames)
        save_results(predictions=predictions,
                     groundtruths=reference_depths,
                     system_name=system_name,
                     scene_name=scene_name,
                     save_folder=Config.test_result_folder)
# Script entry point: run the full offline evaluation.
if __name__ == '__main__':
    predict()
| 15,377 | 46.462963 | 170 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/resnet_s2d.py | import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Return a bias-free 3x3 conv whose padding equals its dilation
    (spatial size is preserved at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Standard two-conv residual block (ResNet v1); output channels = planes * expansion."""

    expansion = 1
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # conv1 (and the optional downsample path) carry any spatial stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the shortcut only when shapes differ (downsample provided).
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + shortcut)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck block; output channels = planes * expansion."""

    expansion = 4
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width and groups (ResNeXt-style).
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and the optional downsample path) carry any spatial stride.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the shortcut only when shapes differ (downsample provided).
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + shortcut)
class ResNet(nn.Module):
    """Slim ResNet encoder over a single-channel (sparse depth) input.

    Differences from torchvision's ResNet: the stem takes 1 input channel
    (self.input_ch) and the stage widths are reduced to (16, 16, 32, 64, 128)
    before block expansion; there is no pooling/classifier head -- forward()
    returns the final stage's feature map.
    """

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        """
        Args:
            block: residual block class (BasicBlock or Bottleneck).
            layers: number of blocks in each of the four stages.
            num_classes: accepted for torchvision API compatibility; unused
                here (no classification head is defined).
            zero_init_residual: zero the last BN of each block so every
                residual branch starts as identity.
            groups, width_per_group: grouped-convolution parameters forwarded
                to the blocks.
            replace_stride_with_dilation: optional 3 flags (stages 2-4) to
                trade stride for dilation.
            norm_layer: normalization layer class; BatchNorm2d by default.
        """
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.input_ch = 1  # single-channel sparse-depth input
        self.dilation = 1
        c1, c2, c3, c4, c5 = 16, 16, 32, 64, 128  # reduced channel widths
        self.inplanes = c1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(self.input_ch, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, c2, layers[0])
        self.layer2 = self._make_layer(block, c3, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, c4, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, c5, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        # Kaiming init for convs, unit/zero init for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack `blocks` residual blocks; only the first may stride/downsample."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Convert this stage's stride into dilation instead.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when the spatial size or channel count changes.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def forward(self, x):
        """Encode a (B, 1, H, W) input into the final stage's feature map."""
        return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
    """Build a ResNet-18 style backbone.

    See `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
    """Build a ResNet-34 style backbone.

    See `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
    """Build a ResNet-50 style backbone.

    See `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
    """Build a ResNet-101 style backbone.

    See `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
    """Build a ResNet-152 style backbone.

    See `"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
| 9,946 | 36.394737 | 106 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/deltas/triangulation.py | import numpy as np
import torch
from torch import svd
from .base_model import BaseModel
def homogeneous_to_euclidean(points):
    """Convert homogeneous points to euclidean coordinates.

    Args:
        points: numpy array or torch tensor of shape (N, M + 1) holding N
            homogeneous points of dimension M.

    Returns:
        Array/tensor of shape (N, M): each point divided by its last
        coordinate, with that coordinate dropped.

    Raises:
        TypeError: for inputs that are neither numpy arrays nor torch tensors.
    """
    if isinstance(points, np.ndarray):
        transposed = points.T
        return (transposed[:-1] / transposed[-1]).T
    if torch.is_tensor(points):
        transposed = points.transpose(1, 0)
        return (transposed[:-1] / transposed[-1]).transpose(1, 0)
    raise TypeError("Works only with numpy arrays and PyTorch tensors.")
def triangulate_point_from_multiple_views_linear_torch_batch(proj_matricies, points, confidences=None):
    """Batched DLT (linear) triangulation in PyTorch.

    Args:
        proj_matricies: torch tensor of shape (n_views, 3, 4), one projection
            matrix per view.
        points: 2D observations; indexed here as (n_views, n_points, 2)
            (note: the docstring inherited from the non-batch variant said
            (N, 2), but the code transposes views/points below).
        confidences: optional (n_points, n_views) per-view weights in
            [0.0, 1.0]; defaults to all ones.

    Returns:
        (n_points, 3) tensor of triangulated euclidean points.
    """
    assert len(proj_matricies) == len(points)
    n_views = len(proj_matricies)
    if confidences is None:
        confidences = torch.ones(points.shape[1], n_views, dtype=torch.float32, device=points.device)
    ##multiple points
    # Build, per point, the homogeneous system A x = 0 from the per-view
    # constraints (u * P_row2 - P_row0) and (v * P_row2 - P_row1).
    points_t = points.transpose(0, 1)
    proj_mat = proj_matricies[:, 2:3].expand(n_views, 2, 4).unsqueeze(0)
    points_tview = points_t.view(points_t.size(0), n_views, 2, 1).expand(points_t.size(0), n_views, 2, 4)
    A_all = proj_mat * points_tview
    A_all -= proj_matricies[:, :2].unsqueeze(0)
    # Weight each view's pair of equations by its confidence.
    A_all *= confidences.view(confidences.size(0), n_views, 1, 1)
    A_all = A_all.contiguous().view(A_all.size(0), A_all.size(1) * A_all.size(2), 4)
    # The solution is the right singular vector of the smallest singular
    # value; the leading minus sign is immaterial because homogeneous
    # coordinates are scale- (and sign-) invariant before dehomogenization.
    U, S, V = svd(A_all)
    points_3d_homo_all = -V[:, :, 3]
    points_3d = homogeneous_to_euclidean(points_3d_homo_all)
    return points_3d
def triangulate_batch_of_points(proj_matricies_batch, points_batch, confidences_batch=None):
    """Triangulate each sample of a batch independently.

    Args:
        proj_matricies_batch: (batch, n_views, 3, 4) projection matrices.
        points_batch: per-sample 2D observations, indexed as views x points.
        confidences_batch: optional per-sample confidence tensors.

    Returns:
        List of length batch with one (n_points, 3) tensor per sample.
    """
    triangulated = []
    for sample_idx, (proj, pts) in enumerate(zip(proj_matricies_batch, points_batch)):
        conf = None if confidences_batch is None else confidences_batch[sample_idx]
        triangulated.append(
            triangulate_point_from_multiple_views_linear_torch_batch(proj, pts, confidences=conf))
    return triangulated
def integrate_tensor_2d(heatmaps, softmax=True):
    """Compute per-heatmap soft-argmax ("center of mass") coordinates.

    Args:
        heatmaps: tensor of shape (batch, n_heatmaps, h, w).
        softmax: if True, normalize each heatmap with a spatial softmax;
            otherwise clamp negatives with ReLU and divide by the total mass.

    Returns:
        Tensor of shape (batch, n_heatmaps, 2) holding (x, y) coordinates.
    """
    batch, n_maps, height, width = heatmaps.shape
    flat = heatmaps.reshape((batch, n_maps, -1))
    if softmax:
        flat = torch.nn.functional.softmax(flat, dim=2)
    else:
        flat = torch.nn.functional.relu(flat)
    maps = flat.reshape((batch, n_maps, height, width))

    col_mass = maps.sum(dim=2)  # mass per column -> drives x
    row_mass = maps.sum(dim=3)  # mass per row    -> drives y
    col_idx = torch.arange(width).type(torch.float).to(col_mass.device)
    row_idx = torch.arange(height).type(torch.float).to(row_mass.device)
    x = (col_mass * col_idx).sum(dim=2, keepdim=True)
    y = (row_mass * row_idx).sum(dim=2, keepdim=True)
    if not softmax:
        # The softmax branch already sums to 1; only the ReLU branch needs
        # explicit normalization by the total mass.
        x = x / col_mass.sum(dim=2, keepdim=True)
        y = y / row_mass.sum(dim=2, keepdim=True)
    return torch.cat((x, y), dim=2).reshape((batch, n_maps, 2))
def unproject_ij(keypoints_2d, z, camera_matrix):
    """Back-project pixel coordinates to 3D camera coordinates.

    Args:
        keypoints_2d: (B, N, 2) pixel coordinates.
        z: per-keypoint depth, squeezed to (B, N) from dims 1 and 2.
        camera_matrix: (B, 3, 3) intrinsics.

    Returns:
        (B, 3, N) tensor of 3D points (x, y, z).
    """
    depth = z.squeeze(2).squeeze(1)
    fx = camera_matrix[:, [0], [0]]
    fy = camera_matrix[:, [1], [1]]
    cx = camera_matrix[:, [0], [2]]
    cy = camera_matrix[:, [1], [2]]
    # Standard pinhole back-projection: X = (u - cx) / fx * Z, etc.
    x = (keypoints_2d[:, :, 0] - cx) / fx * depth
    y = (keypoints_2d[:, :, 1] - cy) / fy * depth
    return torch.cat((x.unsqueeze(1), y.unsqueeze(1), depth.unsqueeze(1)), dim=1)
def reproject_points(pose, pts, intrinsic, Z):
    """Warp 2D points from the anchor view into other views at a fixed depth Z.

    Computes K R K^-1 x + K t / Z in homogeneous coordinates (a plane-induced
    homography at depth Z) and dehomogenizes.

    Args:
        pose: (B, n_views, 4, 4) relative poses.
        pts: (B, N, 2) pixel coordinates in the anchor view.
        intrinsic: (B, 3, 3) intrinsics shared across views.
        Z: scalar depth at which the points are reprojected.

    Returns:
        (B, n_views, 2, N) reprojected pixel coordinates.
    """
    homo = torch.ones((pts.shape[0], pts.shape[1], 3)).to(pts.device)
    homo[:, :, :2] = pts
    homo = homo.unsqueeze(1)  # (B, 1, N, 3) to broadcast over views
    K = intrinsic.unsqueeze(1)
    rot = pose[:, :, :3, :3]
    trans = pose[:, :, :3, 3:]
    rotated = ((K @ rot) @ (torch.inverse(K))) @ homo.transpose(3, 2)
    shifted = rotated + K @ trans / Z
    normalized = shifted / shifted[:, :, 2:, :]
    return normalized[:, :, :2, :]
def patch_for_kp(keypoints, ker_size, out_length, roi_patch):
    """Build a small axis-aligned patch around every keypoint.

    Args:
        keypoints: (B, N, 2) keypoint locations in the anchor view.
        ker_size: patch height in pixels; also scales the width.
        out_length: epipolar sample count; width = ker_size * roi_w / out_length.
        roi_patch: (B, n_views, N, 5) epipolar patches; column 3 is the width.

    Returns:
        (B, n_views, N, 5) patches encoded as (xc, yc, h, w, theta) with
        theta fixed to zero.
    """
    n_views = roi_patch.shape[1]
    centers = keypoints.unsqueeze(1).repeat(1, n_views, 1, 1)
    xc = centers[:, :, :, 0]
    yc = centers[:, :, :, 1]
    h = torch.ones((centers.shape[0], n_views, centers.shape[2])).to(roi_patch.device) * ker_size
    # Width shrinks in proportion to the epipolar patch's own width.
    w = ker_size * roi_patch[:, :, :, 3] / out_length
    theta = torch.zeros((centers.shape[0], n_views, centers.shape[2])).to(roi_patch.device)
    return torch.stack((xc, yc, h, w, theta), 3)
def match_corr(embed_ref, embed_srch):
    """ Matches the two embeddings using the correlation layer. As per usual
    it expects input tensors of the form [B, C, H, W].
    Args:
        embed_ref: (torch.Tensor) The embedding of the reference image, or
            the template of reference (the average of many embeddings for
            example).
        embed_srch: (torch.Tensor) The embedding of the search image.
    Returns:
        match_map: (torch.Tensor) The correlation between the template and
            the search embedding, one map per batch element.
    """
    _, _, k1, k2 = embed_ref.shape
    b, c, h, w = embed_srch.shape
    # Padding heuristic: no padding for a 1x1 template; otherwise pad the
    # width by 1 (conv2d padding is (height, width)).
    # NOTE(review): this preserves the output width only for specific
    # template widths -- confirm against callers.
    if k1 == 1 and k2 == 1:
        pad_img = (0, 0)
    else:
        pad_img = (0, 1)
    # Batch trick: fold the batch into the channel dimension and use a
    # grouped convolution so each sample is correlated only with its own
    # template, then move the per-group outputs back to the batch dim.
    match_map = torch.nn.functional.conv2d(embed_srch.contiguous().view(1, b * c, h, w), embed_ref, groups=b, padding=pad_img)
    match_map = match_map.permute(1, 0, 2, 3)
    return match_map
def create_transform_matrix(roi_patch):
    """Expand (xc, yc, h, w, theta) patches into 3x3 rigid 2D transforms.

    Each matrix rotates by theta and translates to (xc, yc); the h and w
    entries are not encoded here (they only parametrize the sampling grid).

    Args:
        roi_patch: (B, n_views, N, 5) patch parameters.

    Returns:
        (B, n_views, N, 3, 3) homogeneous transform matrices.
    """
    b, v, n = roi_patch.shape[0], roi_patch.shape[1], roi_patch.shape[2]
    mat = torch.zeros((b, v, n, 3, 3)).to(roi_patch.device)
    angle = roi_patch[:, :, :, 4]
    cos_t = torch.cos(angle)
    sin_t = torch.sin(angle)
    mat[:, :, :, 0, 0] = cos_t
    mat[:, :, :, 0, 1] = -sin_t
    mat[:, :, :, 0, 2] = roi_patch[:, :, :, 0]
    mat[:, :, :, 1, 0] = sin_t
    mat[:, :, :, 1, 1] = cos_t
    mat[:, :, :, 1, 2] = roi_patch[:, :, :, 1]
    mat[:, :, :, 2, 2] = 1.0
    return mat
def patch_sampler(roi_patch, out_length=640, distance=2, do_img=True, align_corners=False):
    """Creates, scales and aligns the patch.

    Builds, for each (xc, yc, h, w, theta) patch, a grid of sample
    coordinates: `out_length` positions along the (rotated) epipolar axis
    times `2*distance+1` positions orthogonal to it.

    Args:
        roi_patch: (B, n_views, N, 5) patches as (xc, yc, h, w, theta).
        out_length: number of samples along the epipolar direction.
        distance: orthogonal half-extent in pixels.
        do_img: unused in this function; kept for API compatibility.
        align_corners: unused in this function; kept for API compatibility.

    Returns:
        (patch_kp, transform_matrix): sample coordinates of shape
        (B, n_views, N, 2*distance+1, out_length, 2) and the
        (B, n_views, N, 3, 3) transforms applied.
    """
    ##create a regular grid centered at xc,yc
    # Local (unrotated) grid; homogeneous third coordinate fixed at 1.
    if out_length > 1:
        width_sample = torch.linspace(-0.5, 0.5, steps=out_length)
    else:
        width_sample = torch.tensor([0.])
    height_sample = torch.linspace(-distance, distance, steps=2 * distance + 1)
    # NOTE(review): torch.meshgrid is called without indexing=; this relies
    # on the legacy 'ij' default (emits a warning on newer PyTorch).
    xv, yv = torch.meshgrid([width_sample, height_sample])
    zv = torch.ones(xv.shape)
    patch_sample = torch.stack((xv, yv, zv), 2).to(roi_patch.device)
    arange_array = patch_sample.repeat(roi_patch.shape[0], roi_patch.shape[1], roi_patch.shape[2], 1, 1, 1)
    ## scaling the x dimension to ensure unform sampling
    arange_array[:, :, :, :, :, 0] = (roi_patch[:, :, :, [3]].unsqueeze(4)) * arange_array[:, :, :, :, :, 0]
    aras = arange_array.shape
    # Flatten the (length x orthogonal) grid so one matmul transforms it all.
    arange_array = arange_array.contiguous().view(aras[0], aras[1], aras[2], aras[3] * aras[4], aras[5]).transpose(4, 3)
    # create matrix transform
    transform_matrix = create_transform_matrix(roi_patch)
    # transform: rotate by theta and translate to (xc, yc).
    patch_kp = transform_matrix @ arange_array
    patch_kp = patch_kp.view(aras[0], aras[1], aras[2], aras[5], aras[3], aras[4])
    # Drop the homogeneous coordinate and move (x, y) to the last dimension.
    patch_kp = patch_kp[:, :, :, :2, :, :].transpose(5, 3)
    return patch_kp, transform_matrix
def patch_for_depth_guided_range(keypoints, pose, intrinsic, img_shape, distance=2, min_depth=0.5, max_depth=10.0, align_corners=False):
    """Compute, per keypoint and view, the depth-bounded epipolar search patch.

    Each patch is encoded as (xc, yc, h, w, theta): the segment between the
    keypoint's reprojections at min_depth and max_depth (clipped to zero when
    either endpoint leaves the image), with a fixed orthogonal height and the
    epipolar-line angle.

    Args:
        keypoints: (B, N, 2) keypoints in the anchor view.
        pose: (B, n_views, 4, 4) relative poses.
        intrinsic: (B, 3, 3) intrinsics.
        img_shape: image shape; indices 2/3 give height/width.
        distance: orthogonal half-extent in pixels (patch height = max(2*distance, 1)).
        min_depth, max_depth: depth range bounding the search segment.
        align_corners: pixel-center convention for the in-image test.

    Returns:
        (B, n_views, N, 5) patch parameters.

    Raises:
        ValueError: if any epipolar-line angle is NaN. (This replaces a
        leftover interactive `pdb.set_trace()` that would hang any
        non-interactive run.)
    """
    n_view = pose.shape[1]
    pts = keypoints
    # Homogeneous keypoints, broadcastable over views: (B, 1, N, 3).
    kp_arr = torch.ones((pts.shape[0], pts.shape[1], 3)).to(pts.device)
    kp_arr[:, :, :2] = pts
    kp_arr = kp_arr.unsqueeze(1)
    # Epipolar lines l = F x in every other view.
    Fund, _ = get_fundamental_matrix(pose, intrinsic, intrinsic)
    lines_epi = (Fund @ (kp_arr.transpose(3, 2))).transpose(3, 2)
    height = img_shape[2]
    width = img_shape[3]
    # Endpoint buffers default to zero (degenerate patch for rejected segments).
    array_zeros = torch.zeros((pts.shape[0], n_view, pts.shape[1])).to(pts.device)
    x2ord = array_zeros.clone().detach()
    y2ord = array_zeros.clone().detach()
    x3ord = array_zeros.clone().detach()
    y3ord = array_zeros.clone().detach()
    x0_f = array_zeros.clone().detach()
    y0_f = array_zeros.clone().detach()
    x1_f = array_zeros.clone().detach()
    y1_f = array_zeros.clone().detach()
    # Reproject every keypoint at the near and far depth planes.
    x2_y2 = reproject_points(pose, keypoints, intrinsic, min_depth)
    x2 = x2_y2[:, :, 0, :]
    y2 = x2_y2[:, :, 1, :]
    x3_y3 = reproject_points(pose, keypoints, intrinsic, max_depth)
    x3 = x3_y3[:, :, 0, :]
    y3 = x3_y3[:, :, 1, :]
    # Order the two endpoints so (x2ord, y2ord) carries the smaller x.
    x_ord = x3 >= x2
    x2ord[x_ord] = x2[x_ord]
    y2ord[x_ord] = y2[x_ord]
    x3ord[x_ord] = x3[x_ord]
    y3ord[x_ord] = y3[x_ord]
    cx_ord = x2 > x3
    x2ord[cx_ord] = x3[cx_ord]
    y2ord[cx_ord] = y3[cx_ord]
    x3ord[cx_ord] = x2[cx_ord]
    y3ord[cx_ord] = y2[cx_ord]
    # Keep only segments whose endpoints both lie inside the image; the
    # bounds differ by half a pixel depending on the corner convention.
    if align_corners:
        x_ord0 = (x2ord >= 0) & (x2ord < width)
        x_ord1 = (x3ord >= 0) & (x3ord < width)
        y_ord0 = (y2ord >= 0) & (y2ord < height)
        y_ord1 = (y3ord >= 0) & (y3ord < height)
    else:
        x_ord0 = (x2ord >= -0.5) & (x2ord < (width - 0.5))
        x_ord1 = (x3ord >= -0.5) & (x3ord < (width - 0.5))
        y_ord0 = (y2ord >= -0.5) & (y2ord < (height - 0.5))
        y_ord1 = (y3ord >= -0.5) & (y3ord < (height - 0.5))
    all_range = x_ord0 & x_ord1 & y_ord0 & y_ord1
    x0_f[all_range] = x2ord[all_range]
    y0_f[all_range] = y2ord[all_range]
    x1_f[all_range] = x3ord[all_range]
    y1_f[all_range] = y3ord[all_range]
    # Out-of-range entries already hold zeros, so no explicit reset is needed
    # (the original's cond_null re-zeroing was a no-op and was removed).

    ## Box representation: xc, yc, h, w, theta.
    xc = (x0_f + x1_f) / 2.
    yc = (y0_f + y1_f) / 2.
    h = torch.ones((pts.shape[0], n_view, pts.shape[1])).to(pts.device) * max(2 * distance, 1)
    w = torch.sqrt((x1_f - x0_f) ** 2 + (y1_f - y0_f) ** 2)
    theta = torch.atan2(-lines_epi[:, :, :, 0], lines_epi[:, :, :, 1])
    if torch.isnan(theta).any():
        # A NaN angle means the fundamental matrix produced a degenerate
        # epipolar line; fail loudly instead of dropping into a debugger.
        raise ValueError("patch_for_depth_guided_range: NaN epipolar line angle")
    roi_patch = torch.stack((xc, yc, h, w, theta), 3)
    return roi_patch
def sample_descriptors_epi(keypoints, descriptors, s, normalize=True, align_corner=False):
    """Bilinearly sample a descriptor map at (sub-pixel) keypoint locations.

    Args:
        keypoints: (B, N, 2) or (B, H', W', 2) locations in full-resolution
            pixel coordinates.
        descriptors: (B, C, h, w) descriptor map at stride `s`.
        s: stride of the descriptor map relative to the keypoint coordinates.
        normalize: if True, L2-normalize sampled descriptors over channels.
        align_corner: forwarded to grid_sample's align_corners.

    Returns:
        (B, C, ..., N) sampled (and optionally normalized) descriptors.
    """
    b, c, h, w = descriptors.shape
    # Map pixel coordinates into grid_sample's [-1, 1] space, accounting for
    # the stride-s cell centers.
    grid = keypoints - s / 2 + 0.5
    grid = grid / torch.tensor([(w * s - s / 2 - 0.5), (h * s - s / 2 - 0.5)], device=keypoints.device)[None]
    grid = grid * 2 - 1
    sampled = descriptors
    if len(keypoints.shape) == 4:
        sampled = torch.nn.functional.grid_sample(
            descriptors, grid.view(b, keypoints.shape[1], keypoints.shape[2], 2),
            mode='bilinear', align_corners=align_corner)  ##pythorch 1.3+
    elif len(keypoints.shape) == 3:
        sampled = torch.nn.functional.grid_sample(
            descriptors, grid.view(b, 1, -1, 2),
            mode='bilinear', align_corners=align_corner)  ##pythorch 1.3+
    if normalize:
        sampled = torch.nn.functional.normalize(sampled, p=2, dim=1)
    return sampled
def vec_to_skew_symmetric(v):
    """Build the skew-symmetric (cross-product) matrix [v]_x for each row of v.

    Args:
        v: (B, 3) batch of vectors.

    Returns:
        (B, 3, 3) matrices M such that M @ u == v x u for each sample.
    """
    zeros = torch.zeros_like(v[:, 0])
    rows = torch.stack([
        zeros, -v[:, 2], v[:, 1],
        v[:, 2], zeros, -v[:, 0],
        -v[:, 1], v[:, 0], zeros,
    ], dim=1)
    return rows.reshape(-1, 3, 3)
def get_fundamental_matrix(T_10, K0, K1):
    """Generates fundamental matrix"""
    ##Expects BX3x3 matrix
    K0_inv = torch.inverse(K0).unsqueeze(1)
    K1_inv_t = torch.inverse(K1).transpose(1, 2).unsqueeze(1)
    T = T_10.view(-1, 4, 4)
    # Essential matrix E = [t]_x R (skew matrix built inline).
    t = T[:, :3, 3]
    o = torch.zeros_like(t[:, 0])
    t_skew = torch.stack([o, -t[:, 2], t[:, 1],
                          t[:, 2], o, -t[:, 0],
                          -t[:, 1], t[:, 0], o], dim=1).reshape(-1, 3, 3)
    E = (t_skew @ T[:, :3, :3]).view(K0_inv.shape[0], -1, 3, 3)
    Fu = (K1_inv_t @ E) @ K0_inv  ##Fundamental matrix
    # Normalize by the bottom-right entry; zeros are replaced in-place by 1
    # (which also makes the normalized F[2, 2] equal to 1 in that case).
    denom = Fu[:, :, 2:, 2:]
    denom[denom == 0.] = 1.
    Fu = Fu / denom  ##normalize it
    return Fu, E
class TriangulationNet(BaseModel):
    """Triangulation module"""
    # Default hyper-parameters (overridable via the BaseModel config):
    #   dist_ortogonal - half-width (px) of the band sampled around each epipolar line
    #   kernel_size    - side of the patch sampled around each anchor keypoint
    #   out_length     - number of samples taken along each epipolar patch
    #   arg_max_weight - temperature multiplier for the soft-argmax over the match map
    #   min_depth/max_depth - depth interval used to bound the epipolar search
    default_config = {
        'depth_range': True,
        'arg_max_weight': 1.0,
        'dist_ortogonal': 1,
        'kernel_size': 1,
        'out_length': 100,
        'has_confidence': True,
        'min_depth': 0.5,
        'max_depth': 10.0,
        'align_corners': False,
    }
    def _init(self):
        # Only a BN over the correlation map and (optionally) a small
        # confidence head are learnable; the matching itself is non-parametric.
        self.relu = torch.nn.ReLU(inplace=False)
        self.bn_match_convD = torch.nn.BatchNorm2d(1)
        ##confidence layers
        pool_shape = (self.config['out_length'], 1 + (5 - self.config['kernel_size']))
        pad_shape = (0, 1) if self.config['dist_ortogonal'] == 2 else (1, 1)
        if self.config['has_confidence']:
            self.convD_confa = torch.nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=pad_shape)
            self.bnconvD_confa = torch.nn.BatchNorm2d(1)
            self.pool_convD_conf = torch.nn.MaxPool2d(pool_shape, stride=self.config['out_length'], return_indices=False)
    def _forward(self, data):
        """Match anchor keypoints along epipolar patches in every view and
        triangulate the matches into 3D points.

        Expects in ``data``: pose, intrinsics, img_shape, descriptors,
        descriptors_views, sequence_length, keypoints, depth, ref_depths.
        Returns a dict with keypoints, per-view matches, confidences,
        triangulated 3D points and 3D ground truth.
        """
        pose = data['pose']
        intrinsic = data['intrinsics']
        img_shape = data['img_shape']
        desc = data['descriptors']
        desc_views = data['descriptors_views']
        sequence_length = data['sequence_length']
        keypoints = data['keypoints']
        depth_all = data['depth']
        depth_ref = data['ref_depths']
        del data
        # st: stride between full-resolution image and descriptor map.
        st = img_shape[2] // desc.shape[2]
        dist = self.config['dist_ortogonal']
        ker_size = self.config['kernel_size']
        out_length = self.config['out_length']
        pred = {}
        pred['keypoints'] = keypoints
        ## Creates patches for matching
        depth_at_kp = sample_descriptors_epi(keypoints, depth_all.unsqueeze(1), 1, False, self.config['align_corners'])
        roi_patch = patch_for_depth_guided_range(keypoints, pose, intrinsic, img_shape, distance=dist, min_depth=self.config['min_depth'],
                                                 max_depth=self.config['max_depth'], align_corners=self.config['align_corners'])
        keypoint_patch = patch_for_kp(keypoints, ker_size, out_length, roi_patch)
        ## Extract sampled keypoints
        kp_image, transform_matrix = patch_sampler(roi_patch, out_length=out_length, distance=dist, do_img=True, align_corners=self.config['align_corners'])
        kp_anchor, _ = patch_sampler(keypoint_patch, out_length=ker_size, distance=ker_size // 2, do_img=False, align_corners=self.config['align_corners'])
        ## Reshape along batch dimenstion
        kp_image_shp = kp_image.shape
        kp_image = kp_image.contiguous().view(kp_image_shp[0] * kp_image_shp[1], kp_image_shp[2], kp_image_shp[3] * kp_image_shp[4], kp_image_shp[5])
        kp_anchor_shp = kp_anchor.shape
        kp_anchor = kp_anchor.contiguous().view(kp_anchor_shp[0] * kp_anchor_shp[1], kp_image_shp[2], kp_anchor_shp[3] * kp_anchor_shp[4], kp_anchor_shp[5])
        ## Sample
        desc_views_shp = desc_views.shape
        desc_views = desc_views.reshape(desc_views_shp[0] * desc_views_shp[1], desc_views_shp[2], desc_views_shp[3], desc_views_shp[4])
        descriptor_at_image = sample_descriptors_epi(kp_image.detach(), desc_views, st, True, self.config['align_corners'])
        descriptor_at_anchor = sample_descriptors_epi(kp_anchor.detach(), desc.repeat_interleave(sequence_length, dim=0), st, True,
                                                      self.config['align_corners'])
        del kp_image, kp_anchor, keypoint_patch, desc, desc_views
        descriptor_at_anchor = descriptor_at_anchor.contiguous().view(descriptor_at_anchor.shape[0], descriptor_at_anchor.shape[1], kp_anchor_shp[2],
                                                                      kp_anchor_shp[3], kp_anchor_shp[4])
        descriptor_at_image = descriptor_at_image.contiguous().view(descriptor_at_image.shape[0], descriptor_at_image.shape[1], kp_image_shp[2],
                                                                    kp_image_shp[3], kp_image_shp[4])
        descriptor_at_anchor = descriptor_at_anchor.transpose(2, 1)
        descriptor_at_image = descriptor_at_image.transpose(2, 1)
        dancs = descriptor_at_anchor.shape
        dimgs = descriptor_at_image.shape
        descriptor_at_anchor = descriptor_at_anchor.contiguous().view(dancs[0] * dancs[1], dancs[2], dancs[3], dancs[4])
        descriptor_at_image = descriptor_at_image.contiguous().view(dimgs[0] * dimgs[1], dimgs[2], dimgs[3], dimgs[4])
        ## Do cross correlation
        match_map = match_corr(descriptor_at_anchor, descriptor_at_image)
        match_map = self.bn_match_convD(match_map)
        match_map = self.relu(match_map)
        del descriptor_at_anchor, descriptor_at_image
        if self.config['has_confidence']:
            # Confidence from the peak correlation response, gated by whether
            # the epipolar patch is non-degenerate (roi width > 0).
            conf_da = match_map
            conf_da = torch.nn.functional.adaptive_max_pool2d(conf_da, (1, 1))
            conf_da = conf_da.contiguous().view(kp_image_shp[0], kp_image_shp[1], -1)
            sc_factor = 1.0
            conf_da = torch.sigmoid(sc_factor * conf_da)
            conf_damp = roi_patch[:, :, :, 3] > 0.
            conf_da = conf_da * (conf_damp.float() + 0.001)
            # The anchor view trusts itself with confidence 1.
            self_confidence = torch.ones((conf_da.shape[0], 1, conf_da.shape[2])).to(conf_da.device)
            conf_da = torch.cat((self_confidence, conf_da), 1)
            conf_da = conf_da.transpose(2, 1)
            pred['confidence'] = conf_da
        else:
            pred['confidence'] = None
        ## SOFTARGMAX
        out_kp_match = integrate_tensor_2d(match_map * self.config['arg_max_weight'], True)
        ## Change from local coordinates to image coordinates
        out_kp_match /= torch.tensor([match_map.shape[3] - 1., max(match_map.shape[2] - 1., 1.)], device=out_kp_match.device)[None]
        if match_map.shape[2] == 1:
            sub_roi = (torch.tensor([0.5, 0.]).unsqueeze(0).unsqueeze(1)).to(out_kp_match.device)
        else:
            sub_roi = 0.5
        out_kp_match -= sub_roi
        out_ones = torch.ones((out_kp_match.shape[0], 1, 1)).to(out_kp_match.device)
        out_kp_match = torch.cat((out_kp_match, out_ones), 2)
        out_kp_match = out_kp_match.view(kp_image_shp[0], kp_image_shp[1], kp_image_shp[2], 3)
        ## scale the local x coordinate to match sampling frequency
        mult_0 = roi_patch[:, :, :, [3]]
        mult_1 = torch.ones_like(mult_0)
        mult_1[mult_0 == 0.] = 0.
        roi_mult = torch.cat((mult_0, mult_1, mult_1), 3)
        out_kp_match *= roi_mult
        range_kp = roi_patch[:, :, :, 3] > 0.
        pred['range_kp'] = range_kp
        ##global coordinates
        val_kp_match = ((transform_matrix @ out_kp_match.unsqueeze(4))[:, :, :, :2, :]).squeeze(4)
        pred['multiview_matches'] = val_kp_match
        del out_kp_match, transform_matrix, match_map
        ## 3d GT
        keypoints_3d_gt = unproject_ij(keypoints, depth_at_kp, intrinsic)
        pred['keypoints3d_gt'] = keypoints_3d_gt.transpose(2, 1)
        #### Triangulation
        # Build one projection matrix per view (identity for the anchor view)
        # and triangulate every keypoint track, optionally confidence-weighted.
        pose_tiled = pose[:, :, :3, :]
        intrinsic_tiled = intrinsic
        confidence = pred['confidence']
        anchor_keypoints = keypoints.unsqueeze(1)
        multiview_matches = torch.cat((anchor_keypoints, val_kp_match), 1)
        projection_mat = []
        projection_ref = []
        proj_identity = torch.tensor([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.]])
        if torch.cuda.is_available():
            proj_identity = proj_identity.cuda()
        for batch_idx in range(pose_tiled.size(0)):
            proj_ref_idx = torch.mm(intrinsic_tiled[batch_idx], proj_identity).unsqueeze(0)
            projection_ref.append(proj_ref_idx)
            projection_mat_view = []
            for j in range(sequence_length):
                proj_mat_idx = torch.mm(intrinsic_tiled[batch_idx], pose_tiled[batch_idx][j]).unsqueeze(0)
                projection_mat_view.append(proj_mat_idx)
            projection_mat_view = torch.cat(projection_mat_view, 0).unsqueeze(0)
            projection_mat.append(projection_mat_view)
        projection_mat = torch.cat(projection_mat, 0)
        projection_ref = torch.cat(projection_ref, 0).unsqueeze(1)
        proj_matrices = torch.cat([projection_ref, projection_mat], 1)
        del projection_ref, projection_mat
        if self.config['has_confidence']:
            keypoints_3d = triangulate_batch_of_points(proj_matrices, multiview_matches, confidence)
        else:
            keypoints_3d = triangulate_batch_of_points(proj_matrices, multiview_matches)
        keypoints_3d = torch.stack(keypoints_3d, 0)
        # Guard against degenerate triangulations blowing up downstream losses.
        if torch.sum(torch.isinf(keypoints_3d)) > 0:
            keypoints_3d = torch.clamp(keypoints_3d, min=-1000.0, max=1000.0)
        pred['keypoints_3d'] = keypoints_3d
        return pred
    def loss(self, pred, data):
        # Training loss is defined externally / not provided in this module.
        raise NotImplementedError
    def metrics(self):
        # Evaluation metrics are defined externally / not provided in this module.
        raise NotImplementedError
| 22,576 | 37.527304 | 157 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/gpmvs/encoder.py | import torch
import torch.nn as nn
from dvmvs.utils import freeze_batchnorm
def down_conv_layer(input_channels, output_channels, kernel_size):
    """Two conv+BN+ReLU stages; the second stage downsamples by stride 2."""
    same_pad = (kernel_size - 1) // 2
    layers = [
        nn.Conv2d(input_channels, output_channels, kernel_size,
                  padding=same_pad, stride=1, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
        nn.Conv2d(output_channels, output_channels, kernel_size,
                  padding=same_pad, stride=2, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    ]
    return nn.Sequential(*layers)
def conv_layer(input_channels, output_channels, kernel_size):
    """Resolution-preserving conv + BN + ReLU block (no bias in the conv)."""
    conv = nn.Conv2d(input_channels, output_channels, kernel_size,
                     padding=(kernel_size - 1) // 2, bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(output_channels), nn.ReLU())
def depth_layer(input_channels):
    """3x3 conv to a single channel followed by a sigmoid (output in (0, 1))."""
    head = nn.Conv2d(input_channels, 1, 3, padding=1)
    return nn.Sequential(head, nn.Sigmoid())
def refine_layer(input_channels):
    """Single-channel 3x3 refinement convolution without any activation."""
    layer = nn.Conv2d(in_channels=input_channels, out_channels=1,
                      kernel_size=3, padding=1)
    return layer
def up_conv_layer(input_channels, output_channels, kernel_size):
    """Bilinear 2x upsampling followed by conv + BN + ReLU."""
    modules = [
        nn.Upsample(scale_factor=2, mode='bilinear'),
        nn.Conv2d(input_channels, output_channels, kernel_size,
                  padding=(kernel_size - 1) // 2, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    ]
    return nn.Sequential(*modules)
def get_trainable_number(variable):
    """Return the number of scalar elements in *variable*.

    Computed as the product of its ``.shape`` entries; an empty shape
    (a 0-d tensor) yields 1, matching the multiplicative identity of the
    original loop-based implementation.
    """
    import math
    return math.prod(variable.shape)
class Encoder(nn.Module):
    """Encoder of the GPMVS depth network.

    Consumes the reference image concatenated with a 64-channel plane-sweep
    cost volume (3 + 64 = 67 input channels) and produces a five-level
    feature pyramid, returned from coarsest (conv5) to finest (conv1).
    Each ``down_conv_layer`` halves the spatial resolution.

    NOTE(review): the original last line carried dataset-extraction junk
    ("| 2,588 | ...") fused onto the code; it has been removed here.
    """

    def __init__(self):
        super(Encoder, self).__init__()
        self.conv1 = down_conv_layer(67, 128, 7)
        self.conv2 = down_conv_layer(128, 256, 5)
        self.conv3 = down_conv_layer(256, 512, 3)
        self.conv4 = down_conv_layer(512, 512, 3)
        self.conv5 = down_conv_layer(512, 512, 3)

    def forward(self, image, plane_sweep_volume):
        """Return the pyramid [conv5, conv4, conv3, conv2, conv1]."""
        x = torch.cat((image, plane_sweep_volume), 1)
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)
        return [conv5, conv4, conv3, conv2, conv1]

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(Encoder, self).train(mode)
        self.apply(freeze_batchnorm)
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/gpmvs/run-testing.py | from copy import deepcopy
import cv2
import numpy as np
import torch
from path import Path
from scipy.linalg import expm
from tqdm import tqdm
from dvmvs.baselines.gpmvs.decoder import Decoder
from dvmvs.baselines.gpmvs.encoder import Encoder
from dvmvs.baselines.gpmvs.gplayer import GPlayer
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.utils import cost_volume_fusion, pose_distance, save_results, InferenceTimer, visualize_predictions, get_warp_grid_for_cost_volume_calculation
def predict():
    """Run GPMVS depth prediction offline over pre-exported keyframe index
    files and save per-scene depth maps with `save_results`.

    For every scene, the encoder features of each keyframe are fused over
    time with a Matern-3/2 Gaussian-process state-space model (Kalman
    predict/update in closed form) before decoding the depth map.
    Requires CUDA (`torch.device('cuda')`).
    """
    predict_with_finetuned = True
    if predict_with_finetuned:
        extension = "finetuned"
    else:
        extension = "without_ft"
    input_image_width = 320
    input_image_height = 256
    print("System: GPMVS, is_finetuned = ", predict_with_finetuned)
    device = torch.device('cuda')
    # Finetuned checkpoints are plain state dicts; the original release wraps
    # them under a 'state_dict' key.
    if predict_with_finetuned:
        encoder_weights = torch.load(Path("finetuned-weights").files("*encoder*")[0])
        gp_weights = torch.load(Path("finetuned-weights").files("*gplayer*")[0])
        decoder_weights = torch.load(Path("finetuned-weights").files("*decoder*")[0])
    else:
        encoder_weights = torch.load(Path("original-weights").files("*encoder*")[0])['state_dict']
        gp_weights = torch.load(Path("original-weights").files("*gplayer*")[0])['state_dict']
        decoder_weights = torch.load(Path("original-weights").files("*decoder*")[0])['state_dict']
    encoder = Encoder()
    encoder = torch.nn.DataParallel(encoder)
    encoder.load_state_dict(encoder_weights)
    encoder.eval()
    encoder = encoder.to(device)
    decoder = Decoder()
    decoder = torch.nn.DataParallel(decoder)
    decoder.load_state_dict(decoder_weights)
    decoder.eval()
    decoder = decoder.to(device)
    # load GP values
    gplayer = GPlayer(device=device)
    gplayer.load_state_dict(gp_weights)
    gplayer.eval()
    # GPlayer stores log-parameters; exponentiate to get the actual
    # kernel magnitude, length scale and noise variance.
    gamma2 = np.exp(gp_weights['gamma2'][0].item())
    ell = np.exp(gp_weights['ell'][0].item())
    sigma2 = np.exp(gp_weights['sigma2'][0].item())
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=input_image_width,
                                                          height=input_image_height,
                                                          device=device)
    min_depth = 0.5
    max_depth = 50.0
    n_depth_levels = 64
    scale_rgb = 1.0
    mean_rgb = [81.0, 81.0, 81.0]
    std_rgb = [35.0, 35.0, 35.0]
    data_path = Path(Config.test_offline_data_path)
    if Config.test_dataset_name is None:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files())
    else:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*"))
    # NOTE(review): the [20:] slice skips the first 20 scenes — looks like a
    # leftover resume/debug hack (the dpsnet runner iterates all files, and the
    # progress print still uses the full list length); confirm before relying
    # on complete results.
    for iteration, keyframe_index_file in enumerate(keyframe_index_files[20:]):
        keyframing_type, dataset_name, scene_name, _, n_measurement_frames = keyframe_index_file.split("/")[-1].split("+")
        scene_folder = data_path / dataset_name / scene_name
        print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))
        keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")
        K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
        poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
        image_filenames = sorted((scene_folder / 'images').files("*.png"))
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
        input_filenames = []
        for image_filename in image_filenames:
            input_filenames.append(image_filename.split("/")[-1])
        # Matern-3/2 state-space model matrices (state dim 2 per latent):
        # F drives the dynamics, Pinf is the stationary covariance, h the
        # measurement vector.
        lam = np.sqrt(3) / ell
        F = np.array([[0, 1], [-lam ** 2, -2 * lam]])
        Pinf = np.array([[gamma2, 0], [0, gamma2 * lam ** 2]])
        h = np.array([[1], [0]])
        # State mean and covariance
        M = np.zeros((F.shape[0], 512 * 8 * 10))
        P = Pinf
        inference_timer = InferenceTimer()
        previous_index = None
        predictions = []
        reference_depths = []
        with torch.no_grad():
            for i in tqdm(range(0, len(keyframe_index_file_lines))):
                keyframe_index_file_line = keyframe_index_file_lines[i]
                if keyframe_index_file_line == "TRACKING LOST":
                    continue
                else:
                    # First filename on the line is the reference keyframe,
                    # the rest are its measurement frames.
                    current_input_filenames = keyframe_index_file_line.split(" ")
                    current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]
                    reference_index = current_indices[0]
                    measurement_indices = current_indices[1:]
                    reference_pose = poses[reference_index]
                    reference_image = load_image(image_filenames[reference_index])
                    reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0
                    preprocessor = PreprocessImage(K=K,
                                                   old_width=reference_image.shape[1],
                                                   old_height=reference_image.shape[0],
                                                   new_width=input_image_width,
                                                   new_height=input_image_height,
                                                   distortion_crop=0,
                                                   perform_crop=False)
                    reference_image = preprocessor.apply_rgb(image=reference_image,
                                                             scale_rgb=scale_rgb,
                                                             mean_rgb=mean_rgb,
                                                             std_rgb=std_rgb)
                    reference_depth = preprocessor.apply_depth(reference_depth)
                    reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                    reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)
                    measurement_poses_torch = []
                    measurement_images_torch = []
                    for measurement_index in measurement_indices:
                        measurement_image = load_image(image_filenames[measurement_index])
                        measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                                   scale_rgb=scale_rgb,
                                                                   mean_rgb=mean_rgb,
                                                                   std_rgb=std_rgb)
                        measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                        measurement_pose_torch = torch.from_numpy(poses[measurement_index]).float().to(device).unsqueeze(0)
                        measurement_images_torch.append(measurement_image_torch)
                        measurement_poses_torch.append(measurement_pose_torch)
                    full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)
                    inference_timer.record_start_time()
                    cost_volume = cost_volume_fusion(image1=reference_image_torch,
                                                     image2s=measurement_images_torch,
                                                     pose1=reference_pose_torch,
                                                     pose2s=measurement_poses_torch,
                                                     K=full_K_torch,
                                                     warp_grid=warp_grid,
                                                     min_depth=min_depth,
                                                     max_depth=max_depth,
                                                     n_depth_levels=n_depth_levels,
                                                     device=device,
                                                     dot_product=False)
                    conv5, conv4, conv3, conv2, conv1 = encoder(reference_image_torch, cost_volume)
                    batch, channel, height, width = conv5.size()
                    # Bottleneck features become the GP measurement y.
                    y = np.expand_dims(conv5.cpu().numpy().flatten(), axis=0)
                    if previous_index is None:
                        previous_index = measurement_index
                    # Kalman predict step over the pose distance dt.
                    dt, _, _ = pose_distance(poses[reference_index], poses[previous_index])
                    A = expm(F * dt)
                    Q = Pinf - A.dot(Pinf).dot(A.T)
                    M = A.dot(M)
                    P = A.dot(P).dot(A.T) + Q
                    # Update step
                    v = y - h.T.dot(M)
                    s = h.T.dot(P).dot(h) + sigma2
                    k = P.dot(h) / s
                    M += k.dot(v)
                    P -= k.dot(h.T).dot(P)
                    # Filtered latent mean replaces conv5 for decoding.
                    Z = torch.from_numpy(M[0]).view(batch, channel, height, width).float().to(device)
                    Z = torch.nn.functional.relu(Z)
                    prediction, _, _, _ = decoder(Z, conv4, conv3, conv2, conv1)
                    # Network predicts inverse depth in [0.02, 2] -> depth in [0.5, 50].
                    prediction = torch.clamp(prediction, min=0.02, max=2.0)
                    prediction = 1 / prediction
                    inference_timer.record_end_time_and_elapsed_time()
                    prediction = prediction.cpu().numpy().squeeze()
                    previous_index = deepcopy(reference_index)
                    reference_depths.append(reference_depth)
                    predictions.append(prediction)
                    if Config.test_visualize:
                        visualize_predictions(numpy_reference_image=reference_image,
                                              numpy_measurement_image=measurement_image,
                                              numpy_predicted_depth=prediction,
                                              normalization_mean=mean_rgb,
                                              normalization_std=std_rgb,
                                              normalization_scale=scale_rgb)
        inference_timer.print_statistics()
        system_name = "{}_{}_{}_{}_{}_gpmvs_{}".format(keyframing_type,
                                                       dataset_name,
                                                       input_image_width,
                                                       input_image_height,
                                                       n_measurement_frames,
                                                       extension)
        save_results(predictions=predictions,
                     groundtruths=reference_depths,
                     system_name=system_name,
                     scene_name=scene_name,
                     save_folder=Config.test_result_folder)
# Script entry point: run offline GPMVS depth prediction for all scenes.
if __name__ == '__main__':
    predict()
| 10,783 | 45.283262 | 153 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/gpmvs/decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from dvmvs.utils import freeze_batchnorm
def down_conv_layer(input_channels, output_channels, kernel_size):
    """conv+BN+ReLU at stride 1, then conv+BN+ReLU at stride 2 (halves H, W)."""
    same_pad = (kernel_size - 1) // 2
    stage1 = [nn.Conv2d(input_channels, output_channels, kernel_size,
                        padding=same_pad, stride=1, bias=False),
              nn.BatchNorm2d(output_channels),
              nn.ReLU()]
    stage2 = [nn.Conv2d(output_channels, output_channels, kernel_size,
                        padding=same_pad, stride=2, bias=False),
              nn.BatchNorm2d(output_channels),
              nn.ReLU()]
    return nn.Sequential(*(stage1 + stage2))
def conv_layer(input_channels, output_channels, kernel_size):
    """Resolution-preserving conv + BN + ReLU block."""
    same_pad = (kernel_size - 1) // 2
    return nn.Sequential(
        nn.Conv2d(input_channels, output_channels, kernel_size,
                  padding=same_pad, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU())
def depth_layer(input_channels):
    """Predict a single-channel map squashed to (0, 1) via sigmoid."""
    return nn.Sequential(
        nn.Conv2d(input_channels, 1, 3, padding=1),
        nn.Sigmoid(),
    )
def refine_layer(input_channels):
    """3x3 convolution down to one channel, no activation."""
    conv = nn.Conv2d(input_channels, 1, kernel_size=3, padding=1)
    return conv
def up_conv_layer(input_channels, output_channels, kernel_size):
    """2x bilinear upsampling (corner-aligned) followed by conv + BN + ReLU."""
    modules = [
        nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
        nn.Conv2d(input_channels, output_channels, kernel_size,
                  padding=(kernel_size - 1) // 2, bias=False),
        nn.BatchNorm2d(output_channels),
        nn.ReLU(),
    ]
    return nn.Sequential(*modules)
def get_trainable_number(variable):
    """Return the number of scalar elements in *variable*.

    Product of the ``.shape`` entries; an empty shape (0-d tensor) yields 1,
    matching the multiplicative identity of the original loop.
    """
    import math
    return math.prod(variable.shape)
class Decoder(nn.Module):
    """U-Net style decoder of the GPMVS network.

    Takes the (GP-fused) bottleneck conv5 plus the encoder skip features
    conv4..conv1 and predicts inverse-depth maps at four scales, finest
    first. Each `disp*` output is a sigmoid scaled by 2, i.e. in (0, 2).
    """
    def __init__(self):
        super(Decoder, self).__init__()
        self.upconv5 = up_conv_layer(512, 512, 3)
        self.iconv5 = conv_layer(1024, 512, 3)  # input upconv5 + conv4
        self.upconv4 = up_conv_layer(512, 512, 3)
        self.iconv4 = conv_layer(1024, 512, 3)  # input upconv4 + conv3
        self.disp4 = depth_layer(512)
        self.upconv3 = up_conv_layer(512, 256, 3)
        self.iconv3 = conv_layer(
            513, 256, 3)  # input upconv3 + conv2 + disp4 = 256 + 256 + 1 = 513
        self.disp3 = depth_layer(256)
        self.upconv2 = up_conv_layer(256, 128, 3)
        self.iconv2 = conv_layer(
            257, 128, 3)  # input upconv2 + conv1 + disp3 = 128 + 128 + 1 = 257
        self.disp2 = depth_layer(128)
        self.upconv1 = up_conv_layer(128, 64, 3)
        self.iconv1 = conv_layer(65, 64,
                                 3)  # input upconv1 + disp2 = 64 + 1 = 65
        self.disp1 = depth_layer(64)
    def forward(self, conv5, conv4, conv3, conv2, conv1):
        """Return [disp1, disp2, disp3, disp4] (finest resolution first).

        Each coarser prediction is upsampled and fed into the next-finer
        level together with the corresponding skip features.
        """
        upconv5 = self.upconv5(conv5)
        iconv5 = self.iconv5(torch.cat((upconv5, conv4), 1))
        upconv4 = self.upconv4(iconv5)
        iconv4 = self.iconv4(torch.cat((upconv4, conv3), 1))
        disp4 = 2.0 * self.disp4(iconv4)
        udisp4 = F.interpolate(disp4, scale_factor=2)
        upconv3 = self.upconv3(iconv4)
        iconv3 = self.iconv3(torch.cat((upconv3, conv2, udisp4), 1))
        disp3 = 2.0 * self.disp3(iconv3)
        udisp3 = F.interpolate(disp3, scale_factor=2)
        upconv2 = self.upconv2(iconv3)
        iconv2 = self.iconv2(torch.cat((upconv2, conv1, udisp3), 1))
        disp2 = 2.0 * self.disp2(iconv2)
        udisp2 = F.interpolate(disp2, scale_factor=2)
        upconv1 = self.upconv1(iconv2)
        iconv1 = self.iconv1(torch.cat((upconv1, udisp2), 1))
        disp1 = 2.0 * self.disp1(iconv1)
        return [disp1, disp2, disp3, disp4]
    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(Decoder, self).train(mode)
        self.apply(freeze_batchnorm)
| 3,920 | 28.044444 | 80 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/gpmvs/gplayer.py | import math
import torch
from dvmvs.utils import freeze_batchnorm
class GPlayer(torch.nn.Module):
    """Gaussian-process fusion layer over a sequence of latent encodings.

    Holds the log-parameters of a Matern-3/2 covariance (magnitude gamma2,
    length scale ell) plus log observation noise sigma2, and performs
    batched GP regression over the pose-distance matrix D.
    """
    def __init__(self, device):
        super(GPlayer, self).__init__()
        # Stored in log-space; exponentiated in forward() to stay positive.
        self.gamma2 = torch.nn.Parameter(torch.randn(1).to(device).float(), requires_grad=True)
        self.ell = torch.nn.Parameter(torch.randn(1).to(device).float(), requires_grad=True)
        self.sigma2 = torch.nn.Parameter(torch.randn(1).to(device).float(), requires_grad=True)
        self.device = device
    def forward(self, D, Y):
        """
        :param D: Distance matrix
        :param Y: Stacked outputs from encoder
        :return: Z: transformed latent space
        """
        # Support for these operations on Half precision is low at the moment, handle everything in Float precision
        batch, latents, channel, height, width = Y.size()
        Y = Y.view(batch, latents, -1).float()
        D = D.to(self.device).float()
        # MATERN CLASS OF COVARIANCE FUNCTION
        # ell > 0, gamma2 > 0, sigma2 > 0 : EXPONENTIATE THEM !!!
        K = torch.exp(self.gamma2) * (1 + math.sqrt(3) * D / torch.exp(self.ell)) * torch.exp(-math.sqrt(3) * D / torch.exp(self.ell))
        I = torch.eye(latents, device=self.device, dtype=torch.float32).expand(batch, latents, latents)
        # GP posterior mean: Z = K (K + sigma2 I)^-1 Y, batched over B.
        C = K + torch.exp(self.sigma2) * I
        Cinv = C.inverse()
        Z = K.bmm(Cinv).bmm(Y)
        Z = torch.nn.functional.relu(Z)
        return Z
    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(GPlayer, self).train(mode)
        self.apply(freeze_batchnorm)
| 1,637 | 37.093023 | 134 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/dpsnet/run-testing.py | import cv2
import numpy as np
import torch
from path import Path
from tqdm import tqdm
from dvmvs.baselines.dpsnet.dpsnet import PSNet
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.utils import save_results, InferenceTimer, visualize_predictions
def predict():
    """Run DPSNet depth prediction offline over pre-exported keyframe index
    files and save per-scene results with `save_results`.

    For every keyframe line, loads the reference frame plus its measurement
    frames, builds relative poses (measurement^-1 @ reference), and feeds
    everything through PSNet. Requires CUDA (`torch.device('cuda')`).
    """
    predict_with_finetuned = True
    if predict_with_finetuned:
        extension = "finetuned"
    else:
        extension = "without_ft"
    input_image_width = 320
    input_image_height = 240
    print("System: DPSNET, is_finetuned = ", predict_with_finetuned)
    device = torch.device('cuda')
    dpsnet = PSNet(64, 0.5)
    # Finetuned checkpoint is a plain state dict; the original release wraps
    # it under a 'state_dict' key.
    if predict_with_finetuned:
        weights = torch.load(Path("finetuned-weights").files("*dpsnet*")[0])
    else:
        weights = torch.load(Path("original-weights").files("*dpsnet*")[0])['state_dict']
    dpsnet.load_state_dict(weights)
    dpsnet = dpsnet.to(device)
    dpsnet.eval()
    scale_rgb = 255.0
    mean_rgb = [0.5, 0.5, 0.5]
    std_rgb = [0.5, 0.5, 0.5]
    data_path = Path(Config.test_offline_data_path)
    if Config.test_dataset_name is None:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files())
    else:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*"))
    for iteration, keyframe_index_file in enumerate(keyframe_index_files):
        keyframing_type, dataset_name, scene_name, _, n_measurement_frames = keyframe_index_file.split("/")[-1].split("+")
        scene_folder = data_path / dataset_name / scene_name
        print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))
        keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")
        K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
        poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
        image_filenames = sorted((scene_folder / 'images').files("*.png"))
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
        input_filenames = []
        for image_filename in image_filenames:
            input_filenames.append(image_filename.split("/")[-1])
        inference_timer = InferenceTimer()
        predictions = []
        reference_depths = []
        with torch.no_grad():
            for i in tqdm(range(0, len(keyframe_index_file_lines))):
                keyframe_index_file_line = keyframe_index_file_lines[i]
                if keyframe_index_file_line == "TRACKING LOST":
                    continue
                else:
                    # First filename on the line is the reference keyframe,
                    # the rest are its measurement frames.
                    current_input_filenames = keyframe_index_file_line.split(" ")
                    current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]
                    reference_index = current_indices[0]
                    measurement_indices = current_indices[1:]
                    reference_pose = poses[reference_index]
                    reference_image = load_image(image_filenames[reference_index])
                    reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0
                    preprocessor = PreprocessImage(K=K,
                                                   old_width=reference_image.shape[1],
                                                   old_height=reference_image.shape[0],
                                                   new_width=input_image_width,
                                                   new_height=input_image_height,
                                                   distortion_crop=0,
                                                   perform_crop=False)
                    reference_image = preprocessor.apply_rgb(image=reference_image,
                                                             scale_rgb=scale_rgb,
                                                             mean_rgb=mean_rgb,
                                                             std_rgb=std_rgb)
                    reference_depth = preprocessor.apply_depth(reference_depth)
                    reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                    measurement_poses_torch = []
                    measurement_images_torch = []
                    for measurement_index in measurement_indices:
                        measurement_image = load_image(image_filenames[measurement_index])
                        measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                                   scale_rgb=scale_rgb,
                                                                   mean_rgb=mean_rgb,
                                                                   std_rgb=std_rgb)
                        measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                        # Relative pose reference -> measurement as a 3x4 [R|t].
                        measurement_pose = poses[measurement_index]
                        measurement_pose = (np.linalg.inv(measurement_pose) @ reference_pose)[0:3, :]
                        measurement_pose_torch = torch.from_numpy(measurement_pose).float().to(device).unsqueeze(0)
                        measurement_poses_torch.append(measurement_pose_torch)
                        measurement_images_torch.append(measurement_image_torch)
                    camera_k = preprocessor.get_updated_intrinsics()
                    camera_k_inv = np.linalg.inv(camera_k)
                    camera_k_torch = torch.from_numpy(camera_k).float().to(device).unsqueeze(0)
                    camera_k_inv_torch = torch.from_numpy(camera_k_inv).float().to(device).unsqueeze(0)
                    inference_timer.record_start_time()
                    _, prediction = dpsnet(reference_image_torch,
                                           measurement_images_torch,
                                           measurement_poses_torch,
                                           camera_k_torch,
                                           camera_k_inv_torch)
                    inference_timer.record_end_time_and_elapsed_time()
                    prediction = prediction.cpu().numpy().squeeze()
                    reference_depths.append(reference_depth)
                    predictions.append(prediction)
                    if Config.test_visualize:
                        visualize_predictions(numpy_reference_image=reference_image,
                                              numpy_measurement_image=measurement_image,
                                              numpy_predicted_depth=prediction,
                                              normalization_mean=mean_rgb,
                                              normalization_std=std_rgb,
                                              normalization_scale=scale_rgb)
        inference_timer.print_statistics()
        system_name = "{}_{}_{}_{}_{}_dpsnet_{}".format(keyframing_type,
                                                        dataset_name,
                                                        input_image_width,
                                                        input_image_height,
                                                        n_measurement_frames,
                                                        extension)
        save_results(predictions=predictions,
                     groundtruths=reference_depths,
                     system_name=system_name,
                     scene_name=scene_name,
                     save_folder=Config.test_result_folder)
# Script entry point: run offline DPSNet depth prediction for all scenes.
if __name__ == '__main__':
    predict()
| 7,607 | 46.849057 | 138 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/baselines/dpsnet/dpsnet.py | from __future__ import print_function
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.utils.data
from torch.autograd import Variable
from dvmvs.utils import freeze_batchnorm
pixel_coords = None
def set_id_grid(depth):
    """(Re)build the cached homogeneous pixel-coordinate grid for a depth map.

    Stores a [1, 3, H, W] tensor of (u, v, 1) coordinates in the module-level
    ``pixel_coords`` cache, matching the dtype/device of *depth*.
    """
    global pixel_coords
    b, h, w = depth.size()
    # Column (x) and row (y) index grids, each expanded to [1, H, W].
    j_range = Variable(torch.arange(0, w).view(1, 1, w).expand(1, h, w)).type_as(depth)
    i_range = Variable(torch.arange(0, h).view(1, h, 1).expand(1, h, w)).type_as(depth)
    ones = Variable(torch.ones(1, h, w)).type_as(depth)
    pixel_coords = torch.stack((j_range, i_range, ones), dim=1)  # [1, 3, H, W]
def check_sizes(input, input_name, expected):
    """Assert that *input* matches the size spec *expected*.

    *expected* is a string like 'B34': its length pins the number of
    dimensions, digit characters pin the size of the corresponding
    dimension, and letters act as wildcards.
    """
    checks = [input.ndimension() == len(expected)]
    for dim, spec in enumerate(expected):
        if spec.isdigit():
            checks.append(input.size(dim) == int(spec))
    assert all(checks), "wrong size for {}, expected {}, got {}".format(input_name, 'x'.join(expected), list(input.size()))
def pixel2cam(depth, intrinsics_inv):
    """Transform coordinates in the pixel frame to the camera frame.
    Args:
        depth: depth maps -- [B, H, W]
        intrinsics_inv: intrinsics_inv matrix for each element of batch -- [B, 3, 3]
    Returns:
        array of (u,v,1) cam coordinates -- [B, 3, H, W]
    """
    global pixel_coords
    b, h, w = depth.size()
    # Lazily (re)build the cached grid when it is absent or too small.
    if pixel_coords is None or pixel_coords.size(2) < h:
        set_id_grid(depth)
    grid = pixel_coords[:, :, :h, :w].expand(b, 3, h, w)
    grid = grid.contiguous().view(b, 3, -1).cuda()  # [B, 3, H*W]
    rays = intrinsics_inv.bmm(grid).view(b, 3, h, w)
    return rays * depth.unsqueeze(1)
def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):
    """Transform coordinates in the camera frame to the pixel frame.
    Args:
        cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 3, H, W]
        proj_c2p_rot: rotation matrix of cameras -- [B, 3, 3] (None means identity)
        proj_c2p_tr: translation vectors of cameras -- [B, 3, 1] (None means zero)
        padding_mode: 'zeros' marks out-of-view points with coordinate 2 so
            grid_sample pads them with zeros instead of blending
    Returns:
        array of [-1,1] coordinates -- [B, H, W, 2]
    """
    b, _, h, w = cam_coords.size()
    cam_coords_flat = cam_coords.view(b, 3, -1)  # [B, 3, H*W]
    if proj_c2p_rot is not None:
        pcoords = proj_c2p_rot.bmm(cam_coords_flat)
    else:
        pcoords = cam_coords_flat
    if proj_c2p_tr is not None:
        pcoords = pcoords + proj_c2p_tr  # [B, 3, H*W]
    X = pcoords[:, 0]
    Y = pcoords[:, 1]
    Z = pcoords[:, 2].clamp(min=1e-3)  # keep depth positive; avoids divide blow-up
    X_norm = 2 * (X / Z) / (w - 1) - 1  # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]
    Y_norm = 2 * (Y / Z) / (h - 1) - 1  # Idem [B, H*W]
    if padding_mode == 'zeros':
        # Use `|` (logical or on bool tensors) instead of the deprecated `+`.
        X_mask = ((X_norm > 1) | (X_norm < -1)).detach()
        X_norm[X_mask] = 2  # make sure that no point in warped image is a combinaison of im and gray
        Y_mask = ((Y_norm > 1) | (Y_norm < -1)).detach()
        Y_norm[Y_mask] = 2
    pixel_coords = torch.stack([X_norm, Y_norm], dim=2)  # [B, H*W, 2]
    return pixel_coords.view(b, h, w, 2)
def inverse_warp(feat, depth, pose, intrinsics, intrinsics_inv, padding_mode='zeros'):
    """
    Inverse warp a source image to the target image plane.
    Args:
        feat: the source feature (where to sample pixels) -- [B, CH, H, W]
        depth: depth map of the target image -- [B, H, W]
        pose: relative pose from target to source as a 3x4 [R|t] matrix -- [B, 3, 4]
        intrinsics: camera intrinsic matrix -- [B, 3, 3]
        intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]
    Returns:
        Source image warped to the target image plane
    """
    check_sizes(depth, 'depth', 'BHW')
    check_sizes(pose, 'pose', 'B34')
    check_sizes(intrinsics, 'intrinsics', 'B33')
    # Fixed label (was 'intrinsics') so a size failure names the right argument.
    check_sizes(intrinsics_inv, 'intrinsics_inv', 'B33')
    assert (intrinsics_inv.size() == intrinsics.size())
    batch_size, _, feat_height, feat_width = feat.size()
    # Back-project target pixels to 3D rays scaled by depth.
    cam_coords = pixel2cam(depth, intrinsics_inv)
    pose_mat = pose
    pose_mat = pose_mat.cuda()
    # Get projection matrix for tgt camera frame to source pixel frame
    proj_cam_to_src_pixel = intrinsics.bmm(pose_mat)  # [B, 3, 4]
    src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:, :, :3], proj_cam_to_src_pixel[:, :, -1:], padding_mode)  # [B,H,W,2]
    projected_feat = torch.nn.functional.grid_sample(feat, src_pixel_coords, padding_mode=padding_mode, align_corners=True)
    return projected_feat
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
    """2-D convolution + batch norm, no bias and no activation.

    When dilation > 1 the padding equals the dilation (so a 3x3 kernel keeps
    the spatial size); otherwise the explicit *pad* is used.
    """
    padding = dilation if dilation > 1 else pad
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                     stride=stride, padding=padding, dilation=dilation,
                     bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(out_planes))
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
    """Bias-free 3D convolution followed by 3D batch normalization."""
    layers = [
        nn.Conv3d(in_planes, out_planes,
                  kernel_size=kernel_size,
                  padding=pad,
                  stride=stride,
                  bias=False),
        nn.BatchNorm3d(out_planes),
    ]
    return nn.Sequential(*layers)
class BasicBlock(nn.Module):
    """Residual block of two conv+bn units.

    The first unit applies the stride and a ReLU; the second stays linear so
    the skip connection is added before any final activation. `downsample`
    (may be None) projects the input when its shape differs from the output.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride, downsample, pad, dilation):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Sequential(
            convbn(inplanes, planes, 3, stride, pad, dilation),
            nn.ReLU(inplace=True),
        )
        self.conv2 = convbn(planes, planes, 3, 1, pad, dilation)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.conv1(x))
        return out + shortcut
class matchshifted(nn.Module):
    """Concatenate left/right feature maps after shifting them by `shift` columns.

    Builds one disparity slice of a stereo matching volume: the left map is
    cropped on the left by `shift` columns and the right map on the right, then
    both are re-padded to the original width and stacked along channels.
    """
    def __init__(self):
        super(matchshifted, self).__init__()

    def forward(self, left, right, shift):
        batch, filters, height, width = left.size()
        # Keep columns [shift, width) of `left`, then left-pad back to full width.
        shifted_left = F.pad(torch.index_select(left, 3, Variable(torch.LongTensor([i for i in range(shift, width)])).cuda()), (shift, 0, 0, 0))
        # Keep columns [0, width - shift) of `right`, then left-pad to full width.
        shifted_right = F.pad(torch.index_select(right, 3, Variable(torch.LongTensor([i for i in range(width - shift)])).cuda()), (shift, 0, 0, 0))
        # Stack both views along channels and add a singleton disparity axis:
        # output shape [B, 2*C, 1, H, W].
        out = torch.cat((shifted_left, shifted_right), 1).view(batch, filters * 2, 1, height, width)
        return out
class disparityregression(nn.Module):
    """Soft-argmin disparity regression: expected disparity under a probability volume."""
    def __init__(self, maxdisp):
        super(disparityregression, self).__init__()
        # Constant index ramp [0, maxdisp), shaped [1, maxdisp, 1, 1] so it
        # broadcasts over batch and spatial dimensions.
        self.disp = Variable(torch.Tensor(np.reshape(np.array(range(maxdisp)), [1, maxdisp, 1, 1])).cuda(), requires_grad=False)

    def forward(self, x):
        # x: probability volume [B, maxdisp, H, W] (expected to sum to 1 over
        # the disparity axis). Returns the expectation over disparities [B, H, W].
        disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])
        out = torch.sum(x * disp, 1)
        return out
class feature_extraction(nn.Module):
    """Feature extractor with a spatial-pyramid-pooling head (PSMNet-style).

    Produces a 32-channel feature map at 1/4 of the input resolution by fusing
    mid-level features (layer2), dilated high-level features (layer4) and four
    average-pooled branches upsampled back to the same size.
    """
    def __init__(self):
        super(feature_extraction, self).__init__()
        self.inplanes = 32
        # Stem: first conv has stride 2 (1/2 resolution), then two 3x3 convs.
        self.firstconv = nn.Sequential(convbn(3, 32, 3, 2, 1, 1),
                                       nn.ReLU(inplace=True),
                                       convbn(32, 32, 3, 1, 1, 1),
                                       nn.ReLU(inplace=True),
                                       convbn(32, 32, 3, 1, 1, 1),
                                       nn.ReLU(inplace=True))
        # Residual stages; layer2 downsamples once more (1/4 resolution overall),
        # layer4 uses dilation 2 instead of further striding.
        self.layer1 = self._make_layer(BasicBlock, 32, 3, 1, 1, 1)
        self.layer2 = self._make_layer(BasicBlock, 64, 16, 2, 1, 1)
        self.layer3 = self._make_layer(BasicBlock, 128, 3, 1, 1, 1)
        self.layer4 = self._make_layer(BasicBlock, 128, 3, 1, 1, 2)
        # SPP branches: average pooling at four window sizes, each reduced to
        # 32 channels with a 1x1 conv.
        self.branch1 = nn.Sequential(nn.AvgPool2d((32, 32), stride=(32, 32)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.branch2 = nn.Sequential(nn.AvgPool2d((16, 16), stride=(16, 16)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.branch3 = nn.Sequential(nn.AvgPool2d((8, 8), stride=(8, 8)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        self.branch4 = nn.Sequential(nn.AvgPool2d((4, 4), stride=(4, 4)),
                                     convbn(128, 32, 1, 1, 0, 1),
                                     nn.ReLU(inplace=True))
        # Fuse concatenated features (64 + 128 + 4*32 = 320 channels) down to 32.
        self.lastconv = nn.Sequential(convbn(320, 128, 3, 1, 1, 1),
                                      nn.ReLU(inplace=True),
                                      nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))

    def _make_layer(self, block, planes, blocks, stride, pad, dilation):
        """Stack `blocks` residual blocks; adds a 1x1 projection shortcut when
        the stride or channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion), )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, pad, dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, 1, None, pad, dilation))
        return nn.Sequential(*layers)

    def forward(self, x):
        output = self.firstconv(x)
        output = self.layer1(output)
        output_raw = self.layer2(output)    # mid-level features kept for the final concat
        output = self.layer3(output_raw)
        output_skip = self.layer4(output)   # dilated high-level features
        # Each pooled branch is bilinearly upsampled back to output_skip's size.
        output_branch1 = self.branch1(output_skip)
        output_branch1 = F.interpolate(output_branch1, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear', align_corners=False)
        output_branch2 = self.branch2(output_skip)
        output_branch2 = F.interpolate(output_branch2, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear', align_corners=False)
        output_branch3 = self.branch3(output_skip)
        output_branch3 = F.interpolate(output_branch3, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear', align_corners=False)
        output_branch4 = self.branch4(output_skip)
        output_branch4 = F.interpolate(output_branch4, (output_skip.size()[2], output_skip.size()[3]), mode='bilinear', align_corners=False)
        output_feature = torch.cat((output_raw, output_skip, output_branch4, output_branch3, output_branch2, output_branch1), 1)
        output_feature = self.lastconv(output_feature)
        return output_feature
def convtext(in_planes, out_planes, kernel_size=3, stride=1, dilation=1):
    """Dilated 2D convolution + LeakyReLU(0.1).

    Padding is chosen as ((kernel_size - 1) * dilation) // 2 so the spatial
    size is preserved at stride 1 for any dilation.
    """
    same_pad = ((kernel_size - 1) * dilation) // 2
    conv = nn.Conv2d(in_planes, out_planes,
                     kernel_size=kernel_size,
                     stride=stride,
                     dilation=dilation,
                     padding=same_pad,
                     bias=False)
    return nn.Sequential(conv, nn.LeakyReLU(0.1, inplace=True))
class PSNet(nn.Module):
    """Plane-sweep stereo network.

    Sweeps `nlabel` fronto-parallel depth planes: target features are warped
    onto the reference view for each plane, the resulting cost volume is
    regularized with residual 3D convolutions, refined per plane by a 2D
    context network, and depth is regressed via soft-argmin. The plane for
    label i corresponds to depth mindepth * nlabel / (i + eps).
    """
    def __init__(self, nlabel, mindepth):
        super(PSNet, self).__init__()
        self.nlabel = nlabel      # number of swept depth planes
        self.mindepth = mindepth  # minimum depth of the sweep
        self.feature_extraction = feature_extraction()
        # Context network: refines one cost slice at a time using the
        # reference features (32 feature + 1 cost = 33 input channels).
        self.convs = nn.Sequential(
            convtext(33, 128, 3, 1, 1),
            convtext(128, 128, 3, 1, 2),
            convtext(128, 128, 3, 1, 4),
            convtext(128, 96, 3, 1, 8),
            convtext(96, 64, 3, 1, 16),
            convtext(64, 32, 3, 1, 1),
            convtext(32, 1, 3, 1, 1)
        )
        # 3D regularization: dres0 compresses the 64-channel volume, then four
        # residual conv blocks (added to their input in forward).
        self.dres0 = nn.Sequential(convbn_3d(64, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True),
                                   convbn_3d(32, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True))
        self.dres1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True),
                                   convbn_3d(32, 32, 3, 1, 1))
        self.dres2 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True),
                                   convbn_3d(32, 32, 3, 1, 1))
        self.dres3 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True),
                                   convbn_3d(32, 32, 3, 1, 1))
        self.dres4 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                   nn.ReLU(inplace=True),
                                   convbn_3d(32, 32, 3, 1, 1))
        # Final 3D head producing one cost value per depth plane.
        self.classify = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
                                      nn.ReLU(inplace=True),
                                      nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1, bias=False))
        # He-style initialization for conv weights; BN scale 1, bias 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def init_weights(self):
        """Xavier re-initialization for 2D (de)conv layers (optional, not called here)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                nn.init.xavier_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, ref, targets, pose, intrinsics, intrinsics_inv):
        """Predict depth for `ref` from a list of `targets` with known relative poses.

        Returns (depth0, depth): initial and context-refined depth maps, both
        at full input resolution with a singleton channel dimension.
        """
        # Features are computed at 1/4 resolution, so scale the intrinsics
        # accordingly. For the inverse intrinsics only the top-left 2x2 block
        # scales (the -c/f terms of the last column are scale-invariant).
        intrinsics4 = intrinsics.clone()
        intrinsics_inv4 = intrinsics_inv.clone()
        intrinsics4[:, :2, :] = intrinsics4[:, :2, :] / 4
        intrinsics_inv4[:, :2, :2] = intrinsics_inv4[:, :2, :2] * 4
        refimg_fea = self.feature_extraction(ref)
        # Constant numerator so that plane i maps to depth mindepth*nlabel/(i+eps).
        disp2depth = Variable(torch.ones(refimg_fea.size(0), refimg_fea.size(2), refimg_fea.size(3))).cuda() * self.mindepth * self.nlabel
        for j, target in enumerate(targets):
            # Cost volume: [B, 2*C, nlabel, H/4, W/4] — reference features
            # concatenated with target features warped at each plane depth.
            cost = Variable(
                torch.FloatTensor(refimg_fea.size()[0], refimg_fea.size()[1] * 2, self.nlabel, refimg_fea.size()[2], refimg_fea.size()[3]).zero_()).cuda()
            targetimg_fea = self.feature_extraction(target)
            for i in range(self.nlabel):
                depth = torch.div(disp2depth, i + 1e-16)
                targetimg_fea_t = inverse_warp(targetimg_fea, depth, pose[j], intrinsics4, intrinsics_inv4)
                cost[:, :refimg_fea.size()[1], i, :, :] = refimg_fea
                cost[:, refimg_fea.size()[1]:, i, :, :] = targetimg_fea_t
            cost = cost.contiguous()
            # 3D regularization with residual connections.
            cost0 = self.dres0(cost)
            cost0 = self.dres1(cost0) + cost0
            cost0 = self.dres2(cost0) + cost0
            cost0 = self.dres3(cost0) + cost0
            cost0 = self.dres4(cost0) + cost0
            cost0 = self.classify(cost0)
            # Average costs over all target views.
            if j == 0:
                costs = cost0
            else:
                costs = costs + cost0
        costs = costs / len(targets)
        # Per-plane residual refinement with the 2D context network.
        costss = Variable(torch.FloatTensor(refimg_fea.size()[0], 1, self.nlabel, refimg_fea.size()[2], refimg_fea.size()[3]).zero_()).cuda()
        for i in range(self.nlabel):
            costt = costs[:, :, i, :, :]
            costss[:, :, i, :, :] = self.convs(torch.cat([refimg_fea, costt], 1)) + costt
        # Initial prediction from the unrefined volume (upsampled to full res).
        costs = F.interpolate(costs, [self.nlabel, ref.size()[2], ref.size()[3]], mode='trilinear', align_corners=False)
        costs = torch.squeeze(costs, 1)
        pred0 = F.softmax(costs, dim=1)
        pred0 = disparityregression(self.nlabel)(pred0)
        # Convert the regressed plane index back to metric depth.
        depth0 = self.mindepth * self.nlabel / (pred0.unsqueeze(1) + 1e-16)
        # Refined prediction from the context-refined volume.
        costss = F.interpolate(costss, [self.nlabel, ref.size()[2], ref.size()[3]], mode='trilinear', align_corners=False)
        costss = torch.squeeze(costss, 1)
        pred = F.softmax(costss, dim=1)
        pred = disparityregression(self.nlabel)(pred)
        depth = self.mindepth * self.nlabel / (pred.unsqueeze(1) + 1e-16)
        return depth0, depth

    def train(self, mode=True):
        """
        Override the default train() to freeze the BN parameters
        """
        super(PSNet, self).train(mode)
        # freeze_batchnorm is defined elsewhere in the project; presumably it
        # puts BN modules into eval mode -- verify at its definition.
        self.apply(freeze_batchnorm)
| 16,381 | 40.684478 | 157 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/pairnet/run-training.py | import datetime
import itertools
import os
import numpy as np
from path import Path
from tensorboardX import SummaryWriter
from torch.backends import cudnn
from torch.utils.data import DataLoader
from dvmvs.dataset_loader import MVSDataset
from dvmvs.losses import LossMeter, update_losses
from dvmvs.pairnet.model import *
from dvmvs.train import train
from dvmvs.utils import zip_code, print_number_of_trainable_parameters, calculate_cost_volume_by_warping
class TrainingHyperparameters:
    """Hyperparameters for pairnet training.

    NOTE: the two Config assignments below mutate global configuration as a
    side effect of defining this class (i.e. at import time).
    """
    Config.train_subsequence_length = 2
    Config.train_predict_two_way = True
    batch_size = 14
    learning_rate = 1e-4
    momentum = 0.9       # Adam beta1
    beta = 0.999         # Adam beta2
    weight_decay = 0
    # Alternative loss types (see dvmvs.losses.update_losses):
    # loss_type = "Huber"
    # loss_type = "L1"
    loss_type = "L1-inv"
    # loss_type = "L1-rel"
    # Epochs during which only the non-backbone modules are optimized (see main()).
    finetune_epochs = 2
    use_augmentation = True
    use_checkpoint = False
# Features used for cost-volume computation are at half resolution.
scaling = 0.5
# Build a homogeneous pixel-coordinate grid (x, y, 1) at the scaled size...
x = np.linspace(0, Config.train_image_width * scaling - 1, num=int(Config.train_image_width * scaling))
y = np.linspace(0, Config.train_image_height * scaling - 1, num=int(Config.train_image_height * scaling))
ones = np.ones(shape=(int(Config.train_image_height * scaling), int(Config.train_image_width * scaling)))
x_grid, y_grid = np.meshgrid(x, y)
warp_grid = np.stack((x_grid, y_grid, ones), axis=-1)
warp_grid = torch.from_numpy(warp_grid).float()
# ...then flatten to a [3, H*W] matrix of homogeneous coordinates on the GPU,
# shared by all calls to calculate_cost_volume_by_warping.
warp_grid = warp_grid.view(-1, 3).t().cuda()
def main():
    """Train the pairnet modules in two phases.

    Phase 1 (finetune_epochs): optimize only feature shrinker + cost-volume
    encoder/decoder, keeping the pretrained feature extractor frozen.
    Phase 2 (remaining epochs): optimize all four modules jointly.
    """
    # set the manual seed for reproducibility
    torch.manual_seed(Config.train_seed)
    # create the directory for this run of the training
    run_directory = os.path.join(Config.train_run_directory, datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    os.mkdir(run_directory)
    # zip every code file
    zip_code(run_directory)
    summary_writer = SummaryWriter(run_directory)
    print("=> fetching scenes in '{}'".format(Config.dataset))
    # Training set uses geometric scale augmentation; validation does not.
    train_set = MVSDataset(
        root=Config.dataset,
        seed=Config.train_seed,
        split="TRAINING",
        subsequence_length=Config.train_subsequence_length,
        scale_rgb=255.0,
        mean_rgb=[0.485, 0.456, 0.406],
        std_rgb=[0.229, 0.224, 0.225],
        geometric_scale_augmentation=True
    )
    val_set = MVSDataset(
        root=Config.dataset,
        seed=Config.train_seed,
        split="VALIDATION",
        subsequence_length=Config.train_subsequence_length,
        scale_rgb=255.0,
        mean_rgb=[0.485, 0.456, 0.406],
        std_rgb=[0.229, 0.224, 0.225]
    )
    print('{} samples found in {} train scenes'.format(len(train_set), len(train_set.scenes)))
    print('{} samples found in {} valid scenes'.format(len(val_set), len(val_set.scenes)))
    train_loader = DataLoader(dataset=train_set,
                              batch_size=TrainingHyperparameters.batch_size,
                              shuffle=True,
                              num_workers=Config.train_data_pipeline_workers,
                              pin_memory=True,
                              drop_last=True)
    val_loader = DataLoader(dataset=val_set,
                            batch_size=TrainingHyperparameters.batch_size,
                            shuffle=False,
                            num_workers=Config.train_data_pipeline_workers,
                            pin_memory=True,
                            drop_last=True)
    # The four trainable modules; passed around as a list in module order.
    feature_extractor = FeatureExtractor()
    feature_shrinker = FeatureShrinker()
    cost_volume_encoder = CostVolumeEncoder()
    cost_volume_decoder = CostVolumeDecoder()
    feature_extractor = feature_extractor.cuda()
    feature_shrinker = feature_shrinker.cuda()
    cost_volume_encoder = cost_volume_encoder.cuda()
    cost_volume_decoder = cost_volume_decoder.cuda()
    model = [feature_extractor, feature_shrinker, cost_volume_encoder, cost_volume_decoder]
    # Optionally resume from per-module checkpoints named module_<i>* in the cwd.
    if TrainingHyperparameters.use_checkpoint:
        for i in range(len(model)):
            try:
                checkpoint = Path(".").files("module_" + str(i) + "*")[0]
                weights = torch.load(checkpoint)
                model[i].load_state_dict(weights)
                print("Loaded weights for", checkpoint)
            except Exception as e:
                print(e)
                print("Skipping...")
    cudnn.benchmark = True
    # One best-loss slot per module checkpoint (see dvmvs.train.train).
    best_loss = [np.inf, np.inf, np.inf, np.inf]
    # TRAIN MY PARTS
    parameters = itertools.chain(feature_shrinker.parameters(),
                                 cost_volume_encoder.parameters(),
                                 cost_volume_decoder.parameters())
    optimizer = torch.optim.Adam(parameters,
                                 lr=TrainingHyperparameters.learning_rate,
                                 betas=(TrainingHyperparameters.momentum, TrainingHyperparameters.beta),
                                 weight_decay=TrainingHyperparameters.weight_decay)
    print_number_of_trainable_parameters(optimizer)
    for epoch in range(TrainingHyperparameters.finetune_epochs):
        print("\n\nEPOCH:", epoch)
        train(train_loader=train_loader,
              val_loader=val_loader,
              model=model,
              optimizer=optimizer,
              summary_writer=summary_writer,
              epoch=epoch,
              best_loss=best_loss,
              run_directory=run_directory,
              forward_pass_function=forward_pass)
    # TRAIN EVERYTHING
    parameters = itertools.chain(feature_extractor.parameters(),
                                 feature_shrinker.parameters(),
                                 cost_volume_encoder.parameters(),
                                 cost_volume_decoder.parameters())
    optimizer = torch.optim.Adam(parameters,
                                 lr=TrainingHyperparameters.learning_rate,
                                 betas=(TrainingHyperparameters.momentum, TrainingHyperparameters.beta),
                                 weight_decay=TrainingHyperparameters.weight_decay)
    print_number_of_trainable_parameters(optimizer)
    for epoch in range(TrainingHyperparameters.finetune_epochs, Config.train_epochs):
        print("\n\nEPOCH:", epoch)
        train(train_loader=train_loader,
              val_loader=val_loader,
              model=model,
              optimizer=optimizer,
              summary_writer=summary_writer,
              epoch=epoch,
              best_loss=best_loss,
              run_directory=run_directory,
              forward_pass_function=forward_pass)
def forward_pass(images, depths, poses, K, model, is_training):
    """One training/validation forward pass over a subsequence of frames.

    For each adjacent frame pair (optionally in both directions when
    Config.train_predict_two_way is set), builds a cost volume by warping the
    second frame's half-resolution features onto the first, predicts depth at
    five scales and accumulates the losses.

    Returns: (l1_meter, huber_meter, l1_inv_meter, l1_rel_meter,
    optimizer_loss, predictions, predictions_names).
    """
    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    cost_volume_decoder = model[3]
    # Scale intrinsics to the half-resolution feature maps (module-level `scaling`).
    # NOTE(review): this modifies the caller's K tensor in place.
    K[:, 0:2, :] = K[:, 0:2, :] * scaling
    K = K.cuda()
    images_cuda = []
    depths_cuda = []
    poses_cuda = []
    feature_halfs = []
    feature_quarters = []
    feature_one_eights = []
    feature_one_sixteens = []
    # Extract image features
    for i in range(0, len(images)):
        images_cuda.append(images[i].cuda())
        poses_cuda.append(poses[i].cuda())
        depths_cuda.append(depths[i].cuda())
        feature_half, feature_quarter, feature_one_eight, feature_one_sixteen = feature_shrinker(*feature_extractor(images_cuda[i]))
        feature_halfs.append(feature_half)
        feature_quarters.append(feature_quarter)
        feature_one_eights.append(feature_one_eight)
        feature_one_sixteens.append(feature_one_sixteen)
    optimizer_loss = 0
    predictions = None
    l1_meter = LossMeter()
    huber_meter = LossMeter()
    l1_inv_meter = LossMeter()
    l1_rel_meter = LossMeter()
    for i in range(1, len(images)):
        reference_index = i
        measurement_index = i - 1
        if Config.train_predict_two_way:
            # Predict depth for both frames of the pair; index1 is the frame
            # whose depth is predicted, index2 the measurement frame.
            iterations = [[measurement_index, reference_index],
                          [reference_index, measurement_index]]
        else:
            iterations = [[reference_index, measurement_index]]
        for [index1, index2] in iterations:
            initial_cost_volume = calculate_cost_volume_by_warping(image1=feature_halfs[index1],
                                                                   image2=feature_halfs[index2],
                                                                   pose1=poses_cuda[index1],
                                                                   pose2=poses_cuda[index2],
                                                                   K=K,
                                                                   warp_grid=warp_grid,
                                                                   min_depth=Config.train_min_depth,
                                                                   max_depth=Config.train_max_depth,
                                                                   n_depth_levels=Config.train_n_depth_levels,
                                                                   device=torch.device('cuda'),
                                                                   dot_product=True)
            # Horizontal-flip augmentation: applied consistently to the
            # features, the image, the groundtruth depth and the cost volume.
            flipped = False
            to_be_used_feature_one_sixteen = feature_one_sixteens[index1]
            to_be_used_feature_one_eight = feature_one_eights[index1]
            to_be_used_feature_quarter = feature_quarters[index1]
            to_be_used_feature_half = feature_halfs[index1]
            to_be_used_image = images_cuda[index1]
            to_be_used_depth = depths_cuda[index1]
            to_be_used_cost_volume = initial_cost_volume
            if is_training and TrainingHyperparameters.use_augmentation and np.random.random() > 0.5:
                to_be_used_feature_one_sixteen = torch.flip(feature_one_sixteens[index1], dims=[-1])
                to_be_used_feature_one_eight = torch.flip(feature_one_eights[index1], dims=[-1])
                to_be_used_feature_quarter = torch.flip(feature_quarters[index1], dims=[-1])
                to_be_used_feature_half = torch.flip(feature_halfs[index1], dims=[-1])
                to_be_used_image = torch.flip(images_cuda[index1], dims=[-1])
                to_be_used_depth = torch.flip(depths_cuda[index1], dims=[-1])
                to_be_used_cost_volume = torch.flip(initial_cost_volume, dims=[-1])
                flipped = True
            skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=to_be_used_feature_half,
                                                                     features_quarter=to_be_used_feature_quarter,
                                                                     features_one_eight=to_be_used_feature_one_eight,
                                                                     features_one_sixteen=to_be_used_feature_one_sixteen,
                                                                     cost_volume=to_be_used_cost_volume)
            depth_full, depth_half, depth_quarter, depth_one_eight, depth_one_sixteen = cost_volume_decoder(to_be_used_image,
                                                                                                           skip0,
                                                                                                           skip1,
                                                                                                           skip2,
                                                                                                           skip3,
                                                                                                           bottom)
            # All five scales contribute equally to the loss.
            predictions = [depth_one_sixteen, depth_one_eight, depth_quarter, depth_half, depth_full]
            weights = [1, 1, 1, 1, 1]
            optimizer_loss = optimizer_loss + update_losses(predictions=predictions,
                                                            weights=weights,
                                                            groundtruth=to_be_used_depth,
                                                            is_training=is_training,
                                                            l1_meter=l1_meter,
                                                            huber_meter=huber_meter,
                                                            l1_inv_meter=l1_inv_meter,
                                                            l1_rel_meter=l1_rel_meter,
                                                            loss_type=TrainingHyperparameters.loss_type)
            # Un-flip the last frame's predictions so the returned/visualized
            # depths match the unflipped input orientation.
            # NOTE(review): only guards index1 == len(images) - 1, i.e. the
            # pair whose predictions survive the loop -- confirm intent.
            if flipped and index1 == len(images) - 1:
                depth_quarter = torch.flip(depth_quarter, dims=[-1])
                depth_half = torch.flip(depth_half, dims=[-1])
                depth_full = torch.flip(depth_full, dims=[-1])
            predictions = [depth_quarter, depth_half, depth_full]
            predictions_names = ["prediction_quarter", "prediction_half", "prediction_full"]
    return l1_meter, huber_meter, l1_inv_meter, l1_rel_meter, optimizer_loss, predictions, predictions_names
# Script entry point.
if __name__ == '__main__':
    main()
| 12,924 | 45.160714 | 132 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/pairnet/model.py | from collections import OrderedDict
import torch
from torchvision import models
from torchvision.ops import FeaturePyramidNetwork
from dvmvs.config import Config
from dvmvs.layers import conv_layer, depth_layer_3x3
# Channel count of every feature map produced by the feature pyramid network.
fpn_output_channels = 32
# Base channel width for the cost-volume encoder/decoder; deeper stages use
# multiples (x2, x4, x8, x16) of this value.
hyper_channels = 32
class StandardLayer(torch.nn.Module):
    """Two stride-1 convolutions with a constant channel count.

    The first conv always applies BN+ReLU; whether the second does is
    controlled by `apply_bn_relu`, so the layer can end without an activation.
    """
    def __init__(self, channels, kernel_size, apply_bn_relu):
        super(StandardLayer, self).__init__()
        self.conv1 = conv_layer(input_channels=channels,
                                output_channels=channels,
                                kernel_size=kernel_size,
                                stride=1,
                                apply_bn_relu=True)
        self.conv2 = conv_layer(input_channels=channels,
                                output_channels=channels,
                                kernel_size=kernel_size,
                                stride=1,
                                apply_bn_relu=apply_bn_relu)

    def forward(self, inp):
        return self.conv2(self.conv1(inp))
class DownconvolutionLayer(torch.nn.Module):
    """Single stride-2 convolution (with BN+ReLU) that halves spatial resolution."""
    def __init__(self, input_channels, output_channels, kernel_size):
        super(DownconvolutionLayer, self).__init__()
        self.down_conv = conv_layer(input_channels=input_channels,
                                    output_channels=output_channels,
                                    kernel_size=kernel_size,
                                    stride=2,
                                    apply_bn_relu=True)

    def forward(self, inp):
        return self.down_conv(inp)
class UpconvolutionLayer(torch.nn.Module):
    """Bilinear 2x upsampling followed by a stride-1 convolution (with BN+ReLU)."""
    def __init__(self, input_channels, output_channels, kernel_size):
        super(UpconvolutionLayer, self).__init__()
        self.conv = conv_layer(input_channels=input_channels,
                               output_channels=output_channels,
                               kernel_size=kernel_size,
                               stride=1,
                               apply_bn_relu=True)

    def forward(self, inp):
        upsampled = torch.nn.functional.interpolate(input=inp, scale_factor=2, mode='bilinear', align_corners=True)
        return self.conv(upsampled)
class EncoderBlock(torch.nn.Module):
    """Encoder stage: a stride-2 downsampling convolution followed by a
    StandardLayer (two stride-1 convolutions) at the new resolution."""
    def __init__(self, input_channels, output_channels, kernel_size):
        super(EncoderBlock, self).__init__()
        self.down_convolution = DownconvolutionLayer(input_channels=input_channels,
                                                     output_channels=output_channels,
                                                     kernel_size=kernel_size)
        self.standard_convolution = StandardLayer(channels=output_channels,
                                                  kernel_size=kernel_size,
                                                  apply_bn_relu=True)

    def forward(self, inp):
        x = self.down_convolution(inp)
        x = self.standard_convolution(x)
        return x
class DecoderBlock(torch.nn.Module):
    """Decoder stage: 2x upsample, concatenate with the skip connection (and
    optionally the previous scale's 1-channel depth), then two convolutions.

    `plus_one` must be True when `forward` will be given a depth map, since it
    adds the extra channel to the first aggregation convolution. The channel
    math assumes the skip tensor has `output_channels` channels, so the
    concatenation has input_channels (+1) channels.
    """
    def __init__(self, input_channels, output_channels, kernel_size, apply_bn_relu, plus_one):
        super(DecoderBlock, self).__init__()
        # Upsample the inpput coming from previous layer
        self.up_convolution = UpconvolutionLayer(input_channels=input_channels,
                                                 output_channels=output_channels,
                                                 kernel_size=kernel_size)
        if plus_one:
            next_input_channels = input_channels + 1
        else:
            next_input_channels = input_channels
        # Aggregate skip and upsampled input
        self.convolution1 = conv_layer(input_channels=next_input_channels,
                                       output_channels=output_channels,
                                       kernel_size=kernel_size,
                                       stride=1,
                                       apply_bn_relu=True)
        # Learn from aggregation
        self.convolution2 = conv_layer(input_channels=output_channels,
                                       output_channels=output_channels,
                                       kernel_size=kernel_size,
                                       stride=1,
                                       apply_bn_relu=apply_bn_relu)

    def forward(self, inp, skip, depth):
        inp = self.up_convolution(inp)
        if depth is None:
            x = torch.cat([inp, skip], dim=1)
        else:
            # Upsample the coarser depth estimate to this scale before concatenating.
            depth = torch.nn.functional.interpolate(depth, scale_factor=2, mode='bilinear', align_corners=True)
            x = torch.cat([inp, skip, depth], dim=1)
        x = self.convolution1(x)
        x = self.convolution2(x)
        return x
class FeatureExtractor(torch.nn.Module):
    """ImageNet-pretrained MNASNet-1.0 backbone split into five stages.

    NOTE: `pretrained=True` downloads torchvision weights on first use.
    The stage channel counts match the FPN input list in FeatureShrinker
    (16, 24, 40, 96, 320).
    """
    def __init__(self):
        super(FeatureExtractor, self).__init__()
        backbone_mobile_layers = list(models.mnasnet1_0(pretrained=True).layers.children())
        # Partition the flat MNASNet layer list into feature stages.
        self.layer1 = torch.nn.Sequential(*backbone_mobile_layers[0:8])
        self.layer2 = torch.nn.Sequential(*backbone_mobile_layers[8:9])
        self.layer3 = torch.nn.Sequential(*backbone_mobile_layers[9:10])
        self.layer4 = torch.nn.Sequential(*backbone_mobile_layers[10:12])
        self.layer5 = torch.nn.Sequential(*backbone_mobile_layers[12:14])

    def forward(self, image):
        # Returns all five intermediate stage outputs (coarse-to-fine pyramid inputs).
        layer1 = self.layer1(image)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        layer5 = self.layer5(layer4)
        return layer1, layer2, layer3, layer4, layer5
class FeatureShrinker(torch.nn.Module):
    """Feature pyramid network over the five backbone stages.

    Reduces every stage to `fpn_output_channels` channels and returns the four
    finest pyramid levels, named by their nominal scale relative to the input
    (half, quarter, one-eighth, one-sixteenth) as used throughout the codebase.
    """
    def __init__(self):
        super(FeatureShrinker, self).__init__()
        self.fpn = FeaturePyramidNetwork(in_channels_list=[16, 24, 40, 96, 320],
                                         out_channels=fpn_output_channels,
                                         extra_blocks=None)

    def forward(self, layer1, layer2, layer3, layer4, layer5):
        # FeaturePyramidNetwork consumes an ordered dict of stage outputs.
        fpn_input = OrderedDict()
        fpn_input['layer1'] = layer1
        fpn_input['layer2'] = layer2
        fpn_input['layer3'] = layer3
        fpn_input['layer4'] = layer4
        fpn_input['layer5'] = layer5
        fpn_output = self.fpn(fpn_input)
        # The coarsest level ('layer5') is computed but not returned.
        features_half = fpn_output['layer1']
        features_quarter = fpn_output['layer2']
        features_one_eight = fpn_output['layer3']
        features_one_sixteen = fpn_output['layer4']
        return features_half, features_quarter, features_one_eight, features_one_sixteen
class CostVolumeEncoder(torch.nn.Module):
    """U-Net encoder over the cost volume.

    At each scale an aggregator conv fuses the image features with the current
    encoding, then an EncoderBlock halves the resolution and doubles the
    channel count. Returns the four aggregated skip tensors plus the bottom
    encoding for the decoder.
    """
    def __init__(self):
        super(CostVolumeEncoder, self).__init__()
        # Half scale: cost volume (n_depth_levels channels) + image features.
        self.aggregator0 = conv_layer(input_channels=Config.train_n_depth_levels + fpn_output_channels,
                                      output_channels=hyper_channels,
                                      kernel_size=5,
                                      stride=1,
                                      apply_bn_relu=True)
        self.encoder_block0 = EncoderBlock(input_channels=hyper_channels,
                                           output_channels=hyper_channels * 2,
                                           kernel_size=5)
        ###
        self.aggregator1 = conv_layer(input_channels=hyper_channels * 2 + fpn_output_channels,
                                      output_channels=hyper_channels * 2,
                                      kernel_size=3,
                                      stride=1,
                                      apply_bn_relu=True)
        self.encoder_block1 = EncoderBlock(input_channels=hyper_channels * 2,
                                           output_channels=hyper_channels * 4,
                                           kernel_size=3)
        ###
        self.aggregator2 = conv_layer(input_channels=hyper_channels * 4 + fpn_output_channels,
                                      output_channels=hyper_channels * 4,
                                      kernel_size=3,
                                      stride=1,
                                      apply_bn_relu=True)
        self.encoder_block2 = EncoderBlock(input_channels=hyper_channels * 4,
                                           output_channels=hyper_channels * 8,
                                           kernel_size=3)
        ###
        self.aggregator3 = conv_layer(input_channels=hyper_channels * 8 + fpn_output_channels,
                                      output_channels=hyper_channels * 8,
                                      kernel_size=3,
                                      stride=1,
                                      apply_bn_relu=True)
        self.encoder_block3 = EncoderBlock(input_channels=hyper_channels * 8,
                                           output_channels=hyper_channels * 16,
                                           kernel_size=3)

    def forward(self, features_half, features_quarter, features_one_eight, features_one_sixteen, cost_volume):
        # Each inpN (post-aggregation, pre-downsampling) doubles as the skip
        # connection for the decoder at that scale.
        inp0 = torch.cat([features_half, cost_volume], dim=1)
        inp0 = self.aggregator0(inp0)
        out0 = self.encoder_block0(inp0)
        inp1 = torch.cat([features_quarter, out0], dim=1)
        inp1 = self.aggregator1(inp1)
        out1 = self.encoder_block1(inp1)
        inp2 = torch.cat([features_one_eight, out1], dim=1)
        inp2 = self.aggregator2(inp2)
        out2 = self.encoder_block2(inp2)
        inp3 = torch.cat([features_one_sixteen, out2], dim=1)
        inp3 = self.aggregator3(inp3)
        out3 = self.encoder_block3(inp3)
        return inp0, inp1, inp2, inp3, out3
class CostVolumeDecoder(torch.nn.Module):
    """U-Net decoder that regresses depth at five scales.

    Depth is predicted in inverse-depth space: each depth head's output
    (treated as a value in [0, 1]; depth_layer_3x3 is defined elsewhere --
    presumably sigmoid-activated, verify there) is mapped affinely to
    [1/max_depth, 1/min_depth] and inverted. Each coarser prediction is fed
    into the next finer DecoderBlock (hence plus_one=True below).
    """
    def __init__(self):
        super(CostVolumeDecoder, self).__init__()
        # Affine map from [0, 1] to [1/max_depth, 1/min_depth].
        self.inverse_depth_base = 1 / Config.train_max_depth
        self.inverse_depth_multiplier = 1 / Config.train_min_depth - 1 / Config.train_max_depth
        # The first decoder block has no coarser depth estimate yet.
        self.decoder_block1 = DecoderBlock(input_channels=hyper_channels * 16,
                                           output_channels=hyper_channels * 8,
                                           kernel_size=3,
                                           apply_bn_relu=True,
                                           plus_one=False)
        self.decoder_block2 = DecoderBlock(input_channels=hyper_channels * 8,
                                           output_channels=hyper_channels * 4,
                                           kernel_size=3,
                                           apply_bn_relu=True,
                                           plus_one=True)
        self.decoder_block3 = DecoderBlock(input_channels=hyper_channels * 4,
                                           output_channels=hyper_channels * 2,
                                           kernel_size=3,
                                           apply_bn_relu=True,
                                           plus_one=True)
        self.decoder_block4 = DecoderBlock(input_channels=hyper_channels * 2,
                                           output_channels=hyper_channels,
                                           kernel_size=5,
                                           apply_bn_relu=True,
                                           plus_one=True)
        # Full-resolution refinement over [decoder features, depth, RGB image]
        # (hyper_channels + 1 + 3 input channels).
        self.refine = torch.nn.Sequential(conv_layer(input_channels=hyper_channels + 4,
                                                     output_channels=hyper_channels,
                                                     kernel_size=5,
                                                     stride=1,
                                                     apply_bn_relu=True),
                                          conv_layer(input_channels=hyper_channels,
                                                     output_channels=hyper_channels,
                                                     kernel_size=5,
                                                     stride=1,
                                                     apply_bn_relu=True))
        # One depth head per scale.
        self.depth_layer_one_sixteen = depth_layer_3x3(hyper_channels * 8)
        self.depth_layer_one_eight = depth_layer_3x3(hyper_channels * 4)
        self.depth_layer_quarter = depth_layer_3x3(hyper_channels * 2)
        self.depth_layer_half = depth_layer_3x3(hyper_channels)
        self.depth_layer_full = depth_layer_3x3(hyper_channels)

    def forward(self, image, skip0, skip1, skip2, skip3, bottom):
        # work on cost volume
        decoder_block1 = self.decoder_block1(bottom, skip3, None)
        sigmoid_depth_one_sixteen = self.depth_layer_one_sixteen(decoder_block1)
        inverse_depth_one_sixteen = self.inverse_depth_multiplier * sigmoid_depth_one_sixteen + self.inverse_depth_base
        decoder_block2 = self.decoder_block2(decoder_block1, skip2, sigmoid_depth_one_sixteen)
        sigmoid_depth_one_eight = self.depth_layer_one_eight(decoder_block2)
        inverse_depth_one_eight = self.inverse_depth_multiplier * sigmoid_depth_one_eight + self.inverse_depth_base
        decoder_block3 = self.decoder_block3(decoder_block2, skip1, sigmoid_depth_one_eight)
        sigmoid_depth_quarter = self.depth_layer_quarter(decoder_block3)
        inverse_depth_quarter = self.inverse_depth_multiplier * sigmoid_depth_quarter + self.inverse_depth_base
        decoder_block4 = self.decoder_block4(decoder_block3, skip0, sigmoid_depth_quarter)
        sigmoid_depth_half = self.depth_layer_half(decoder_block4)
        inverse_depth_half = self.inverse_depth_multiplier * sigmoid_depth_half + self.inverse_depth_base
        # Full-resolution prediction: upsample decoder output and half-scale
        # depth, refine jointly with the input image.
        scaled_depth = torch.nn.functional.interpolate(sigmoid_depth_half, scale_factor=2, mode='bilinear', align_corners=True)
        scaled_decoder = torch.nn.functional.interpolate(decoder_block4, scale_factor=2, mode='bilinear', align_corners=True)
        scaled_combined = torch.cat([scaled_decoder, scaled_depth, image], dim=1)
        scaled_combined = self.refine(scaled_combined)
        inverse_depth_full = self.inverse_depth_multiplier * self.depth_layer_full(scaled_combined) + self.inverse_depth_base
        # Invert back to metric depth and drop the channel dimension.
        depth_full = 1.0 / inverse_depth_full.squeeze(1)
        depth_half = 1.0 / inverse_depth_half.squeeze(1)
        depth_quarter = 1.0 / inverse_depth_quarter.squeeze(1)
        depth_one_eight = 1.0 / inverse_depth_one_eight.squeeze(1)
        depth_one_sixteen = 1.0 / inverse_depth_one_sixteen.squeeze(1)
        return depth_full, depth_half, depth_quarter, depth_one_eight, depth_one_sixteen
| 14,393 | 46.193443 | 127 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/pairnet/run-testing.py | import cv2
import numpy as np
import torch
from path import Path
from tqdm import tqdm
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.pairnet.model import FeatureExtractor, FeatureShrinker, CostVolumeEncoder, CostVolumeDecoder
from dvmvs.utils import cost_volume_fusion, save_results, visualize_predictions, InferenceTimer, get_warp_grid_for_cost_volume_calculation
def predict():
    """Offline pairnet evaluation over pre-computed keyframe index files.

    Loads the four modules from ./weights, then for every keyframe line builds
    a fused plane-sweep cost volume (64 planes in [0.25m, 20m]) from the
    measurement frames and predicts a full-resolution depth map for the
    reference frame; predictions and groundtruths are saved per scene.
    """
    print("System: PAIRNET")
    device = torch.device("cuda")
    feature_extractor = FeatureExtractor()
    feature_shrinker = FeatureShrinker()
    cost_volume_encoder = CostVolumeEncoder()
    cost_volume_decoder = CostVolumeDecoder()
    feature_extractor = feature_extractor.to(device)
    feature_shrinker = feature_shrinker.to(device)
    cost_volume_encoder = cost_volume_encoder.to(device)
    cost_volume_decoder = cost_volume_decoder.to(device)
    model = [feature_extractor, feature_shrinker, cost_volume_encoder, cost_volume_decoder]
    # One checkpoint per module; sorted file order in ./weights must match the
    # module order in `model`.
    for i in range(len(model)):
        try:
            checkpoint = sorted(Path("weights").files())[i]
            weights = torch.load(checkpoint)
            model[i].load_state_dict(weights)
            model[i].eval()
            print("Loaded weights for", checkpoint)
        except Exception as e:
            print(e)
            print("Could not find the checkpoint for module", i)
            exit(1)
    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    cost_volume_decoder = model[3]
    # The cost volume is computed on half-resolution features.
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=int(Config.test_image_width / 2),
                                                          height=int(Config.test_image_height / 2),
                                                          device=device)
    # ImageNet normalization constants and the plane-sweep depth range.
    scale_rgb = 255.0
    mean_rgb = [0.485, 0.456, 0.406]
    std_rgb = [0.229, 0.224, 0.225]
    min_depth = 0.25
    max_depth = 20.0
    n_depth_levels = 64
    data_path = Path(Config.test_offline_data_path)
    if Config.test_dataset_name is None:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files())
    else:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*"))
    for iteration, keyframe_index_file in enumerate(keyframe_index_files):
        # Index filename encodes: keyframing type + dataset + scene + _ + #measurement frames.
        keyframing_type, dataset_name, scene_name, _, n_measurement_frames = keyframe_index_file.split("/")[-1].split("+")
        scene_folder = data_path / dataset_name / scene_name
        print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))
        keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")
        K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
        poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
        image_filenames = sorted((scene_folder / 'images').files("*.png"))
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
        input_filenames = []
        for image_filename in image_filenames:
            input_filenames.append(image_filename.split("/")[-1])
        inference_timer = InferenceTimer()
        predictions = []
        reference_depths = []
        with torch.no_grad():
            for i in tqdm(range(0, len(keyframe_index_file_lines))):
                keyframe_index_file_line = keyframe_index_file_lines[i]
                if keyframe_index_file_line == "TRACKING LOST":
                    continue
                else:
                    # First filename on the line is the reference frame; the
                    # remaining ones are its measurement frames.
                    current_input_filenames = keyframe_index_file_line.split(" ")
                    current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]
                    reference_index = current_indices[0]
                    measurement_indices = current_indices[1:]
                    reference_pose = poses[reference_index]
                    reference_image = load_image(image_filenames[reference_index])
                    # Depth PNGs store millimeters; convert to meters.
                    reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0
                    preprocessor = PreprocessImage(K=K,
                                                   old_width=reference_image.shape[1],
                                                   old_height=reference_image.shape[0],
                                                   new_width=Config.test_image_width,
                                                   new_height=Config.test_image_height,
                                                   distortion_crop=Config.test_distortion_crop,
                                                   perform_crop=Config.test_perform_crop)
                    reference_image = preprocessor.apply_rgb(image=reference_image,
                                                             scale_rgb=scale_rgb,
                                                             mean_rgb=mean_rgb,
                                                             std_rgb=std_rgb)
                    reference_depth = preprocessor.apply_depth(reference_depth)
                    reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                    reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)
                    measurement_poses_torch = []
                    measurement_images_torch = []
                    for measurement_index in measurement_indices:
                        measurement_image = load_image(image_filenames[measurement_index])
                        measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                                   scale_rgb=scale_rgb,
                                                                   mean_rgb=mean_rgb,
                                                                   std_rgb=std_rgb)
                        measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                        measurement_pose_torch = torch.from_numpy(poses[measurement_index]).float().to(device).unsqueeze(0)
                        measurement_images_torch.append(measurement_image_torch)
                        measurement_poses_torch.append(measurement_pose_torch)
                    full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)
                    # Intrinsics scaled for the half-resolution feature maps.
                    half_K_torch = full_K_torch.clone().cuda()
                    half_K_torch[:, 0:2, :] = half_K_torch[:, 0:2, :] / 2.0
                    inference_timer.record_start_time()
                    measurement_feature_halfs = []
                    for measurement_image_torch in measurement_images_torch:
                        measurement_feature_half, _, _, _ = feature_shrinker(*feature_extractor(measurement_image_torch))
                        measurement_feature_halfs.append(measurement_feature_half)
                    reference_feature_half, reference_feature_quarter, \
                        reference_feature_one_eight, reference_feature_one_sixteen = feature_shrinker(*feature_extractor(reference_image_torch))
                    # Fuse plane-sweep cost volumes from all measurement frames.
                    cost_volume = cost_volume_fusion(image1=reference_feature_half,
                                                     image2s=measurement_feature_halfs,
                                                     pose1=reference_pose_torch,
                                                     pose2s=measurement_poses_torch,
                                                     K=half_K_torch,
                                                     warp_grid=warp_grid,
                                                     min_depth=min_depth,
                                                     max_depth=max_depth,
                                                     n_depth_levels=n_depth_levels,
                                                     device=device,
                                                     dot_product=True)
                    skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=reference_feature_half,
                                                                             features_quarter=reference_feature_quarter,
                                                                             features_one_eight=reference_feature_one_eight,
                                                                             features_one_sixteen=reference_feature_one_sixteen,
                                                                             cost_volume=cost_volume)
                    # Only the full-resolution depth is kept for evaluation.
                    prediction, _, _, _, _ = cost_volume_decoder(reference_image_torch, skip0, skip1, skip2, skip3, bottom)
                    inference_timer.record_end_time_and_elapsed_time()
                    prediction = prediction.cpu().numpy().squeeze()
                    reference_depths.append(reference_depth)
                    predictions.append(prediction)
                    if Config.test_visualize:
                        visualize_predictions(numpy_reference_image=reference_image,
                                              numpy_measurement_image=measurement_image,
                                              numpy_predicted_depth=prediction,
                                              normalization_mean=mean_rgb,
                                              normalization_std=std_rgb,
                                              normalization_scale=scale_rgb)
        inference_timer.print_statistics()
        system_name = "{}_{}_{}_{}_{}_dvmvs_pairnet".format(keyframing_type,
                                                            dataset_name,
                                                            Config.test_image_width,
                                                            Config.test_image_height,
                                                            n_measurement_frames)
        save_results(predictions=predictions,
                     groundtruths=reference_depths,
                     system_name=system_name,
                     scene_name=scene_name,
                     save_folder=Config.test_result_folder)
# Script entry point.
if __name__ == '__main__':
    predict()
| 10,187 | 50.715736 | 138 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/pairnet/run-testing-online.py | import cv2
import numpy as np
import torch
from path import Path
from tqdm import tqdm
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.keyframe_buffer import KeyframeBuffer
from dvmvs.pairnet.model import FeatureExtractor, FeatureShrinker, CostVolumeEncoder, CostVolumeDecoder
from dvmvs.utils import cost_volume_fusion, save_results, visualize_predictions, InferenceTimer, get_warp_grid_for_cost_volume_calculation
def predict():
    """Run pairnet depth prediction online over a single scene folder.

    Streams frames from ``Config.test_online_scene_path`` in order, lets a
    ``KeyframeBuffer`` decide which frames become keyframes, builds a fused
    cost volume against the buffered measurement frames, and decodes one
    depth map per accepted keyframe.  Predictions and ground-truth depths
    are written out via ``save_results`` at the end.
    """
    dataset_name = Config.test_online_scene_path.split("/")[-2]
    # NOTE(review): this is the *pairnet* runner but the system name says
    # "fusionnet" -- looks copy-pasted from the fusionnet script; confirm
    # before relying on the output file names.
    system_name = "keyframe_{}_{}_{}_{}_dvmvs_fusionnet_online".format(dataset_name,
                                                                       Config.test_image_width,
                                                                       Config.test_image_height,
                                                                       Config.test_n_measurement_frames)

    print("Predicting with System:", system_name)
    print("# of Measurement Frames:", Config.test_n_measurement_frames)

    device = torch.device("cuda")

    feature_extractor = FeatureExtractor()
    feature_shrinker = FeatureShrinker()
    cost_volume_encoder = CostVolumeEncoder()
    cost_volume_decoder = CostVolumeDecoder()

    feature_extractor = feature_extractor.to(device)
    feature_shrinker = feature_shrinker.to(device)
    cost_volume_encoder = cost_volume_encoder.to(device)
    cost_volume_decoder = cost_volume_decoder.to(device)

    model = [feature_extractor, feature_shrinker, cost_volume_encoder, cost_volume_decoder]

    # Load one checkpoint file per submodule; the sorted file order in
    # "weights" must match the module order in `model`.
    for i in range(len(model)):
        try:
            checkpoint = sorted(Path("weights").files())[i]
            weights = torch.load(checkpoint)
            model[i].load_state_dict(weights)
            model[i].eval()
            print("Loaded weights for", checkpoint)
        except Exception as e:
            print(e)
            print("Could not find the checkpoint for module", i)
            exit(1)

    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    cost_volume_decoder = model[3]

    # Warping grid at half resolution: the cost volume is computed on
    # half-scale features.
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=int(Config.test_image_width / 2),
                                                          height=int(Config.test_image_height / 2),
                                                          device=device)

    # ImageNet-style RGB normalization constants.
    scale_rgb = 255.0
    mean_rgb = [0.485, 0.456, 0.406]
    std_rgb = [0.229, 0.224, 0.225]

    # Plane-sweep depth range for the cost volume.
    min_depth = 0.25
    max_depth = 20.0
    n_depth_levels = 64

    scene_folder = Path(Config.test_online_scene_path)
    scene = scene_folder.split("/")[-1]
    print("Predicting for scene:", scene)

    keyframe_buffer = KeyframeBuffer(buffer_size=Config.test_keyframe_buffer_size,
                                     keyframe_pose_distance=Config.test_keyframe_pose_distance,
                                     optimal_t_score=Config.test_optimal_t_measure,
                                     optimal_R_score=Config.test_optimal_R_measure,
                                     store_return_indices=False)

    K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
    poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
    image_filenames = sorted((scene_folder / 'images').files("*.png"))
    depth_filenames = sorted((scene_folder / 'depth').files("*.png"))

    inference_timer = InferenceTimer()

    predictions = []
    reference_depths = []
    with torch.no_grad():
        for i in tqdm(range(0, len(poses))):
            reference_pose = poses[i]
            reference_image = load_image(image_filenames[i])
            # Ground-truth depth is a 16-bit PNG in millimeters; convert to meters.
            reference_depth = cv2.imread(depth_filenames[i], -1).astype(float) / 1000.0

            # POLL THE KEYFRAME BUFFER
            response = keyframe_buffer.try_new_keyframe(reference_pose, reference_image)
            if response != 1:
                # Frame rejected as keyframe; no prediction for it.
                continue

            preprocessor = PreprocessImage(K=K,
                                           old_width=reference_image.shape[1],
                                           old_height=reference_image.shape[0],
                                           new_width=Config.test_image_width,
                                           new_height=Config.test_image_height,
                                           distortion_crop=Config.test_distortion_crop,
                                           perform_crop=Config.test_perform_crop)

            reference_image = preprocessor.apply_rgb(image=reference_image,
                                                     scale_rgb=scale_rgb,
                                                     mean_rgb=mean_rgb,
                                                     std_rgb=std_rgb)
            reference_depth = preprocessor.apply_depth(reference_depth)
            reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
            reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)

            full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)

            # Intrinsics scaled to the half-resolution feature maps.
            half_K_torch = full_K_torch.clone().cuda()
            half_K_torch[:, 0:2, :] = half_K_torch[:, 0:2, :] / 2.0

            measurement_poses_torch = []
            measurement_images_torch = []
            measurement_frames = keyframe_buffer.get_best_measurement_frames(Config.test_n_measurement_frames)
            for (measurement_pose, measurement_image) in measurement_frames:
                measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                           scale_rgb=scale_rgb,
                                                           mean_rgb=mean_rgb,
                                                           std_rgb=std_rgb)
                measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                measurement_pose_torch = torch.from_numpy(measurement_pose).float().to(device).unsqueeze(0)
                measurement_images_torch.append(measurement_image_torch)
                measurement_poses_torch.append(measurement_pose_torch)

            inference_timer.record_start_time()

            measurement_feature_halfs = []
            for measurement_image_torch in measurement_images_torch:
                # Only the half-resolution features of measurement frames are needed.
                measurement_feature_half, _, _, _ = feature_shrinker(*feature_extractor(measurement_image_torch))
                measurement_feature_halfs.append(measurement_feature_half)

            reference_feature_half, reference_feature_quarter, \
                reference_feature_one_eight, reference_feature_one_sixteen = feature_shrinker(*feature_extractor(reference_image_torch))

            cost_volume = cost_volume_fusion(image1=reference_feature_half,
                                             image2s=measurement_feature_halfs,
                                             pose1=reference_pose_torch,
                                             pose2s=measurement_poses_torch,
                                             K=half_K_torch,
                                             warp_grid=warp_grid,
                                             min_depth=min_depth,
                                             max_depth=max_depth,
                                             n_depth_levels=n_depth_levels,
                                             device=device,
                                             dot_product=True)

            skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=reference_feature_half,
                                                                     features_quarter=reference_feature_quarter,
                                                                     features_one_eight=reference_feature_one_eight,
                                                                     features_one_sixteen=reference_feature_one_sixteen,
                                                                     cost_volume=cost_volume)

            # Only the full-resolution depth output is kept here.
            prediction, _, _, _, _ = cost_volume_decoder(reference_image_torch, skip0, skip1, skip2, skip3, bottom)

            inference_timer.record_end_time_and_elapsed_time()

            prediction = prediction.cpu().numpy().squeeze()
            reference_depths.append(reference_depth)
            predictions.append(prediction)

            if Config.test_visualize:
                # NOTE(review): `measurement_image` is whatever the last loop
                # iteration left behind; it is unbound if no measurement frames
                # were returned -- confirm that is intended.
                visualize_predictions(numpy_reference_image=reference_image,
                                      numpy_measurement_image=measurement_image,
                                      numpy_predicted_depth=prediction,
                                      normalization_mean=mean_rgb,
                                      normalization_std=std_rgb,
                                      normalization_scale=scale_rgb)

    inference_timer.print_statistics()

    save_results(predictions=predictions,
                 groundtruths=reference_depths,
                 system_name=system_name,
                 scene_name=scene,
                 save_folder=".")


if __name__ == '__main__':
    predict()
| 9,280 | 48.897849 | 138 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/fusionnet/run-training.py | import datetime
import itertools
import os
import numpy as np
from path import Path
from tensorboardX import SummaryWriter
from torch.backends import cudnn
from torch.utils.data import DataLoader
from dvmvs.dataset_loader import MVSDataset
from dvmvs.fusionnet.model import *
from dvmvs.losses import LossMeter, update_losses
from dvmvs.train import train
from dvmvs.utils import zip_code, print_number_of_trainable_parameters, calculate_cost_volume_by_warping
class TrainingHyperparameters:
    """Hyperparameters for the staged fusionnet training run (see main())."""

    # NOTE(review): this line mutates the global Config object as a side
    # effect when the class body executes at import time; it does NOT create
    # an attribute on TrainingHyperparameters.
    Config.train_subsequence_length = 8

    batch_size = 4
    learning_rate = 1e-4
    momentum = 0.9   # used as Adam beta1 in main()
    beta = 0.999     # used as Adam beta2 in main()
    weight_decay = 0

    # loss_type = "Huber"
    # loss_type = "L1"
    loss_type = "L1-inv"  # selected loss; alternatives kept for reference
    # loss_type = "L1-rel"

    # Number of epochs per staged-training phase (three phases in main()).
    finetune_epochs = 1
    # Whether to warm-start from checkpoints in the "weights" folder.
    use_checkpoint = True
# The cost volume is computed at half of the training image resolution.
scaling = 0.5

# Pre-build a homogeneous pixel grid (3 x H*W after the final reshape) at the
# scaled resolution; it is passed as `warp_grid` to
# calculate_cost_volume_by_warping inside forward_pass.
x = np.linspace(0, Config.train_image_width * scaling - 1, num=int(Config.train_image_width * scaling))
y = np.linspace(0, Config.train_image_height * scaling - 1, num=int(Config.train_image_height * scaling))
ones = np.ones(shape=(int(Config.train_image_height * scaling), int(Config.train_image_width * scaling)))
x_grid, y_grid = np.meshgrid(x, y)
warp_grid = np.stack((x_grid, y_grid, ones), axis=-1)
warp_grid = torch.from_numpy(warp_grid).float()
warp_grid = warp_grid.view(-1, 3).t().cuda()
def main():
    """Train fusionnet in three stages.

    Stage 1 trains the LSTM fusion cell and decoder only; stage 2 adds the
    feature shrinker and cost volume encoder; stage 3 trains everything
    including the backbone feature extractor.  Each stage constructs a fresh
    Adam optimizer over the corresponding parameter subset.
    """
    # set the manual seed for reproducibility
    torch.manual_seed(Config.train_seed)

    # create the directory for this run of the training
    run_directory = os.path.join(Config.train_run_directory, datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    os.mkdir(run_directory)

    # zip every code file (snapshot of the sources used for this run)
    zip_code(run_directory)

    summary_writer = SummaryWriter(run_directory)

    print("=> fetching scenes in '{}'".format(Config.dataset))
    train_set = MVSDataset(
        root=Config.dataset,
        seed=Config.train_seed,
        split="TRAINING",
        subsequence_length=Config.train_subsequence_length,
        scale_rgb=255.0,
        mean_rgb=[0.485, 0.456, 0.406],
        std_rgb=[0.229, 0.224, 0.225],
        geometric_scale_augmentation=True
    )

    val_set = MVSDataset(
        root=Config.dataset,
        seed=Config.train_seed,
        split="VALIDATION",
        subsequence_length=Config.train_subsequence_length,
        scale_rgb=255.0,
        mean_rgb=[0.485, 0.456, 0.406],
        std_rgb=[0.229, 0.224, 0.225]
    )

    print('{} samples found in {} train scenes'.format(len(train_set), len(train_set.scenes)))
    print('{} samples found in {} valid scenes'.format(len(val_set), len(val_set.scenes)))

    train_loader = DataLoader(dataset=train_set,
                              batch_size=TrainingHyperparameters.batch_size,
                              shuffle=True,
                              num_workers=Config.train_data_pipeline_workers,
                              pin_memory=True,
                              drop_last=True)

    val_loader = DataLoader(dataset=val_set,
                            batch_size=TrainingHyperparameters.batch_size,
                            shuffle=False,
                            num_workers=Config.train_data_pipeline_workers,
                            pin_memory=True,
                            drop_last=True)

    feature_extractor = FeatureExtractor().cuda()
    feature_shrinker = FeatureShrinker().cuda()
    cost_volume_encoder = CostVolumeEncoder().cuda()
    lstm_fusion = LSTMFusion().cuda()
    cost_volume_decoder = CostVolumeDecoder().cuda()
    model = [feature_extractor, feature_shrinker, cost_volume_encoder, lstm_fusion, cost_volume_decoder]

    if TrainingHyperparameters.use_checkpoint:
        # Warm-start: sorted checkpoint files must match the module order above.
        # Failure to load one module is non-fatal (unlike the test scripts).
        checkpoints = sorted(Path("weights").files())
        for i in range(len(model)):
            try:
                weights = torch.load(checkpoints[i])
                model[i].load_state_dict(weights)
                print("Loaded weights for", checkpoints[i])
            except Exception as e:
                print(e)
                print("Skipping...")

    cudnn.benchmark = True

    # Shared best-loss tracker, updated inside train() across all phases.
    best_loss = [np.inf, np.inf, np.inf, np.inf]

    # TRAIN LSTM, DECODER
    parameters = itertools.chain(lstm_fusion.parameters(),
                                 cost_volume_decoder.parameters())
    optimizer = torch.optim.Adam(parameters,
                                 lr=TrainingHyperparameters.learning_rate,
                                 betas=(TrainingHyperparameters.momentum, TrainingHyperparameters.beta),
                                 weight_decay=TrainingHyperparameters.weight_decay)
    print_number_of_trainable_parameters(optimizer)
    for epoch in range(TrainingHyperparameters.finetune_epochs):
        print("\n\nEPOCH:", epoch)
        train(train_loader=train_loader,
              val_loader=val_loader,
              model=model,
              optimizer=optimizer,
              summary_writer=summary_writer,
              epoch=epoch,
              best_loss=best_loss,
              run_directory=run_directory,
              forward_pass_function=forward_pass)

    # TRAIN MY PARTS (everything except the pretrained backbone)
    parameters = itertools.chain(feature_shrinker.parameters(),
                                 cost_volume_encoder.parameters(),
                                 lstm_fusion.parameters(),
                                 cost_volume_decoder.parameters())
    optimizer = torch.optim.Adam(parameters,
                                 lr=TrainingHyperparameters.learning_rate,
                                 betas=(TrainingHyperparameters.momentum, TrainingHyperparameters.beta),
                                 weight_decay=TrainingHyperparameters.weight_decay)
    print_number_of_trainable_parameters(optimizer)
    for epoch in range(TrainingHyperparameters.finetune_epochs, 2 * TrainingHyperparameters.finetune_epochs):
        print("\n\nEPOCH:", epoch)
        train(train_loader=train_loader,
              val_loader=val_loader,
              model=model,
              optimizer=optimizer,
              summary_writer=summary_writer,
              epoch=epoch,
              best_loss=best_loss,
              run_directory=run_directory,
              forward_pass_function=forward_pass)

    # TRAIN EVERYTHING (including the backbone feature extractor)
    parameters = itertools.chain(feature_extractor.parameters(),
                                 feature_shrinker.parameters(),
                                 cost_volume_encoder.parameters(),
                                 lstm_fusion.parameters(),
                                 cost_volume_decoder.parameters())
    optimizer = torch.optim.Adam(parameters,
                                 lr=TrainingHyperparameters.learning_rate,
                                 betas=(TrainingHyperparameters.momentum, TrainingHyperparameters.beta),
                                 weight_decay=TrainingHyperparameters.weight_decay)
    print_number_of_trainable_parameters(optimizer)
    for epoch in range(2 * TrainingHyperparameters.finetune_epochs, Config.train_epochs):
        print("\n\nEPOCH:", epoch)
        train(train_loader=train_loader,
              val_loader=val_loader,
              model=model,
              optimizer=optimizer,
              summary_writer=summary_writer,
              epoch=epoch,
              best_loss=best_loss,
              run_directory=run_directory,
              forward_pass_function=forward_pass)
def forward_pass(images, depths, poses, K, model, is_training):
    """One forward pass of fusionnet over a training/validation subsequence.

    For every consecutive frame pair (i-1, i) a cost volume is built from the
    half-resolution features, encoded, fused through the ConvLSTM bottleneck,
    and decoded into multi-scale depth predictions whose losses are
    accumulated into ``optimizer_loss``.

    Returns the four loss meters, the accumulated optimizer loss, the last
    pair's predictions (quarter/half/full), and their display names.
    """
    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    lstm_fusion = model[3]
    cost_volume_decoder = model[4]

    # Intrinsics at the three working resolutions.
    full_K = K.clone().cuda()
    half_K = K.clone().cuda()
    half_K[:, 0:2, :] = half_K[:, 0:2, :] * scaling
    # 1/32 resolution intrinsics for the LSTM bottleneck.
    lstm_K = K.clone().cuda()
    lstm_K[:, 0:2, :] = lstm_K[:, 0:2, :] / 32.0

    images_cuda = []
    depths_cuda = []
    poses_cuda = []
    feature_halfs = []
    feature_quarters = []
    feature_one_eights = []
    feature_one_sixteens = []

    # Extract image features
    for i in range(0, len(images)):
        images_cuda.append(images[i].cuda())
        depths_cuda.append(depths[i].cuda())
        poses_cuda.append(poses[i].cuda())
        feature_half, feature_quarter, feature_one_eight, feature_one_sixteen = feature_shrinker(*feature_extractor(images_cuda[i]))
        feature_halfs.append(feature_half)
        feature_quarters.append(feature_quarter)
        feature_one_eights.append(feature_one_eight)
        feature_one_sixteens.append(feature_one_sixteen)

    optimizer_loss = 0
    predictions = None
    l1_meter = LossMeter()
    huber_meter = LossMeter()
    l1_inv_meter = LossMeter()
    l1_rel_meter = LossMeter()

    batch_size, _, _ = full_K.size()
    lstm_state_bottom = None
    # Slide over consecutive frame pairs; the LSTM state carries across pairs.
    for i in range(1, len(images_cuda)):
        reference_index = i
        measurement_index = i - 1
        initial_cost_volume = calculate_cost_volume_by_warping(image1=feature_halfs[reference_index],
                                                              image2=feature_halfs[measurement_index],
                                                              pose1=poses_cuda[reference_index],
                                                              pose2=poses_cuda[measurement_index],
                                                              K=half_K,
                                                              warp_grid=warp_grid,
                                                              min_depth=Config.train_min_depth,
                                                              max_depth=Config.train_max_depth,
                                                              n_depth_levels=Config.train_n_depth_levels,
                                                              device=torch.device('cuda'),
                                                              dot_product=True)

        skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=feature_halfs[reference_index],
                                                                 features_quarter=feature_quarters[reference_index],
                                                                 features_one_eight=feature_one_eights[reference_index],
                                                                 features_one_sixteen=feature_one_sixteens[reference_index],
                                                                 cost_volume=initial_cost_volume)

        # Ground-truth depth of the reference frame, downsampled to 1/32, is
        # fed to the LSTM cell during training.
        depth_estimation = depths_cuda[reference_index].view(batch_size, 1, Config.train_image_height, Config.train_image_width)
        depth_estimation = torch.nn.functional.interpolate(input=depth_estimation,
                                                           scale_factor=(1.0 / 32.0),
                                                           mode="nearest")

        lstm_state_bottom = lstm_fusion(current_encoding=bottom,
                                        current_state=lstm_state_bottom,
                                        previous_pose=poses_cuda[measurement_index],
                                        current_pose=poses_cuda[reference_index],
                                        estimated_current_depth=depth_estimation,
                                        camera_matrix=lstm_K)

        # Decode from the LSTM hidden state (element 0 of the state pair).
        depth_full, depth_half, depth_quarter, depth_one_eight, depth_one_sixteen = cost_volume_decoder(images_cuda[reference_index],
                                                                                                       skip0,
                                                                                                       skip1,
                                                                                                       skip2,
                                                                                                       skip3,
                                                                                                       lstm_state_bottom[0])

        # Equal weighting of the five prediction scales.
        weights = [1, 1, 1, 1, 1]
        optimizer_loss = optimizer_loss + update_losses(predictions=[depth_one_sixteen, depth_one_eight, depth_quarter, depth_half, depth_full],
                                                        weights=weights,
                                                        groundtruth=depths_cuda[reference_index],
                                                        is_training=is_training,
                                                        l1_meter=l1_meter,
                                                        l1_inv_meter=l1_inv_meter,
                                                        l1_rel_meter=l1_rel_meter,
                                                        huber_meter=huber_meter,
                                                        loss_type=TrainingHyperparameters.loss_type)
        predictions = [depth_quarter, depth_half, depth_full]

    predictions_names = ["prediction_quarter", "prediction_half", "prediction_full"]
    return l1_meter, huber_meter, l1_inv_meter, l1_rel_meter, optimizer_loss, predictions, predictions_names


if __name__ == '__main__':
    main()
| 13,088 | 44.290657 | 144 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/fusionnet/model.py | from collections import OrderedDict
import torch
from torchvision import models
from torchvision.ops import FeaturePyramidNetwork
from dvmvs.config import Config
from dvmvs.convlstm import MVSLayernormConvLSTMCell
from dvmvs.layers import conv_layer, depth_layer_3x3
fpn_output_channels = 32
hyper_channels = 32
class StandardLayer(torch.nn.Module):
    """Two stacked same-resolution convolutions.

    The first convolution always applies BN+ReLU; whether the second one
    does is controlled by ``apply_bn_relu``.
    """

    def __init__(self, channels, kernel_size, apply_bn_relu):
        super(StandardLayer, self).__init__()
        # First conv always ends with BN+ReLU; the caller decides for the second.
        self.conv1 = conv_layer(input_channels=channels,
                                output_channels=channels,
                                stride=1,
                                kernel_size=kernel_size,
                                apply_bn_relu=True)
        self.conv2 = conv_layer(input_channels=channels,
                                output_channels=channels,
                                stride=1,
                                kernel_size=kernel_size,
                                apply_bn_relu=apply_bn_relu)

    def forward(self, x):
        # Chain the two convolutions.
        return self.conv2(self.conv1(x))
class DownconvolutionLayer(torch.nn.Module):
    """A single stride-2 convolution (with BN+ReLU) that halves resolution."""

    def __init__(self, input_channels, output_channels, kernel_size):
        super(DownconvolutionLayer, self).__init__()
        self.down_conv = conv_layer(input_channels=input_channels,
                                    output_channels=output_channels,
                                    stride=2,
                                    kernel_size=kernel_size,
                                    apply_bn_relu=True)

    def forward(self, x):
        return self.down_conv(x)
class UpconvolutionLayer(torch.nn.Module):
    """Bilinear 2x upsampling followed by a convolution with BN+ReLU."""

    def __init__(self, input_channels, output_channels, kernel_size):
        super(UpconvolutionLayer, self).__init__()
        self.conv = conv_layer(input_channels=input_channels,
                               output_channels=output_channels,
                               stride=1,
                               kernel_size=kernel_size,
                               apply_bn_relu=True)

    def forward(self, x):
        # Double the spatial resolution, then refine with a convolution.
        upsampled = torch.nn.functional.interpolate(input=x, scale_factor=2, mode='bilinear', align_corners=True)
        return self.conv(upsampled)
class EncoderBlock(torch.nn.Module):
    """Downsample by 2, then apply a StandardLayer at the new resolution."""

    def __init__(self, input_channels, output_channels, kernel_size):
        super(EncoderBlock, self).__init__()
        self.down_convolution = DownconvolutionLayer(input_channels=input_channels,
                                                     output_channels=output_channels,
                                                     kernel_size=kernel_size)
        self.standard_convolution = StandardLayer(channels=output_channels,
                                                  kernel_size=kernel_size,
                                                  apply_bn_relu=True)

    def forward(self, x):
        # Halve resolution, then refine with the two-conv standard layer.
        return self.standard_convolution(self.down_convolution(x))
class DecoderBlock(torch.nn.Module):
    """One decoder stage: upsample, merge skip (and optional coarser depth),
    then refine with two convolutions.

    When ``plus_one`` is True, ``forward`` expects a one-channel depth map
    from the previous (coarser) stage and concatenates its 2x-upsampled
    version alongside the skip connection.
    """

    def __init__(self, input_channels, output_channels, kernel_size, apply_bn_relu, plus_one):
        super(DecoderBlock, self).__init__()
        # Upsample the input coming from the previous (coarser) layer.
        self.up_convolution = UpconvolutionLayer(input_channels=input_channels,
                                                 output_channels=output_channels,
                                                 kernel_size=kernel_size)

        # One extra input channel when a coarser depth map is concatenated.
        if plus_one:
            next_input_channels = input_channels + 1
        else:
            next_input_channels = input_channels

        # Aggregate skip and upsampled input
        self.convolution1 = conv_layer(input_channels=next_input_channels,
                                       output_channels=output_channels,
                                       kernel_size=kernel_size,
                                       stride=1,
                                       apply_bn_relu=True)
        # Learn from aggregation
        self.convolution2 = conv_layer(input_channels=output_channels,
                                       output_channels=output_channels,
                                       kernel_size=kernel_size,
                                       stride=1,
                                       apply_bn_relu=apply_bn_relu)

    def forward(self, x, skip, depth):
        x = self.up_convolution(x)
        if depth is None:
            x = torch.cat([x, skip], dim=1)
        else:
            # Bring the coarser depth prediction to this stage's resolution
            # before concatenating it.
            depth = torch.nn.functional.interpolate(depth, scale_factor=2, mode='bilinear', align_corners=True)
            x = torch.cat([x, skip, depth], dim=1)
        x = self.convolution1(x)
        x = self.convolution2(x)
        return x
class FeatureExtractor(torch.nn.Module):
    """ImageNet-pretrained MnasNet-1.0 backbone split into five stages.

    Every stage's output is returned so that a feature pyramid can be built
    on top of them (see FeatureShrinker).
    """

    def __init__(self):
        super(FeatureExtractor, self).__init__()
        backbone_mobile_layers = list(models.mnasnet1_0(pretrained=True).layers.children())

        # Slice the MnasNet layer list into five sequential feature stages.
        # The split indices must stay in sync with the FPN channel list
        # [16, 24, 40, 96, 320] in FeatureShrinker.
        self.layer1 = torch.nn.Sequential(*backbone_mobile_layers[0:8])
        self.layer2 = torch.nn.Sequential(*backbone_mobile_layers[8:9])
        self.layer3 = torch.nn.Sequential(*backbone_mobile_layers[9:10])
        self.layer4 = torch.nn.Sequential(*backbone_mobile_layers[10:12])
        self.layer5 = torch.nn.Sequential(*backbone_mobile_layers[12:14])

    def forward(self, image):
        # Run the stages sequentially, keeping every intermediate output.
        layer1 = self.layer1(image)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        layer5 = self.layer5(layer4)
        return layer1, layer2, layer3, layer4, layer5
class FeatureShrinker(torch.nn.Module):
    """Fuse the five backbone stages with an FPN and emit four feature maps.

    The outputs correspond to the half, quarter, one-eighth and one-sixteenth
    scales, each with ``fpn_output_channels`` channels; the coarsest FPN
    level is discarded.
    """

    def __init__(self):
        super(FeatureShrinker, self).__init__()
        self.fpn = FeaturePyramidNetwork(in_channels_list=[16, 24, 40, 96, 320],
                                         out_channels=fpn_output_channels,
                                         extra_blocks=None)

    def forward(self, layer1, layer2, layer3, layer4, layer5):
        # The FPN consumes an ordered mapping of named stage outputs.
        names = ('layer1', 'layer2', 'layer3', 'layer4', 'layer5')
        fpn_input = OrderedDict(zip(names, (layer1, layer2, layer3, layer4, layer5)))
        fpn_output = self.fpn(fpn_input)

        # Drop the coarsest level ('layer5'); return the remaining four scales.
        return (fpn_output['layer1'],
                fpn_output['layer2'],
                fpn_output['layer3'],
                fpn_output['layer4'])
class CostVolumeEncoder(torch.nn.Module):
    """U-Net-style encoder over the cost volume.

    At each scale the image features are concatenated with the current
    encoding, fused by an "aggregator" convolution, and downsampled by an
    EncoderBlock.  Returns the four aggregated tensors (used as decoder
    skip connections) plus the bottleneck output.
    """

    def __init__(self):
        super(CostVolumeEncoder, self).__init__()
        # Half scale: fuse the raw cost volume with the half-scale features.
        self.aggregator0 = conv_layer(input_channels=Config.train_n_depth_levels + fpn_output_channels,
                                      output_channels=hyper_channels,
                                      kernel_size=5,
                                      stride=1,
                                      apply_bn_relu=True)
        self.encoder_block0 = EncoderBlock(input_channels=hyper_channels,
                                           output_channels=hyper_channels * 2,
                                           kernel_size=5)
        ###
        self.aggregator1 = conv_layer(input_channels=hyper_channels * 2 + fpn_output_channels,
                                      output_channels=hyper_channels * 2,
                                      kernel_size=3,
                                      stride=1,
                                      apply_bn_relu=True)
        self.encoder_block1 = EncoderBlock(input_channels=hyper_channels * 2,
                                           output_channels=hyper_channels * 4,
                                           kernel_size=3)
        ###
        self.aggregator2 = conv_layer(input_channels=hyper_channels * 4 + fpn_output_channels,
                                      output_channels=hyper_channels * 4,
                                      kernel_size=3,
                                      stride=1,
                                      apply_bn_relu=True)
        self.encoder_block2 = EncoderBlock(input_channels=hyper_channels * 4,
                                           output_channels=hyper_channels * 8,
                                           kernel_size=3)
        ###
        self.aggregator3 = conv_layer(input_channels=hyper_channels * 8 + fpn_output_channels,
                                      output_channels=hyper_channels * 8,
                                      kernel_size=3,
                                      stride=1,
                                      apply_bn_relu=True)
        self.encoder_block3 = EncoderBlock(input_channels=hyper_channels * 8,
                                           output_channels=hyper_channels * 16,
                                           kernel_size=3)

    def forward(self, features_half, features_quarter, features_one_eight, features_one_sixteen, cost_volume):
        # Half scale.
        inp0 = torch.cat([features_half, cost_volume], dim=1)
        inp0 = self.aggregator0(inp0)
        out0 = self.encoder_block0(inp0)

        # Quarter scale.
        inp1 = torch.cat([features_quarter, out0], dim=1)
        inp1 = self.aggregator1(inp1)
        out1 = self.encoder_block1(inp1)

        # One-eighth scale.
        inp2 = torch.cat([features_one_eight, out1], dim=1)
        inp2 = self.aggregator2(inp2)
        out2 = self.encoder_block2(inp2)

        # One-sixteenth scale.
        inp3 = torch.cat([features_one_sixteen, out2], dim=1)
        inp3 = self.aggregator3(inp3)
        out3 = self.encoder_block3(inp3)

        # inp0..inp3 are the skip connections; out3 is the bottleneck.
        return inp0, inp1, inp2, inp3, out3
class CostVolumeDecoder(torch.nn.Module):
    """Decode the encoded cost volume into depth maps at five scales.

    At each scale a normalized depth is predicted, mapped linearly to an
    inverse depth in [1/max_depth, 1/min_depth], and inverted to metric
    depth.  Each coarser prediction is also fed into the next finer decoder
    block, and a final refinement combines the half-scale output with the
    raw image.

    NOTE(review): the inverse-depth mapping assumes depth_layer_3x3 outputs
    values in [0, 1] (the variable names suggest a sigmoid) -- confirm in
    dvmvs.layers.
    """

    def __init__(self):
        super(CostVolumeDecoder, self).__init__()
        # Linear map from normalized depth s to inverse depth:
        # 1/d = (1/min - 1/max) * s + 1/max.
        self.inverse_depth_base = 1 / Config.train_max_depth
        self.inverse_depth_multiplier = 1 / Config.train_min_depth - 1 / Config.train_max_depth

        self.decoder_block1 = DecoderBlock(input_channels=hyper_channels * 16,
                                           output_channels=hyper_channels * 8,
                                           kernel_size=3,
                                           apply_bn_relu=True,
                                           plus_one=False)

        self.decoder_block2 = DecoderBlock(input_channels=hyper_channels * 8,
                                           output_channels=hyper_channels * 4,
                                           kernel_size=3,
                                           apply_bn_relu=True,
                                           plus_one=True)

        self.decoder_block3 = DecoderBlock(input_channels=hyper_channels * 4,
                                           output_channels=hyper_channels * 2,
                                           kernel_size=3,
                                           apply_bn_relu=True,
                                           plus_one=True)

        self.decoder_block4 = DecoderBlock(input_channels=hyper_channels * 2,
                                           output_channels=hyper_channels,
                                           kernel_size=5,
                                           apply_bn_relu=True,
                                           plus_one=True)

        # Full-resolution refinement over decoder features + depth + RGB image.
        self.refine = torch.nn.Sequential(conv_layer(input_channels=hyper_channels + 4,
                                                     output_channels=hyper_channels,
                                                     kernel_size=5,
                                                     stride=1,
                                                     apply_bn_relu=True),
                                          conv_layer(input_channels=hyper_channels,
                                                     output_channels=hyper_channels,
                                                     kernel_size=5,
                                                     stride=1,
                                                     apply_bn_relu=True))

        # One depth head per scale.
        self.depth_layer_one_sixteen = depth_layer_3x3(hyper_channels * 8)
        self.depth_layer_one_eight = depth_layer_3x3(hyper_channels * 4)
        self.depth_layer_quarter = depth_layer_3x3(hyper_channels * 2)
        self.depth_layer_half = depth_layer_3x3(hyper_channels)
        self.depth_layer_full = depth_layer_3x3(hyper_channels)

    def forward(self, image, skip0, skip1, skip2, skip3, bottom):
        # work on cost volume
        # 1/16 scale.
        decoder_block1 = self.decoder_block1(bottom, skip3, None)
        sigmoid_depth_one_sixteen = self.depth_layer_one_sixteen(decoder_block1)
        inverse_depth_one_sixteen = self.inverse_depth_multiplier * sigmoid_depth_one_sixteen + self.inverse_depth_base

        # 1/8 scale; the coarser normalized depth is passed down.
        decoder_block2 = self.decoder_block2(decoder_block1, skip2, sigmoid_depth_one_sixteen)
        sigmoid_depth_one_eight = self.depth_layer_one_eight(decoder_block2)
        inverse_depth_one_eight = self.inverse_depth_multiplier * sigmoid_depth_one_eight + self.inverse_depth_base

        # 1/4 scale.
        decoder_block3 = self.decoder_block3(decoder_block2, skip1, sigmoid_depth_one_eight)
        sigmoid_depth_quarter = self.depth_layer_quarter(decoder_block3)
        inverse_depth_quarter = self.inverse_depth_multiplier * sigmoid_depth_quarter + self.inverse_depth_base

        # 1/2 scale.
        decoder_block4 = self.decoder_block4(decoder_block3, skip0, sigmoid_depth_quarter)
        sigmoid_depth_half = self.depth_layer_half(decoder_block4)
        inverse_depth_half = self.inverse_depth_multiplier * sigmoid_depth_half + self.inverse_depth_base

        # Full scale: upsample features and depth, concatenate the RGB image,
        # and refine before the final depth head.
        scaled_depth = torch.nn.functional.interpolate(sigmoid_depth_half, scale_factor=2, mode='bilinear', align_corners=True)
        scaled_decoder = torch.nn.functional.interpolate(decoder_block4, scale_factor=2, mode='bilinear', align_corners=True)
        scaled_combined = torch.cat([scaled_decoder, scaled_depth, image], dim=1)
        scaled_combined = self.refine(scaled_combined)
        inverse_depth_full = self.inverse_depth_multiplier * self.depth_layer_full(scaled_combined) + self.inverse_depth_base

        # Convert all inverse depths back to metric depth.
        depth_full = 1.0 / inverse_depth_full.squeeze(1)
        depth_half = 1.0 / inverse_depth_half.squeeze(1)
        depth_quarter = 1.0 / inverse_depth_quarter.squeeze(1)
        depth_one_eight = 1.0 / inverse_depth_one_eight.squeeze(1)
        depth_one_sixteen = 1.0 / inverse_depth_one_sixteen.squeeze(1)

        return depth_full, depth_half, depth_quarter, depth_one_eight, depth_one_sixteen
class LSTMFusion(torch.nn.Module):
    """ConvLSTM-based temporal fusion of the bottleneck encoding.

    Besides the encoding, the underlying cell receives the previous and
    current camera poses, an estimate of the current depth, and the camera
    matrix at the bottleneck scale.
    """

    def __init__(self):
        super(LSTMFusion, self).__init__()
        channels = hyper_channels * 16  # bottleneck width of the encoder
        self.lstm_cell = MVSLayernormConvLSTMCell(input_dim=channels,
                                                  hidden_dim=channels,
                                                  kernel_size=(3, 3),
                                                  activation_function=torch.celu)

    def forward(self, current_encoding, current_state, previous_pose, current_pose, estimated_current_depth, camera_matrix):
        batch, channel, height, width = current_encoding.size()

        # Initialize the recurrent state on the first step of a sequence.
        if current_state is None:
            current_state = self.lstm_cell.init_hidden(batch_size=batch,
                                                       image_size=(height, width))
        hidden_state, cell_state = current_state

        next_hidden_state, next_cell_state = self.lstm_cell(input_tensor=current_encoding,
                                                            cur_state=[hidden_state, cell_state],
                                                            previous_pose=previous_pose,
                                                            current_pose=current_pose,
                                                            estimated_current_depth=estimated_current_depth,
                                                            camera_matrix=camera_matrix)

        return next_hidden_state, next_cell_state
| 15,992 | 46.316568 | 127 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/fusionnet/run-testing.py | import cv2
import numpy as np
import torch
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.fusionnet.model import FeatureExtractor, FeatureShrinker, CostVolumeEncoder, LSTMFusion, CostVolumeDecoder
from dvmvs.utils import cost_volume_fusion, save_results, visualize_predictions, InferenceTimer, get_non_differentiable_rectangle_depth_estimation, \
get_warp_grid_for_cost_volume_calculation
from path import Path
from tqdm import tqdm
def predict():
    """Offline fusionnet prediction driven by precomputed keyframe indices.

    Iterates over every keyframe index file under
    ``Config.test_offline_data_path/indices``; each file names, per line, a
    reference frame and its measurement frames for one scene.  The ConvLSTM
    state and the previous prediction are carried across frames of a scene
    and reset whenever a "TRACKING LOST" marker is encountered.
    """
    print("System: FUSIONNET")

    device = torch.device("cuda")

    feature_extractor = FeatureExtractor()
    feature_shrinker = FeatureShrinker()
    cost_volume_encoder = CostVolumeEncoder()
    lstm_fusion = LSTMFusion()
    cost_volume_decoder = CostVolumeDecoder()

    feature_extractor = feature_extractor.to(device)
    feature_shrinker = feature_shrinker.to(device)
    cost_volume_encoder = cost_volume_encoder.to(device)
    lstm_fusion = lstm_fusion.to(device)
    cost_volume_decoder = cost_volume_decoder.to(device)

    model = [feature_extractor, feature_shrinker, cost_volume_encoder, lstm_fusion, cost_volume_decoder]

    # Load one checkpoint per submodule; sorted file order must match `model`.
    for i in range(len(model)):
        try:
            checkpoint = sorted(Path("weights").files())[i]
            weights = torch.load(checkpoint)
            model[i].load_state_dict(weights)
            model[i].eval()
            print("Loaded weights for", checkpoint)
        except Exception as e:
            print(e)
            print("Could not find the checkpoint for module", i)
            exit(1)

    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    lstm_fusion = model[3]
    cost_volume_decoder = model[4]

    # Warping grid at half resolution (cost volume scale).
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=int(Config.test_image_width / 2),
                                                          height=int(Config.test_image_height / 2),
                                                          device=device)

    # ImageNet-style RGB normalization constants.
    scale_rgb = 255.0
    mean_rgb = [0.485, 0.456, 0.406]
    std_rgb = [0.229, 0.224, 0.225]

    # Plane-sweep depth range for the cost volume.
    min_depth = 0.25
    max_depth = 20.0
    n_depth_levels = 64

    data_path = Path(Config.test_offline_data_path)
    if Config.test_dataset_name is None:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files())
    else:
        keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*"))
    for iteration, keyframe_index_file in enumerate(keyframe_index_files):
        # Index file name encodes: keyframing type, dataset, scene, _, #frames.
        keyframing_type, dataset_name, scene_name, _, n_measurement_frames = keyframe_index_file.split("/")[-1].split("+")
        scene_folder = data_path / dataset_name / scene_name
        print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))

        keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")

        K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
        poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
        image_filenames = sorted((scene_folder / 'images').files("*.png"))
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))

        # Map bare file names (as used in the index file) to frame indices.
        input_filenames = []
        for image_filename in image_filenames:
            input_filenames.append(image_filename.split("/")[-1])

        inference_timer = InferenceTimer()

        # Recurrent state carried across frames of the current scene.
        lstm_state = None
        previous_depth = None
        previous_pose = None

        predictions = []
        reference_depths = []
        with torch.no_grad():
            for i in tqdm(range(0, len(keyframe_index_file_lines))):
                keyframe_index_file_line = keyframe_index_file_lines[i]

                if keyframe_index_file_line == "TRACKING LOST":
                    # Reset the recurrent state when tracking was lost.
                    lstm_state = None
                    previous_depth = None
                    previous_pose = None
                    continue
                else:
                    # First name on the line is the reference frame, the rest
                    # are measurement frames.
                    current_input_filenames = keyframe_index_file_line.split(" ")
                    current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]
                    reference_index = current_indices[0]
                    measurement_indices = current_indices[1:]

                reference_pose = poses[reference_index]
                reference_image = load_image(image_filenames[reference_index])
                # Ground-truth depth: 16-bit PNG in millimeters -> meters.
                reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0

                preprocessor = PreprocessImage(K=K,
                                               old_width=reference_image.shape[1],
                                               old_height=reference_image.shape[0],
                                               new_width=Config.test_image_width,
                                               new_height=Config.test_image_height,
                                               distortion_crop=Config.test_distortion_crop,
                                               perform_crop=Config.test_perform_crop)

                reference_image = preprocessor.apply_rgb(image=reference_image,
                                                         scale_rgb=scale_rgb,
                                                         mean_rgb=mean_rgb,
                                                         std_rgb=std_rgb)
                reference_depth = preprocessor.apply_depth(reference_depth)
                reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)

                measurement_poses_torch = []
                measurement_images_torch = []
                for measurement_index in measurement_indices:
                    measurement_image = load_image(image_filenames[measurement_index])
                    measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                               scale_rgb=scale_rgb,
                                                               mean_rgb=mean_rgb,
                                                               std_rgb=std_rgb)
                    measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                    measurement_pose_torch = torch.from_numpy(poses[measurement_index]).float().to(device).unsqueeze(0)
                    measurement_images_torch.append(measurement_image_torch)
                    measurement_poses_torch.append(measurement_pose_torch)

                full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)

                # Intrinsics at the half-resolution (cost volume) scale.
                half_K_torch = full_K_torch.clone().cuda()
                half_K_torch[:, 0:2, :] = half_K_torch[:, 0:2, :] / 2.0

                # Intrinsics at the 1/32 (LSTM bottleneck) scale.
                lstm_K_bottom = full_K_torch.clone().cuda()
                lstm_K_bottom[:, 0:2, :] = lstm_K_bottom[:, 0:2, :] / 32.0

                inference_timer.record_start_time()

                measurement_feature_halfs = []
                for measurement_image_torch in measurement_images_torch:
                    measurement_feature_half, _, _, _ = feature_shrinker(*feature_extractor(measurement_image_torch))
                    measurement_feature_halfs.append(measurement_feature_half)

                reference_feature_half, reference_feature_quarter, \
                    reference_feature_one_eight, reference_feature_one_sixteen = feature_shrinker(*feature_extractor(reference_image_torch))

                cost_volume = cost_volume_fusion(image1=reference_feature_half,
                                                 image2s=measurement_feature_halfs,
                                                 pose1=reference_pose_torch,
                                                 pose2s=measurement_poses_torch,
                                                 K=half_K_torch,
                                                 warp_grid=warp_grid,
                                                 min_depth=min_depth,
                                                 max_depth=max_depth,
                                                 n_depth_levels=n_depth_levels,
                                                 device=device,
                                                 dot_product=True)

                skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=reference_feature_half,
                                                                         features_quarter=reference_feature_quarter,
                                                                         features_one_eight=reference_feature_one_eight,
                                                                         features_one_sixteen=reference_feature_one_sixteen,
                                                                         cost_volume=cost_volume)

                if previous_depth is not None:
                    # Warp the previous prediction into the current view to
                    # seed the LSTM cell's depth input.  The result is shrunk
                    # by a further 1/16 -- presumably it is produced at half
                    # resolution, yielding 1/32 overall; confirm against
                    # get_non_differentiable_rectangle_depth_estimation.
                    depth_estimation = get_non_differentiable_rectangle_depth_estimation(reference_pose_torch=reference_pose_torch,
                                                                                         measurement_pose_torch=previous_pose,
                                                                                         previous_depth_torch=previous_depth,
                                                                                         full_K_torch=full_K_torch,
                                                                                         half_K_torch=half_K_torch,
                                                                                         original_height=Config.test_image_height,
                                                                                         original_width=Config.test_image_width)
                    depth_estimation = torch.nn.functional.interpolate(input=depth_estimation,
                                                                       scale_factor=(1.0 / 16.0),
                                                                       mode="nearest")
                else:
                    # No previous frame: feed a zero depth map at 1/32 scale.
                    depth_estimation = torch.zeros(size=(1, 1, int(Config.test_image_height / 32.0), int(Config.test_image_width / 32.0))).to(device)

                lstm_state = lstm_fusion(current_encoding=bottom,
                                         current_state=lstm_state,
                                         previous_pose=previous_pose,
                                         current_pose=reference_pose_torch,
                                         estimated_current_depth=depth_estimation,
                                         camera_matrix=lstm_K_bottom)

                # Decode from the LSTM hidden state (element 0 of the pair).
                prediction, _, _, _, _ = cost_volume_decoder(reference_image_torch, skip0, skip1, skip2, skip3, lstm_state[0])
                previous_depth = prediction.view(1, 1, Config.test_image_height, Config.test_image_width)
                previous_pose = reference_pose_torch

                inference_timer.record_end_time_and_elapsed_time()

                prediction = prediction.cpu().numpy().squeeze()
                reference_depths.append(reference_depth)
                predictions.append(prediction)

                if Config.test_visualize:
                    visualize_predictions(numpy_reference_image=reference_image,
                                          numpy_measurement_image=measurement_image,
                                          numpy_predicted_depth=prediction,
                                          normalization_mean=mean_rgb,
                                          normalization_std=std_rgb,
                                          normalization_scale=scale_rgb)

        inference_timer.print_statistics()

        system_name = "{}_{}_{}_{}_{}_dvmvs_fusionnet".format(keyframing_type,
                                                              dataset_name,
                                                              Config.test_image_width,
                                                              Config.test_image_height,
                                                              n_measurement_frames)
        save_results(predictions=predictions,
                     groundtruths=reference_depths,
                     system_name=system_name,
                     scene_name=scene_name,
                     save_folder=Config.test_result_folder)


if __name__ == '__main__':
    predict()
| 12,702 | 53.055319 | 149 | py |
deep-video-mvs | deep-video-mvs-master/dvmvs/fusionnet/run-testing-online.py | import cv2
import numpy as np
import torch
from path import Path
from tqdm import tqdm
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.fusionnet.model import FeatureExtractor, FeatureShrinker, CostVolumeEncoder, LSTMFusion, CostVolumeDecoder
from dvmvs.keyframe_buffer import KeyframeBuffer
from dvmvs.utils import cost_volume_fusion, save_results, visualize_predictions, InferenceTimer, get_non_differentiable_rectangle_depth_estimation, \
get_warp_grid_for_cost_volume_calculation
def predict(evaluate):
    """Run online keyframe-based depth prediction over a single scene.

    Frames from ``Config.test_online_scene_path`` are streamed through a
    KeyframeBuffer; for each accepted keyframe a depth map is predicted by
    the fusionnet pipeline (feature extraction -> cost volume -> LSTM
    fusion -> decoding) and collected for ``save_results``.

    Parameters
    ----------
    evaluate : bool
        When True, ground-truth depth maps are read from the scene's
        ``depth`` folder so errors can be computed; when False no ground
        truth is loaded and the run is prediction-only.
    """
    dataset_name = Config.test_online_scene_path.split("/")[-2]
    system_name = "keyframe_{}_{}_{}_{}_dvmvs_fusionnet_online".format(dataset_name,
                                                                      Config.test_image_width,
                                                                      Config.test_image_height,
                                                                      Config.test_n_measurement_frames)
    print("Predicting with System:", system_name)
    print("# of Measurement Frames:", Config.test_n_measurement_frames)
    # Instantiate the five fusionnet sub-networks and move them to the GPU.
    device = torch.device("cuda")
    feature_extractor = FeatureExtractor()
    feature_shrinker = FeatureShrinker()
    cost_volume_encoder = CostVolumeEncoder()
    lstm_fusion = LSTMFusion()
    cost_volume_decoder = CostVolumeDecoder()
    feature_extractor = feature_extractor.to(device)
    feature_shrinker = feature_shrinker.to(device)
    cost_volume_encoder = cost_volume_encoder.to(device)
    lstm_fusion = lstm_fusion.to(device)
    cost_volume_decoder = cost_volume_decoder.to(device)
    model = [feature_extractor, feature_shrinker, cost_volume_encoder, lstm_fusion, cost_volume_decoder]
    # Load one checkpoint per sub-network and switch each to eval mode.
    # NOTE(review): checkpoints are matched to modules by sorted filename
    # order in "weights/" -- assumes the file names sort in the same order
    # as the `model` list; confirm the naming convention.
    for i in range(len(model)):
        try:
            checkpoint = sorted(Path("weights").files())[i]
            weights = torch.load(checkpoint)
            model[i].load_state_dict(weights)
            model[i].eval()
            print("Loaded weights for", checkpoint)
        except Exception as e:
            print(e)
            print("Could not find the checkpoint for module", i)
            exit(1)
    feature_extractor = model[0]
    feature_shrinker = model[1]
    cost_volume_encoder = model[2]
    lstm_fusion = model[3]
    cost_volume_decoder = model[4]
    # Precomputed grid used to warp measurement features onto the reference
    # view during cost-volume construction (at half image resolution).
    warp_grid = get_warp_grid_for_cost_volume_calculation(width=int(Config.test_image_width / 2),
                                                          height=int(Config.test_image_height / 2),
                                                          device=device)
    # RGB normalization constants and the plane-sweep depth sampling range.
    scale_rgb = 255.0
    mean_rgb = [0.485, 0.456, 0.406]
    std_rgb = [0.229, 0.224, 0.225]
    min_depth = 0.25
    max_depth = 20.0
    n_depth_levels = 64
    scene_folder = Path(Config.test_online_scene_path)
    scene = scene_folder.split("/")[-1]
    print("Predicting for scene:", scene)
    keyframe_buffer = KeyframeBuffer(buffer_size=Config.test_keyframe_buffer_size,
                                     keyframe_pose_distance=Config.test_keyframe_pose_distance,
                                     optimal_t_score=Config.test_optimal_t_measure,
                                     optimal_R_score=Config.test_optimal_R_measure,
                                     store_return_indices=False)
    # Scene inputs: camera intrinsics, 4x4 poses (one per frame), image files.
    K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
    poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
    image_filenames = sorted((scene_folder / 'images').files("*.png"))
    inference_timer = InferenceTimer()
    # Recurrent state carried across consecutive keyframes.
    lstm_state = None
    previous_depth = None
    previous_pose = None
    predictions = []
    if evaluate:
        reference_depths = []
        depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
    else:
        # if None the system will not be evaluated and errors will not be calculated
        reference_depths = None
        depth_filenames = None
    with torch.no_grad():
        for i in tqdm(range(0, len(poses))):
            reference_pose = poses[i]
            reference_image = load_image(image_filenames[i])
            # POLL THE KEYFRAME BUFFER
            # NOTE(review): response codes come from
            # KeyframeBuffer.try_new_keyframe -- 0/2/4/5 appear to mean "not
            # a usable keyframe" and 3 "tracking lost, reset recurrent
            # state"; confirm against the KeyframeBuffer implementation.
            response = keyframe_buffer.try_new_keyframe(reference_pose, reference_image)
            if response == 0 or response == 2 or response == 4 or response == 5:
                continue
            elif response == 3:
                previous_depth = None
                previous_pose = None
                lstm_state = None
                continue
            preprocessor = PreprocessImage(K=K,
                                           old_width=reference_image.shape[1],
                                           old_height=reference_image.shape[0],
                                           new_width=Config.test_image_width,
                                           new_height=Config.test_image_height,
                                           distortion_crop=Config.test_distortion_crop,
                                           perform_crop=Config.test_perform_crop)
            reference_image = preprocessor.apply_rgb(image=reference_image,
                                                     scale_rgb=scale_rgb,
                                                     mean_rgb=mean_rgb,
                                                     std_rgb=std_rgb)
            if reference_depths is not None:
                # Depth PNGs are divided by 1000 -- presumably millimeters
                # converted to meters; confirm the dataset convention.
                reference_depth = cv2.imread(depth_filenames[i], -1).astype(float) / 1000.0
                reference_depth = preprocessor.apply_depth(reference_depth)
                reference_depths.append(reference_depth)
            reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
            reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)
            # Intrinsics scaled for the half-resolution cost volume and the
            # 1/32-resolution LSTM input (only the first two rows scale).
            full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)
            half_K_torch = full_K_torch.clone().cuda()
            half_K_torch[:, 0:2, :] = half_K_torch[:, 0:2, :] / 2.0
            lstm_K_bottom = full_K_torch.clone().cuda()
            lstm_K_bottom[:, 0:2, :] = lstm_K_bottom[:, 0:2, :] / 32.0
            # Gather the best measurement frames from the buffer and
            # preprocess them the same way as the reference frame.
            measurement_poses_torch = []
            measurement_images_torch = []
            measurement_frames = keyframe_buffer.get_best_measurement_frames(Config.test_n_measurement_frames)
            for (measurement_pose, measurement_image) in measurement_frames:
                measurement_image = preprocessor.apply_rgb(image=measurement_image,
                                                           scale_rgb=scale_rgb,
                                                           mean_rgb=mean_rgb,
                                                           std_rgb=std_rgb)
                measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
                measurement_pose_torch = torch.from_numpy(measurement_pose).float().to(device).unsqueeze(0)
                measurement_images_torch.append(measurement_image_torch)
                measurement_poses_torch.append(measurement_pose_torch)
            inference_timer.record_start_time()
            measurement_feature_halfs = []
            for measurement_image_torch in measurement_images_torch:
                measurement_feature_half, _, _, _ = feature_shrinker(*feature_extractor(measurement_image_torch))
                measurement_feature_halfs.append(measurement_feature_half)
            reference_feature_half, reference_feature_quarter, \
                reference_feature_one_eight, reference_feature_one_sixteen = feature_shrinker(*feature_extractor(reference_image_torch))
            # Build the plane-sweep cost volume from reference/measurement
            # features and encode it together with the feature pyramid.
            cost_volume = cost_volume_fusion(image1=reference_feature_half,
                                             image2s=measurement_feature_halfs,
                                             pose1=reference_pose_torch,
                                             pose2s=measurement_poses_torch,
                                             K=half_K_torch,
                                             warp_grid=warp_grid,
                                             min_depth=min_depth,
                                             max_depth=max_depth,
                                             n_depth_levels=n_depth_levels,
                                             device=device,
                                             dot_product=True)
            skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=reference_feature_half,
                                                                     features_quarter=reference_feature_quarter,
                                                                     features_one_eight=reference_feature_one_eight,
                                                                     features_one_sixteen=reference_feature_one_sixteen,
                                                                     cost_volume=cost_volume)
            # Warp the previous prediction into the current view as a depth
            # hint for the LSTM; start from zeros when there is no history.
            if previous_depth is not None:
                depth_estimation = get_non_differentiable_rectangle_depth_estimation(reference_pose_torch=reference_pose_torch,
                                                                                     measurement_pose_torch=previous_pose,
                                                                                     previous_depth_torch=previous_depth,
                                                                                     full_K_torch=full_K_torch,
                                                                                     half_K_torch=half_K_torch,
                                                                                     original_height=Config.test_image_height,
                                                                                     original_width=Config.test_image_width)
                depth_estimation = torch.nn.functional.interpolate(input=depth_estimation,
                                                                   scale_factor=(1.0 / 16.0),
                                                                   mode="nearest")
            else:
                depth_estimation = torch.zeros(size=(1, 1, int(Config.test_image_height / 32.0), int(Config.test_image_width / 32.0))).to(device)
            lstm_state = lstm_fusion(current_encoding=bottom,
                                     current_state=lstm_state,
                                     previous_pose=previous_pose,
                                     current_pose=reference_pose_torch,
                                     estimated_current_depth=depth_estimation,
                                     camera_matrix=lstm_K_bottom)
            prediction, _, _, _, _ = cost_volume_decoder(reference_image_torch, skip0, skip1, skip2, skip3, lstm_state[0])
            # Keep this prediction and pose as the hint for the next frame.
            previous_depth = prediction.view(1, 1, Config.test_image_height, Config.test_image_width)
            previous_pose = reference_pose_torch
            inference_timer.record_end_time_and_elapsed_time()
            prediction = prediction.cpu().numpy().squeeze()
            predictions.append(prediction)
            if Config.test_visualize:
                # NOTE(review): `measurement_image` is the loop variable from
                # the measurement-frame loop above -- it would be unbound here
                # if the buffer returned no measurement frames; confirm the
                # buffer guarantees at least one by this point.
                visualize_predictions(numpy_reference_image=reference_image,
                                      numpy_measurement_image=measurement_image,
                                      numpy_predicted_depth=prediction,
                                      normalization_mean=mean_rgb,
                                      normalization_std=std_rgb,
                                      normalization_scale=scale_rgb,
                                      depth_multiplier_for_visualization=5000)
    inference_timer.print_statistics()
    save_results(predictions=predictions,
                 groundtruths=reference_depths,
                 system_name=system_name,
                 scene_name=scene,
                 save_folder=".")
# Script entry point: run the online predictor with error evaluation enabled.
if __name__ == '__main__':
    predict(evaluate=True)
| 12,118 | 50.351695 | 149 | py |
featuretools | featuretools-main/featuretools/tests/testing_utils/mock_ds.py | from datetime import datetime
import numpy as np
import pandas as pd
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
)
from featuretools.entityset import EntitySet
def make_ecommerce_entityset(with_integer_time_index=False):
    """Assemble the mock e-commerce EntitySet used throughout the tests.

    The entityset contains régions at the top, stores and customers below
    them, sessions and products below customers, and the log dataframe at
    the bottom, plus a "cohorts" dataframe normalized out of customers.

    Pass ``with_integer_time_index=True`` to build the same entityset with
    integer-valued time indexes instead of datetimes.
    """
    frames = make_ecommerce_dataframes(
        with_integer_time_index=with_integer_time_index,
    )
    ltypes = make_logical_types(with_integer_time_index=with_integer_time_index)
    tags = make_semantic_tags()
    t_indexes = make_time_indexes(with_integer_time_index=with_integer_time_index)

    suffix = "_int_time_index" if with_integer_time_index else ""
    es = EntitySet(id="ecommerce" + suffix)

    for name, frame in frames.items():
        index_info = t_indexes.get(name)
        if index_info is None:
            ti_name, secondary = None, None
        else:
            ti_name = index_info["name"]
            secondary = index_info["secondary"]
        es.add_dataframe(
            frame,
            dataframe_name=name,
            index="id",
            logical_types=ltypes[name],
            semantic_tags=tags[name],
            time_index=ti_name,
            secondary_time_index=secondary,
        )

    # Split the cohort columns out of customers into their own dataframe.
    es.normalize_dataframe(
        "customers",
        "cohorts",
        "cohort",
        additional_columns=["cohort_name"],
        make_time_index=True,
        new_dataframe_time_index="cohort_end",
    )
    es.add_relationships(
        [
            ("régions", "id", "customers", "région_id"),
            ("régions", "id", "stores", "région_id"),
            ("customers", "id", "sessions", "customer_id"),
            ("sessions", "id", "log", "session_id"),
            ("products", "id", "log", "product_id"),
        ],
    )
    return es
def make_ecommerce_dataframes(with_integer_time_index=False):
    """Build the raw pandas DataFrames backing the mock e-commerce entityset.

    Returns a dict mapping dataframe name ("régions", "stores", "products",
    "customers", "sessions", "log") to its DataFrame. When
    ``with_integer_time_index`` is True, every datetime-valued time column
    is replaced with small integers so the entityset can be constructed
    with numeric time indexes.
    """
    region_df = pd.DataFrame(
        {"id": ["United States", "Mexico"], "language": ["en", "sp"]},
    )
    store_df = pd.DataFrame(
        {
            "id": range(6),
            "région_id": ["United States"] * 3 + ["Mexico"] * 2 + [np.nan],
            "num_square_feet": list(range(30000, 60000, 6000)) + [np.nan],
        },
    )
    product_df = pd.DataFrame(
        {
            "id": [
                "Haribo sugar-free gummy bears",
                "car",
                "toothpaste",
                "brown bag",
                "coke zero",
                "taco clock",
            ],
            "department": [
                "food",
                "electronics",
                "health",
                "food",
                "food",
                "electronics",
            ],
            "rating": [3.5, 4.0, 4.5, 1.5, 5.0, 5.0],
            "url": [
                "google.com",
                "https://www.featuretools.com/",
                "amazon.com",
                "www.featuretools.com",
                "bit.ly",
                "featuretools.com/demos/",
            ],
        },
    )
    # Time columns for customers; upgrade/cancel deliberately follow signup.
    customer_times = {
        "signup_date": [
            datetime(2011, 4, 8),
            datetime(2011, 4, 9),
            datetime(2011, 4, 6),
        ],
        # some point after signup date
        "upgrade_date": [
            datetime(2011, 4, 10),
            datetime(2011, 4, 11),
            datetime(2011, 4, 7),
        ],
        "cancel_date": [
            datetime(2011, 6, 8),
            datetime(2011, 10, 9),
            datetime(2012, 1, 6),
        ],
        "birthday": [datetime(1993, 3, 8), datetime(1926, 8, 2), datetime(1993, 4, 20)],
    }
    if with_integer_time_index:
        customer_times["signup_date"] = [6, 7, 4]
        customer_times["upgrade_date"] = [18, 26, 5]
        customer_times["cancel_date"] = [27, 28, 29]
        customer_times["birthday"] = [2, 1, 3]
    customer_df = pd.DataFrame(
        {
            "id": pd.Categorical([0, 1, 2]),
            "age": [33, 25, 56],
            "région_id": ["United States"] * 3,
            "cohort": [0, 1, 0],
            "cohort_name": ["Early Adopters", "Late Adopters", "Early Adopters"],
            "loves_ice_cream": [True, False, True],
            "favorite_quote": [
                "The proletariat have nothing to lose but their chains",
                "Capitalism deprives us all of self-determination",
                "All members of the working classes must seize the "
                "means of production.",
            ],
            "signup_date": customer_times["signup_date"],
            # some point after signup date
            "upgrade_date": customer_times["upgrade_date"],
            "cancel_date": customer_times["cancel_date"],
            "cancel_reason": ["reason_1", "reason_2", "reason_1"],
            "engagement_level": [1, 3, 2],
            "full_name": ["Mr. John Doe", "Doe, Mrs. Jane", "James Brown"],
            "email": ["john.smith@example.com", np.nan, "team@featuretools.com"],
            "phone_number": ["555-555-5555", "555-555-5555", "1-(555)-555-5555"],
            "birthday": customer_times["birthday"],
        },
    )
    # Mix of valid, odd, and missing values to exercise IPAddress/Filepath.
    ips = [
        "192.168.0.1",
        "2001:4860:4860::8888",
        "0.0.0.0",
        "192.168.1.1:2869",
        np.nan,
        np.nan,
    ]
    filepaths = [
        "/home/user/docs/Letter.txt",
        "./inthisdir",
        "C:\\user\\docs\\Letter.txt",
        "~/.rcinfo",
        "../../greatgrandparent",
        "data.json",
    ]
    session_df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3, 4, 5],
            "customer_id": pd.Categorical([0, 0, 0, 1, 1, 2]),
            "device_type": [0, 1, 1, 0, 0, 1],
            "device_name": ["PC", "Mobile", "Mobile", "PC", "PC", "Mobile"],
            "ip": ips,
            "filepath": filepaths,
        },
    )
    # 17 log rows spread across two days and six sessions.
    times = (
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)]
        + [datetime(2011, 4, 9, 10, 40, 0)]
        + [datetime(2011, 4, 10, 10, 40, i) for i in range(2)]
        + [datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)]
        + [datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)]
    )
    if with_integer_time_index:
        times = list(range(8, 18)) + list(range(19, 26))
    values = (
        [i * 5 for i in range(5)]
        + [i * 1 for i in range(4)]
        + [0]
        + [i * 5 for i in range(2)]
        + [i * 7 for i in range(3)]
        + [np.nan] * 2
    )
    values_2 = (
        [i * 2 for i in range(5)]
        + [i * 1 for i in range(4)]
        + [0]
        + [i * 2 for i in range(2)]
        + [i * 3 for i in range(3)]
        + [np.nan] * 2
    )
    values_many_nans = (
        [np.nan] * 5
        + [i * 1 for i in range(4)]
        + [0]
        + [np.nan] * 2
        + [i * 3 for i in range(3)]
        + [np.nan] * 2
    )
    # LatLong pairs derive from the value columns; latlong2 swaps the
    # coordinates and negates the second element of each pair.
    latlong = [(lat, lon) for lat, lon in zip(values, values_2)]
    latlong2 = [(lon, -lat) for lat, lon in zip(values, values_2)]
    zipcodes = (
        ["02116"] * 5
        + ["02116-3899"] * 4
        + ["0"]
        + ["1234567890"] * 2
        + ["12345-6789"] * 2
        + [np.nan] * 3
    )
    countrycodes = ["US"] * 5 + ["AL"] * 4 + [np.nan] * 5 + ["ALB"] * 2 + ["USA"]
    subregioncodes = (
        ["US-AZ"] * 5 + ["US-MT"] * 4 + [np.nan] * 3 + ["UG-219"] * 2 + ["ZM-06"] * 3
    )
    log_df = pd.DataFrame(
        {
            "id": range(17),
            "session_id": [0] * 5 + [1] * 4 + [2] * 1 + [3] * 2 + [4] * 3 + [5] * 2,
            "product_id": ["coke zero"] * 3
            + ["car"] * 2
            + ["toothpaste"] * 3
            + ["brown bag"] * 2
            + ["Haribo sugar-free gummy bears"]
            + ["coke zero"] * 4
            + ["taco clock"] * 2,
            "datetime": times,
            "value": values,
            "value_2": values_2,
            "latlong": latlong,
            "latlong2": latlong2,
            "zipcode": zipcodes,
            "countrycode": countrycodes,
            "subregioncode": subregioncodes,
            "value_many_nans": values_many_nans,
            "priority_level": [0] * 2 + [1] * 5 + [0] * 6 + [2] * 2 + [1] * 2,
            "purchased": [True] * 11 + [False] * 4 + [True, False],
            "url": ["https://www.featuretools.com/"] * 2
            + ["amazon.com"] * 2
            + [
                "www.featuretools.com",
                "bit.ly",
                "featuretools.com/demos/",
                # NOTE(review): implicit string concatenation -- the next two
                # literals fuse into one (malformed) entry. Preserved as-is:
                # adding a comma would grow the column to 18 values and break
                # the 17-row DataFrame; confirm whether this was intentional.
                "www.google.co.in/" "http://lplay.google.co.in",
                " ",
                "invalid_url",
                "an",
                "microsoft.com/search/",
            ]
            + [np.nan] * 5,
            "email_address": ["john.smith@example.com", np.nan, "team@featuretools.com"]
            * 5
            + [" prefix@space.com", "suffix@space.com "],
            "comments": [coke_zero_review()]
            + ["I loved it"] * 2
            + car_reviews()
            + toothpaste_reviews()
            + brown_bag_reviews()
            + [gummy_review()]
            + ["I loved it"] * 4
            + taco_clock_reviews(),
        },
    )
    return {
        "régions": region_df,
        "stores": store_df,
        "products": product_df,
        "customers": customer_df,
        "sessions": session_df,
        "log": log_df,
    }
def make_semantic_tags():
    """Return the semantic tags for each mock dataframe.

    Maps dataframe name to a ``{column: tag}`` dict; dataframes with no
    extra tags map to an empty dict.
    """
    fk = "foreign_key"
    return {
        "customers": {"région_id": fk, "birthday": "date_of_birth"},
        "sessions": {"customer_id": fk},
        "log": {"session_id": fk},
        "products": {},
        "stores": {"région_id": fk},
        "régions": {},
    }
def make_logical_types(with_integer_time_index=False):
    """Return the Woodwork logical types for each mock dataframe.

    Maps dataframe name to a ``{column: logical_type}`` dict. When
    ``with_integer_time_index`` is True, the datetime-valued time columns
    are typed as Integer to match the integer data produced by
    ``make_ecommerce_dataframes(with_integer_time_index=True)``.
    """
    region_logical_types = {"id": Categorical, "language": Categorical}
    store_logical_types = {
        "id": Integer,
        "région_id": Categorical,
        "num_square_feet": Double,
    }
    product_logical_types = {
        "id": Categorical,
        "rating": Double,
        "department": Categorical,
        "url": URL,
    }
    customer_logical_types = {
        "id": Integer,
        "age": Integer,
        "région_id": Categorical,
        "loves_ice_cream": Boolean,
        "favorite_quote": NaturalLanguage,
        "signup_date": Datetime(datetime_format="%Y-%m-%d"),
        "upgrade_date": Datetime(datetime_format="%Y-%m-%d"),
        "cancel_date": Datetime(datetime_format="%Y-%m-%d"),
        "cancel_reason": Categorical,
        "engagement_level": Ordinal(order=[1, 2, 3]),
        "full_name": PersonFullName,
        "email": EmailAddress,
        "phone_number": PhoneNumber,
        "birthday": Datetime(datetime_format="%Y-%m-%d"),
        "cohort_name": Categorical,
        "cohort": Integer,
    }
    session_logical_types = {
        "id": Integer,
        "customer_id": Integer,
        "device_type": Categorical,
        "device_name": Categorical,
        "ip": IPAddress,
        "filepath": Filepath,
    }
    log_logical_types = {
        "id": Integer,
        "session_id": Integer,
        "product_id": Categorical,
        "datetime": Datetime(datetime_format="%Y-%m-%d"),
        "value": Double,
        "value_2": Double,
        "latlong": LatLong,
        "latlong2": LatLong,
        "zipcode": PostalCode,
        "countrycode": CountryCode,
        "subregioncode": SubRegionCode,
        "value_many_nans": Double,
        "priority_level": Ordinal(order=[0, 1, 2]),
        "purchased": Boolean,
        "url": URL,
        "email_address": EmailAddress,
        "comments": NaturalLanguage,
    }
    # Integer time indexes replace the Datetime types for every time column.
    if with_integer_time_index:
        log_logical_types["datetime"] = Integer
        customer_logical_types["signup_date"] = Integer
        customer_logical_types["upgrade_date"] = Integer
        customer_logical_types["cancel_date"] = Integer
        customer_logical_types["birthday"] = Integer
    return {
        "customers": customer_logical_types,
        "sessions": session_logical_types,
        "log": log_logical_types,
        "products": product_logical_types,
        "stores": store_logical_types,
        "régions": region_logical_types,
    }
def make_time_indexes(with_integer_time_index=False):
    """Return time-index settings for the dataframes that have one.

    Each entry maps a dataframe name to its primary time index ("name")
    and its secondary time index mapping ("secondary", or None).
    ``with_integer_time_index`` is accepted for call-site symmetry with the
    other make_* helpers but does not affect the result.
    """
    customer_index = {
        "name": "signup_date",
        "secondary": {"cancel_date": ["cancel_reason"]},
    }
    log_index = {"name": "datetime", "secondary": None}
    return {"customers": customer_index, "log": log_index}
def coke_zero_review():
    """Return a long multi-paragraph product review used as NaturalLanguage
    test data for the first "coke zero" row of the mock log dataframe."""
    return """
When it comes to Coca-Cola products, people tend to be die-hard fans. Many of us know someone who can't go a day without a Diet Coke (or two or three). And while Diet Coke has been a leading sugar-free soft drink since it was first released in 1982, it came to light that young adult males shied away from this beverage — identifying diet cola as a woman's drink. The company's answer to that predicament came in 2005 - in the form of a shiny black can - with the release of Coca-Cola Zero.
While Diet Coke was created with its own flavor profile and not as a sugar-free version of the original, Coca-Cola Zero aims to taste just like the "real Coke flavor." Despite their polar opposite advertising campaigns, the contents and nutritional information of the two sugar-free colas is nearly identical. With that information in hand we at HuffPost Taste needed to know: Which of these two artificially-sweetened Coca-Cola beverages actually tastes better? And can you even tell the difference between them?
Before we get to the results of our taste test, here are the facts:
Diet Coke
Motto: Always Great Tast
Nutritional Information: Many say that a can of Diet Coke actually contains somewhere between 1-4 calories, but if a serving size contains fewer than 5 calories a company is not obligated to note it in its nutritional information. Diet Coke's nutritional information reads 0 Calories, 0g Fat, 40mg Sodium, 0g Total Carbs, 0g Protein.
Ingredients: Carbonated water, caramel color, aspartame, phosphoric acid, potassium benzonate, natural flavors, citric acid, caffeine.
Artificial sweetener: Aspartame
Coca-Cola Zero
Motto: Real Coca-Cola Taste AND Zero Calories
Nutritional Information: While the label clearly advertises this beverage as a zero calorie cola, we are not entirely certain that its minimal calorie content is simply not required to be noted in the nutritional information. Coca-Cola Zero's nutritional information reads 0 Calories, 0g Fat, 40mg Sodium, 0g Total Carbs, 0g Protein.
Artificial sweetener: Aspartame and acesulfame potassium
Ingredients: Carbonated water, caramel color, phosphoric acid, aspartame, potassium benzonate, natural flavors, potassium citrate, acesulfame potassium, caffeine.
The Verdict:
Twenty-four editors blind-tasted the two cokes, side by side, and...
54 percent of our tasters were able to distinguish Diet Coke from Coca-Cola Zero
50 percent of our tasters preferred Diet Coke to Coca-Cola Zero, and vice versa
Here’s what our tasters thought of the two sugar-free soft drinks:
Diet Coke: "Tastes fake right away." "Much fresher brighter, crisper." "Has the wonderful flavors of Diet Coke’s artificial sweeteners."
Coca-Cola Zero: "Has more of a sharply sweet aftertaste I associate with diet sodas." "Tastes more like regular coke, less like fake sweetener." "Has an odd taste." "Tastes more like regular." "Very sweet."
Overall comments: "That was a lot more difficult than I though it would be." "Both equally palatable." A few people said Diet Coke tasted much better ... unbeknownst to them, they were actually referring to Coca-Cola Zero.
IN SUMMARY: It is a real toss up. There is not one artificially-sweetened Coca-Cola beverage that outshines the other. So how do people choose between one or the other? It is either a matter of personal taste, or maybe the marketing campaigns will influence their choice.
"""
def gummy_review():
    """Return a long narrative product review used as NaturalLanguage test
    data for the "Haribo sugar-free gummy bears" row of the mock log."""
    return """
The place: BMO Harris Bradley Center
The event: Bucks VS Spurs
The snack: Satan's Diarrhea Hate Bears made by Haribo
I recently took my 4 year old son to his first NBA game. He was very excited to go to the game, and I was excited because we had fantastic seats. Row C center court to be exact. I've never sat that close before. I've never had to go DOWN stairs to get to my seats. 24 stairs to get to my seats to be exact.
His favorite candy is Skittles. Mine are anything gummy. I snuck in a bag of skittles for my son, and grabbed a handful of gummy bears for myself, to be later known as Satan's Diarrhea Hate Bears, that I received for Christmas in bulk from my parents, and put them in a zip lock bag.
After the excitement of the 1st quarter has ended I take my son out to get him a bottled water and myself a beer. We return to our seats to enjoy our candy and drinks.
..............fast forward until 1 minute before half time...........
I have begun to sweat a sweat that is only meant for a man on mile 19 of a marathon. I have kicked out my legs out so straight that I am violently pushing the gentleman wearing a suit seat in front of me forward. He is not happy, I do not care. My hands are on the side of my seat not unlike that of a gymnast on a pommel horse, lifting me off my chair. My son is oblivious to what is happening next to him, after all, there is a mascot running around somewhere and he is eating candy.
I realize that at some point in the very near to immediate future I am going to have to allow this lava from Satan to forcefully expel itself from my innards. I also realize that I have to walk up 24 stairs just to get to level ground in hopes to make it to the bathroom. I’ll just have to sit here stiff as a board for a few moments waiting for the pain to subside. About 30 seconds later there is a slight calm in the storm of the violent hurricane that is going on in my lower intestine. I muster the courage to gently relax every muscle in my lower half and stand up. My son stands up next to me and we start to ascend up the stairs. I take a very careful and calculated step up the first stair. Then a very loud horn sounds. Halftime. Great. It’s going to be crowded. The horn also seems to have awaken the Satan's Diarrhea Hate Bears that are having a mosh pit in my stomach. It literally felt like an avalanche went down my stomach and I again have to tighten every muscle and stand straight up and focus all my energy on my poor sphincter to tighten up and perform like it has never performed before. Taking another step would be the worst idea possible, the flood gates would open. Don’t worry, Daddy has a plan. I some how mumble the question, “want to play a game?” to my son, he of course says “yes”. My idea is to hop on both feet allllll the way up the stairs, using the center railing to propel me up each stair. My son is always up for a good hopping game, so he complies and joins in on the “fun”. Some old lady 4 steps up thinks its cute that we are doing this, obviously she wasn’t looking at the panic on my face. 3 rows behind her a man about the same age as me, who must have had similar situations, notices the fear/panic/desperation on my face understands the danger that I along with my pants and anyone within a 5 yard radius spray zone are in. He just mouths the words “good luck man” to me and I press on. 
Half way up and there is no leakage, but my legs are getting tired and my sphincter has never endured this amount of pressure for this long of time. 16 steps/hops later…….4 steps to go…….My son trips and falls on the stairs, I have two options: keep going knowing he will catch up or bend down to pick him up relieving my sphincter of all the pressure and commotion while ruining the day of roughly the 50 people that are now watching a grown man hop up stairs while sweating profusely next to a 4 year old boy.
Luckily he gets right back up and we make it to the top of the stairs. Good, the hard part was over. Or so I thought. I managed to waddle like a penguin, or someone who is about to poop their pants in 2.5 seconds, to the men's room only to find that every stall is being used. EVERY STALL. It's halftime, of course everyone has to poop at that moment. I don't know if I can wait any longer, do I go ahead and fulfil the dream of every high school boy and poop in the urinal? What kind of an example would that set for my son? On the other hand, what kind of an example would it be for his father to fill his pants with a substance that probably will be unrecognizable to man. Suddenly a stall door opens, and I think I manage to actually levitate over to the stall. I my son follows me in, luckily it was the handicap stall so there was room for him to be out of the way. I get my pants off and start to sit. I know what taking a giant poo feels like. I also know what vomiting feels like. I can now successfully say that I know what it is like to vomit out my butt. I wasn't pooping, those Satan's Diarrhea Hate Bears did something to my insides that made my sphincter vomit our the madness.
I am now conscious of my surroundings. Other than the war that the bottom half of my body is currently having with this porcelain chair, it is quiet as a pin drop in the bathroom. The other men in there can sense that something isn't right, no one has heard anyone ever poop vomit before.
I can sense that the worst part is over. But its not stopping, nor can I physically stop it at this point, I am leaking..it's horrible. I call out "does anyone have a diaper?" hoping that some gentleman was changing a baby. Nothing. No one said a word. I know people are in there, I can see the toes of shoes pointed in my direction under the stall.. "DOES ANYONE HAVE A DIAPER!?!" I am screaming, my son is now crying, he thinks he is witnessing the death of his father. I can't even assure him that I will make it.
Not a word was said, but a diaper was thrown over the stall. I catch it, line my underwear with it, put my pants back on, and walk out of that bathroom like a champ. We go straight to our seats, grab out coats and go home. As we are walking out, the gentleman that wished me good luck earlier simply put his fist out, and I happily bumped it.
My son asks me, "Daddy, why are we leaving early?"
"Well son, I need to change my diaper"
"""
def taco_clock_reviews():
    """Return the two "taco clock" review strings used as NaturalLanguage
    test data in the mock log dataframe."""
    return [
        """
This timer does what it is supposed to do. Setup is elementary. Replacing the old one (after 12 years) was relatively easy. It has performed flawlessly since. I'm delighted I could find an esoteric product like this at Amazon. Their service, and the customer reviews, are just excellent.
""",
        """
Funny, cute clock. A little spendy for how light the clock is, but its hard to find a taco clock.
""",
    ]
def brown_bag_reviews():
    """Return the two "brown bag" review strings used as NaturalLanguage
    test data in the mock log dataframe."""
    return [
        """
These bags looked exactly like I'd hoped, however, the handles broke off of almost every single bag as soon as items were placed in them! I used these as gift bags for out-of-town guests at my wedding, so imagine my embarassment as the handles broke off as I was handing them out. I would not recommend purchaing these bags unless you plan to fill them with nothing but paper! Anything heavier will cause the handles to snap right off.
""",
        """
I purchased these in August 2014 from Big Blue Supplies. I have no problem with the seller, these arrived new condition, fine shape.
I do have a slight problem with the bags. In case someone might want to know, the handles on these bags are set inside against the top. Then a piece of Kraft type packing tape is placed over the handles to hold them in place. On some of the bags, the tape is already starting to peel off. I would be really hesitant about using these bags unless I reinforced the current tape with a different adhesive.
I will keep the bags, and make a tape of a holiday or decorative theme and place over in order to make certain the handles stay in place.
Also in case anybody is wondering, the label on the plastic packaging bag states these are from ORIENTAL TRADING COMPANY. On the bottom of each bag it is stamped MADE IN CHINA. Again, I will be placing a sticker over that.
Even the dollar store bags I normally purchase do not have that stamped on the bottom in such prominent lettering. I purchased these because they were plain and I wanted to decorate them.
I do not think I would purchase again for all the reasons stated above.
Another thing for those still wanting to purchase, the ones I received were: 12 3/4 inches high not including handle, 10 1/4 inches wide and a 5 1/4 inch depth.
""",
    ]
def car_reviews():
    """Return two verbatim long-form car reviews (Ford F-150, Tesla Model S 90D) as sample data.

    The review texts are data, reproduced exactly as collected; do not edit them.
    """
    return [
        """
The full-size pickup truck and the V-8 engine were supposed to be inseparable, like the internet and cat videos. You can’t have one without the other—or so we thought.
In America’s most popular vehicle, the Ford F-150, two turbocharged six-cylinder engines marketed under the EcoBoost name have dethroned the naturally aspirated V-8. Ford’s new 2.7-liter twin-turbo V-6 is the popular choice, while the 3.5-liter twin-turbo V-6 is the top performer. The larger six allows for greater hauling capacity, accelerates the truck more quickly, and swills less gas in EPA testing than the V-8 alternative. It’s enough to make even old-school truck buyers acknowledge that there actually is a replacement for displacement.
And yet a V-8 in a big pickup truck still feels so natural, so right. In the F-150, the Coyote 5.0-liter V-8 is tuned for torque more so than power, yet it still revs with an enthusiastic giddy-up that reminds us that this engine’s other job is powering the Mustang. The response follows the throttle pedal faithfully while the six-speed automatic clicks through gears smoothly and easily. Together they pull this 5220-pound F-150 to 60 mph in 6.3 seconds, which is 0.4 second quicker than the 5.3-liter Chevrolet Silverado with the six-speed automatic and 0.9 second quicker than the 5.3 Silverado with the new eight-speed auto. The 3.5-liter EcoBoost, though, can do the deed another half-second quicker, but its synthetic soundtrack doesn’t have the rich, multilayered tone of the V-8.
It wasn’t until we saddled our test truck with a 6400-pound trailer (well under its 9000-pound rating) that we fully understood the case for upgrading to the 3.5-liter EcoBoost. The twin-turbo engine offers an extra 2500 pounds of towing capability and handles lighter tasks with considerably less strain. The 5.0-liter truck needs more revs and a wider throttle opening to accelerate its load, so we were often coaxed into pressing the throttle to the floor for even modest acceleration. The torquier EcoBoost engine offers a heartier response at part throttle.
In real-world, non-towing situations, the twin-turbo 3.5-liter doesn’t deliver on its promise of increased fuel economy, with both the 5.0-liter V-8 and that V-6 returning 16 mpg in our hands. But given the 3.5-liter’s virtues, we can forgive it that trespass.
Trucks Are the New Luxury
Pickups once were working-class transportation. Today, they’re proxy luxury vehicles—or at least that’s how they’re priced. If you think our test truck’s $57,240 window sticker is steep, consider that our model, the Lariat, is merely a mid-spec trim. There are three additional grades—King Ranch, Platinum, and Limited—positioned and priced above it, plus the 3.5-liter EcoBoost that costs an extra $400 as well as a plethora of options to inflate the price past 60 grand. Squint and you can almost see the six-figure trucks of the future on the horizon.
For the most part, though, the equipment in this particular Lariat lives up to the price tag. The driver and passenger seats are heated and cooled, with 10-way power adjustability and supple leather. The technology includes blind-spot monitoring, navigation, and a 110-volt AC outlet. Nods to utility include spotlights built into the side mirrors and Ford’s Pro Trailer Backup Assist, which makes reversing with a trailer as easy as turning a tiny knob on the dashboard.
Middle-Child Syndrome
In the F-150, Ford has a trifecta of engines (the fourth, a naturally aspirated 3.5-liter V-6, is best left to the fleet operators). The 2.7-liter twin-turbo V-6 delivers remarkable performance at an affordable price. The 3.5-liter twin-turbo V-6 is the workhorse, with power, torque, and hauling capability to spare. Compared with those two logical options, the middle-child 5.0-liter V-8 is the right-brain choice. Its strongest selling points may be its silky power delivery and the familiar V-8 rumble. That’s a flimsy argument when it comes to rationalizing a $50,000-plus purchase, though, so perhaps it’s no surprise that today’s boosted six-cylinders are now the engines of choice in the F-150.
""",
        """
THE GOOD
The Tesla Model S 90D's electric drivetrain is substantially more efficient than any internal combustion engine, and gives the car smooth and quick acceleration. All-wheel drive comes courtesy of a smart dual motor system. The new Autopilot feature eases the stress of stop-and-go traffic and long road trips.
THE BAD
Even at Tesla's Supercharger stations, recharging the battery takes significantly longer than refilling an internal combustion engine car's gas tank, limiting where you can drive. Tesla hasn't improved its infotainment system much from the Model S' launch.
THE BOTTOM LINE
Among the different flavors of Tesla Model S, the 90D is the one to get, exhibiting the best range and all-wheel drive, while offering an uncomplicated, next-generation driving experience that shows very well against equally priced competitors.
REVIEW SPECIFICATIONS PHOTOS
Roadshow Automobiles Tesla 2016 Tesla Model S
Having tested driver assistance systems in many cars, and even ridden in fully self-driving cars, I should have been ready for Tesla's new Autopilot feature. But engaging it while cruising the freeway in the Model S 90D, I kept my foot hovering over the brake.
My trepidation didn't come so much from the adaptive cruise control, which kept the Model S following traffic ahead at a set distance, but from the self-steering, this part of Autopilot managing to keep the Model S well-centered in its lane with no help from me. Over many miles, I built up more trust in the system, letting the car do the steering in situations from bumper-to-bumper traffic and a winding road through the hills.
2016 Tesla Model S 90DEnlarge Image
Although the middle of the Model S range, the 90D offers the best range and a wealth of useful tech, such as Autopilot self-driving.
Wayne Cunningham/Roadshow
Tesla added Autopilot to its Model S line as an option last year, along with all-wheel-drive. More recently, the high-tech automaker improved its batteries, upgrading its cars from their former 65 and 85 kilowatt-hour capacity to 70 and 90 kilowatt-hour. The example I drove, the 90D, represents all these advances.
More importantly, the 90D is the current range-leader among the Model S line, boasting 288 miles on a full battery charge.
The Model S' improvements fall outside of typical automotive industry product cycles, fulfilling Tesla's promise of acting more like a technology company, constantly building and deploying new features. Tesla accomplishes that goal partially through over-the-air software updates, improving existing cars, but the 90D presents significant hardware updates over the original Model S launched four years ago.
Sit and go
Of course, this Model S exhibited the ease of use of the original. Walking up to the car with the key fob in my pocket, it automatically unlocked. When I got in the car, it powered up without me having to push a start button, so I only needed to put it in drive to get on the road.
Likewise, the design hasn't changed, its sleek, hatchback four-door body offering excellent cargo room, both front and back, and seating space. The cabin feels less cramped than most cars due to the lack of a transmission tunnel and a dashboard bare of buttons or dials.
2016 Tesla Model S 90DEnlarge Image
The flat floor in the Model S' cabin makes for enhanced passenger room.
Wayne Cunningham/Roadshow
The big, 17-inch touchscreen in the center of the dashboard shows navigation, stereo, phone, energy consumption and car settings. I easily went from full-screen to a split-screen view, the windows showing each appearing instantly. A built-in 4G/LTE data connection powers Google maps and Internet-based audio. The LCD instrument panel in front of me showed my speed, energy usage, remaining range, and intelligently swapped audio information for turn-by-turn directions when started navigation.
The instrument panel actually made the experience of driving under Autopilot more comfortable, reassuring me with graphics that showed when the Model S' sensors were detecting the lane lines and the traffic around me. Impressively, the sensors could differentiate, as shown on the screen's graphics, a passenger car from a big truck.
At speed on the freeway, Autopilot smoothly maintained the car's position in its lane, and when I took my hands off the wheel for too long, it flashed a warning on the instrument panel. In stop-and-go traffic approaching a toll booth, the car did an even better job of self-driving, recognizing traffic around it and maintaining appropriate distances.
Handling surprise
Taking over the driving myself, the ride quality proved as comfortable as any sport-luxury car, as this Model S had its optional air suspension. The electric power steering is well-tuned, turning the wheels with a quiet, natural feel and good heft.
Audi S7 vs Tesla Model S
Shootout: Audi S7 vs. Tesla Model S
Wayne Cunningham/Roadshow
The biggest surprise came when I spent the day doing laps at the Thunderhill Raceway, negotiating a series of tight, technical turns in competition with an Audi S7. I expected the Model S to get out-of-shape in the turns, but instead it proved steady and solid. The Model S' 4,647-pound curb weight made it less than ideal for a track test, but much of that weight is in the battery pack, mounted low in the chassis. That low center of gravity helped limit body roll, ensuring good grip from all four tires. In the turns, the Model S felt nicely balanced, although not entirely nimble.
Helping its grip was its native all-wheel drive, gained from having motors driving each set of wheels. The combined output of the motors comes to 417 horsepower and 485 pound-feet of torque, those numbers expressed in 0-to-60 mph times of well under 5 seconds. That thrust made for fast runs down the race track's straightaways, or simply giving me the ability to take advantage of gaps in traffic on public roads.
288 miles is more than enough for most people's daily driving needs, and if you plug in every night, you will wake up to a fully charged car every morning. The Model S makes for a far different experience than driving an internal combustion car, where you need to go to a gas station to refuel. However, longer trips in the Model S require some planning, such as scheduling stops at Tesla's free Supercharger stations.
Charging times are much lengthier than refilling a tank with gasoline. From a Level 2, 240-volt station, you get 29 miles added every hour. Tesla's Supercharger, a Level 3 charger, takes 75 minutes to fully recharge the Model S 90D's battery.
2016 Tesla Model S 90DEnlarge Image
Despite its high initial price, the Model S 90D costs less to run on a daily basis than a combustion engine car.
Wayne Cunningham/Roadshow
Low maintenance
The 2016 Tesla Model S 90D adds features to keep it competitive against the internal combustion cars in its sport luxury set. More importantly, it remains very easy to live with. In fact, the electric drivetrain should mean greatly decreased maintenance, as there are fewer moving parts. The EPA estimates that annual electricity costs for the Model S 90D should run $650, much less than buying gasoline for an equivalent internal combustion car.
Lengthy charging times mean longer trips are either out of the question or require more planning than with an internal combustion car. And while the infotainment system responds quickly to touch inputs and offers useful screens, it hasn't changed much in four years. Most notably, Tesla hasn't added any music apps beyond the ones it launched with. Along with new, useful apps, it would be nice to have some themes or other aesthetic changes to the infotainment interface.
The Model S 90D's base price of $88,000 puts it out of reach of the average buyer, and the model I drove was optioned up to around $95,000. Against its Audi, BMW and Mercedes-Benz competition, however, it makes a compelling argument, especially for its uncomplicated nature.
""",
    ]
def toothpaste_reviews():
    """Return three verbatim toothpaste-related review/article texts as sample data.

    The texts are data, reproduced exactly as collected (including typos
    such as "supposed ot"); do not edit them.
    """
    return [
        """
Toothpaste can do more harm than good
The next time a patient innocently asks me, “What’s the best toothpaste to use?” I’m going to unleash a whole Chunky Soup can of “You Want The Truth? You CAN’T HANDLE THE TRUTH!!!” Gosh, that’s such an overused movie quote. Sorry about that, but still.
If you’re a dental professional, isn’t this the most annoying question you get, day after day? Do you even care which toothpaste your patients use?
No. You don’t. Asking a dentist what toothpaste to use is like asking your physician which bar of soap or body scrub you should use to clean your skin. Your dentist and dental hygienist have never seen a tube of toothpaste that singlehandedly improves the health of all patients in their practice, and the reason is simple:
Toothpaste is a cosmetic.
We brush our teeth so that out mouths no longer taste like… mouth. Mouth tastes gross, right? It tastes like putrefied skin. It tastes like tongue cheese. It tastes like Cream of Barf.
On the other hand, toothpaste has been exquisitely designed to bring you a brisk rush of York Peppermint Patty, or Triple Cinnamon Heaven, or whatever flavor that drives those tubes off of the shelves in the confusing dental aisle of your local supermarket or drugstore.
Toothpaste definitely tastes better than Cream of Barf. And that’s why you use it. Not because it’s good for you. You use toothpaste because it tastes good, and because it makes you accept your mouth as part of your face again.
From a marketing perspective, all of the other things that are in your toothpaste are in there to give it additional perceived value. So let’s deconstruct these ingredients, shall we?
1. Fluoride.
This was probably the first additive to toothpaste that brought it under the jurisdiction of the Food & Drug Administration and made toothpaste part drug, part cosmetic. Over time, a fluoride toothpaste can improve the strength of teeth, but the fluoride itself does nothing to make teeth cleaner. Some people are scared of fluoride so they don’t use it. Their choice. Professionally speaking, I know that the benefits of a fluoride additive far outweigh the risks.
2. Foam.
Sodium Lauryl Sulfate is soap. Soap has a creamy, thick texture that American tongues especially like and equate to the feeling of cleanliness. There’s not enough surfactant, though, in toothpaste foam to break up the goo that grows on your teeth. If these bubbles scrubbed, you’d better believe that they would also scrub your delicate gum tissues into a bloody pulp.
3. Abrasive particles.
Most toothpastes use hydrated silica as the grit that polishes teeth. You’re probably most familiar with it as the clear beady stuff in the “Do Not Eat” packets. Depending on the size and shape of the particles, silica is the whitening ingredient in most whitening toothpastes. But whitening toothpaste cannot get your teeth any whiter than a professional dental cleaning, because it only cleans the surface. Two weeks to a whiter smile? How about 30 minutes with your hygienist? It’s much more efficient and less harsh.
4. Desensitizers.
Teeth that are sensitive to hot, cold, sweets, or a combination can benefit from the addition of potassium nitrate or stannous fluoride to a toothpaste. This is more of a palliative treatment, when the pain is the problem. Good old Time will usually make teeth feel better, too, unless the pain is coming from a cavity. Yeah, I’m talking to you, the person who is trying to heal the hole in their tooth with Sensodyne.
5. Tartar control.
It burns! It burns! If your toothpaste has a particular biting flavor, it might contain tetrasodium pyrophosphate, an ingredient that is supposed to keep calcium phosphate salts (tartar, or calculus) from fossilizing on the back of your lower front teeth. A little tartar on your teeth doesn’t harm you unless it gets really thick and you can no longer keep it clean. One problem with tartar control toothpastes is that in order for the active ingredient to work, it has to be dissolved in a stronger detergent than usual, which can affect people that are sensitive to a high pH.
6. Triclosan.
This antimicrobial is supposed to reduce infections between the gum and tooth. However, if you just keep the germs off of your teeth in the first place it’s pretty much a waste of an extra ingredient. Its safety has been questioned but, like fluoride, the bulk of the scientific research easily demonstrates that the addition of triclosan in toothpaste does much more good than harm.
Why toothpaste can be bad for you.
Let’s just say it’s not the toothpaste’s fault. It’s yours. The toothpaste is just the co-dependent enabler. You’re the one with the problem.
Remember, toothpaste is a cosmetic, first and foremost. It doesn’t clean your teeth by itself. Just in case you think I’m making this up I’ve included clinical studies in the references at the end of this article that show how ineffective toothpaste really is.
peasized
• You’re using too much.
Don’t be so suggestible! Toothpaste ads show you how to use up the tube more quickly. Just use 1/3 as much, the size of a pea. It will still taste good, I promise! And too much foam can make you lose track of where your teeth actually are located.
• You’re not taking enough time.
At least two minutes. Any less and you’re missing spots. Just ’cause it tastes better doesn’t mean you did a good job.
• You’re not paying attention.
I’ve seen people brush the same four spots for two minutes and miss the other 60% of their mouth.brushguide The toothbrush needs to touch every crevice of every tooth, not just where it lands when you go into autopilot and start thinking about what you’re going to wear that day. It’s the toothbrush friction that cleans your teeth, not the cleaning product. Plaque is a growth, like the pink or grey mildew that grows around the edges of your shower. You’ve gotta rub it off to get it off. No tooth cleaning liquid, paste, creme, gel, or powder is going to make as much of a difference as your attention to detail will.
The solution.
Use what you like. It’s that simple. If it tastes good and feels clean to you, you’ll use it more often, brush longer, feel better, be healthier.
You can use baking soda, or coconut oil, or your favorite toothpaste, or even just plain water. The key is to have a good technique and to brush often. A music video makes this demonstration a little more fun than your usual lecture at the dental office, although, in my opinion you really still need to feel what it is like to MASH THE BRISTLES OF A SOFT TOOTHBRUSH INTO YOUR GUMS:
A little more serious video from my pal Dr. Mark Burhenne where he demonstrates how to be careful with your toothbrush bristles:
Final word.
♬ It’s all about that Bass, ’bout that Bass, no bubbles. ♬ Heh, dentistry in-joke there.
Seriously, though, the bottom line is that your paste will mask brushing technique issues, so don’t put so much faith in the power of toothpaste.
Also you may have heard that some toothpastes contain decorative plastic that can get swallowed. Yeah, that was a DentalBuzz report I wrote that went viral earlier this year. And while I can’t claim total victory on that front, at least the company in question has promised that the plastic will no longer be added to their toothpaste lines very soon due to the overwhelming amount of letters, emails, and phone calls that they received as a result of people reading that article and making a difference.
But now I’m tired of talking about toothpaste.
Next topic?
I’m bringing pyorrhea back.
""",
        """
I’ve been a user of Colgate Total Whitening Toothpaste for many years because I’ve always tried to maintain a healthy smile (I’m a receptionist so I need a white smile). But because I drink coffee at least twice a day (sometimes more!) and a lot of herbal teas, I’ve found that using just this toothpaste alone doesn’t really get my teeth white...
The best way to get white teeth is to really try some professional products specifically for tooth whitening. I’ve tried a few products, like Crest White Strips and found that the strips are really not as good as the trays. Although the Crest White Strips are easy to use, they really DO NOT cover your teeth perfectly like some other professional dental whitening kits. This Product did cover my teeth well however because of their custom heat trays, and whitening my teeth A LOT. I would say if you really want white teeth, use the Colgate Toothpaste and least 2 times a day, along side a professional Gel product like Shine Whitening.
""",
        """
The first feature is the price, and it is right.
Next, I consider whether it will be neat to use. It is. Sometimes when I buy those new hard plastic containers, they actually get messy. Also I cannot get all the toothpaste out. It is easy to get the paste out of Colgate Total Whitening Paste without spraying it all over the cabinet.
If it does not taste good, I won't use it. Some toothpaste burns my mouth so bad that brushing my teeth is a painful experience. This one doesn't burn. It tastes simply the way toothpaste is supposed to taste.
Whitening is important. This one is supposed ot whiten. After spending money to whiten my teeth, I need a product to help ward off the bad effects of coffee and tea.
Avoiding all kinds of oral pathology is a major consideration. This toothpaste claims that it can help fight cavities, gingivitis, plaque, tartar, and bad breath.
I hope this product stays on the market a long time and does not change.
""",
    ]
| 47,055 | 61.741333 | 2,445 | py |
scientific-re | scientific-re-main/main.py | import os
import random
import torch
import numpy
import torch.backends.cudnn
from src.data.prepare_data import Preparedata
from src.model.CNN import CNN
from src.model.run import Run
from src.parameters.parameters import Parameters
class Controller(Parameters):
    """Orchestrates the experiment: data preparation, model creation, training and testing."""

    def __init__(self):
        # Load and preprocess the dataset once; it is reused for every seed/model.
        self.data = Preparedata(Parameters)
        self.data.prepare_data(Parameters)

    def initialise_model(self):
        """Build a fresh, untrained CNN (called once per random seed)."""
        self.model = CNN(Parameters)

    def train(self):
        """Train the current model; return per-epoch macro f-scores for train and dev."""
        self.ran_model = Run(self.model, self.data, Parameters)
        return self.ran_model.train()

    def test(self):
        """Evaluate the most recently trained model; return its test macro f-score."""
        return self.ran_model.test()
def block_seeds(seed):
    """Seed every random number generator in use so a run is reproducible.

    Covers torch (CPU and all CUDA devices), numpy, Python's ``random``
    module and the hash seed; also forces cuDNN into deterministic mode.
    """
    value = int(seed)
    torch.manual_seed(value)
    torch.cuda.manual_seed_all(value)
    # cuDNN must select deterministic kernels and skip auto-tuning benchmarks.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    numpy.random.seed(value)
    random.seed(value)
    os.environ['PYTHONHASHSEED'] = str(seed)
if __name__ == '__main__':
    params = Parameters()
    print(f'--------------------- Preparing data ---------------------')
    controller = Controller()
    # Collect per-seed results: per-epoch f-scores for train/dev, final score for test.
    train, dev, test = [], [], []
    for seed in params.seeds:
        print(f'--------------- Seed: {seed} ---------------')
        block_seeds(seed)
        controller.initialise_model()
        fscores_train, fscores_dev = controller.train()
        train.append(fscores_train)
        dev.append(fscores_dev)
        test.append(controller.test())
    # Per-epoch averages across seeds (computed for inspection).
    mean_train = numpy.matrix(train).mean(axis=0).tolist()[0]
    mean_dev = numpy.matrix(dev).mean(axis=0).tolist()[0]
    print(f'Macro f-score on the test set: {round(numpy.mean(test), 2)}')
| 1,999 | 26.027027 | 73 | py |
scientific-re | scientific-re-main/src/model/CNN.py | import torch
from torch import nn
import torch.nn.functional as F
class CNN(torch.nn.Module):
    """Convolutional relation classifier.

    Each token is represented by an externally supplied embedding vector
    (BERT-sized, see ``params.bert_emb_size``) concatenated with two learned
    position embeddings (one per entity). Three parallel 1-D convolutions
    with different kernel widths are max-pooled over the sequence and fed
    to a single linear classification layer.
    """

    def __init__(self, params):
        super().__init__()
        self.device = params.device
        self.dropout = nn.Dropout(params.dropout)
        # Per-token feature size: token embedding + two position embeddings.
        self.embedding_size = params.bert_emb_size + 2 * params.position_emb_size
        # Shared position-embedding table; index 0 is the padding index.
        self.emb_pos = nn.Embedding(params.num_positions, params.position_emb_size, padding_idx=0)
        # Three convolution branches, one per kernel width.
        self.conv_1 = nn.Conv1d(in_channels=self.embedding_size, out_channels=params.out_size, kernel_size=params.kernels[0])
        self.conv_2 = nn.Conv1d(in_channels=self.embedding_size, out_channels=params.out_size, kernel_size=params.kernels[1])
        self.conv_3 = nn.Conv1d(in_channels=self.embedding_size, out_channels=params.out_size, kernel_size=params.kernels[2])
        # Classification head over the concatenated pooled branch outputs.
        self.fc = nn.Linear(3 * params.out_size, len(params.relations))

    def forward(self, w, p1, p2):
        # Assemble per-token features, then switch to (batch, channels, sequence).
        features = torch.cat((w, self.emb_pos(p1), self.emb_pos(p2)), 2).permute(0, 2, 1)
        # Each branch: ReLU(conv), then max-pool across the whole sequence.
        pooled = [
            F.max_pool1d(branch, branch.size(2)).squeeze(2)
            for branch in (
                torch.relu(self.conv_1(features)),
                torch.relu(self.conv_2(features)),
                torch.relu(self.conv_3(features)),
            )
        ]
        return self.fc(self.dropout(torch.cat(pooled, dim=1)))
scientific-re | scientific-re-main/src/model/run.py | import torch
from sklearn.metrics import f1_score
from torch import optim, nn
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModel
class DatasetMaper(Dataset):
    """Expose four parallel columns (sentence, pos1, pos2, label) as a torch Dataset."""

    def __init__(self, s, p1, p2, y):
        self.s, self.p1, self.p2, self.y = s, p1, p2, y

    def __len__(self):
        # The columns are parallel; the sentence column defines the length.
        return len(self.s)

    def __getitem__(self, idx):
        # One sample = the idx-th row taken across all four columns.
        return self.s[idx], self.p1[idx], self.p2[idx], self.y[idx]
class Run:
def __init__(self, model, prepared_data, params):
self.data = prepared_data
# Prepare batches
train = DatasetMaper(self.data.train.sentence, self.data.train.pos1, self.data.train.pos2, self.data.train.y)
dev = DatasetMaper(self.data.dev.sentence, self.data.dev.pos1, self.data.dev.pos2, self.data.dev.y)
test = DatasetMaper(self.data.test.sentence, self.data.test.pos1, self.data.test.pos2, self.data.test.y)
self.loader_train = DataLoader(train, batch_size=params.batch_size)
self.loader_dev = DataLoader(dev, batch_size=params.batch_size)
self.loader_test = DataLoader(test, batch_size=params.batch_size)
self.device = params.device
self.len_sentences = params.len_sentences
self.relations = params.relations
self.epochs = params.epochs
self.batch_size = params.batch_size
self.model = model
self.optimizer = optim.AdamW(self.model.parameters(), lr=params.learning_rate)
self.loss = nn.CrossEntropyLoss(weight=torch.Tensor(self.compute_loss_weight()).to(self.device))
self.tokenizerBert = AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')
self.modelBert = AutoModel.from_pretrained('allenai/scibert_scivocab_uncased')
self.modelBert.to(self.device)
def train(self):
train_losses, dev_losses, macro_fscores_train, macro_fscores_dev = [], [], [], []
self.model.to(self.device)
for epoch in range(self.epochs):
running_loss = 0
# f-score train
tot_predictions_train, tot_targets_train = [], []
for s_batch, p1_batch, p2_batch, y_batch in self.loader_train:
# Train f-score
tot_targets_train = tot_targets_train + y_batch.tolist()
# BERT
sentences_list = [[token for token in sentence.split()] for sentence in s_batch]
tokens = self.tokenizerBert(sentences_list, return_offsets_mapping=True, is_split_into_words=True, padding='max_length', truncation=True, max_length=self.len_sentences)
encoded_sentences = []
for id_list, offset_list in zip(tokens['input_ids'], tokens['offset_mapping']):
encoded_sentence = []
for id, offset in zip(id_list, offset_list):
if offset[0] == 0 and offset[1] != 0:
encoded_sentence.append(id)
encoded_sentence.extend([0] * (self.len_sentences - len(encoded_sentence)))
encoded_sentences.insert(len(encoded_sentences), encoded_sentence)
embedsbert = self.modelBert(torch.LongTensor(encoded_sentences).to(self.device))[0]
# Move input tensors to the device
p1_batch, p2_batch, y_batch = p1_batch.to(self.device), p2_batch.to(self.device), torch.LongTensor(y_batch).to(self.device)
# Model prediction
self.optimizer.zero_grad()
prediction = self.model(embedsbert, p1_batch, p2_batch)
# f-score train
ps = torch.exp(prediction)
top_p, top_class = ps.topk(1, dim=1)
for elem in top_class:
tot_predictions_train.append(int(elem))
# Loss and backward step
loss = self.loss(prediction, y_batch)
running_loss += loss.item()
loss.backward()
self.optimizer.step()
else:
# Update train f-score
macro_fscore_train = round(f1_score(tot_targets_train, tot_predictions_train, average="macro") * 100, 2)
macro_fscores_train.append(macro_fscore_train)
# dev
dev_loss = 0
# dev f-score
tot_predictions_dev, tot_targets_dev = [], []
with torch.no_grad():
self.model.eval()
for s_batch_dev, p1_batch_dev, p2_batch_dev, y_batch_dev in self.loader_dev:
tot_targets_dev = tot_targets_dev + y_batch_dev.tolist()
# BERT
sentences_list_dev = [[token for token in sentence.split()] for sentence in s_batch_dev]
tokens_dev = self.tokenizerBert(sentences_list_dev, return_offsets_mapping=True, is_split_into_words=True, padding='max_length', truncation=True, max_length=self.len_sentences)
encoded_sentences_dev = []
for id_list, offset_list in zip(tokens_dev['input_ids'], tokens_dev['offset_mapping']):
encoded_sentence_dev = []
for id, offset in zip(id_list, offset_list):
if offset[0] == 0 and offset[1] != 0:
encoded_sentence_dev.append(id)
encoded_sentence_dev.extend([0] * (self.len_sentences - len(encoded_sentence_dev)))
encoded_sentences_dev.insert(len(encoded_sentences_dev), encoded_sentence_dev)
embedsbert_dev = self.modelBert(torch.LongTensor(encoded_sentences_dev).to(self.device))[0]
# Move input tensors to the device
p1_batch_dev, p2_batch_dev,y_batch_dev = p1_batch_dev.to(self.device), p2_batch_dev.to(self.device), torch.LongTensor(y_batch_dev).to(self.device)
# Model prediction
prediction = self.model(embedsbert_dev, p1_batch_dev, p2_batch_dev)
dev_loss += self.loss(prediction, y_batch_dev)
# From the model prediction to the original class
ps = torch.exp(prediction)
top_p, top_class = ps.topk(1, dim=1)
for elem in top_class:
tot_predictions_dev.append(int(elem))
self.model.train()
# Update f-score dev
macro_fscore_dev = round(f1_score(tot_targets_dev, tot_predictions_dev, average="macro") * 100, 2)
macro_fscores_dev.append(macro_fscore_dev)
# Update train and dev loss
train_losses.append(running_loss)
dev_losses.append(dev_loss)
print("Epoch: {}/{}".format(epoch + 1, self.epochs),
"Training Loss: {:.3f}".format(train_losses[-1]),
"Dev Loss: {:.3f}".format(dev_losses[-1]),
"Macro f-score train: {}".format(macro_fscore_train),
"Macro f-score dev: {}".format(macro_fscore_dev))
return macro_fscores_train, macro_fscores_dev
def test(self):
tot_predictions_test, tot_targets_test = [], []
with torch.no_grad():
self.model.eval()
for s_batch_test, p1_batch_test, p2_batch_test, y_batch_test in self.loader_test:
tot_targets_test = tot_targets_test + y_batch_test.tolist()
# BERT
sentences_list_test = [[token for token in sentence.split()] for sentence in s_batch_test]
tokens_test = self.tokenizerBert(sentences_list_test, return_offsets_mapping=True, is_split_into_words=True, padding='max_length', truncation=True, max_length=self.len_sentences)
encoded_sentences_test = []
for id_list, offset_list in zip(tokens_test['input_ids'], tokens_test['offset_mapping']):
encoded_sentence_test = []
for id, offset in zip(id_list, offset_list):
if offset[0] == 0 and offset[1] != 0:
encoded_sentence_test.append(id)
encoded_sentence_test.extend([0] * (self.len_sentences - len(encoded_sentence_test)))
encoded_sentences_test.insert(len(encoded_sentences_test), encoded_sentence_test)
embedsbert_test = self.modelBert(torch.LongTensor(encoded_sentences_test).to(self.device))[0]
p1_batch_test, p2_batch_test, y_batch_test = p1_batch_test.to(self.device), p2_batch_test.to(self.device), torch.LongTensor(y_batch_test).to(self.device)
prediction = self.model(embedsbert_test, p1_batch_test, p2_batch_test)
ps = torch.exp(prediction)
top_p, top_class = ps.topk(1, dim=1)
for elem in top_class:
tot_predictions_test.append(int(elem))
return round(f1_score(tot_targets_test, tot_predictions_test, average="macro") * 100, 2)
def compute_loss_weight(self):
    """Return one inverse-frequency loss weight per relation label.

    Counts each label's occurrences in the training targets; a label seen
    ``c`` times out of ``total`` gets weight ``1 / (c / total)``, and labels
    never seen keep a neutral weight of 1.
    """
    counts = [0] * len(self.relations)
    for label in self.data.train.y:
        counts[label] += 1
    total = sum(counts)
    return [1 / (c / total) if c > 0 else 1 for c in counts]
# scientific-re — src/parameters/parameters.py
from dataclasses import dataclass
import torch
@dataclass
class Parameters:
    """Configuration for the scientific relation-extraction experiments.

    The un-annotated attributes (``device``, ``seeds``, file paths,
    ``relations``, ``num_positions``, ``kernels``) are deliberately left
    without annotations: under ``@dataclass`` an annotation would turn them
    into init fields, and the list-valued ones would then raise
    ``ValueError`` (mutable default).  They remain plain class attributes
    shared by every instance.

    Fixed: ``dropout`` was annotated ``int`` for a float value; it is now
    ``float`` (default unchanged, so the interface is backward compatible).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    seeds = [3828, 3152, 2396]
    # Files
    train = 'sample-data/sample-train.txt'
    train_relations = 'sample-data/sample-train-rel.txt'
    dev = 'sample-data/sample-dev.txt'
    dev_relations = 'sample-data/sample-dev-rel.txt'
    test = 'sample-data/ai-ml.txt'
    test_relations = 'sample-data/ai-ml-rel.txt'
    # Relations
    relations = ['COMPARE', 'USAGE', 'MODEL-FEATURE', 'PART_WHOLE', 'RESULT']
    # Data: clipping range for entity-relative token positions
    min_position: int = -10
    max_position: int = 10
    # Preprocessing parameters
    len_sentences: int = 105
    n_pos: int = 18
    # Model parameters
    word_emb_fasttext_size: int = 300
    position_emb_size: int = 50
    bert_emb_size: int = 768
    num_positions = max_position - min_position + 3  # add positions for 0, for <PAD> and for LONG DISTANCE
    kernels = [2, 3, 4]
    out_size: int = 15
    stride: int = 1
    dropout: float = 0.5
    # Training parameters
    epochs: int = 50
    batch_size: int = 12
    learning_rate: float = 0.001
# taskgrouping — taskonomy_loader.py
import torch.utils.data as data
from PIL import Image, ImageOps
import os
import os.path
import zipfile as zf
import io
import logging
import random
import copy
import numpy as np
import time
import torch
import multiprocessing
import warnings
import torchvision.transforms as transforms
from multiprocessing import Manager
class TaskonomyLoader(data.Dataset):
    """Dataset over a Taskonomy-style directory tree.

    RGB inputs are discovered under ``<root>/rgb/<model>/...``; the label
    image for task ``t`` is located by substituting ``rgb`` -> ``t`` in the
    RGB file path.  Labels are normalized per task in ``__getitem__`` and a
    validity ``mask`` is attached when available.

    Fixed: ``__getitem__`` previously did ``to_load = self.label_set`` and
    then appended to ``to_load``, mutating ``self.label_set`` (and, because
    ``label_set`` has a mutable default, the default list shared across
    instances) on every call.  It now works on a copy.
    """

    def __init__(self,
                 root,
                 label_set=['depth_zbuffer', 'normal', 'segment_semantic', 'edge_occlusion', 'reshading', 'keypoints2d', 'edge_texture'],
                 model_whitelist=None,
                 model_limit=None,
                 output_size=None,
                 convert_to_tensor=True,
                 half_sized_output=False,
                 return_filename=False,
                 augment=False):
        """Scan ``<root>/rgb`` for samples.

        model_whitelist: path to a text file with one allowed model name per
            line (or None for all models).
        model_limit: int cap per model, or an (start, end) tuple slicing each
            model's sorted file list.
        augment: False, True (horizontal flip) or "aggressive" (both flips).
        """
        manager = Manager()
        self.root = root
        self.model_limit = model_limit
        self.records = []
        if model_whitelist is None:
            self.model_whitelist = None
        else:
            self.model_whitelist = set()
            with open(model_whitelist) as f:
                for line in f:
                    self.model_whitelist.add(line.strip())
        for i, (where, subdirs, files) in enumerate(os.walk(os.path.join(root, 'rgb'))):
            if subdirs != []:
                continue  # only leaf directories hold images
            model = where.split('/')[-1]
            if self.model_whitelist is None or model in self.model_whitelist:
                full_paths = [os.path.join(where, f) for f in files]
                if isinstance(model_limit, tuple):
                    full_paths.sort()
                    full_paths = full_paths[model_limit[0]:model_limit[1]]
                elif model_limit is not None:
                    full_paths.sort()
                    full_paths = full_paths[:model_limit]
                self.records += full_paths
        # self.records = manager.list(self.records)
        self.label_set = label_set
        self.output_size = output_size
        self.half_sized_output = half_sized_output
        self.convert_to_tensor = convert_to_tensor
        self.return_filename = return_filename
        self.to_tensor = transforms.ToTensor()
        self.augment = augment
        if augment == "aggressive":
            print('Data augmentation is on (aggressive).')
        elif augment:
            print('Data augmentation is on (flip).')
        else:
            print('no data augmentation')
        # Per-task cache of the last successfully loaded label, used as a
        # fallback when a file is missing or corrupt.
        self.last = {}

    def process_image(self, im, input=False):
        """Resize ``im`` and optionally convert it to a tensor.

        Label images (``input=False``) are halved when ``half_sized_output``
        is set.  Single-band ('L') images become a 1xHxW integer tensor.
        """
        output_size = self.output_size
        if self.half_sized_output and not input:
            if output_size is None:
                output_size = (128, 128)
            else:
                output_size = output_size[0] // 2, output_size[1] // 2
        if output_size is not None and output_size != im.size:
            im = im.resize(output_size, Image.BILINEAR)
        bands = im.getbands()
        if self.convert_to_tensor:
            if bands[0] == 'L':
                im = np.array(im)
                im.setflags(write=1)
                im = torch.from_numpy(im).unsqueeze(0)
            else:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    im = self.to_tensor(im)
        return im

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is an uint8 matrix of integers with the same width and height.
        If there is an error loading an image or its labels, simply return the previous example.
        """
        with torch.no_grad():
            file_name = self.records[index]
            save_filename = file_name
            flip_lr = (random.randint(0, 1) > .5 and self.augment)
            flip_ud = (random.randint(0, 1) > .5 and (self.augment == "aggressive"))
            pil_im = Image.open(file_name)
            if flip_lr:
                pil_im = ImageOps.mirror(pil_im)
            if flip_ud:
                pil_im = ImageOps.flip(pil_im)
            im = self.process_image(pil_im, input=True)
            error = False
            ys = {}
            mask = None
            # FIX: copy so the appends below never leak into self.label_set
            # (or the shared default list) across calls.
            to_load = list(self.label_set)
            # Tasks that need a validity mask: use a precomputed mask file if
            # present, otherwise derive the mask from depth_zbuffer.
            if len(set(['edge_occlusion', 'normal', 'reshading', 'principal_curvature']).intersection(self.label_set)) != 0:
                if os.path.isfile(file_name.replace('rgb', 'mask')):
                    to_load.append('mask')
                elif 'depth_zbuffer' not in to_load:
                    to_load.append('depth_zbuffer')
            for i in to_load:
                if i == 'mask' and mask is not None:
                    continue
                yfilename = file_name.replace('rgb', i)
                try:
                    yim = Image.open(yfilename)
                except:
                    # fall back to the previous sample's label for this task
                    yim = self.last[i].copy()
                    error = True
                if (i in self.last and yim.getbands() != self.last[i].getbands()) or error:
                    yim = self.last[i].copy()
                try:
                    self.last[i] = yim.copy()
                except:
                    pass
                if flip_lr:
                    try:
                        yim = ImageOps.mirror(yim)
                    except:
                        pass
                if flip_ud:
                    try:
                        yim = ImageOps.flip(yim)
                    except:
                        pass
                try:
                    yim = self.process_image(yim)
                except:
                    yim = self.last[i].copy()
                    yim = self.process_image(yim)
                # Per-task normalization (constants are dataset statistics).
                if i == 'depth_zbuffer':
                    yim = yim.float()
                    mask = yim < (2 ** 13)  # values >= 2^13 are invalid/missing depth
                    yim -= 1500.0
                    yim /= 1000.0
                elif i == 'edge_occlusion':
                    yim = yim.float()
                    yim -= 56.0248
                    yim /= 239.1265
                elif i == 'keypoints2d':
                    yim = yim.float()
                    yim -= 50.0
                    yim /= 100.0
                elif i == 'edge_texture':
                    yim = yim.float()
                    yim -= 718.0
                    yim /= 1070.0
                elif i == 'normal':
                    yim = yim.float()
                    yim -= .5
                    yim *= 2.0
                    # flipping the image also flips the normal directions
                    if flip_lr:
                        yim[0] *= -1.0
                    if flip_ud:
                        yim[1] *= -1.0
                elif i == 'reshading':
                    yim = yim.mean(dim=0, keepdim=True)
                    yim -= .4962
                    yim /= 0.2846
                elif i == 'principal_curvature':
                    yim = yim[:2]
                    yim -= torch.tensor([0.5175, 0.4987]).view(2, 1, 1)
                    yim /= torch.tensor([0.1373, 0.0359]).view(2, 1, 1)
                elif i == 'mask':
                    mask = yim.bool()
                    yim = mask
                ys[i] = yim
            if mask is not None:
                ys['mask'] = mask
            if not 'rgb' in self.label_set:
                ys['rgb'] = im
            if self.return_filename:
                return im, ys, file_name
            else:
                return im, ys

    def __len__(self):
        return (len(self.records))
def show(im, ys):
    """Debug visualization: plot the RGB image plus every label in ``ys`` on a matplotlib grid."""
    from matplotlib import pyplot as plt
    plt.figure(figsize=(30, 30))
    plt.subplot(4, 3, 1).set_title('RGB')
    im = im.permute([1, 2, 0])  # CHW -> HWC for imshow
    plt.imshow(im)
    for i, y in enumerate(ys):
        yim = ys[y]
        plt.subplot(4, 3, 2 + i).set_title(y)
        if y == 'normal':
            # undo the [-1, 1] normalization for display
            yim += 1
            yim /= 2
        if yim.shape[0] == 2:
            # pad 2-channel labels (principal curvature) to 3 channels for imshow
            yim = torch.cat([yim, torch.zeros((1, yim.shape[1], yim.shape[2]))], dim=0)
        yim = yim.permute([1, 2, 0])
        yim = yim.squeeze()
        plt.imshow(np.array(yim))
    plt.show()
def test():
    """Manual smoke test: iterate a local dataset copy in random order, visualize
    each sample and print running masked mean/std per task (hard-coded path)."""
    loader = TaskonomyLoader(
        '/home/tstand/Desktop/lite_taskonomy/',
        label_set=['normal', 'reshading', 'principal_curvature', 'edge_occlusion', 'depth_zbuffer'],
        augment='aggressive')
    totals = {}
    totals2 = {}
    count = {}
    indices = list(range(len(loader)))
    random.shuffle(indices)
    for data_count, index in enumerate(indices):
        im, ys = loader[index]
        show(im, ys)
        mask = ys['mask']
        print(index)
        for i, y in enumerate(ys):
            yim = ys[y]
            yim = yim.float()
            if y not in totals:
                totals[y] = 0
                totals2[y] = 0
                count[y] = 0
            # running masked first/second moments per channel
            totals[y] += (yim * mask).sum(dim=[1, 2])
            totals2[y] += ((yim ** 2) * mask).sum(dim=[1, 2])
            count[y] += (torch.ones_like(yim) * mask).sum(dim=[1, 2])
            # population std from the running moments
            std = torch.sqrt((totals2[y] - (totals[y] ** 2) / count[y]) / count[y])
            print(data_count, '/', len(loader), y, 'mean:', totals[y] / count[y], 'std:', std)
def output_mask(index, loader):
    """Compute the validity mask for sample ``index`` and save it as a 1-bit
    image under the /run/shm mirror of the dataset path (skips existing files)."""
    from matplotlib import pyplot as plt
    filename = loader.records[index]
    filename = filename.replace('rgb', 'mask')
    filename = filename.replace('/intel_nvme/taskonomy_data/', '/run/shm/')
    if os.path.isfile(filename):
        return
    print(filename)
    x, ys = loader[index]
    mask = ys['mask']
    mask = mask.squeeze()
    mask_im = Image.fromarray(mask.numpy())
    mask_im = mask_im.convert(mode='1')  # 1-bit black & white
    path, _ = os.path.split(filename)
    os.makedirs(path, exist_ok=True)
    mask_im.save(filename, bits=1, optimize=True)
def get_masks():
    """Generate and cache validity masks for the whole dataset (random order,
    so several concurrent processes can share the work)."""
    import multiprocessing
    loader = TaskonomyLoader(
        '/intel_nvme/taskonomy_data/',
        label_set=['depth_zbuffer'],
        augment=False)
    indices = list(range(len(loader)))
    random.shuffle(indices)
    for count, index in enumerate(indices):
        print(count, len(indices))
        output_mask(index, loader)
# Script entry point: run the visual smoke test by default.
if __name__ == "__main__":
    test()
    # get_masks()
# taskgrouping — taskonomy_losses.py
import torch
import collections
# Module-level accumulators: each task loss writes its most recent scalar here
# so the lightweight `criteria` lambdas built in get_losses_and_tasks can
# report the per-task value without recomputing the loss.
sl=0   # segment_semantic
nl=0   # normal (rotate variant)
nl2=0  # unused here -- TODO confirm it is not read elsewhere
nl3=0  # normal2
dl=0   # depth_zbuffer
el=0   # edge_occlusion
rl=0   # reshading
kl=0   # keypoints2d
tl=0   # edge_texture
al=0   # rgb autoencoder
cl=0   # principal_curvature
popular_offsets=collections.defaultdict(int)  # declared global in rotate_loss but never written; vestigial
batch_number=0  # unused in this module -- presumably maintained elsewhere; TODO confirm
def segment_semantic_loss(output, target, mask):
    """Cross-entropy over segmentation logits; label 0 (unlabelled) is ignored.

    The value is also cached in the module-level ``sl`` accumulator.
    """
    global sl
    logits = output.float()
    labels = target.long().squeeze(dim=1)
    sl = torch.nn.functional.cross_entropy(logits, labels, ignore_index=0, reduction='mean')
    return sl
def normal_loss(output, target, mask):
    """Shift-tolerant masked L1 for surface normals (see rotate_loss); cached in ``nl``."""
    global nl
    nl = rotate_loss(output, target, mask, normal_loss_base)
    return nl
def normal_loss_simple(output, target, mask):
    """Masked mean-L1 loss for surface normals; cached in the module-level ``nl``."""
    global nl
    elementwise = torch.nn.functional.l1_loss(output, target, reduction='none')
    nl = (elementwise * mask.float()).mean()
    return nl
def rotate_loss(output,target,mask,loss_name):
global popular_offsets
target=target[:,:,1:-1,1:-1].float()
mask = mask[:,:,1:-1,1:-1].float()
output=output.float()
val1 = loss = loss_name(output[:,:,1:-1,1:-1],target,mask)
val2 = loss_name(output[:,:,0:-2,1:-1],target,mask)
loss = torch.min(loss,val2)
val3 = loss_name(output[:,:,1:-1,0:-2],target,mask)
loss = torch.min(loss,val3)
val4 = loss_name(output[:,:,2:,1:-1],target,mask)
loss = torch.min(loss,val4)
val5 = loss_name(output[:,:,1:-1,2:],target,mask)
loss = torch.min(loss,val5)
val6 = loss_name(output[:,:,0:-2,0:-2],target,mask)
loss = torch.min(loss,val6)
val7 = loss_name(output[:,:,2:,2:],target,mask)
loss = torch.min(loss,val7)
val8 = loss_name(output[:,:,0:-2,2:],target,mask)
loss = torch.min(loss,val8)
val9 = loss_name(output[:,:,2:,0:-2],target,mask)
loss = torch.min(loss,val9)
#lst = [val1,val2,val3,val4,val5,val6,val7,val8,val9]
#print(loss.size())
loss=loss.mean()
#print(loss)
return loss
def normal_loss_base(output, target, mask):
    """Masked L1 reduced per sample (over C, H, W; the batch dimension is kept
    so rotate_loss can take an elementwise minimum)."""
    per_pixel = torch.nn.functional.l1_loss(output, target, reduction='none') * mask
    return per_pixel.mean(dim=(1, 2, 3))
def normal2_loss(output, target, mask):
    """Masked mean absolute difference for the second normals task; cached in ``nl3``."""
    global nl3
    delta = torch.abs(output.float() - target.float())
    nl3 = (delta * mask.float()).mean()
    return nl3
def depth_loss_simple(output, target, mask):
    """Masked mean-L1 loss for z-buffer depth; cached in the module-level ``dl``."""
    global dl
    elementwise = torch.nn.functional.l1_loss(output, target, reduction='none')
    dl = (elementwise * mask.float()).mean()
    return dl
def depth_loss(output, target, mask):
    """Shift-tolerant masked L1 for depth (see rotate_loss); cached in ``dl``."""
    global dl
    dl = rotate_loss(output, target, mask, depth_loss_base)
    return dl
def depth_loss_base(output, target, mask):
    """Masked L1 for depth, reduced per sample (batch dimension kept for rotate_loss)."""
    per_pixel = torch.nn.functional.l1_loss(output, target, reduction='none') * mask.float()
    return per_pixel.mean(dim=(1, 2, 3))
def edge_loss_simple(output, target, mask):
    """Masked mean-L1 loss for occlusion edges; cached in the module-level ``el``.

    Fixed: the mask is now cast with ``.float()`` before multiplication,
    matching depth_loss_simple / normal_loss_simple -- previously a boolean
    mask relied on implicit bool*float promotion.
    """
    global el
    out = torch.nn.functional.l1_loss(output, target, reduction='none')
    out *= mask.float()
    el = out.mean()
    return el
def reshade_loss(output, target, mask):
    """Masked mean-L1 loss for reshading; cached in the module-level ``rl``."""
    global rl
    masked = torch.nn.functional.l1_loss(output, target, reduction='none') * mask
    rl = masked.mean()
    return rl
def keypoints2d_loss(output, target, mask):
    """Plain (unmasked) mean-L1 loss for 2D keypoints; cached in ``kl``.  ``mask`` is ignored."""
    global kl
    value = torch.nn.functional.l1_loss(output, target)
    kl = value
    return value
def edge2d_loss(output, target, mask):
    """Plain (unmasked) mean-L1 loss for texture edges; cached in ``tl``.  ``mask`` is ignored."""
    global tl
    value = torch.nn.functional.l1_loss(output, target)
    tl = value
    return value
def auto_loss(output, target, mask):
    """Mean-L1 reconstruction loss for the RGB autoencoder task; cached in ``al``.  ``mask`` is ignored."""
    global al
    value = torch.nn.functional.l1_loss(output, target)
    al = value
    return value
def pc_loss(output, target, mask):
    """Masked mean-L1 loss for principal curvature; cached in ``cl``."""
    global cl
    masked = torch.nn.functional.l1_loss(output, target, reduction='none') * mask
    cl = masked.mean()
    return cl
def edge_loss(output, target, mask):
    """Masked mean-L1 loss for occlusion edges (rotate-loss base path); cached in ``el``."""
    global el
    masked = torch.nn.functional.l1_loss(output, target, reduction='none') * mask
    el = masked.mean()
    return el
def get_taskonomy_loss(losses):
    """Build the combined multi-task objective from per-task loss functions.

    The returned ``taskonomy_loss(output, target)`` sums
    ``losses[name](output[name].float(), target[name], mask)`` over every task
    name present in both ``target`` and ``losses``; ``target['mask']`` (if
    present) is forwarded to each task loss.

    Fixed: the running sum previously used ``if sum_loss:`` truthiness, which
    is False for a zero-valued scalar tensor and raises for non-scalar
    losses; it now tests ``is not None``.
    """
    def taskonomy_loss(output, target):
        if 'mask' in target:
            mask = target['mask']
        else:
            mask = None
        sum_loss = None
        num = 0  # kept for a possible average, see the note below
        for n, t in target.items():
            if n in losses:
                o = output[n].float()
                this_loss = losses[n](o, t, mask)
                num += 1
                if sum_loss is not None:
                    sum_loss = sum_loss + this_loss
                else:
                    sum_loss = this_loss
        return sum_loss  # /num -- should not take average when using xception_taskonomy_new
    return taskonomy_loss
def get_losses_and_tasks(args):
    """Translate the one-letter-per-task string ``args.tasks`` into loss functions.

    Returns ``(combined_loss, losses, criteria, taskonomy_tasks)`` where:
    - ``losses``: task name -> ``loss(output, target, mask)``;
    - ``criteria``: display name -> lambda reading the module-level
      accumulator that the corresponding loss function last wrote;
    - ``taskonomy_tasks``: Taskonomy label names in insertion order.

    ``args.rotate_loss`` selects the shift-tolerant variants for d/n/e;
    ``args.task_weights`` (comma-separated floats, one per task in order)
    scales both the losses and the reported criteria.

    Fixed: removed the dead ``losses2``/``criteria2`` dicts that the
    task-weights branch built but never used.
    """
    task_str = args.tasks
    losses = {}
    criteria = {}
    taskonomy_tasks = []
    if 's' in task_str:
        losses['segment_semantic'] = segment_semantic_loss
        criteria['ss_l'] = lambda x, y: sl
        taskonomy_tasks.append('segment_semantic')
    if 'd' in task_str:
        if not args.rotate_loss:
            losses['depth_zbuffer'] = depth_loss_simple
        else:
            print('got rotate loss')
            losses['depth_zbuffer'] = depth_loss
        criteria['depth_l'] = lambda x, y: dl
        taskonomy_tasks.append('depth_zbuffer')
    if 'n' in task_str:
        if not args.rotate_loss:
            losses['normal'] = normal_loss_simple
        else:
            print('got rotate loss')
            losses['normal'] = normal_loss
        criteria['norm_l'] = lambda x, y: nl
        taskonomy_tasks.append('normal')
    if 'N' in task_str:
        losses['normal2'] = normal2_loss
        criteria['norm2'] = lambda x, y: nl3
        taskonomy_tasks.append('normal2')
    if 'k' in task_str:
        losses['keypoints2d'] = keypoints2d_loss
        criteria['key_l'] = lambda x, y: kl
        taskonomy_tasks.append('keypoints2d')
    if 'e' in task_str:
        if not args.rotate_loss:
            losses['edge_occlusion'] = edge_loss_simple
        else:
            print('got rotate loss')
            losses['edge_occlusion'] = edge_loss
        criteria['edge_l'] = lambda x, y: el
        taskonomy_tasks.append('edge_occlusion')
    if 'r' in task_str:
        losses['reshading'] = reshade_loss
        criteria['shade_l'] = lambda x, y: rl
        taskonomy_tasks.append('reshading')
    if 't' in task_str:
        losses['edge_texture'] = edge2d_loss
        criteria['edge2d_l'] = lambda x, y: tl
        taskonomy_tasks.append('edge_texture')
    if 'a' in task_str:
        losses['rgb'] = auto_loss
        criteria['rgb_l'] = lambda x, y: al
        taskonomy_tasks.append('rgb')
    if 'c' in task_str:
        losses['principal_curvature'] = pc_loss
        criteria['pc_l'] = lambda x, y: cl
        taskonomy_tasks.append('principal_curvature')
    if args.task_weights:
        weights = [float(x) for x in args.task_weights.split(',')]
        # losses and criteria were filled in the same order, so zipping their
        # items pairs each task loss with its criterion.  The function and
        # weight are bound as lambda defaults so every closure keeps its own.
        for l, w, c in zip(losses.items(), weights, criteria.items()):
            losses[l[0]] = lambda x, y, z, l=l[1], w=w: l(x, y, z) * w
            criteria[c[0]] = lambda x, y, c=c[1], w=w: c(x, y) * w
    taskonomy_loss = get_taskonomy_loss(losses)
    return taskonomy_loss, losses, criteria, taskonomy_tasks
# taskgrouping — train_taskonomy.py
import argparse
import os
import shutil
import time
import platform
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.datasets as datasets
from taskonomy_losses import *
from taskonomy_loader import TaskonomyLoader
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
import copy
import numpy as np
import signal
import sys
import math
from collections import defaultdict
import scipy.stats
#from ptflops import get_model_complexity_info
import model_definitions as models
# All lowercase, non-dunder callables exported by model_definitions are
# selectable architectures for --arch.
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))
# Command-line interface.
# Fixed: the -na/--no_augment help string was copy-pasted from --fp16
# ("Run model fp16 mode."); also corrected the 'trianing'/'inital'/'accross'
# typos in other help strings.  No flags, defaults or types changed.
parser = argparse.ArgumentParser(description='PyTorch Taskonomy Training')
parser.add_argument('--data_dir', '-d', dest='data_dir', required=True,
                    help='path to training set')
parser.add_argument('--arch', '-a', metavar='ARCH', required=True,
                    choices=model_names,
                    help='model architecture: ' +
                    ' | '.join(model_names) +
                    ' (required)')
parser.add_argument('-b', '--batch-size', default=64, type=int,
                    help='mini-batch size (default: 64)')
parser.add_argument('--tasks', '-ts', default='sdnkt', dest='tasks',
                    help='which tasks to train on')
parser.add_argument('--model_dir', default='saved_models', dest='model_dir',
                    help='where to save models')
parser.add_argument('--image-size', default=256, type=int,
                    help='size of image side (images are square)')
parser.add_argument('-j', '--workers', default=4, type=int,
                    help='number of data loading workers (default: 4)')
parser.add_argument('-pf', '--print_frequency', default=1, type=int,
                    help='how often to print output')
parser.add_argument('--epochs', default=100, type=int,
                    help='maximum number of epochs to run')
parser.add_argument('-mlr', '--minimum_learning_rate', default=3e-5, type=float,
                    metavar='LR', help='End training when learning rate falls below this value.')
parser.add_argument('-lr', '--learning-rate', dest='lr', default=0.1, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('-ltw0', '--loss_tracking_window_initial', default=500000, type=int,
                    help='initial loss tracking window (default: 500000)')
parser.add_argument('-mltw', '--maximum_loss_tracking_window', default=2000000, type=int,
                    help='maximum loss tracking window (default: 2000000)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '-wd', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--resume', '--restart', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-n', '--experiment_name', default='', type=str,
                    help='name to prepend to experiment saves.')
parser.add_argument('-v', '--validate', dest='validate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('-t', '--test', dest='test', action='store_true',
                    help='evaluate model on test set')
parser.add_argument('-r', '--rotate_loss', dest='rotate_loss', action='store_true',
                    help='should loss rotation occur')
parser.add_argument('--pretrained', dest='pretrained', default='',
                    help='use pre-trained model')
parser.add_argument('-vb', '--virtual-batch-multiplier', default=1, type=int,
                    metavar='N', help='number of forward/backward passes per parameter update')
parser.add_argument('--fp16', action='store_true',
                    help='Run model fp16 mode.')
parser.add_argument('-sbn', '--sync_batch_norm', action='store_true',
                    help='sync batch norm parameters across gpus.')
parser.add_argument('-hs', '--half_sized_output', action='store_true',
                    help='output 128x128 rather than 256x256.')
parser.add_argument('-na', '--no_augment', action='store_true',
                    help='disable data augmentation.')
parser.add_argument('-ml', '--model-limit', default=None, type=int,
                    help='Limit the number of training instances from a single 3d building model.')
parser.add_argument('-tw', '--task-weights', default=None, type=str,
                    help='a comma separated list of numbers one for each task to multiply the loss by.')

# Input sizes vary across tasks/augmentation, so cudnn autotuning is disabled.
cudnn.benchmark = False
def main(args):
    """End-to-end driver: build losses, data, model and optimizer, then
    validate, test, or train depending on the parsed flags."""
    print(args)
    print('starting on', platform.node())
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        print('cuda gpus:', os.environ['CUDA_VISIBLE_DEVICES'])
    main_stream = torch.cuda.Stream()  # NOTE(review): never used afterwards
    if args.fp16:
        assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
        print('Got fp16!')
    # Per-task losses and the combined objective derived from args.tasks.
    taskonomy_loss, losses, criteria, taskonomy_tasks = get_losses_and_tasks(args)
    print("including the following tasks:", list(losses.keys()))
    # Report the combined 'Loss' first, followed by the per-task criteria.
    criteria2 = {'Loss': taskonomy_loss}
    for key, value in criteria.items():
        criteria2[key] = value
    criteria = criteria2
    print('data_dir =', args.data_dir, len(args.data_dir))
    if args.no_augment:
        augment = False
    else:
        augment = True
    train_dataset = TaskonomyLoader(
        args.data_dir,
        label_set=taskonomy_tasks,
        model_whitelist='train_models.txt',
        model_limit=args.model_limit,
        output_size=(args.image_size, args.image_size),
        half_sized_output=args.half_sized_output,
        augment=augment)
    print('Found', len(train_dataset), 'training instances.')
    print("=> creating model '{}'".format(args.arch))
    model = models.__dict__[args.arch](tasks=losses.keys(), half_sized_output=args.half_sized_output)

    def get_n_params(model):
        # total number of scalar parameters in a module
        pp = 0
        for p in list(model.parameters()):
            nn = 1
            for s in list(p.size()):
                nn = nn * s
            pp += nn
        return pp

    print("Model has", get_n_params(model), "parameters")
    try:
        # single-encoder architectures
        print("Encoder has", get_n_params(model.encoder), "parameters")
    except:
        # multi-encoder architectures expose `encoders` instead
        print("Each encoder has", get_n_params(model.encoders[0]), "parameters")
    for decoder in model.task_to_decoder.values():
        print("Decoder has", get_n_params(decoder), "parameters")
    model = model.cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # tested with adamW. Poor results observed
    # Initialize Amp for mixed precision (dynamic loss scaling).
    if args.fp16:
        model, optimizer = amp.initialize(model, optimizer,
                                          opt_level='O1',
                                          loss_scale="dynamic",
                                          verbosity=0
                                          )
        print('Got fp16!')
    # optionally resume from a checkpoint (model weights first)
    checkpoint = None
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage.cuda())
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    if args.pretrained != '':
        print('loading pretrained weights for ' + args.arch + ' (' + args.pretrained + ')')
        model.encoder.load_state_dict(torch.load(args.pretrained))
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model).cuda()
        if args.sync_batch_norm:
            from sync_batchnorm import patch_replication_callback
            patch_replication_callback(model)
    print('Virtual batch size =', args.batch_size * args.virtual_batch_multiplier)
    # restore optimizer state after amp/DataParallel wrapping
    if args.resume:
        if os.path.isfile(args.resume) and 'optimizer' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True, sampler=None)
    val_loader = get_eval_loader(args.data_dir, taskonomy_tasks, args)
    trainer = Trainer(train_loader, val_loader, model, optimizer, criteria, args, checkpoint)
    if args.validate:
        trainer.progress_table = []
        trainer.validate([{}])
        print()
        return
    if args.test:
        trainer.progress_table = []
        # replace val loader with a loader that loads test data
        trainer.val_loader = get_eval_loader(args.data_dir, taskonomy_tasks, args, model_limit=(1000, 2000))
        trainer.validate([{}])
        return
    trainer.train()
def get_eval_loader(datadir, label_set, args, model_limit=1000):
    """Build the validation/test DataLoader: val_models.txt whitelist, half
    the training batch size, no shuffling, no augmentation.  ``model_limit``
    may be an int cap or a (start, end) slice per building model."""
    print(datadir)
    val_dataset = TaskonomyLoader(datadir,
                                  label_set=label_set,
                                  model_whitelist='val_models.txt',
                                  model_limit=model_limit,
                                  output_size=(args.image_size, args.image_size),
                                  half_sized_output=args.half_sized_output,
                                  augment=False)
    print('Found', len(val_dataset), 'validation instances.')
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=max(args.batch_size // 2, 1), shuffle=False,
        num_workers=args.workers, pin_memory=True, sampler=None)
    return val_loader
# Wall-clock reference for the whole run.
program_start_time = time.time()

def on_keyboared_interrupt(x, y):
    """SIGINT handler: exit immediately without a traceback.
    (Function-name typo kept for compatibility.)"""
    sys.exit(1)

signal.signal(signal.SIGINT, on_keyboared_interrupt)
def get_average_learning_rate(optimizer):
    """Return the optimizer's learning rate.

    Prefers a scalar ``optimizer.learning_rate`` attribute when present;
    otherwise averages ``lr`` over all parameter groups.

    Fixed: the fallback previously used a bare ``except:`` (which also
    swallows KeyboardInterrupt/SystemExit); it now catches only
    AttributeError.
    """
    try:
        return optimizer.learning_rate
    except AttributeError:
        groups = optimizer.param_groups
        return sum(group['lr'] for group in groups) / len(groups)
class data_prefetcher():
    """Wraps a DataLoader and asynchronously copies the next batch to the GPU
    on a dedicated CUDA stream while the current batch is being processed.
    Cycles forever: when the underlying loader is exhausted, it restarts it.
    """

    def __init__(self, loader):
        self.inital_loader = loader
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream()
        self.preload()

    def preload(self):
        """Fetch the next (input, target) and launch its host->device copy on self.stream."""
        try:
            self.next_input, self.next_target = next(self.loader)
        except StopIteration:
            # Epoch boundary: restart the loader and retry.
            # NOTE(review): recurses indefinitely if the loader yields nothing.
            self.loader = iter(self.inital_loader)
            self.preload()
            return
        with torch.cuda.stream(self.stream):
            self.next_input = self.next_input.cuda(non_blocking=True)
            # targets are a dict of task-name -> tensor
            self.next_target = {key: val.cuda(non_blocking=True) for (key, val) in self.next_target.items()}

    def next(self):
        """Return the prefetched batch (after syncing with the copy stream)
        and immediately start prefetching the next one."""
        torch.cuda.current_stream().wait_stream(self.stream)
        input = self.next_input
        target = self.next_target
        self.preload()
        return input, target
class color:
    """ANSI terminal escape codes for colored/bold console output."""
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'  # reset
def print_table(table_list, go_back=True):
    """Render ``table_list`` (a list of rows, each row a list of
    {title: value} dicts) as an aligned text table.

    When ``go_back`` is set, first moves the cursor up and clears the lines
    of the previously printed table so the display updates in place.
    """
    if len(table_list) == 0:
        print()
        print()
        return
    if go_back:
        # \033[F = cursor up one line; \033[K = clear to end of line
        print("\033[F", end='')
        print("\033[K", end='')
        for i in range(len(table_list)):
            print("\033[F", end='')
            print("\033[K", end='')
    # Column width = widest of title/value over all rows for that column.
    lens = defaultdict(int)
    for i in table_list:
        for ii, to_print in enumerate(i):
            for title, val in to_print.items():
                lens[(title, ii)] = max(lens[(title, ii)], max(len(title), len(val)))
    # Header row comes from the first table row's titles.
    for ii, to_print in enumerate(table_list[0]):
        for title, val in to_print.items():
            print('{0:^{1}}'.format(title, lens[(title, ii)]), end=" ")
    for i in table_list:
        print()
        for ii, to_print in enumerate(i):
            for title, val in to_print.items():
                print('{0:^{1}}'.format(val, lens[(title, ii)]), end=" ", flush=True)
    print()
class AverageMeter(object):
    """Tracks the running average and population standard deviation of a metric.

    Fixed: ``std`` was previously recomputed with ``np.std`` over the full
    history on every update -- O(k) per call, O(k^2) per epoch.  It is now
    maintained in O(1) from running first/second moments.  With weighted
    updates (n > 1) the std now uses the same weighting as the mean (the old
    code weighted the mean but not the std).  The ``lst`` history is kept for
    backward compatibility with any external readers.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0      # most recent value
        self.avg = 0      # running (weighted) mean
        self.std = 0      # running population std
        self.sum = 0      # weighted sum of values
        self.sumsq = 0    # weighted sum of squared values
        self.count = 0    # total weight
        self.lst = []     # raw history of values (unweighted)

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times."""
        self.val = float(val)
        self.sum += float(val) * n
        self.sumsq += (float(val) ** 2) * n
        self.count += n
        self.avg = self.sum / self.count
        self.lst.append(self.val)
        # Population variance from running moments; clamp the tiny negatives
        # that floating-point cancellation can produce.
        self.std = math.sqrt(max(self.sumsq / self.count - self.avg ** 2, 0.0))
class Trainer:
    """Owns the training/validation loop, checkpointing, and the statistical
    learning-rate schedule for a multi-task Taskonomy model."""

    def __init__(self, train_loader, val_loader, model, optimizer, criteria, args, checkpoint=None):
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.train_prefetcher = data_prefetcher(self.train_loader)
        self.model = model
        self.optimizer = optimizer
        self.criteria = criteria
        self.args = args
        self.fp16 = args.fp16
        self.code_archive = self.get_code_archive()
        # Restore training bookkeeping from a checkpoint when available,
        # falling back to fresh defaults field by field.
        if checkpoint:
            if 'progress_table' in checkpoint:
                self.progress_table = checkpoint['progress_table']
            else:
                self.progress_table = []
            if 'epoch' in checkpoint:
                self.start_epoch = checkpoint['epoch'] + 1
            else:
                self.start_epoch = 0
            if 'best_loss' in checkpoint:
                self.best_loss = checkpoint['best_loss']
            else:
                self.best_loss = 9e9
            if 'stats' in checkpoint:
                self.stats = checkpoint['stats']
            else:
                self.stats = []
            if 'loss_history' in checkpoint:
                self.loss_history = checkpoint['loss_history']
            else:
                self.loss_history = []
        else:
            self.progress_table = []
            self.best_loss = 9e9
            self.stats = []
            self.start_epoch = 0
            self.loss_history = []
        self.lr0 = get_average_learning_rate(optimizer)
        print_table(self.progress_table, False)
        self.ticks = 0       # number of learning-rate reductions so far
        self.last_tick = 0   # loss_history index at the last reduction
        self.loss_tracking_window = args.loss_tracking_window_initial
def get_code_archive(self):
    """Snapshot the source text of every .py file in the current directory
    (stored inside checkpoints for reproducibility)."""
    archive = {}
    for fname in os.listdir('.'):
        if fname.endswith('.py'):
            with open(fname, 'r') as handle:
                archive[fname] = handle.read()
    return archive
def train(self):
    """Run training epochs from start_epoch until args.epochs, stopping early
    when the learning rate decays below args.minimum_learning_rate.
    Validates and checkpoints after every epoch."""
    for self.epoch in range(self.start_epoch, self.args.epochs):
        current_learning_rate = get_average_learning_rate(self.optimizer)
        if current_learning_rate < self.args.minimum_learning_rate:
            break
        # train for one epoch
        train_string, train_stats = self.train_epoch()
        # evaluate on validation set
        progress_string = train_string
        loss, progress_string, val_stats = self.validate(progress_string)
        print()
        self.progress_table.append(progress_string)
        self.stats.append((train_stats, val_stats))
        self.checkpoint(loss)
def checkpoint(self, loss):
    """Save a full training checkpoint; when the validation ``loss`` improved,
    also snapshot it as ``best_<filename>``.  Failures are caught and only
    logged so a transient disk error cannot kill a long run."""
    is_best = loss < self.best_loss
    self.best_loss = min(loss, self.best_loss)
    save_filename = self.args.experiment_name + '_' + self.args.arch + '_' + ('p' if self.args.pretrained != '' else 'np') + '_' + self.args.tasks + '_checkpoint.pth.tar'
    try:
        to_save = self.model
        if torch.cuda.device_count() > 1:
            to_save = to_save.module  # unwrap DataParallel before saving
        gpus = 'all'
        if 'CUDA_VISIBLE_DEVICES' in os.environ:
            gpus = os.environ['CUDA_VISIBLE_DEVICES']
        self.save_checkpoint({
            'epoch': self.epoch,
            'info': {'machine': platform.node(), 'GPUS': gpus},
            'args': self.args,
            'arch': self.args.arch,
            'state_dict': to_save.state_dict(),
            'best_loss': self.best_loss,
            'optimizer': self.optimizer.state_dict(),
            'progress_table': self.progress_table,
            'stats': self.stats,
            'loss_history': self.loss_history,
            'code_archive': self.code_archive
        }, False, self.args.model_dir, save_filename)
        if is_best:
            # copies the file just written rather than serializing again
            self.save_checkpoint(None, True, self.args.model_dir, save_filename)
    except:
        print('save checkpoint failed...')
def save_checkpoint(self, state, is_best, directory='', filename='checkpoint.pth.tar'):
    """Persist ``state`` to ``directory/filename``.  When ``is_best`` is set,
    instead copy the already-saved checkpoint file to ``best_<filename>``."""
    target = os.path.join(directory, filename)
    if not is_best:
        torch.save(state, target)
    else:
        shutil.copyfile(target, os.path.join(directory, 'best_' + filename))
def learning_rate_schedule(self):
    """Statistical learning-rate schedule.

    Once per tracking window, splits the recent loss history into an older
    and a newer segment (with the extreme 5% trimmed from each), and runs a
    paired t-test between them.  If the newer segment is not better
    (``z_diff < 0``) or the difference is insignificant (``p > .99``), the
    learning rate is reduced via ``adjust_learning_rate`` and the window is
    doubled (capped at ``args.maximum_loss_tracking_window``).

    Returns (ttest_p, z_diff); (0, 0) before epoch 2 or when no test was run.

    Fixed: the window computation previously read the module-level ``args``
    global (a NameError when this file is imported rather than run as a
    script); it now uses ``self.args`` like the rest of the class.
    """
    ttest_p = 0
    z_diff = 0
    # don't reduce learning rate until the second epoch has ended
    if self.epoch < 2:
        return 0, 0
    wind = self.loss_tracking_window // (self.args.batch_size * self.args.virtual_batch_multiplier)
    if len(self.loss_history) - self.last_tick > wind:
        a = self.loss_history[-wind:-wind * 5 // 8]
        b = self.loss_history[-wind * 3 // 8:]
        # remove outliers
        a = sorted(a)
        b = sorted(b)
        a = a[int(len(a) * .05):int(len(a) * .95)]
        b = b[int(len(b) * .05):int(len(b) * .95)]
        length_ = min(len(a), len(b))
        a = a[:length_]
        b = b[:length_]
        z_diff, ttest_p = scipy.stats.ttest_rel(a, b, nan_policy='omit')
        if z_diff < 0 or ttest_p > .99:
            self.ticks += 1
            self.last_tick = len(self.loss_history)
            self.adjust_learning_rate()
            self.loss_tracking_window = min(self.args.maximum_loss_tracking_window, self.loss_tracking_window * 2)
    return ttest_p, z_diff
def train_epoch(self):
    """Train for one epoch with gradient accumulation and a live progress table.

    Each optimizer step consumes ``args.virtual_batch_multiplier`` physical
    batches; the per-step loss feeds loss_history for the t-test LR schedule.
    Returns ([last progress row], stats dict).
    """
    global program_start_time
    average_meters = defaultdict(AverageMeter)
    display_values = []
    for name, func in self.criteria.items():
        display_values.append(name)
    # switch to train mode
    self.model.train()
    end = time.time()
    epoch_start_time = time.time()
    epoch_start_time2 = time.time()
    batch_num = 0
    # one "data point" here = one optimizer step (virtual batch)
    num_data_points = len(self.train_loader) // self.args.virtual_batch_multiplier
    if num_data_points > 10000:
        num_data_points = num_data_points // 5  # cap huge epochs
    starting_learning_rate = get_average_learning_rate(self.optimizer)
    while True:
        if batch_num == 0:
            end = time.time()
            epoch_start_time2 = time.time()
        if num_data_points == batch_num:
            break
        self.percent = batch_num / num_data_points
        loss_dict = None
        loss = 0
        # accumulate gradients over multiple runs of input
        for _ in range(self.args.virtual_batch_multiplier):
            data_start = time.time()
            input, target = self.train_prefetcher.next()
            average_meters['data_time'].update(time.time() - data_start)
            loss_dict2, loss2 = self.train_batch(input, target)
            loss += loss2
            if loss_dict is None:
                loss_dict = loss_dict2
            else:
                for key, value in loss_dict2.items():
                    loss_dict[key] += value
        # divide by the number of accumulations
        loss /= self.args.virtual_batch_multiplier
        for key, value in loss_dict.items():
            loss_dict[key] = value / self.args.virtual_batch_multiplier
        # do the weight updates and set gradients back to zero
        self.update()
        self.loss_history.append(float(loss))
        ttest_p, z_diff = self.learning_rate_schedule()
        for name, value in loss_dict.items():
            try:
                average_meters[name].update(value.data)
            except:
                average_meters[name].update(value)  # plain floats have no .data
        elapsed_time_for_epoch = (time.time() - epoch_start_time2)
        eta = (elapsed_time_for_epoch / (batch_num + .2)) * (num_data_points - batch_num)
        if eta >= 24 * 3600:
            eta = 24 * 3600 - 1  # keep strftime within one day
        batch_num += 1
        current_learning_rate = get_average_learning_rate(self.optimizer)
        if True:
            # build one progress-table row for this step
            to_print = {}
            to_print['ep'] = ('{0}:').format(self.epoch)
            to_print['#/{0}'.format(num_data_points)] = ('{0}').format(batch_num)
            to_print['lr'] = ('{0:0.3g}-{1:0.3g}').format(starting_learning_rate, current_learning_rate)
            to_print['eta'] = ('{0}').format(time.strftime("%H:%M:%S", time.gmtime(int(eta))))
            # percentage of wall time spent waiting on the data loader
            to_print['d%'] = ('{0:0.2g}').format(100 * average_meters['data_time'].sum / elapsed_time_for_epoch)
            for name in display_values:
                meter = average_meters[name]
                to_print[name] = ('{meter.avg:.4g}').format(meter=meter)
            if batch_num < num_data_points - 1:
                to_print['ETA'] = ('{0}').format(time.strftime("%H:%M:%S", time.gmtime(int(eta + elapsed_time_for_epoch))))
                to_print['ttest'] = ('{0:0.3g},{1:0.3g}').format(z_diff, ttest_p)
            if batch_num % self.args.print_frequency == 0:
                print_table(self.progress_table + [[to_print]])
    epoch_time = time.time() - epoch_start_time
    stats = {'batches': num_data_points,
             'learning_rate': current_learning_rate,
             'Epoch time': epoch_time,
             }
    for name in display_values:
        meter = average_meters[name]
        stats[name] = meter.avg
    data_time = average_meters['data_time'].sum
    # NOTE(review): reuses the final loop iteration's to_print as the row
    # returned for the epoch, with eta replaced by the total epoch time.
    to_print['eta'] = ('{0}').format(time.strftime("%H:%M:%S", time.gmtime(int(epoch_time))))
    return [to_print], stats
def train_batch(self, input, target):
loss_dict = {}
input = input.float()
output = self.model(input)
first_loss=None
for c_name,criterion_fun in self.criteria.items():
if first_loss is None:first_loss=c_name
loss_dict[c_name]=criterion_fun(output, target)
loss = loss_dict[first_loss].clone()
loss = loss / self.args.virtual_batch_multiplier
if self.args.fp16:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
return loss_dict, loss
def update(self):
self.optimizer.step()
self.optimizer.zero_grad()
    def validate(self, train_table):
        """Evaluate the model once over ``self.val_loader``.

        Args:
            train_table: the current epoch's training progress row(s); the
                validation row is appended to it for display.

        Returns:
            ``(ultimate_loss, progress, stats)``: the epoch-average of the
            criterion named 'Loss', the combined progress row, and a dict of
            per-criterion averages plus timing info.
        """
        average_meters = defaultdict(AverageMeter)
        self.model.eval()
        epoch_start_time = time.time()
        batch_num=0
        num_data_points=len(self.val_loader)
        prefetcher = data_prefetcher(self.val_loader)
        torch.cuda.empty_cache()
        with torch.no_grad():
            for i in range(len(self.val_loader)):
                input, target = prefetcher.next()
                if batch_num ==0:
                    # ETA timer starts once the first batch has been loaded,
                    # excluding warm-up cost from the per-batch estimate.
                    epoch_start_time2=time.time()
                output = self.model(input)
                loss_dict = {}
                for c_name,criterion_fun in self.criteria.items():
                    loss_dict[c_name]=criterion_fun(output, target)
                batch_num=i+1
                for name,value in loss_dict.items():
                    try:
                        average_meters[name].update(value.data)
                    except:
                        # Plain numbers have no `.data`; fall back to the raw value.
                        average_meters[name].update(value)
                # +.2 damps the estimate for small batch counts.
                eta = ((time.time()-epoch_start_time2)/(batch_num+.2))*(len(self.val_loader)-batch_num)
                to_print = {}
                to_print['#/{0}'.format(num_data_points)]= ('{0}').format(batch_num)
                to_print['eta']= ('{0}').format(time.strftime("%H:%M:%S", time.gmtime(int(eta))))
                for name in self.criteria.keys():
                    meter = average_meters[name]
                    to_print[name]= ('{meter.avg:.4g}').format(meter=meter)
                progress=train_table+[to_print]
                if batch_num % self.args.print_frequency == 0:
                    print_table(self.progress_table+[progress])
        epoch_time = time.time()-epoch_start_time
        stats={'batches':len(self.val_loader),
               'Epoch time':epoch_time,
               }
        ultimate_loss = None
        for name in self.criteria.keys():
            meter = average_meters[name]
            stats[name]=meter.avg
        # NOTE(review): assumes a criterion named 'Loss' always exists;
        # raises KeyError otherwise — confirm against criteria construction.
        ultimate_loss = stats['Loss']
        to_print['eta']= ('{0}').format(time.strftime("%H:%M:%S", time.gmtime(int(epoch_time))))
        torch.cuda.empty_cache()
        return float(ultimate_loss), progress , stats
def adjust_learning_rate(self):
self.lr = self.lr0 * (0.50 ** (self.ticks))
self.set_learning_rate(self.lr)
def set_learning_rate(self,lr):
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
    #mp.set_start_method('forkserver')
    # Parse CLI options (parser is defined at module scope) and launch training.
    args = parser.parse_args()
    main(args)
| 26,764 | 35.917241 | 160 | py |
taskgrouping | taskgrouping-master/read_training_history.py | import argparse
import os
import torch
from collections import defaultdict
from train_taskonomy import print_table
# Command-line interface: checkpoint path, architecture name, optional raw
# dump path, and a flag to plot the smoothed loss history.
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--model_file', '-m', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--arch', '-a', metavar='ARCH', default='',
                    help='model architecture: ' +
                    ' (default: resnet18)')
parser.add_argument('--save_raw',default='')  # NOTE(review): parsed but never read in this script
parser.add_argument('--show_loss_plot','-s', action='store_true',
                    help='show loss plot')
args = parser.parse_args()
# def print_table(table_list, go_back=True):
# if go_back:
# print("\033[F",end='')
# print("\033[K",end='')
# for i in range(len(table_list)):
# print("\033[F",end='')
# print("\033[K",end='')
# lens = defaultdict(int)
# for i in table_list:
# for ii,to_print in enumerate(i):
# for title,val in to_print.items():
# lens[(title,ii)]=max(lens[(title,ii)],max(len(title),len(val)))
# # printed_table_list_header = []
# for ii,to_print in enumerate(table_list[0]):
# for title,val in to_print.items():
# print('{0:^{1}}'.format(title,lens[(title,ii)]),end=" ")
# for i in table_list:
# print()
# for ii,to_print in enumerate(i):
# for title,val in to_print.items():
# print('{0:^{1}}'.format(val,lens[(title,ii)]),end=" ",flush=True)
# print()
def create_model():
    """Instantiate the architecture named by ``args.arch`` from ``mymodels``.

    First tries the multi-task constructor signature used by the taskonomy
    models; architectures that do not accept those keyword arguments are
    constructed with no arguments instead.

    Returns:
        The instantiated (untrained) model.

    Raises:
        KeyError: if ``args.arch`` does not name anything in ``mymodels``.
    """
    import mymodels as models
    # Look up the factory once, outside the try, so an unknown architecture
    # raises a clear KeyError instead of being retried (the original bare
    # `except:` masked the real failure and also caught KeyboardInterrupt).
    model_factory = models.__dict__[args.arch]
    try:
        model = model_factory(num_classification_classes=1000,
                              num_segmentation_classes=21,
                              num_segmentation_classes2=90,
                              normalize=False)
    except TypeError:
        # Architecture does not take the multi-task kwargs; fall back to the
        # plain no-argument constructor.
        model = model_factory()
    return model
# Script entry: load the checkpoint named on the command line, reprint its
# stored progress table, and optionally plot a smoothed loss curve.
# NOTE(review): a nonexistent --model_file path is silently ignored.
if args.model_file:
    if os.path.isfile(args.model_file):
        print("=> loading checkpoint '{}'".format(args.model_file))
        checkpoint = torch.load(args.model_file)
        # 'progress_table' holds the per-epoch rows printed during training.
        progress_table = checkpoint['progress_table']
        print_table(progress_table,False)
        if args.show_loss_plot:
            loss_history = checkpoint['loss_history']
            print(len(loss_history))
            print()
            import matplotlib.pyplot as plt
            # Drop the noisy first 200 iterations, then smooth with an
            # exponential moving average (alpha = 0.01).
            loss_history2 = loss_history[200:]
            loss_history3 = []
            cur = loss_history2[0]
            for i in loss_history2:
                cur = .99*cur+i*.01
                loss_history3.append(cur)
            plt.plot(range(len(loss_history3)),loss_history3)
plt.show() | 2,820 | 30.696629 | 83 | py |
taskgrouping | taskgrouping-master/sync_batchnorm/replicate.py | # -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import functools
from torch.nn.parallel.data_parallel import DataParallel
__all__ = [
'CallbackContext',
'execute_replication_callbacks',
'DataParallelWithCallback',
'patch_replication_callback'
]
class CallbackContext(object):
    # Empty attribute bag: one instance is shared by all replicas of the same
    # sub-module so replication callbacks can exchange state through it
    # (see execute_replication_callbacks below).
    pass
def execute_replication_callbacks(modules):
    """Invoke ``__data_parallel_replicate__(ctx, copy_id)`` on each replicated module.

    ``modules[0]`` is the master copy; every sub-module position ``j`` gets a
    single ``CallbackContext`` shared across all replicas, so copies of the
    same sub-module on different devices can exchange information.  Because
    the master copy appears first in ``modules``, its callback is guaranteed
    to run before any slave copy's.
    """
    master = modules[0]
    n_sub_modules = len(list(master.modules()))
    shared_ctxs = [CallbackContext() for _ in range(n_sub_modules)]

    for copy_id, replica in enumerate(modules):
        for j, sub_module in enumerate(replica.modules()):
            callback = getattr(sub_module, '__data_parallel_replicate__', None)
            if callback is not None:
                callback(shared_ctxs[j], copy_id)
class DataParallelWithCallback(DataParallel):
    """``DataParallel`` variant that fires a replication callback.

    After the usual ``replicate`` produces the per-device copies, any
    sub-module defining ``__data_parallel_replicate__(ctx, copy_id)`` has it
    invoked with a context object shared among copies of that sub-module.

    Examples:
        > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
        > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
        # sync_bn.__data_parallel_replicate__ will be invoked.
    """

    def replicate(self, module, device_ids):
        replicas = super(DataParallelWithCallback, self).replicate(module, device_ids)
        execute_replication_callbacks(replicas)
        return replicas
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate
| 3,226 | 32.968421 | 115 | py |
taskgrouping | taskgrouping-master/sync_batchnorm/unittest.py | # -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import torch
class TorchTestCase(unittest.TestCase):
    """``unittest.TestCase`` with a helper for approximate tensor equality."""

    def assertTensorClose(self, x, y):
        """Assert ``torch.allclose(x, y)``, reporting abs/rel differences on failure."""
        abs_diff = float((x - y).abs().max())
        # Relative difference is undefined against an all-zero reference.
        if (y == 0).all():
            rel_diff = 'NaN'
        else:
            rel_diff = float((abs_diff / y).abs().max())
        detail = (
            'Tensor close check failed\n'
            'adiff={}\n'
            'rdiff={}\n'
        ).format(abs_diff, rel_diff)
        self.assertTrue(torch.allclose(x, y), detail)
| 746 | 23.9 | 59 | py |
taskgrouping | taskgrouping-master/sync_batchnorm/batchnorm.py | # -*- coding: utf-8 -*-
# File : batchnorm.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import collections
import contextlib
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
try:
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
except ImportError:
ReduceAddCoalesced = Broadcast = None
try:
from jactorch.parallel.comm import SyncMaster
from jactorch.parallel.data_parallel import JacDataParallel as DataParallelWithCallback
except ImportError:
from .comm import SyncMaster
from .replicate import DataParallelWithCallback
__all__ = [
'SynchronizedBatchNorm1d', 'SynchronizedBatchNorm2d', 'SynchronizedBatchNorm3d',
'patch_sync_batchnorm', 'convert_model'
]
def _sum_ft(tensor):
"""sum over the first and last dimention"""
return tensor.sum(dim=0).sum(dim=-1)
def _unsqueeze_ft(tensor):
"""add new dimensions at the front and the tail"""
return tensor.unsqueeze(0).unsqueeze(-1)
# Messages exchanged while reducing batch statistics across devices:
# slaves send (sum, sum-of-squares, element count); the master broadcasts
# back (mean, inverse std) — note the first field is named 'sum' anyway.
_ChildMessage = collections.namedtuple('_ChildMessage', ['sum', 'ssum', 'sum_size'])
_MasterMessage = collections.namedtuple('_MasterMessage', ['sum', 'inv_std'])
class _SynchronizedBatchNorm(_BatchNorm):
    """Shared implementation behind the 1d/2d/3d synchronized BatchNorm layers.

    During data-parallel training, per-device sums are reduced on a master
    copy (via ``SyncMaster``) so normalization uses whole-mini-batch
    statistics.  Outside data-parallel replication, or in eval mode, it
    falls back to the stock ``F.batch_norm``.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):
        assert ReduceAddCoalesced is not None, 'Can not use Synchronized Batch Normalization without CUDA support.'
        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
        # Master-side reducer; replicas register as slaves during replication.
        self._sync_master = SyncMaster(self._data_parallel_master)
        self._is_parallel = False
        self._parallel_id = None
        self._slave_pipe = None

    def forward(self, input):
        # If it is not parallel computation or is in evaluation mode, use PyTorch's implementation.
        if not (self._is_parallel and self.training):
            return F.batch_norm(
                input, self.running_mean, self.running_var, self.weight, self.bias,
                self.training, self.momentum, self.eps)

        # Resize the input to (B, C, -1).
        input_shape = input.size()
        input = input.view(input.size(0), self.num_features, -1)

        # Compute the sum and square-sum (per channel, over batch + spatial).
        sum_size = input.size(0) * input.size(2)
        input_sum = _sum_ft(input)
        input_ssum = _sum_ft(input ** 2)

        # Reduce-and-broadcast the statistics; copy 0 acts as the master.
        if self._parallel_id == 0:
            mean, inv_std = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
        else:
            mean, inv_std = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))

        # Compute the output.
        if self.affine:
            # MJY:: Fuse the multiplication for speed.
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std * self.weight) + _unsqueeze_ft(self.bias)
        else:
            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)

        # Reshape it back to the caller's layout.
        return output.view(input_shape)

    def __data_parallel_replicate__(self, ctx, copy_id):
        # Invoked by DataParallelWithCallback after replication.
        self._is_parallel = True
        self._parallel_id = copy_id

        # parallel_id == 0 means master device.
        if self._parallel_id == 0:
            ctx.sync_master = self._sync_master
        else:
            self._slave_pipe = ctx.sync_master.register_slave(copy_id)

    def _data_parallel_master(self, intermediates):
        """Reduce the sum and square-sum, compute the statistics, and broadcast it."""

        # Always using same "device order" makes the ReduceAdd operation faster.
        # Thanks to:: Tete Xiao (http://tetexiao.com/)
        intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())

        to_reduce = [i[1][:2] for i in intermediates]
        to_reduce = [j for i in to_reduce for j in i]  # flatten (sum, ssum) pairs
        target_gpus = [i[1].sum.get_device() for i in intermediates]

        sum_size = sum([i[1].sum_size for i in intermediates])
        sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
        mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)

        # Scatter (mean, inv_std) back to every contributing device.
        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)

        outputs = []
        for i, rec in enumerate(intermediates):
            outputs.append((rec[0], _MasterMessage(*broadcasted[i*2:i*2+2])))

        return outputs

    def _compute_mean_std(self, sum_, ssum, size):
        """Compute the mean and standard-deviation with sum and square-sum. This method
        also maintains the moving average on the master device."""
        assert size > 1, 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
        mean = sum_ / size
        sumvar = ssum - sum_ * mean
        unbias_var = sumvar / (size - 1)
        bias_var = sumvar / size

        # `torch.no_grad` exists on modern torch; the guard keeps very old
        # versions (predating no_grad) working identically.
        if hasattr(torch, 'no_grad'):
            with torch.no_grad():
                self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
                self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data
        else:
            self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean.data
            self.running_var = (1 - self.momentum) * self.running_var + self.momentum * unbias_var.data

        # Return the mean and the inverse std (variance clamped by eps).
        return mean, bias_var.clamp(self.eps) ** -0.5
class SynchronizedBatchNorm1d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over a 2d or 3d input (a mini-batch
    of 1d inputs, optionally with an extra temporal dimension).

    .. math::

        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in :class:`torch.nn.BatchNorm1d`, the mean and
    standard-deviation are reduced across all devices during training, so the
    statistics reflect the whole mini-batch rather than one device's slice.
    For one-GPU or CPU-only use it behaves exactly like the built-in layer.

    ``gamma`` and ``beta`` are learnable vectors of size ``C`` when
    ``affine=True``; running estimates of mean/variance are maintained with
    the given ``momentum`` (default 0.1) and used at evaluation time.

    Args:
        num_features: num_features from an expected input of size
            `batch_size x num_features [x width]`
        eps: value added to the denominator for numerical stability. Default: 1e-5
        momentum: value used for the running_mean/running_var computation. Default: 0.1
        affine: when ``True``, the layer has learnable affine parameters. Default: ``True``

    Shape::
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: :math:`(N, C)` or :math:`(N, C, L)` (same shape as input)

    Examples:
        >>> m = SynchronizedBatchNorm1d(100)
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100)))
    """

    def _check_input_dim(self, input):
        dims = input.dim()
        if dims not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'
                             .format(dims))
        # NOTE(review): many torch versions implement the base
        # `_check_input_dim` as `raise NotImplementedError`; this call is
        # dormant because `forward` above never invokes `_check_input_dim`.
        super(SynchronizedBatchNorm1d, self)._check_input_dim(input)
class SynchronizedBatchNorm2d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over a 4d input (a mini-batch of
    2d inputs with an additional channel dimension).

    .. math::

        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in :class:`torch.nn.BatchNorm2d`, the mean and
    standard-deviation are reduced across all devices during training, so
    statistics are computed over `(N, H, W)` slices of the full mini-batch
    (Spatial BatchNorm).  For one-GPU or CPU-only use it behaves exactly
    like the built-in layer.

    ``gamma`` and ``beta`` are learnable vectors of size ``C`` when
    ``affine=True``; running estimates of mean/variance are maintained with
    the given ``momentum`` (default 0.1) and used at evaluation time.

    Args:
        num_features: num_features from an expected input of
            size batch_size x num_features x height x width
        eps: value added to the denominator for numerical stability. Default: 1e-5
        momentum: value used for the running_mean/running_var computation. Default: 0.1
        affine: when ``True``, the layer has learnable affine parameters. Default: ``True``

    Shape::
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)

    Examples:
        >>> m = SynchronizedBatchNorm2d(100)
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100, 35, 45)))
    """

    def _check_input_dim(self, input):
        dims = input.dim()
        if dims != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(dims))
        # NOTE(review): dormant — `forward` never calls `_check_input_dim`,
        # and the torch base implementation may raise NotImplementedError.
        super(SynchronizedBatchNorm2d, self)._check_input_dim(input)
class SynchronizedBatchNorm3d(_SynchronizedBatchNorm):
    r"""Synchronized Batch Normalization over a 5d input (a mini-batch of
    3d inputs with an additional channel dimension).

    .. math::

        y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta

    Unlike the built-in :class:`torch.nn.BatchNorm3d`, the mean and
    standard-deviation are reduced across all devices during training, so
    statistics are computed over `(N, D, H, W)` slices of the full
    mini-batch (Volumetric / Spatio-temporal BatchNorm).  For one-GPU or
    CPU-only use it behaves exactly like the built-in layer.

    ``gamma`` and ``beta`` are learnable vectors of size ``C`` when
    ``affine=True``; running estimates of mean/variance are maintained with
    the given ``momentum`` (default 0.1) and used at evaluation time.

    Args:
        num_features: num_features from an expected input of
            size batch_size x num_features x depth x height x width
        eps: value added to the denominator for numerical stability. Default: 1e-5
        momentum: value used for the running_mean/running_var computation. Default: 0.1
        affine: when ``True``, the layer has learnable affine parameters. Default: ``True``

    Shape::
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)

    Examples:
        >>> m = SynchronizedBatchNorm3d(100)
        >>> output = m(torch.autograd.Variable(torch.randn(20, 100, 35, 45, 10)))
    """

    def _check_input_dim(self, input):
        dims = input.dim()
        if dims != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(dims))
        # NOTE(review): dormant — `forward` never calls `_check_input_dim`,
        # and the torch base implementation may raise NotImplementedError.
        super(SynchronizedBatchNorm3d, self)._check_input_dim(input)
@contextlib.contextmanager
def patch_sync_batchnorm():
    """Temporarily replace ``nn.BatchNorm*d`` with the synchronized versions.

    Any module constructed inside the ``with`` block therefore uses
    SynchronizedBatchNorm.  The originals are restored on exit — including
    when the body raises (the original implementation left ``torch.nn``
    monkey-patched for the rest of the process on an exception).
    """
    import torch.nn as nn

    backup = nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d

    nn.BatchNorm1d = SynchronizedBatchNorm1d
    nn.BatchNorm2d = SynchronizedBatchNorm2d
    nn.BatchNorm3d = SynchronizedBatchNorm3d

    try:
        yield
    finally:
        # Always restore, even if the body raised.
        nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d = backup
def convert_model(module):
    """Recursively replace every ``torch.nn.BatchNorm*d`` inside ``module``
    with the corresponding ``SynchronizedBatchNorm*d``.

    Args:
        module: the input module to convert to a SyncBN model

    Examples:
        >>> import torch.nn as nn
        >>> import torchvision
        >>> # m is a standard pytorch model
        >>> m = torchvision.models.resnet18(True)
        >>> m = nn.DataParallel(m)
        >>> # after convert, m is using SyncBN
        >>> m = convert_model(m)
    """
    if isinstance(module, torch.nn.DataParallel):
        # Unwrap, convert the inner module, then re-wrap with the
        # callback-aware DataParallel that SyncBN requires.
        inner = convert_model(module.module)
        return DataParallelWithCallback(inner)

    conversions = (
        (torch.nn.modules.batchnorm.BatchNorm1d, SynchronizedBatchNorm1d),
        (torch.nn.modules.batchnorm.BatchNorm2d, SynchronizedBatchNorm2d),
        (torch.nn.modules.batchnorm.BatchNorm3d, SynchronizedBatchNorm3d),
    )
    converted = module
    for torch_cls, sync_cls in conversions:
        if isinstance(module, torch_cls):
            converted = sync_cls(module.num_features, module.eps,
                                 module.momentum, module.affine)
            # Carry over running statistics and (optionally) the learned
            # affine parameters so behaviour is unchanged.
            converted.running_mean = module.running_mean
            converted.running_var = module.running_var
            if module.affine:
                converted.weight.data = module.weight.data.clone().detach()
                converted.bias.data = module.bias.data.clone().detach()

    for name, child in module.named_children():
        converted.add_module(name, convert_model(child))

    return converted
| 15,829 | 39.075949 | 116 | py |
taskgrouping | taskgrouping-master/sync_batchnorm/batchnorm_reimpl.py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : batchnorm_reimpl.py
# Author : acgtyrant
# Date : 11/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import torch
import torch.nn as nn
import torch.nn.init as init
__all__ = ['BatchNorm2dReimpl']
class BatchNorm2dReimpl(nn.Module):
    """
    A pure-PyTorch re-implementation of 2d batch normalization, kept for
    testing numerical stability against the synchronized version.

    Author: acgtyrant
    See also:
    https://github.com/vacancy/Synchronized-BatchNorm-PyTorch/issues/14
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.weight = nn.Parameter(torch.empty(num_features))
        self.bias = nn.Parameter(torch.empty(num_features))
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    def reset_running_stats(self):
        """Reset the running statistics to their initial (0, 1) state."""
        self.running_mean.zero_()
        self.running_var.fill_(1)

    def reset_parameters(self):
        """Re-initialise running statistics and the affine parameters."""
        self.reset_running_stats()
        init.uniform_(self.weight)
        init.zeros_(self.bias)

    def forward(self, input_):
        n_batch, n_chan, height, width = input_.size()
        count = n_batch * height * width
        # Collapse to (C, N*H*W) so per-channel statistics become row reductions.
        flat = input_.permute(1, 0, 2, 3).contiguous().view(n_chan, count)
        total = flat.sum(1)
        total_sq = flat.pow(2).sum(1)

        mean = total / count
        sum_var = total_sq - total * mean

        # Running mean uses the batch mean; running var uses the unbiased
        # estimator (count - 1), matching torch's BatchNorm semantics.
        self.running_mean = (
            (1 - self.momentum) * self.running_mean
            + self.momentum * mean.detach()
        )
        unbiased_var = sum_var / (count - 1)
        self.running_var = (
            (1 - self.momentum) * self.running_var
            + self.momentum * unbiased_var.detach()
        )

        # Normalisation itself uses the biased variance.
        biased_var = sum_var / count
        inv_std = 1 / (biased_var + self.eps).pow(0.5)
        normalized = (
            (flat - mean.unsqueeze(1)) * inv_std.unsqueeze(1) *
            self.weight.unsqueeze(1) + self.bias.unsqueeze(1))
        return normalized.view(n_chan, n_batch, height, width).permute(1, 0, 2, 3).contiguous()
| 2,385 | 30.813333 | 95 | py |
taskgrouping | taskgrouping-master/model_definitions/ozan_rep_fun.py | import torch.autograd
import sys
import math
from .ozan_min_norm_solvers import MinNormSolver
import statistics
class OzanRepFunction(torch.autograd.Function):
    """Replicates a representation for ``n`` task heads and, in backward,
    combines the per-task gradients with a minimum-norm convex combination
    (MGDA-style, via ``MinNormSolver``) instead of a plain sum.

    ``n`` is a class attribute the caller sets before ``apply``.
    """

    # Number of task-specific copies produced in forward.
    n=5

    def __init__(self):
        super(OzanRepFunction, self).__init__()

    @staticmethod
    def forward(ctx, input):
        # Broadcast the input to shape (n, *input.shape). `expand` returns a
        # view, so clone to give each head its own memory.
        shape = input.shape
        ret = input.expand(OzanRepFunction.n,*shape)
        return ret.clone()  # REASON FOR ERROR: forgot to .clone() here

    @staticmethod
    def backward(ctx, grad_output):
        # grad_output stacks one gradient per task head: (n, batch, ...).
        num_grads = grad_output.shape[0]
        batch_size = grad_output.shape[1]
        if num_grads>=2:
            # Min-norm convex combination of task gradients; fall back to a
            # uniform average if the solver rejects the inputs.
            try:
                alphas, score = MinNormSolver.find_min_norm_element([grad_output[i].view(batch_size,-1).float() for i in range(num_grads)])
            except ValueError as error:
                alphas = [1/num_grads for i in range(num_grads)]
            # sqrt(n) rescale keeps the combined magnitude comparable to an
            # unweighted sum of n gradients.
            grad_outputs = [grad_output[i]*alphas[i]*math.sqrt(num_grads) for i in range(num_grads)]
            output = grad_outputs[0]
            for i in range(1,num_grads):
                output+=grad_outputs[i]
            return output
        elif num_grads==1:
            grad_input=grad_output.clone()
            out = grad_input.sum(dim=0)
        else:
            # NOTE(review): with zero grads `out` is never assigned, so the
            # return below would raise NameError — presumably unreachable.
            pass
        return out


ozan_rep_function = OzanRepFunction.apply
class TrevorRepFunction(torch.autograd.Function):
    """Identity in the forward pass; scales the gradient by ``1/sqrt(n)``.

    ``n`` is a class attribute the caller sets before ``apply``; it keeps
    the gradient magnitude flowing into a shared trunk comparable to the
    replicated multi-head variants.
    """

    n = 5

    def __init__(self):
        super(TrevorRepFunction, self).__init__()

    @staticmethod
    def forward(ctx, input):
        # Pure identity; clone so autograd sees a fresh output tensor.
        return input.clone()

    @staticmethod
    def backward(ctx, grad_output):
        # Down-scale the incoming gradient by 1/sqrt(n).
        scale = 1.0 / math.sqrt(TrevorRepFunction.n)
        return grad_output.clone() * scale


trevor_rep_function = TrevorRepFunction.apply
# Global step counter used to periodically log GradNorm's target weights.
count = 0


class GradNormRepFunction(torch.autograd.Function):
    """Replicates a representation for ``n`` heads and rebalances the
    per-task gradients with GradNorm-style adaptive task weights.

    The class attributes are set externally by the training loop:
    ``inital_task_losses`` (losses at step 0), ``current_task_losses``, and
    ``current_weights`` (read and mutated in place here).
    """

    n=5
    inital_task_losses=None
    current_task_losses=None
    current_weights=None

    def __init__(self):
        super(GradNormRepFunction, self).__init__()

    @staticmethod
    def forward(ctx, input):
        # Broadcast to (n, *shape); clone so each head owns its copy.
        shape = input.shape
        ret = input.expand(GradNormRepFunction.n,*shape)
        return ret.clone()

    @staticmethod
    def backward(ctx, grad_output):
        global count
        num_grads = grad_output.shape[0]
        batch_size = grad_output.shape[1]
        grad_output=grad_output.float()
        if num_grads>=2:
            # Per-task gradient norms, scaled by the current task weights.
            GiW = [torch.sqrt(grad_output[i].reshape(-1).dot(grad_output[i].reshape(-1)))*GradNormRepFunction.current_weights[i] for i in range(num_grads)]
            GW_bar = torch.mean(torch.stack(GiW))
            try:
                # Relative inverse training rate per task: current/initial loss.
                Li_ratio=[c/max(i,.0000001) for c,i in zip(GradNormRepFunction.current_task_losses,GradNormRepFunction.inital_task_losses)]
                mean_ratio = statistics.mean(Li_ratio)
                ri = [lir/max(mean_ratio,.00000001) for lir in Li_ratio]
                # Target gradient norm per task (exponent 1.5 plays the role
                # of GradNorm's alpha), the weight that would achieve it,
                # and a small nudge of the current weights toward it.
                target_grad=[float(GW_bar * (max(r_i,.00000001)**1.5)) for r_i in ri]
                target_weight= [float(target_grad[i]/float(GiW[i])) for i in range(num_grads)]
                total_weight = sum(target_weight)
                total_weight = max(.0000001,total_weight)
                target_weight=[i*num_grads/total_weight for i in target_weight]
                for i in range(len(GradNormRepFunction.current_weights)):
                    wi = GradNormRepFunction.current_weights[i]
                    GradNormRepFunction.current_weights[i]+=(.0001*wi if (wi<target_weight[i]) else -.0001*wi)
                count+=1
                # Periodically append the target weights to a log file.
                if count % 80==0:
                    with open("gradnorm_weights.txt", "a") as myfile:
                        myfile.write('target: '+str(target_weight)+'\n')
                # Renormalise so the current weights sum to n.
                total_weight = sum(GradNormRepFunction.current_weights)
                total_weight = max(.0000001,total_weight)
                GradNormRepFunction.current_weights = [i*num_grads/total_weight for i in GradNormRepFunction.current_weights]
            except:
                # NOTE(review): silently ignores any failure in the weight
                # update (e.g. uninitialised class attributes) and reuses the
                # previous weights — apparently intentional best-effort, but
                # the bare except also swallows KeyboardInterrupt.
                pass
            # Weighted sum of head gradients, scaled by 1/sqrt(n); cast back
            # to half precision for the fp16 training path.
            grad_outputs = [grad_output[i]*GradNormRepFunction.current_weights[i]*(1/math.sqrt(num_grads)) for i in range(num_grads)]
            output = grad_outputs[0]
            for i in range(1,num_grads):
                output+=grad_outputs[i]
            return output.half()
        elif num_grads==1:
            grad_input=grad_output.clone()
            out = grad_input.sum(dim=0)
        else:
            # NOTE(review): `out` is never assigned on this path; the return
            # below would raise NameError — presumably unreachable.
            pass
        return out


gradnorm_rep_function = GradNormRepFunction.apply
| 7,442 | 34.442857 | 155 | py |
taskgrouping | taskgrouping-master/model_definitions/xception_taskonomy_small.py | """
Creates an Xception Model as defined in:
Francois Chollet
Xception: Deep Learning with Depthwise Separable Convolutions
https://arxiv.org/pdf/1610.02357.pdf
This weights ported from the Keras implementation. Achieves the following performance on the validation set:
Loss:0.9173 Prec@1:78.892 Prec@5:94.292
REMEMBER to set your image size to 3x299x299 for both test and validation
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
"""
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn import init
import torch
from .ozan_rep_fun import ozan_rep_function,trevor_rep_function,OzanRepFunction,TrevorRepFunction,gradnorm_rep_function,GradNormRepFunction
__all__ = ['xception_taskonomy_small','xception_taskonomy_small_gradnorm','xception_taskonomy_small_ozan','xception_taskonomy_small_half','xception_taskonomy_small_quad','xception_taskonomy_small_double','xception_taskonomy_small_double_ozan','xception_taskonomy_small_half_ozan','xception_taskonomy_small_quad_ozan']
# model_urls = {
# 'xception_taskonomy':'file:///home/tstand/Dropbox/taskonomy/xception_taskonomy-a4b32ef7.pth.tar'
# }
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: a grouped spatial convolution
    followed by a 1x1 pointwise convolution.

    ``groupsize`` controls how many input channels share a spatial filter
    (1 = fully depthwise, as in standard Xception).
    """

    def __init__(self,in_channels,out_channels,kernel_size=1,stride=1,padding=0,dilation=1,bias=False,groupsize=1):
        super(SeparableConv2d,self).__init__()
        groups = max(1, in_channels // groupsize)
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride,
                               padding, dilation, groups=groups, bias=bias)
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)

    def forward(self,x):
        return self.pointwise(self.conv1(x))
class Block(nn.Module):
    """Xception-style residual block of separable convolutions.

    Builds ``reps`` separable-conv/batchnorm stages — growing the channel
    count in either the first or the last stage depending on ``grow_first``
    — with a 1x1-conv shortcut whenever the shape changes.  When
    ``strides != 1`` a learned stride-2 2x2 convolution performs the
    downsampling (replacing the usual average pool, see the commented line).
    """

    def __init__(self,in_filters,out_filters,reps,strides=1,start_with_relu=True,grow_first=True):
        super(Block, self).__init__()

        # Projection shortcut only when channel count or resolution changes.
        if out_filters != in_filters or strides!=1:
            self.skip = nn.Conv2d(in_filters,out_filters,1,stride=strides, bias=False)
            self.skipbn = nn.BatchNorm2d(out_filters)
        else:
            self.skip=None

        self.relu = nn.ReLU(inplace=True)
        rep=[]

        filters=in_filters
        if grow_first:
            # Grow channels in the first separable conv.
            rep.append(self.relu)
            rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
            rep.append(nn.BatchNorm2d(out_filters))
            filters = out_filters

        for i in range(reps-1):
            rep.append(self.relu)
            rep.append(SeparableConv2d(filters,filters,3,stride=1,padding=1,bias=False))
            rep.append(nn.BatchNorm2d(filters))

        if not grow_first:
            # Grow channels in the last separable conv instead.
            rep.append(self.relu)
            rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
            rep.append(nn.BatchNorm2d(out_filters))
            filters=out_filters

        if not start_with_relu:
            rep = rep[1:]
        else:
            # First activation replaced with a non-inplace ReLU — presumably
            # because its input also feeds the skip branch (TODO confirm).
            rep[0] = nn.ReLU(inplace=False)

        if strides != 1:
            #rep.append(nn.AvgPool2d(3,strides,1))
            rep.append(nn.Conv2d(filters,filters,2,2))
        self.rep = nn.Sequential(*rep)

    def forward(self,inp):
        x = self.rep(inp)

        if self.skip is not None:
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp

        # Residual addition (in-place on the conv-branch output).
        x+=skip
        return x
class Encoder(nn.Module):
    # Baseline Xception-style backbone: a stride-2 stem, three stride-2 blocks
    # (spatial /16 overall), four stride-1 refinement blocks, and a separable
    # conv projecting to a 256-channel shared representation.
    def __init__(self):
        super(Encoder, self).__init__()
        # Stem: /2 spatial, 3 -> 24 -> 48 channels.
        self.conv1 = nn.Conv2d(3, 24, 3,2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.relu = nn.ReLU(inplace=True)
        self.relu2 = nn.ReLU(inplace=False)

        self.conv2 = nn.Conv2d(24,48,3,1,1,bias=False)
        self.bn2 = nn.BatchNorm2d(48)
        #do relu here

        # Downsampling blocks: each halves the spatial size.
        self.block1=Block(48,96,2,2,start_with_relu=False,grow_first=True)
        self.block2=Block(96,192,2,2,start_with_relu=True,grow_first=True)
        self.block3=Block(192,512,2,2,start_with_relu=True,grow_first=True)
        #self.block4=Block(768,768,3,1,start_with_relu=True,grow_first=True)
        #self.block5=Block(768,768,3,1,start_with_relu=True,grow_first=True)
        # self.block6=Block(768,768,3,1,start_with_relu=True,grow_first=True)
        # self.block7=Block(768,768,3,1,start_with_relu=True,grow_first=True)

        # Stride-1 refinement blocks at constant width.
        self.block8=Block(512,512,2,1,start_with_relu=True,grow_first=True)
        self.block9=Block(512,512,2,1,start_with_relu=True,grow_first=True)
        self.block10=Block(512,512,2,1,start_with_relu=True,grow_first=True)
        self.block11=Block(512,512,2,1,start_with_relu=True,grow_first=True)
        #self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)

        # Project to the 256-channel representation consumed by the decoders.
        self.conv3 = SeparableConv2d(512,256,3,1,1)
        self.bn3 = nn.BatchNorm2d(256)
        #self.conv3 = SeparableConv2d(1024,1536,3,1,1)
        #self.bn3 = nn.BatchNorm2d(1536)

        #do relu here
        #self.conv4 = SeparableConv2d(1536,2048,3,1,1)
        #self.bn4 = nn.BatchNorm2d(2048)

    def forward(self,input):
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)

        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        # x = self.block4(x)
        # x = self.block5(x)
        # x = self.block6(x)
        # x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        #x = self.block12(x)
        x = self.conv3(x)
        x = self.bn3(x)
        #x = self.relu(x)

        #x = self.conv4(x)
        #x = self.bn4(x)
        # Non-in-place ReLU so the pre-activation tensor survives for the
        # multi-decoder gradient functions applied downstream.
        representation = self.relu2(x)

        return representation
class EncoderHalf(nn.Module):
    # Narrower variant of Encoder (roughly half the trunk width: 360 channels
    # instead of 512 in the deep blocks); same topology and 256-channel output.
    def __init__(self):
        super(EncoderHalf, self).__init__()
        self.conv1 = nn.Conv2d(3, 24, 3,2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.relu = nn.ReLU(inplace=True)
        self.relu2 = nn.ReLU(inplace=False)

        self.conv2 = nn.Conv2d(24,48,3,1,1,bias=False)
        self.bn2 = nn.BatchNorm2d(48)
        #do relu here

        # Downsampling blocks (/16 total with the stem).
        self.block1=Block(48,64,2,2,start_with_relu=False,grow_first=True)
        self.block2=Block(64,128,2,2,start_with_relu=True,grow_first=True)
        self.block3=Block(128,360,2,2,start_with_relu=True,grow_first=True)
        #self.block4=Block(768,768,3,1,start_with_relu=True,grow_first=True)
        #self.block5=Block(768,768,3,1,start_with_relu=True,grow_first=True)
        # self.block6=Block(768,768,3,1,start_with_relu=True,grow_first=True)
        # self.block7=Block(768,768,3,1,start_with_relu=True,grow_first=True)

        # Stride-1 refinement blocks.
        self.block8=Block(360,360,2,1,start_with_relu=True,grow_first=True)
        self.block9=Block(360,360,2,1,start_with_relu=True,grow_first=True)
        self.block10=Block(360,360,2,1,start_with_relu=True,grow_first=True)
        self.block11=Block(360,360,2,1,start_with_relu=True,grow_first=True)
        #self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)

        # Same 256-channel representation as the other encoder variants, so
        # the decoders are interchangeable.
        self.conv3 = SeparableConv2d(360,256,3,1,1)
        self.bn3 = nn.BatchNorm2d(256)
        #self.conv3 = SeparableConv2d(1024,1536,3,1,1)
        #self.bn3 = nn.BatchNorm2d(1536)

        #do relu here
        #self.conv4 = SeparableConv2d(1536,2048,3,1,1)
        #self.bn4 = nn.BatchNorm2d(2048)

    def forward(self,input):
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)

        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        # x = self.block4(x)
        # x = self.block5(x)
        # x = self.block6(x)
        # x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        #x = self.block12(x)
        x = self.conv3(x)
        x = self.bn3(x)
        #x = self.relu(x)

        #x = self.conv4(x)
        #x = self.bn4(x)
        representation = self.relu2(x)

        return representation
class EncoderQuad(nn.Module):
    # Widest variant of Encoder (1024-channel trunk, roughly 4x the capacity);
    # same topology and the same 256-channel output representation.
    def __init__(self):
        super(EncoderQuad, self).__init__()
        print('entering quad constructor')
        self.conv1 = nn.Conv2d(3, 48, 3,2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(48)
        self.relu = nn.ReLU(inplace=True)
        self.relu2 = nn.ReLU(inplace=False)

        self.conv2 = nn.Conv2d(48,96,3,1,1,bias=False)
        self.bn2 = nn.BatchNorm2d(96)
        #do relu here

        # Downsampling blocks (/16 total with the stem).
        self.block1=Block(96,192,2,2,start_with_relu=False,grow_first=True)
        self.block2=Block(192,384,2,2,start_with_relu=True,grow_first=True)
        self.block3=Block(384,1024,2,2,start_with_relu=True,grow_first=True)
        #self.block4=Block(768,768,3,1,start_with_relu=True,grow_first=True)
        #self.block5=Block(768,768,3,1,start_with_relu=True,grow_first=True)
        # self.block6=Block(768,768,3,1,start_with_relu=True,grow_first=True)
        # self.block7=Block(768,768,3,1,start_with_relu=True,grow_first=True)

        # Stride-1 refinement blocks.
        self.block8=Block(1024,1024,2,1,start_with_relu=True,grow_first=True)
        self.block9=Block(1024,1024,2,1,start_with_relu=True,grow_first=True)
        self.block10=Block(1024,1024,2,1,start_with_relu=True,grow_first=True)
        self.block11=Block(1024,1024,2,1,start_with_relu=True,grow_first=True)
        #self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)

        # Same 256-channel representation as the other encoder variants.
        self.conv3 = SeparableConv2d(1024,256,3,1,1)
        self.bn3 = nn.BatchNorm2d(256)
        #self.conv3 = SeparableConv2d(1024,1536,3,1,1)
        #self.bn3 = nn.BatchNorm2d(1536)

        #do relu here
        #self.conv4 = SeparableConv2d(1536,2048,3,1,1)
        #self.bn4 = nn.BatchNorm2d(2048)

    def forward(self,input):
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)

        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        # x = self.block4(x)
        # x = self.block5(x)
        # x = self.block6(x)
        # x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        #x = self.block12(x)
        x = self.conv3(x)
        x = self.bn3(x)
        #x = self.relu(x)

        #x = self.conv4(x)
        #x = self.bn4(x)
        representation = self.relu2(x)

        return representation
class EncoderDouble(nn.Module):
    # Wider variant of Encoder (728-channel trunk, roughly 2x the capacity);
    # same topology and the same 256-channel output representation.
    def __init__(self):
        super(EncoderDouble, self).__init__()
        print('entering double constructor')
        self.conv1 = nn.Conv2d(3, 32, 3,2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu = nn.ReLU(inplace=True)
        self.relu2 = nn.ReLU(inplace=False)

        self.conv2 = nn.Conv2d(32,64,3,1,1,bias=False)
        self.bn2 = nn.BatchNorm2d(64)
        #do relu here

        # Downsampling blocks (/16 total with the stem).
        self.block1=Block(64,128,2,2,start_with_relu=False,grow_first=True)
        self.block2=Block(128,256,2,2,start_with_relu=True,grow_first=True)
        self.block3=Block(256,728,2,2,start_with_relu=True,grow_first=True)
        #self.block4=Block(768,768,3,1,start_with_relu=True,grow_first=True)
        #self.block5=Block(768,768,3,1,start_with_relu=True,grow_first=True)
        # self.block6=Block(768,768,3,1,start_with_relu=True,grow_first=True)
        # self.block7=Block(768,768,3,1,start_with_relu=True,grow_first=True)

        # Stride-1 refinement blocks.
        self.block8=Block(728,728,2,1,start_with_relu=True,grow_first=True)
        self.block9=Block(728,728,2,1,start_with_relu=True,grow_first=True)
        self.block10=Block(728,728,2,1,start_with_relu=True,grow_first=True)
        self.block11=Block(728,728,2,1,start_with_relu=True,grow_first=True)
        #self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)

        # Same 256-channel representation as the other encoder variants.
        self.conv3 = SeparableConv2d(728,256,3,1,1)
        self.bn3 = nn.BatchNorm2d(256)
        #self.conv3 = SeparableConv2d(1024,1536,3,1,1)
        #self.bn3 = nn.BatchNorm2d(1536)

        #do relu here
        #self.conv4 = SeparableConv2d(1536,2048,3,1,1)
        #self.bn4 = nn.BatchNorm2d(2048)

    def forward(self,input):
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)

        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        # x = self.block4(x)
        # x = self.block5(x)
        # x = self.block6(x)
        # x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        #x = self.block12(x)
        x = self.conv3(x)
        x = self.bn3(x)
        #x = self.relu(x)

        #x = self.conv4(x)
        #x = self.bn4(x)
        representation = self.relu2(x)

        return representation
def interpolate(inp, size):
    """Bilinearly resize `inp` to `size`, doing the arithmetic in float32.

    Lower/other-precision inputs are upcast to float for the interpolation
    and the result is cast back to the input's original type before
    returning, so the caller's dtype is preserved.

    Args:
        inp: 4-D tensor (N, C, H, W).
        size: target (H, W) passed to ``nn.functional.interpolate``.

    Returns:
        Tensor of the same type as `inp`, resized to `size`.
    """
    t = inp.type()
    inp = inp.float()
    out = nn.functional.interpolate(inp, size=size, mode='bilinear',
                                    align_corners=False)
    if out.type() != t:
        # Bug fix: the original hard-coded `out.half()` here, which was only
        # correct for half-precision inputs and silently truncated e.g.
        # double inputs.  Cast back to the actual original type instead.
        out = out.type(t)
    return out
class Decoder(nn.Module):
    # Per-task head.  With `num_classes` set it is a linear classifier over
    # globally pooled 256-dim features; otherwise it upsamples the 256-channel
    # representation 16x via four transpose-conv stages, concatenating a
    # bilinearly resized 16-channel skip branch before the final prediction conv.
    def __init__(self, output_channels=32,num_classes=None):
        super(Decoder, self).__init__()

        self.output_channels = output_channels
        self.num_classes = num_classes

        if num_classes is not None:
            self.fc = nn.Linear(256, num_classes)
        #else:
        #    self.fc = nn.Linear(256, 1000)
        else:
            self.relu = nn.ReLU(inplace=True)
            # Skip branch: 256 -> 16 channels, resized straight to 256x256.
            self.conv_decode_res = SeparableConv2d(256, 16, 3,padding=1)
            self.conv_decode_res2 = SeparableConv2d(256, 96, 3,padding=1)
            self.bn_conv_decode_res = nn.BatchNorm2d(16)
            self.bn_conv_decode_res2 = nn.BatchNorm2d(96)
            # Main path: four 2x transpose-conv upsampling stages.
            self.upconv1 = nn.ConvTranspose2d(96,96,2,2)
            self.bn_upconv1 = nn.BatchNorm2d(96)
            self.conv_decode1 = SeparableConv2d(96, 64, 3,padding=1)
            self.bn_decode1 = nn.BatchNorm2d(64)
            self.upconv2 = nn.ConvTranspose2d(64,64,2,2)
            self.bn_upconv2 = nn.BatchNorm2d(64)
            self.conv_decode2 = SeparableConv2d(64, 64, 5,padding=2)
            self.bn_decode2 = nn.BatchNorm2d(64)
            self.upconv3 = nn.ConvTranspose2d(64,32,2,2)
            self.bn_upconv3 = nn.BatchNorm2d(32)
            self.conv_decode3 = SeparableConv2d(32, 32, 5,padding=2)
            self.bn_decode3 = nn.BatchNorm2d(32)
            self.upconv4 = nn.ConvTranspose2d(32,32,2,2)
            self.bn_upconv4 = nn.BatchNorm2d(32)
            # 48 inputs = 32 (main path) + 16 (skip branch) after the concat.
            self.conv_decode4 = SeparableConv2d(48, output_channels, 5,padding=2)

    def forward(self,representation):
        #batch_size=representation.shape[0]
        if self.num_classes is None:
            # NOTE(review): the skip branch resizes to a fixed 256x256, so the
            # dense-prediction path assumes 256x256 network input — confirm.
            x2 = self.conv_decode_res(representation)
            x2 = self.bn_conv_decode_res(x2)
            x2 = interpolate(x2,size=(256,256))
            x = self.conv_decode_res2(representation)
            x = self.bn_conv_decode_res2(x)
            x = self.upconv1(x)
            x = self.bn_upconv1(x)
            x = self.relu(x)
            x = self.conv_decode1(x)
            x = self.bn_decode1(x)
            x = self.relu(x)
            x = self.upconv2(x)
            x = self.bn_upconv2(x)
            x = self.relu(x)
            x = self.conv_decode2(x)
            x = self.bn_decode2(x)
            x = self.relu(x)
            x = self.upconv3(x)
            x = self.bn_upconv3(x)
            x = self.relu(x)
            x = self.conv_decode3(x)
            x = self.bn_decode3(x)
            x = self.relu(x)
            x = self.upconv4(x)
            x = self.bn_upconv4(x)
            # Merge main path with the resized skip branch.
            x = torch.cat([x,x2],1)
            #print(x.shape,self.static.shape)
            #x = torch.cat([x,x2,input,self.static.expand(batch_size,-1,-1,-1)],1)
            x = self.relu(x)
            x = self.conv_decode4(x)
            #z = x[:,19:22,:,:].clone()
            #y = (z).norm(2,1,True).clamp(min=1e-12)
            #print(y.shape,x[:,21:24,:,:].shape)
            #x[:,19:22,:,:]=z/y
        else:
            # Classification: global average pool then linear layer.
            #print(representation.shape)
            x = F.adaptive_avg_pool2d(representation, (1, 1))
            x = x.view(x.size(0), -1)
            #print(x.shape)
            x = self.fc(x)
            #print(x.shape)
        return x
class XceptionTaskonomySmall(nn.Module):
    """
    Xception optimized for the ImageNet dataset, as specified in
    https://arxiv.org/pdf/1610.02357.pdf

    Multi-task wrapper: one shared encoder feeds one Decoder per task.
    `half` selects the encoder width ('Quad' > 'Double' > truthy-half >
    default) and `ozan` selects how the decoders' gradients are combined at
    the shared representation (False / True / 'gradnorm').
    """

    # Channels each task's decoder must produce.
    _TASK_CHANNELS = {
        'segment_semantic': 18,
        'depth_zbuffer': 1,
        'normal': 3,
        'normal2': 3,
        'edge_occlusion': 1,
        'reshading': 3,
        'keypoints2d': 1,
        'edge_texture': 1,
    }

    def __init__(self, tasks=None, num_classes=None, ozan=False, half=False):
        """ Constructor
        Args:
            tasks: iterable of task names (keys of _TASK_CHANNELS) or None
                for a single 1000-way classification head.
            num_classes: classification class count forwarded to each Decoder.
            ozan: False, True, or 'gradnorm' — gradient-combination strategy.
            half: False, True (half-width encoder), 'Double', or 'Quad'.
        """
        super(XceptionTaskonomySmall, self).__init__()
        print('half is', half)
        if half == 'Quad':
            print('running quad code')
            self.encoder = EncoderQuad()
        elif half == 'Double':
            self.encoder = EncoderDouble()
        elif half:
            self.encoder = EncoderHalf()
        else:
            self.encoder = Encoder()
        self.tasks = tasks
        self.ozan = ozan
        self.task_to_decoder = {}

        if tasks is not None:
            for task in tasks:
                try:
                    # Bug fix: the original used a non-exclusive if-chain that
                    # raised NameError for an unknown first task and silently
                    # reused the previous task's channel count otherwise.
                    output_channels = self._TASK_CHANNELS[task]
                except KeyError:
                    raise ValueError('unknown task: {}'.format(task))
                self.task_to_decoder[task] = Decoder(output_channels, num_classes)
        else:
            self.task_to_decoder['classification'] = Decoder(output_channels=0, num_classes=1000)

        # Relies on dict insertion order matching self.task_to_decoder.keys().
        self.decoders = nn.ModuleList(self.task_to_decoder.values())

        # ------- init weights --------
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # -----------------------------

    # Class-level call counter shared by all instances (used by GradNorm).
    count = 0

    def input_per_task_losses(self, losses):
        # Warm-up: keep refreshing GradNorm's baseline losses for the first
        # 200 calls; afterwards periodically log the current task weights.
        XceptionTaskonomySmall.count += 1
        if XceptionTaskonomySmall.count < 200:
            GradNormRepFunction.inital_task_losses = losses
            GradNormRepFunction.current_weights = [1 for i in losses]
        elif XceptionTaskonomySmall.count % 20 == 0:
            with open("gradnorm_weights.txt", "a") as myfile:
                myfile.write(str(XceptionTaskonomySmall.count) + ': ' +
                             str(GradNormRepFunction.current_weights) + '\n')
        GradNormRepFunction.current_task_losses = losses

    def forward(self, input):
        rep = self.encoder(input)
        if self.tasks is None:
            return self.decoders[0](rep)
        outputs = {'rep': rep}
        if self.ozan == 'gradnorm':
            GradNormRepFunction.n = len(self.decoders)
            rep = gradnorm_rep_function(rep)
            for i, (task, decoder) in enumerate(zip(self.task_to_decoder.keys(), self.decoders)):
                outputs[task] = decoder(rep[i])
        elif self.ozan:
            OzanRepFunction.n = len(self.decoders)
            rep = ozan_rep_function(rep)
            for i, (task, decoder) in enumerate(zip(self.task_to_decoder.keys(), self.decoders)):
                outputs[task] = decoder(rep[i])
        else:
            for i, (task, decoder) in enumerate(zip(self.task_to_decoder.keys(), self.decoders)):
                outputs[task] = decoder(rep)
        return outputs
def xception_taskonomy_small(pretrained=False, **kwargs):
    """Construct an xception-taskonomy-small model.

    Args:
        pretrained: when True, load encoder weights from
            'xception_taskonomy_small2.encoder.pth.tar' in the working dir.
        **kwargs: forwarded to XceptionTaskonomySmall.
    """
    model = XceptionTaskonomySmall(**kwargs)
    if pretrained:
        weights = torch.load('xception_taskonomy_small2.encoder.pth.tar')
        model.encoder.load_state_dict(weights)
    return model
def xception_taskonomy_small_ozan(pretrained=False, **kwargs):
    """Construct an xception-taskonomy-small model with ozan gradient mixing."""
    model = XceptionTaskonomySmall(ozan=True, **kwargs)
    if pretrained:
        weights = torch.load('xception_taskonomy_small2.encoder.pth.tar')
        model.encoder.load_state_dict(weights)
    return model
def xception_taskonomy_small_gradnorm(pretrained=False, **kwargs):
    """Construct an xception-taskonomy-small model with GradNorm weighting."""
    model = XceptionTaskonomySmall(ozan='gradnorm', **kwargs)
    if pretrained:
        weights = torch.load('xception_taskonomy_small2.encoder.pth.tar')
        model.encoder.load_state_dict(weights)
    return model
def xception_taskonomy_small_half(pretrained=False, **kwargs):
    """Construct an xception-taskonomy-small model with the half-width encoder."""
    model = XceptionTaskonomySmall(half=True, **kwargs)
    if pretrained:
        weights = torch.load('xception_taskonomy_small2.encoder.pth.tar')
        model.encoder.load_state_dict(weights)
    return model
def xception_taskonomy_small_quad(pretrained=False, **kwargs):
    """Construct an xception-taskonomy-small model with the quad-width encoder."""
    print('got quad')
    model = XceptionTaskonomySmall(half='Quad', **kwargs)
    if pretrained:
        weights = torch.load('xception_taskonomy_small2.encoder.pth.tar')
        model.encoder.load_state_dict(weights)
    return model
def xception_taskonomy_small_double(pretrained=False, **kwargs):
    """Construct an xception-taskonomy-small model with the double-width encoder."""
    print('got double')
    model = XceptionTaskonomySmall(half='Double', **kwargs)
    if pretrained:
        weights = torch.load('xception_taskonomy_small2.encoder.pth.tar')
        model.encoder.load_state_dict(weights)
    return model
def xception_taskonomy_small_quad_ozan(pretrained=False, **kwargs):
    """Construct a quad-width xception-taskonomy-small model with ozan mixing."""
    print('got quad ozan')
    model = XceptionTaskonomySmall(ozan=True, half='Quad', **kwargs)
    if pretrained:
        weights = torch.load('xception_taskonomy_small2.encoder.pth.tar')
        model.encoder.load_state_dict(weights)
    return model
def xception_taskonomy_small_double_ozan(pretrained=False, **kwargs):
    """Construct a double-width xception-taskonomy-small model with ozan mixing."""
    print('got double')
    model = XceptionTaskonomySmall(ozan=True, half='Double', **kwargs)
    if pretrained:
        weights = torch.load('xception_taskonomy_small2.encoder.pth.tar')
        model.encoder.load_state_dict(weights)
    return model
def xception_taskonomy_small_half_ozan(pretrained=False, **kwargs):
    """Construct a half-width xception-taskonomy-small model with ozan mixing."""
    model = XceptionTaskonomySmall(ozan=True, half=True, **kwargs)
    if pretrained:
        weights = torch.load('xception_taskonomy_small2.encoder.pth.tar')
        model.encoder.load_state_dict(weights)
    return model
| 29,185 | 34.37697 | 317 | py |
taskgrouping | taskgrouping-master/model_definitions/resnet_taskonomy.py | import torch.nn as nn
import math
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn import init
import torch
from .ozan_rep_fun import ozan_rep_function,trevor_rep_function,OzanRepFunction,TrevorRepFunction
#from .utils import load_state_dict_from_url
__all__ = ['resnet18_taskonomy','resnet18_taskonomy_half','resnet18_taskonomy_tripple', 'resnet34_taskonomy', 'resnet50_taskonomy', 'resnet101_taskonomy',
'resnet152_taskonomy']
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding.

    Padding equals the dilation, so spatial size is preserved at stride 1.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (channel projection, no padding, no bias)."""
    projection = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                           stride=stride, bias=False)
    return projection
class BasicBlock(nn.Module):
    # Standard two-conv ResNet basic block (3x3 -> 3x3) with an identity or
    # projection shortcut supplied via `downsample`.
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            # Project the shortcut so its shape matches the main path.
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
class Bottleneck(nn.Module):
    # Standard ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (4x).
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the 3x3 conv, scaled for wide/grouped (ResNeXt-style) variants.
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            # Project the shortcut so its shape matches the main path.
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
class ResNetEncoder(nn.Module):
    # torchvision-style ResNet trunk without the avgpool/fc head; forward()
    # returns the final stage's feature map.  `widths` sets the base channel
    # count of each of the four stages.
    def __init__(self, block, layers,widths=[64,128,256,512], num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        # NOTE(review): `widths` is a mutable default argument — harmless here
        # since it is never mutated in place.  `num_classes` is unused (the
        # classifier lives in Decoder).
        super(ResNetEncoder, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7/2 conv + 3x3/2 max pool (spatial /4 before the stages).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, widths[0], layers[0])
        self.layer2 = self._make_layer(block, widths[1], layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, widths[2], layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, widths[3], layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        # Build one stage: a projection shortcut for the first block when the
        # shape changes, then `blocks` residual blocks stacked sequentially.
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation to keep spatial resolution.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        return x
class Decoder(nn.Module):
    """Per-task head for the ResNet encoder.

    With ``num_classes`` set this is a classification head (global average
    pool + linear layer over ``base_match`` features).  Otherwise it is a
    fully-convolutional decoder: five transpose-conv upsampling stages (32x
    total) ending in ``output_channels`` prediction channels.
    """

    def __init__(self, output_channels=32, num_classes=None, base_match=512):
        super(Decoder, self).__init__()

        self.output_channels = output_channels
        self.num_classes = num_classes

        self.relu = nn.ReLU(inplace=True)
        if num_classes is not None:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            # Bug fix: the original wrote `nn.Linear(512 * block.expansion, ...)`
            # but `block` is not defined in this scope (NameError at
            # construction).  The classifier input width is the encoder's
            # output channel count, which `base_match` conveys.
            self.fc = nn.Linear(base_match, num_classes)
        else:
            # 2x upsampling stages: base_match -> 256 -> 128 -> 64 -> 48 -> 32.
            self.upconv0 = nn.ConvTranspose2d(base_match, 256, 2, 2)
            self.bn_upconv0 = nn.BatchNorm2d(256)
            self.conv_decode0 = nn.Conv2d(256, 256, 3, padding=1)
            self.bn_decode0 = nn.BatchNorm2d(256)
            self.upconv1 = nn.ConvTranspose2d(256, 128, 2, 2)
            self.bn_upconv1 = nn.BatchNorm2d(128)
            self.conv_decode1 = nn.Conv2d(128, 128, 3, padding=1)
            self.bn_decode1 = nn.BatchNorm2d(128)
            self.upconv2 = nn.ConvTranspose2d(128, 64, 2, 2)
            self.bn_upconv2 = nn.BatchNorm2d(64)
            self.conv_decode2 = nn.Conv2d(64, 64, 3, padding=1)
            self.bn_decode2 = nn.BatchNorm2d(64)
            self.upconv3 = nn.ConvTranspose2d(64, 48, 2, 2)
            self.bn_upconv3 = nn.BatchNorm2d(48)
            self.conv_decode3 = nn.Conv2d(48, 48, 3, padding=1)
            self.bn_decode3 = nn.BatchNorm2d(48)
            self.upconv4 = nn.ConvTranspose2d(48, 32, 2, 2)
            self.bn_upconv4 = nn.BatchNorm2d(32)
            self.conv_decode4 = nn.Conv2d(32, output_channels, 3, padding=1)

    def forward(self, representation):
        if self.num_classes is None:
            x = self.upconv0(representation)
            x = self.bn_upconv0(x)
            x = self.relu(x)
            x = self.conv_decode0(x)
            x = self.bn_decode0(x)
            x = self.relu(x)
            x = self.upconv1(x)
            x = self.bn_upconv1(x)
            x = self.relu(x)
            x = self.conv_decode1(x)
            x = self.bn_decode1(x)
            x = self.relu(x)
            x = self.upconv2(x)
            x = self.bn_upconv2(x)
            x = self.relu(x)
            x = self.conv_decode2(x)
            x = self.bn_decode2(x)
            x = self.relu(x)
            x = self.upconv3(x)
            x = self.bn_upconv3(x)
            x = self.relu(x)
            x = self.conv_decode3(x)
            x = self.bn_decode3(x)
            x = self.relu(x)
            x = self.upconv4(x)
            x = self.bn_upconv4(x)
            x = self.relu(x)
            x = self.conv_decode4(x)
        else:
            # Bug fix: the original pooled an undefined local `x`
            # (NameError); pool the incoming representation instead.
            x = F.adaptive_avg_pool2d(representation, (1, 1))
            x = x.view(x.size(0), -1)
            x = self.fc(x)
        return x
class ResNet(nn.Module):
    # Multi-task model: one ResNetEncoder shared by one Decoder per task.
    # `size` selects an encoder width preset (0.5/1/2/3) and `ozan` switches
    # the gradient-combination function applied at the shared representation.
    def __init__(self,block,layers,tasks=None,num_classes=None, ozan=False,size=1,**kwargs):
        super(ResNet, self).__init__()
        # Encoder width presets keyed by `size` (1 = torchvision defaults).
        # NOTE(review): an unsupported `size` leaves self.encoder unset.
        if size==1:
            self.encoder=ResNetEncoder(block,layers,**kwargs)
        elif size==2:
            self.encoder=ResNetEncoder(block,layers,[96,192,384,720],**kwargs)
        elif size==3:
            self.encoder=ResNetEncoder(block,layers,[112,224,448,880],**kwargs)
        elif size==0.5:
            self.encoder=ResNetEncoder(block,layers,[48,96,192,360],**kwargs)
        self.tasks=tasks
        self.ozan=ozan
        self.task_to_decoder = {}

        if tasks is not None:
            #self.final_conv = nn.Conv2d(728,512,3,1,1)
            #self.final_conv_bn = nn.BatchNorm2d(512)
            for task in tasks:
                # NOTE(review): this if-chain is not exclusive — an
                # unrecognised first task raises NameError below, and a later
                # unrecognised task silently reuses the previous channel
                # count.  Confirm task names are always from this set.
                if task == 'segment_semantic':
                    output_channels = 18
                if task == 'depth_zbuffer':
                    output_channels = 1
                if task == 'normal':
                    output_channels = 3
                if task == 'edge_occlusion':
                    output_channels = 1
                if task == 'reshading':
                    output_channels = 3
                if task == 'keypoints2d':
                    output_channels = 1
                if task == 'edge_texture':
                    output_channels = 1
                # `base_match` must equal the encoder's output channel count
                # for the matching `size` preset above.
                if size==1:
                    decoder=Decoder(output_channels)
                elif size==2:
                    decoder=Decoder(output_channels,base_match=720)
                elif size==3:
                    decoder=Decoder(output_channels,base_match=880)
                elif size==0.5:
                    decoder=Decoder(output_channels,base_match=360)
                self.task_to_decoder[task]=decoder
        else:
            # Single 1000-way classification head; `num_classes` is ignored here.
            self.task_to_decoder['classification']=Decoder(output_channels=0,num_classes=1000)

        # Relies on dict insertion order matching task_to_decoder.keys().
        self.decoders = nn.ModuleList(self.task_to_decoder.values())

        #------- init weights --------
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        #-----------------------------

    def forward(self, input):
        rep = self.encoder(input)

        if self.tasks is None:
            return self.decoders[0](rep)

        #rep = self.final_conv(rep)
        #rep = self.final_conv_bn(rep)

        outputs={'rep':rep}
        if self.ozan:
            # Ozan mixing: each decoder gets its own view of the replicated rep.
            OzanRepFunction.n=len(self.decoders)
            rep = ozan_rep_function(rep)
            for i,(task,decoder) in enumerate(zip(self.task_to_decoder.keys(),self.decoders)):
                outputs[task]=decoder(rep[i])
        else:
            TrevorRepFunction.n=len(self.decoders)
            rep = trevor_rep_function(rep)
            for i,(task,decoder) in enumerate(zip(self.task_to_decoder.keys(),self.decoders)):
                outputs[task]=decoder(rep)

        return outputs
def _resnet(arch, block, layers, pretrained, **kwargs):
    """Instantiate a ResNet from a block type and per-stage layer counts.

    `arch` and `pretrained` are accepted for interface compatibility;
    pretrained-weight loading is currently disabled.
    """
    return ResNet(block=block, layers=layers, **kwargs)
def resnet18_taskonomy(pretrained=False, **kwargs):
    """ResNet-18 backbone for Taskonomy multi-task training.

    Args:
        pretrained (bool): accepted for API compatibility; pretrained
            weight loading is currently disabled.
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, **kwargs)
def resnet18_taskonomy_tripple(pretrained=False, **kwargs):
    """ResNet-18 backbone at triple width (size=3) for Taskonomy.

    Args:
        pretrained (bool): accepted for API compatibility; pretrained
            weight loading is currently disabled.
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, size=3, **kwargs)
def resnet18_taskonomy_half(pretrained=False, **kwargs):
    """ResNet-18 backbone at half width (size=0.5) for Taskonomy.

    Args:
        pretrained (bool): accepted for API compatibility; pretrained
            weight loading is currently disabled.
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, size=0.5, **kwargs)
def resnet34_taskonomy(pretrained=False, **kwargs):
    """ResNet-34 backbone for Taskonomy multi-task training.

    Args:
        pretrained (bool): accepted for API compatibility; pretrained
            weight loading is currently disabled.
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, **kwargs)
def resnet50_taskonomy(pretrained=False, **kwargs):
    """ResNet-50 backbone (bottleneck blocks) for Taskonomy.

    Args:
        pretrained (bool): accepted for API compatibility; pretrained
            weight loading is currently disabled.
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, **kwargs)
def resnet101_taskonomy(pretrained=False, **kwargs):
    """ResNet-101 backbone (bottleneck blocks) for Taskonomy.

    Args:
        pretrained (bool): accepted for API compatibility; pretrained
            weight loading is currently disabled.
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, **kwargs)
def resnet152_taskonomy(pretrained=False, **kwargs):
    """ResNet-152 backbone (bottleneck blocks) for Taskonomy.

    Args:
        pretrained (bool): accepted for API compatibility; pretrained
            weight loading is currently disabled.
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, **kwargs)
| 16,822 | 36.301552 | 154 | py |
taskgrouping | taskgrouping-master/model_definitions/ozan_min_norm_solvers.py | import numpy as np
import torch
import math
class MinNormSolver:
    """Finds the minimum-norm element of the convex hull of a set of
    vectors (MGDA-style multi-task gradient balancing).

    All methods are stateless static methods; pairwise dot products are
    memoised in the ``dps`` dict that callers thread through.

    Fixes vs. the original:
      * the iteration counter in both solvers was never incremented, so
        the MAX_ITER bound was dead code and non-converging inputs looped
        forever; a bounded for-loop restores it,
      * both solvers now return the best solution found when the
        iteration budget runs out (previously they fell off the end and
        implicitly returned ``None``),
      * the (j, j) self-dot-product iterated over ``vecs[i]`` instead of
        ``vecs[j]``.
    """

    MAX_ITER = 250    # hard cap on projected-gradient / Frank-Wolfe steps
    STOP_CRIT = 1e-5  # L1 change in the solution below which we stop

    @staticmethod
    def _min_norm_element_from2(v1v1, v1v2, v2v2):
        """
        Analytical solution for min_{c} |cx_1 + (1-c)x_2|_2^2
        d is the distance (objective) optimzed
        v1v1 = <x1,x1>
        v1v2 = <x1,x2>
        v2v2 = <x2,x2>
        """
        if v1v2 >= v1v1:
            # Case: Fig 1, third column -- optimum sits (numerically) at x1.
            return 0.999, v1v1
        if v1v2 >= v2v2:
            # Case: Fig 1, first column -- optimum sits (numerically) at x2.
            return 0.001, v2v2
        # Case: Fig 1, second column -- interior optimum.
        gamma = -1.0 * ((v1v2 - v2v2) / (v1v1 + v2v2 - 2 * v1v2))
        cost = v2v2 + gamma * (v1v2 - v2v2)
        return gamma, cost

    @staticmethod
    def _min_norm_2d(vecs, dps):
        """
        Find the minimum norm solution as combination of two points
        This is correct only in 2D
        ie. min_c |\sum c_i x_i|_2^2 st. \sum c_i = 1 , 1 >= c_1 >= 0 for all i, c_i + c_j = 1.0 for some i, j
        """
        dmin = 1e99
        sol = None
        gamma = float('nan')
        for i in range(len(vecs)):
            for j in range(i + 1, len(vecs)):
                # Lazily fill the (symmetric) dot-product cache.
                if (i, j) not in dps:
                    dps[(i, j)] = sum(torch.dot(vecs[i][k], vecs[j][k]).item()
                                      for k in range(len(vecs[i])))
                    dps[(j, i)] = dps[(i, j)]
                if (i, i) not in dps:
                    dps[(i, i)] = sum(torch.dot(vecs[i][k], vecs[i][k]).item()
                                      for k in range(len(vecs[i])))
                if (j, j) not in dps:
                    # Fixed: iterate over vecs[j] (the original used
                    # len(vecs[i]), which is only safe when every task's
                    # gradient has the same number of parameter groups).
                    dps[(j, j)] = sum(torch.dot(vecs[j][k], vecs[j][k]).item()
                                      for k in range(len(vecs[j])))
                gamma, d = MinNormSolver._min_norm_element_from2(
                    dps[(i, i)], dps[(i, j)], dps[(j, j)])
                if d < dmin:
                    dmin = d
                    sol = [(i, j), gamma, d]
        if sol is None or math.isnan(gamma):
            raise ValueError('A numeric instability occured in ozan_min_norm_solvers.')
        return sol, dps

    @staticmethod
    def _projection2simplex(y):
        """
        Given y, it solves argmin_z |y-z|_2 st \sum z = 1 , 1 >= z_i >= 0 for all i
        """
        m = len(y)
        sorted_y = np.flip(np.sort(y), axis=0)
        tmpsum = 0.0
        tmax_f = (np.sum(y) - 1.0) / m
        for i in range(m - 1):
            tmpsum += sorted_y[i]
            tmax = (tmpsum - 1) / (i + 1.0)
            if tmax > sorted_y[i + 1]:
                tmax_f = tmax
                break
        return np.maximum(y - tmax_f, np.zeros(y.shape))

    @staticmethod
    def _next_point(cur_val, grad, n):
        """One projected-gradient step from `cur_val` along `grad`,
        clipped so the step stays within the probability simplex."""
        proj_grad = grad - (np.sum(grad) / n)
        # Largest feasible step before any coordinate leaves [0, 1].
        tm1 = -1.0 * cur_val[proj_grad < 0] / proj_grad[proj_grad < 0]
        tm2 = (1.0 - cur_val[proj_grad > 0]) / (proj_grad[proj_grad > 0])
        t = 1
        if len(tm1[tm1 > 1e-7]) > 0:
            t = np.min(tm1[tm1 > 1e-7])
        if len(tm2[tm2 > 1e-7]) > 0:
            t = min(t, np.min(tm2[tm2 > 1e-7]))
        next_point = proj_grad * t + cur_val
        return MinNormSolver._projection2simplex(next_point)

    @staticmethod
    def find_min_norm_element(vecs):
        """
        Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
        as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
        It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
        Hence, we find the best 2-task solution, and then run the projected gradient descent until convergence
        """
        # Solution lying at the combination of two points.
        dps = {}
        init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
        n = len(vecs)
        sol_vec = np.zeros(n)
        sol_vec[init_sol[0][0]] = init_sol[1]
        sol_vec[init_sol[0][1]] = 1 - init_sol[1]
        if n < 3:
            # The two-point analytic solution is optimal for n == 2.
            return sol_vec, init_sol[2]
        grad_mat = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                grad_mat[i, j] = dps[(i, j)]
        nd = init_sol[2]
        for _ in range(MinNormSolver.MAX_ITER):
            grad_dir = -1.0 * np.dot(grad_mat, sol_vec)
            new_point = MinNormSolver._next_point(sol_vec, grad_dir, n)
            # Re-compute the inner products for the line search.
            v1v1 = 0.0
            v1v2 = 0.0
            v2v2 = 0.0
            for i in range(n):
                for j in range(n):
                    v1v1 += sol_vec[i] * sol_vec[j] * dps[(i, j)]
                    v1v2 += sol_vec[i] * new_point[j] * dps[(i, j)]
                    v2v2 += new_point[i] * new_point[j] * dps[(i, j)]
            nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
            new_sol_vec = nc * sol_vec + (1 - nc) * new_point
            change = new_sol_vec - sol_vec
            if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
                return sol_vec, nd
            sol_vec = new_sol_vec
        # Budget exhausted: return the best solution found so far.
        return sol_vec, nd

    @staticmethod
    def find_min_norm_element_FW(vecs):
        """
        Given a list of vectors (vecs), this method finds the minimum norm element in the convex hull
        as min |u|_2 st. u = \sum c_i vecs[i] and \sum c_i = 1.
        It is quite geometric, and the main idea is the fact that if d_{ij} = min |u|_2 st u = c x_i + (1-c) x_j; the solution lies in (0, d_{i,j})
        Hence, we find the best 2-task solution, and then run the Frank Wolfe until convergence
        """
        # Solution lying at the combination of two points.
        dps = {}
        init_sol, dps = MinNormSolver._min_norm_2d(vecs, dps)
        n = len(vecs)
        sol_vec = np.zeros(n)
        sol_vec[init_sol[0][0]] = init_sol[1]
        sol_vec[init_sol[0][1]] = 1 - init_sol[1]
        if n < 3:
            # The two-point analytic solution is optimal for n == 2.
            return sol_vec, init_sol[2]
        grad_mat = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                grad_mat[i, j] = dps[(i, j)]
        nd = init_sol[2]
        for _ in range(MinNormSolver.MAX_ITER):
            # Frank-Wolfe: move toward the vertex with steepest descent.
            t_iter = np.argmin(np.dot(grad_mat, sol_vec))
            v1v1 = np.dot(sol_vec, np.dot(grad_mat, sol_vec))
            v1v2 = np.dot(sol_vec, grad_mat[:, t_iter])
            v2v2 = grad_mat[t_iter, t_iter]
            nc, nd = MinNormSolver._min_norm_element_from2(v1v1, v1v2, v2v2)
            new_sol_vec = nc * sol_vec
            new_sol_vec[t_iter] += 1 - nc
            change = new_sol_vec - sol_vec
            if np.sum(np.abs(change)) < MinNormSolver.STOP_CRIT:
                return sol_vec, nd
            sol_vec = new_sol_vec
        # Budget exhausted: return the best solution found so far.
        return sol_vec, nd
def gradient_normalizers(grads, losses, normalization_type):
    """Compute a per-task normaliser for gradient balancing.

    Args:
        grads: dict mapping task name -> list of gradient tensors.
        losses: dict mapping task name -> scalar loss value.
        normalization_type: one of 'l2' (gradient L2 norm), 'loss',
            'loss+' (loss times gradient norm), or 'none' (1.0).

    Returns:
        dict mapping task name -> float normaliser (empty entries only
        for an invalid `normalization_type`, which is reported on stdout
        as before).
    """
    gn = {}
    if normalization_type == 'l2':
        for t in grads:
            # .item() replaces the legacy .data[0], which raises on
            # 0-dim tensors in modern PyTorch (and matches the usage in
            # MinNormSolver above).
            gn[t] = np.sqrt(sum(gr.pow(2).sum().item() for gr in grads[t]))
    elif normalization_type == 'loss':
        for t in grads:
            gn[t] = losses[t]
    elif normalization_type == 'loss+':
        for t in grads:
            gn[t] = losses[t] * np.sqrt(sum(gr.pow(2).sum().item() for gr in grads[t]))
    elif normalization_type == 'none':
        for t in grads:
            gn[t] = 1.0
    else:
        print('ERROR: Invalid Normalization Type')
    return gn
taskgrouping | taskgrouping-master/model_definitions/xception_taskonomy_joined_decoder.py | """
Creates an Xception Model as defined in:
Francois Chollet
Xception: Deep Learning with Depthwise Separable Convolutions
https://arxiv.org/pdf/1610.02357.pdf
These weights were ported from the Keras implementation and achieve the following performance on the validation set:
Loss:0.9173 Prec@1:78.892 Prec@5:94.292
REMEMBER to set your image size to 3x299x299 for both test and validation
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299
"""
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn import init
import torch
from .ozan_rep_fun import ozan_rep_function,trevor_rep_function,OzanRepFunction,TrevorRepFunction
__all__ = ['xception_taskonomy_joined_decoder','xception_taskonomy_joined_decoder_fifth','xception_taskonomy_joined_decoder_quad','xception_taskonomy_joined_decoder_half','xception_taskonomy_joined_decoder_80','xception_taskonomy_joined_decoder_ozan']
# model_urls = {
# 'xception_taskonomy':'file:///home/tstand/Dropbox/taskonomy/xception_taskonomy-a4b32ef7.pth.tar'
# }
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: a grouped spatial convolution
    followed by a 1x1 pointwise convolution.

    `groupsize` controls how many channels share one spatial filter
    (groupsize=1 gives a fully depthwise first stage).
    """
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=0, dilation=1, bias=False, groupsize=1):
        super(SeparableConv2d, self).__init__()
        depthwise_groups = max(1, in_channels // groupsize)
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride,
                               padding, dilation, groups=depthwise_groups, bias=bias)
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.conv1(x))
class Block(nn.Module):
    """Xception residual block: a stack of `reps` separable 3x3 convolutions
    with an (optionally projected, optionally strided) skip connection."""
    def __init__(self,in_filters,out_filters,reps,strides=1,start_with_relu=True,grow_first=True):
        super(Block, self).__init__()
        # Project the skip path only when the shape changes; otherwise the
        # residual is the identity.
        if out_filters != in_filters or strides!=1:
            self.skip = nn.Conv2d(in_filters,out_filters,1,stride=strides, bias=False)
            self.skipbn = nn.BatchNorm2d(out_filters)
        else:
            self.skip=None
        self.relu = nn.ReLU(inplace=True)
        rep=[]
        filters=in_filters
        if grow_first:
            # Widen to out_filters with the first conv of the stack.
            rep.append(self.relu)
            rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
            rep.append(nn.BatchNorm2d(out_filters))
            filters = out_filters
        for i in range(reps-1):
            rep.append(self.relu)
            rep.append(SeparableConv2d(filters,filters,3,stride=1,padding=1,bias=False))
            rep.append(nn.BatchNorm2d(filters))
        if not grow_first:
            # Widen to out_filters with the last conv instead.
            rep.append(self.relu)
            rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
            rep.append(nn.BatchNorm2d(out_filters))
            filters=out_filters
        if not start_with_relu:
            rep = rep[1:]
        else:
            # The leading ReLU is made non-inplace -- presumably so the raw
            # input, which the skip branch also reads, is not modified in
            # place (TODO confirm).
            rep[0] = nn.ReLU(inplace=False)
        if strides != 1:
            #rep.append(nn.AvgPool2d(3,strides,1))
            # Downsampling is done with a strided 2x2 conv rather than pooling.
            rep.append(nn.Conv2d(filters,filters,2,2))
        self.rep = nn.Sequential(*rep)
    def forward(self,inp):
        # Main branch.
        x = self.rep(inp)
        # Skip branch (projected when shapes differ).
        if self.skip is not None:
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp
        x+=skip
        return x
class Encoder(nn.Module):
    """Xception-style shared encoder: a two-conv stem followed by eleven
    residual `Block`s. The first three blocks each downsample by 2, so the
    output is 1/16 the input resolution with sizes[-1] channels."""
    def __init__(self, sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]):
        # NOTE: the mutable default `sizes` is only read, never mutated,
        # so the shared-default pitfall does not bite here.
        super(Encoder, self).__init__()
        # Stem: strided 3x3 conv from RGB, then a second 3x3 conv.
        self.conv1 = nn.Conv2d(3, sizes[0], 3,2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(sizes[0])
        self.relu = nn.ReLU(inplace=True)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv2 = nn.Conv2d(sizes[0],sizes[1],3,1,1,bias=False)
        self.bn2 = nn.BatchNorm2d(sizes[1])
        #do relu here
        # Blocks 1-3 downsample (stride 2); blocks 4-11 keep resolution.
        self.block1=Block(sizes[1],sizes[2],2,2,start_with_relu=False,grow_first=True)
        self.block2=Block(sizes[2],sizes[3],2,2,start_with_relu=True,grow_first=True)
        self.block3=Block(sizes[3],sizes[4],2,2,start_with_relu=True,grow_first=True)
        self.block4=Block(sizes[4],sizes[5],3,1,start_with_relu=True,grow_first=True)
        self.block5=Block(sizes[5],sizes[6],3,1,start_with_relu=True,grow_first=True)
        self.block6=Block(sizes[6],sizes[7],3,1,start_with_relu=True,grow_first=True)
        self.block7=Block(sizes[7],sizes[8],3,1,start_with_relu=True,grow_first=True)
        self.block8=Block(sizes[8],sizes[9],3,1,start_with_relu=True,grow_first=True)
        self.block9=Block(sizes[9],sizes[10],3,1,start_with_relu=True,grow_first=True)
        self.block10=Block(sizes[10],sizes[11],3,1,start_with_relu=True,grow_first=True)
        self.block11=Block(sizes[11],sizes[12],3,1,start_with_relu=True,grow_first=True)
        #self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
        #self.conv3 = SeparableConv2d(768,512,3,1,1)
        #self.bn3 = nn.BatchNorm2d(512)
        #self.conv3 = SeparableConv2d(1024,1536,3,1,1)
        #self.bn3 = nn.BatchNorm2d(1536)
        #do relu here
        #self.conv4 = SeparableConv2d(1536,2048,3,1,1)
        #self.bn4 = nn.BatchNorm2d(2048)
    def forward(self,input):
        # Stem.
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        # Residual trunk.
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        #x = self.block12(x)
        #x = self.conv3(x)
        #x = self.bn3(x)
        #x = self.relu(x)
        #x = self.conv4(x)
        #x = self.bn4(x)
        # Final activation is non-inplace so the pre-activation tensor is
        # left untouched for any downstream use.
        representation = self.relu2(x)
        return representation
def interpolate(inp, size):
    """Bilinear-resize `inp` to `size`, computing in float32.

    The input is upcast to float for the interpolation; when the result's
    dtype no longer matches the input's, it is cast to half (this mirrors
    the original half-precision round-trip behaviour).
    """
    original_type = inp.type()
    resized = nn.functional.interpolate(inp.float(), size=size,
                                        mode='bilinear', align_corners=False)
    if resized.type() != original_type:
        resized = resized.half()
    return resized
class Decoder(nn.Module):
    """Task head. With `num_classes` set, finishes the Xception trunk and
    ends in a linear classifier; otherwise it is a four-stage
    transposed-conv upsampler from the 512-channel representation to
    `output_channels` maps at 16x the spatial size."""
    def __init__(self, output_channels=32,num_classes=None):
        super(Decoder, self).__init__()
        self.output_channels = output_channels
        self.num_classes = num_classes
        self.relu = nn.ReLU(inplace=True)
        if num_classes is not None:
            # Classification tail: one more Xception block, two separable
            # convs, then global average pool + fc (in forward()).
            self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
            self.conv3 = SeparableConv2d(1024,1536,3,1,1)
            self.bn3 = nn.BatchNorm2d(1536)
            #do relu here
            self.conv4 = SeparableConv2d(1536,2048,3,1,1)
            self.bn4 = nn.BatchNorm2d(2048)
            self.fc = nn.Linear(2048, num_classes)
        else:
            # Dense-prediction tail: each stage doubles resolution
            # (ConvTranspose2d stride 2) and refines with a 3x3 conv.
            self.upconv1 = nn.ConvTranspose2d(512,128,2,2)
            self.bn_upconv1 = nn.BatchNorm2d(128)
            self.conv_decode1 = nn.Conv2d(128, 128, 3,padding=1)
            self.bn_decode1 = nn.BatchNorm2d(128)
            self.upconv2 = nn.ConvTranspose2d(128,64,2,2)
            self.bn_upconv2 = nn.BatchNorm2d(64)
            self.conv_decode2 = nn.Conv2d(64, 64, 3,padding=1)
            self.bn_decode2 = nn.BatchNorm2d(64)
            self.upconv3 = nn.ConvTranspose2d(64,48,2,2)
            self.bn_upconv3 = nn.BatchNorm2d(48)
            self.conv_decode3 = nn.Conv2d(48, 48, 3,padding=1)
            self.bn_decode3 = nn.BatchNorm2d(48)
            self.upconv4 = nn.ConvTranspose2d(48,32,2,2)
            self.bn_upconv4 = nn.BatchNorm2d(32)
            self.conv_decode4 = nn.Conv2d(32, output_channels, 3,padding=1)
    def forward(self,representation):
        if self.num_classes is None:
            # Dense prediction: 4x (upsample, refine).
            x = self.upconv1(representation)
            x = self.bn_upconv1(x)
            x = self.relu(x)
            x = self.conv_decode1(x)
            x = self.bn_decode1(x)
            x = self.relu(x)
            x = self.upconv2(x)
            x = self.bn_upconv2(x)
            x = self.relu(x)
            x = self.conv_decode2(x)
            x = self.bn_decode2(x)
            x = self.relu(x)
            x = self.upconv3(x)
            x = self.bn_upconv3(x)
            x = self.relu(x)
            x = self.conv_decode3(x)
            x = self.bn_decode3(x)
            x = self.relu(x)
            x = self.upconv4(x)
            x = self.bn_upconv4(x)
            x = self.relu(x)
            # No BN/activation after the last conv: raw logits/regression.
            x = self.conv_decode4(x)
        else:
            # Classification path.
            x = self.block12(representation)
            x = self.conv3(x)
            x = self.bn3(x)
            x = self.relu(x)
            x = self.conv4(x)
            x = self.bn4(x)
            x = self.relu(x)
            x = F.adaptive_avg_pool2d(x, (1, 1))
            x = x.view(x.size(0), -1)
            x = self.fc(x)
        return x
class XceptionTaskonomy(nn.Module):
    """
    Xception optimized for the ImageNet dataset, as specified in
    https://arxiv.org/pdf/1610.02357.pdf

    Joined-decoder variant: one shared encoder plus a single decoder whose
    output channels are the concatenation of all tasks' channels; forward()
    slices the joint output back into per-task maps.
    """
    def __init__(self,size=1, tasks=None,num_classes=None, ozan=False):
        """ Constructor
        Args:
            num_classes: number of classes
        """
        super(XceptionTaskonomy, self).__init__()
        pre_rep_size=728
        # Channel widths per encoder stage for each model scale.
        # NOTE(review): sizes for .2/.3/.4 are identical to size 1 here --
        # looks like unfinished scale variants; confirm before relying on
        # those settings.
        sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
        if size == 1:
            sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
        elif size==.2:
            sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
        elif size==.3:
            sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
        elif size==.4:
            sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
        elif size==.5:
            sizes=[24,48,96,192,512,512,512,512,512,512,512,512,512]
        elif size==.8:
            sizes=[32,64,128,248,648,648,648,648,648,648,648,648,648]
        elif size==2:
            sizes=[32,64, 128,256, 728, 728, 728, 728, 728, 728, 728, 728, 728]
        elif size==4:
            sizes=[64,128,256,512,1456,1456,1456,1456,1456,1456,1456,1456,1456]
        self.encoder=Encoder(sizes=sizes)
        pre_rep_size=sizes[-1]
        self.tasks=tasks
        self.ozan=ozan
        self.task_to_decoder = {}
        if tasks is not None:
            # Bottleneck the encoder output to 512 channels, then build one
            # joint decoder sized to the sum of all tasks' channels.
            self.final_conv = SeparableConv2d(pre_rep_size,512,3,1,1)
            self.final_conv_bn = nn.BatchNorm2d(512)
            output_channels=0
            self.channels_per_task = {'segment_semantic':18,
                                      'depth_zbuffer':1,
                                      'normal':3,
                                      'edge_occlusion':1,
                                      'reshading':3,
                                      'keypoints2d':1,
                                      'edge_texture':1,
                                      }
            for task in tasks:
                output_channels+=self.channels_per_task[task]
            self.decoder=Decoder(output_channels)
        else:
            # No task list: ImageNet-style 1000-way classification head.
            self.decoder=Decoder(output_channels=0,num_classes=1000)
        #------- init weights --------
        # He-style init for convs; BN scales to 1, shifts to 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        #-----------------------------
    def forward(self, input):
        rep = self.encoder(input)
        if self.tasks is None:
            return self.decoder(rep)
        rep = self.final_conv(rep)
        rep = self.final_conv_bn(rep)
        outputs = {}
        raw_output=self.decoder(rep)
        range_start = 0
        #print(raw_output.shape)
        # Slice the joint decoder output back into per-task channel groups,
        # in the order tasks were given.
        for task in self.tasks:
            outputs[task]=raw_output[:,range_start:range_start+self.channels_per_task[task],:,:]
            range_start+=self.channels_per_task[task]
        return outputs
def xception_taskonomy_joined_decoder(**kwargs):
    """Full-width Xception with a single joined multi-task decoder."""
    return XceptionTaskonomy(size=1, **kwargs)
def xception_taskonomy_joined_decoder_fifth(**kwargs):
    """Joined-decoder Xception at the size=0.2 scale setting."""
    return XceptionTaskonomy(size=.2, **kwargs)
def xception_taskonomy_joined_decoder_quad(**kwargs):
    """Joined-decoder Xception at quadruple width (size=4)."""
    return XceptionTaskonomy(size=4, **kwargs)
def xception_taskonomy_joined_decoder_half(**kwargs):
    """Joined-decoder Xception at half width (size=0.5)."""
    return XceptionTaskonomy(size=.5, **kwargs)
def xception_taskonomy_joined_decoder_80(**kwargs):
    """Joined-decoder Xception at 80% width (size=0.8)."""
    return XceptionTaskonomy(size=.8, **kwargs)
def xception_taskonomy_joined_decoder_ozan(**kwargs):
    """Joined-decoder Xception with Ozan-style representation routing."""
    return XceptionTaskonomy(ozan=True, **kwargs)
| 13,355 | 31.183133 | 251 | py |
taskgrouping | taskgrouping-master/model_definitions/xception_taskonomy_new.py | """
"""
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torch.nn import init
import torch
from .ozan_rep_fun import ozan_rep_function,trevor_rep_function,OzanRepFunction,TrevorRepFunction
__all__ = ['xception_taskonomy_new','xception_taskonomy_new_fifth','xception_taskonomy_new_quad','xception_taskonomy_new_half','xception_taskonomy_new_80','xception_taskonomy_ozan']
# model_urls = {
# 'xception_taskonomy':'file:///home/tstand/Dropbox/taskonomy/xception_taskonomy-a4b32ef7.pth.tar'
# }
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: a grouped spatial convolution
    followed by a 1x1 pointwise convolution.

    `groupsize` sets how many channels share a spatial filter in the
    first stage (groupsize=1 is fully depthwise).
    """
    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=0, dilation=1, bias=False, groupsize=1):
        super(SeparableConv2d, self).__init__()
        depthwise_groups = max(1, in_channels // groupsize)
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride,
                               padding, dilation, groups=depthwise_groups, bias=bias)
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.conv1(x))
class Block(nn.Module):
    """Xception residual block: a stack of `reps` separable 3x3 convolutions
    with an (optionally projected, optionally strided) skip connection."""
    def __init__(self,in_filters,out_filters,reps,strides=1,start_with_relu=True,grow_first=True):
        super(Block, self).__init__()
        # Project the skip path only when the shape changes.
        if out_filters != in_filters or strides!=1:
            self.skip = nn.Conv2d(in_filters,out_filters,1,stride=strides, bias=False)
            self.skipbn = nn.BatchNorm2d(out_filters)
        else:
            self.skip=None
        self.relu = nn.ReLU(inplace=True)
        rep=[]
        filters=in_filters
        if grow_first:
            # Widen to out_filters with the first conv of the stack.
            rep.append(self.relu)
            rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
            rep.append(nn.BatchNorm2d(out_filters))
            filters = out_filters
        for i in range(reps-1):
            rep.append(self.relu)
            rep.append(SeparableConv2d(filters,filters,3,stride=1,padding=1,bias=False))
            rep.append(nn.BatchNorm2d(filters))
        if not grow_first:
            # Widen to out_filters with the last conv instead.
            rep.append(self.relu)
            rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
            rep.append(nn.BatchNorm2d(out_filters))
            filters=out_filters
        if not start_with_relu:
            rep = rep[1:]
        else:
            # Leading ReLU is made non-inplace -- presumably to leave the
            # input intact for the skip branch (TODO confirm).
            rep[0] = nn.ReLU(inplace=False)
        if strides != 1:
            #rep.append(nn.AvgPool2d(3,strides,1))
            # Downsampling via a strided 2x2 conv rather than pooling.
            rep.append(nn.Conv2d(filters,filters,2,2))
        self.rep = nn.Sequential(*rep)
    def forward(self,inp):
        # Main branch, then (possibly projected) residual addition.
        x = self.rep(inp)
        if self.skip is not None:
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp
        x+=skip
        return x
class Encoder(nn.Module):
    """Xception-style shared encoder: a two-conv stem followed by eleven
    residual `Block`s; blocks 1-3 downsample by 2 each, so the output is
    1/16 of the input resolution with sizes[-1] channels."""
    def __init__(self, sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]):
        # NOTE: the mutable default `sizes` is only read, never mutated.
        super(Encoder, self).__init__()
        # Stem: strided 3x3 conv from RGB, then a second 3x3 conv.
        self.conv1 = nn.Conv2d(3, sizes[0], 3,2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(sizes[0])
        self.relu = nn.ReLU(inplace=True)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv2 = nn.Conv2d(sizes[0],sizes[1],3,1,1,bias=False)
        self.bn2 = nn.BatchNorm2d(sizes[1])
        #do relu here
        # Blocks 1-3 downsample (stride 2); blocks 4-11 keep resolution.
        self.block1=Block(sizes[1],sizes[2],2,2,start_with_relu=False,grow_first=True)
        self.block2=Block(sizes[2],sizes[3],2,2,start_with_relu=True,grow_first=True)
        self.block3=Block(sizes[3],sizes[4],2,2,start_with_relu=True,grow_first=True)
        self.block4=Block(sizes[4],sizes[5],3,1,start_with_relu=True,grow_first=True)
        self.block5=Block(sizes[5],sizes[6],3,1,start_with_relu=True,grow_first=True)
        self.block6=Block(sizes[6],sizes[7],3,1,start_with_relu=True,grow_first=True)
        self.block7=Block(sizes[7],sizes[8],3,1,start_with_relu=True,grow_first=True)
        self.block8=Block(sizes[8],sizes[9],3,1,start_with_relu=True,grow_first=True)
        self.block9=Block(sizes[9],sizes[10],3,1,start_with_relu=True,grow_first=True)
        self.block10=Block(sizes[10],sizes[11],3,1,start_with_relu=True,grow_first=True)
        self.block11=Block(sizes[11],sizes[12],3,1,start_with_relu=True,grow_first=True)
        #self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
        #self.conv3 = SeparableConv2d(768,512,3,1,1)
        #self.bn3 = nn.BatchNorm2d(512)
        #self.conv3 = SeparableConv2d(1024,1536,3,1,1)
        #self.bn3 = nn.BatchNorm2d(1536)
        #do relu here
        #self.conv4 = SeparableConv2d(1536,2048,3,1,1)
        #self.bn4 = nn.BatchNorm2d(2048)
    def forward(self,input):
        # Stem.
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        # Residual trunk.
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        #x = self.block12(x)
        #x = self.conv3(x)
        #x = self.bn3(x)
        #x = self.relu(x)
        #x = self.conv4(x)
        #x = self.bn4(x)
        # Non-inplace final activation keeps the pre-activation tensor intact.
        representation = self.relu2(x)
        return representation
def interpolate(inp, size):
    """Bilinear-resize `inp` to `size`, computing in float32.

    The input is upcast to float before interpolation; if the result's
    dtype no longer matches the input's, it is cast to half (preserving
    the original half-precision round-trip behaviour).
    """
    original_type = inp.type()
    resized = nn.functional.interpolate(inp.float(), size=size,
                                        mode='bilinear', align_corners=False)
    if resized.type() != original_type:
        resized = resized.half()
    return resized
class Decoder(nn.Module):
    """Task head. With `num_classes` set, finishes the Xception trunk and
    ends in a linear classifier. Otherwise it is a four-stage
    transposed-conv upsampler (small or large channel budget per
    `small_decoder`); with `half_sized_output` the final upsample is
    skipped so the output is at half resolution."""
    def __init__(self, output_channels=32,num_classes=None,half_sized_output=False,small_decoder=True):
        super(Decoder, self).__init__()
        self.output_channels = output_channels
        self.num_classes = num_classes
        self.half_sized_output=half_sized_output
        self.relu = nn.ReLU(inplace=True)
        if num_classes is not None:
            # Classification tail: one more Xception block, two separable
            # convs, then global average pool + fc (in forward()).
            self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
            self.conv3 = SeparableConv2d(1024,1536,3,1,1)
            self.bn3 = nn.BatchNorm2d(1536)
            #do relu here
            self.conv4 = SeparableConv2d(1536,2048,3,1,1)
            self.bn4 = nn.BatchNorm2d(2048)
            self.fc = nn.Linear(2048, num_classes)
        else:
            if small_decoder:
                # Narrow upsampling path: 512 -> 128 -> 64 -> 48 -> 32.
                self.upconv1 = nn.ConvTranspose2d(512,128,2,2)
                self.bn_upconv1 = nn.BatchNorm2d(128)
                self.conv_decode1 = nn.Conv2d(128, 128, 3,padding=1)
                self.bn_decode1 = nn.BatchNorm2d(128)
                self.upconv2 = nn.ConvTranspose2d(128,64,2,2)
                self.bn_upconv2 = nn.BatchNorm2d(64)
                self.conv_decode2 = nn.Conv2d(64, 64, 3,padding=1)
                self.bn_decode2 = nn.BatchNorm2d(64)
                self.upconv3 = nn.ConvTranspose2d(64,48,2,2)
                self.bn_upconv3 = nn.BatchNorm2d(48)
                self.conv_decode3 = nn.Conv2d(48, 48, 3,padding=1)
                self.bn_decode3 = nn.BatchNorm2d(48)
                if half_sized_output:
                    # Skip the last 2x upsample; emit at half resolution.
                    self.upconv4 = nn.Identity()
                    self.bn_upconv4 = nn.Identity()
                    self.conv_decode4 = nn.Conv2d(48, output_channels, 3,padding=1)
                else:
                    self.upconv4 = nn.ConvTranspose2d(48,32,2,2)
                    self.bn_upconv4 = nn.BatchNorm2d(32)
                    self.conv_decode4 = nn.Conv2d(32, output_channels, 3,padding=1)
            else:
                # Wide upsampling path: 512 -> 256 -> 128 -> 96 -> 64.
                self.upconv1 = nn.ConvTranspose2d(512,256,2,2)
                self.bn_upconv1 = nn.BatchNorm2d(256)
                self.conv_decode1 = nn.Conv2d(256, 256, 3,padding=1)
                self.bn_decode1 = nn.BatchNorm2d(256)
                self.upconv2 = nn.ConvTranspose2d(256,128,2,2)
                self.bn_upconv2 = nn.BatchNorm2d(128)
                self.conv_decode2 = nn.Conv2d(128, 128, 3,padding=1)
                self.bn_decode2 = nn.BatchNorm2d(128)
                self.upconv3 = nn.ConvTranspose2d(128,96,2,2)
                self.bn_upconv3 = nn.BatchNorm2d(96)
                self.conv_decode3 = nn.Conv2d(96, 96, 3,padding=1)
                self.bn_decode3 = nn.BatchNorm2d(96)
                if half_sized_output:
                    # Skip the last 2x upsample; emit at half resolution.
                    self.upconv4 = nn.Identity()
                    self.bn_upconv4 = nn.Identity()
                    self.conv_decode4 = nn.Conv2d(96, output_channels, 3,padding=1)
                else:
                    self.upconv4 = nn.ConvTranspose2d(96,64,2,2)
                    self.bn_upconv4 = nn.BatchNorm2d(64)
                    self.conv_decode4 = nn.Conv2d(64, output_channels, 3,padding=1)
    def forward(self,representation):
        if self.num_classes is None:
            # Dense prediction: repeated (upsample, refine) stages.
            x = self.upconv1(representation)
            x = self.bn_upconv1(x)
            x = self.relu(x)
            x = self.conv_decode1(x)
            x = self.bn_decode1(x)
            x = self.relu(x)
            x = self.upconv2(x)
            x = self.bn_upconv2(x)
            x = self.relu(x)
            x = self.conv_decode2(x)
            x = self.bn_decode2(x)
            x = self.relu(x)
            x = self.upconv3(x)
            x = self.bn_upconv3(x)
            x = self.relu(x)
            x = self.conv_decode3(x)
            x = self.bn_decode3(x)
            x = self.relu(x)
            if not self.half_sized_output:
                x = self.upconv4(x)
                x = self.bn_upconv4(x)
                x = self.relu(x)
            # No BN/activation after the last conv: raw logits/regression.
            x = self.conv_decode4(x)
        else:
            # Classification path.
            x = self.block12(representation)
            x = self.conv3(x)
            x = self.bn3(x)
            x = self.relu(x)
            x = self.conv4(x)
            x = self.bn4(x)
            x = self.relu(x)
            x = F.adaptive_avg_pool2d(x, (1, 1))
            x = x.view(x.size(0), -1)
            x = self.fc(x)
        return x
class XceptionTaskonomy(nn.Module):
    """
    Xception optimized for the ImageNet dataset, as specified in
    https://arxiv.org/pdf/1610.02357.pdf

    Multi-decoder variant: one shared encoder plus an independent Decoder
    per task; forward() returns a dict of per-task outputs plus the shared
    representation under 'rep'.
    """
    def __init__(self,size=1, tasks=None,num_classes=None, ozan=False,half_sized_output=False):
        """ Constructor
        Args:
            num_classes: number of classes
        """
        super(XceptionTaskonomy, self).__init__()
        pre_rep_size=728
        # Channel widths per encoder stage for each model scale.
        # NOTE(review): .3 and .4 still map to the full-size widths --
        # looks like unfinished scale variants; confirm before using.
        sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
        if size == 1:
            sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
        elif size==.2:
            sizes=[16,32,64,256,320,320,320,320,320,320,320,320,320]
        elif size==.3:
            sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
        elif size==.4:
            sizes=[32,64,128,256,728,728,728,728,728,728,728,728,728]
        elif size==.5:
            sizes=[24,48,96,192,512,512,512,512,512,512,512,512,512]
        elif size==.8:
            sizes=[32,64,128,248,648,648,648,648,648,648,648,648,648]
        elif size==2:
            sizes=[32,64, 128,256, 728, 728, 728, 728, 728, 728, 728, 728, 728]
        elif size==4:
            sizes=[64,128,256,512,1456,1456,1456,1456,1456,1456,1456,1456,1456]
        self.encoder=Encoder(sizes=sizes)
        pre_rep_size=sizes[-1]
        self.tasks=tasks
        self.ozan=ozan
        self.task_to_decoder = {}
        if tasks is not None:
            # Bottleneck the encoder output to 512 channels shared by all
            # decoders.
            self.final_conv = SeparableConv2d(pre_rep_size,512,3,1,1)
            self.final_conv_bn = nn.BatchNorm2d(512)
            for task in tasks:
                # Output channels per task.
                # NOTE(review): 'reshading' is 1 channel here but 3 in the
                # sibling model definitions in this repo -- confirm which
                # is intended.
                if task == 'segment_semantic':
                    output_channels = 18
                if task == 'depth_zbuffer':
                    output_channels = 1
                if task == 'normal':
                    output_channels = 3
                if task == 'edge_occlusion':
                    output_channels = 1
                if task == 'keypoints2d':
                    output_channels = 1
                if task == 'edge_texture':
                    output_channels = 1
                if task == 'reshading':
                    output_channels = 1
                if task == 'rgb':
                    output_channels = 3
                if task == 'principal_curvature':
                    output_channels = 2
                decoder=Decoder(output_channels,half_sized_output=half_sized_output)
                self.task_to_decoder[task]=decoder
        else:
            # No task list: ImageNet-style 1000-way classification head.
            self.task_to_decoder['classification']=Decoder(output_channels=0,num_classes=1000)
        self.decoders = nn.ModuleList(self.task_to_decoder.values())
        #------- init weights --------
        # He-style init for convs; BN scales to 1, shifts to 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        #-----------------------------
    def forward(self, input):
        rep = self.encoder(input)
        if self.tasks is None:
            return self.decoders[0](rep)
        rep = self.final_conv(rep)
        rep = self.final_conv_bn(rep)
        outputs={'rep':rep}
        if self.ozan:
            # Ozan routing: rep function yields one view per decoder.
            OzanRepFunction.n=len(self.decoders)
            rep = ozan_rep_function(rep)
            for i,(task,decoder) in enumerate(zip(self.task_to_decoder.keys(),self.decoders)):
                outputs[task]=decoder(rep[i])
        else:
            # Plain routing: every decoder consumes the same representation.
            TrevorRepFunction.n=len(self.decoders)
            rep = trevor_rep_function(rep)
            for i,(task,decoder) in enumerate(zip(self.task_to_decoder.keys(),self.decoders)):
                outputs[task]=decoder(rep)
        return outputs
def xception_taskonomy_new(**kwargs):
    """Full-width multi-decoder Xception for Taskonomy."""
    return XceptionTaskonomy(size=1, **kwargs)
def xception_taskonomy_new_fifth(**kwargs):
    """Multi-decoder Xception at the size=0.2 scale setting."""
    return XceptionTaskonomy(size=.2, **kwargs)
def xception_taskonomy_new_quad(**kwargs):
    """Multi-decoder Xception at quadruple width (size=4)."""
    return XceptionTaskonomy(size=4, **kwargs)
def xception_taskonomy_new_half(**kwargs):
    """Multi-decoder Xception at half width (size=0.5)."""
    return XceptionTaskonomy(size=.5, **kwargs)
def xception_taskonomy_new_80(**kwargs):
    """Multi-decoder Xception at 80% width (size=0.8)."""
    return XceptionTaskonomy(size=.8, **kwargs)
def xception_taskonomy_ozan(**kwargs):
    """Multi-decoder Xception with Ozan-style representation routing."""
    return XceptionTaskonomy(ozan=True, **kwargs)
| 14,950 | 32.979545 | 181 | py |
vae_lesion_deficit | vae_lesion_deficit-main/utils.py | import numpy as np
import random
from torch.utils.data import Dataset, DataLoader
from monai.transforms import *
def resize(volume, target_size):
    """Resize a 3-D volume to `target_size` via MONAI's Resize transform.

    A leading channel dimension is added for the transform when the input
    is 3-D, and squeezed away again before returning.
    """
    transform = Compose([Resize((target_size[0],
                                 target_size[1],
                                 target_size[2]))])
    if len(volume.shape) == 3:
        volume = np.expand_dims(volume, axis=0)
    return np.squeeze(transform(volume))
class DeficitDataset(Dataset):
    """Pairs lesion images with deficit labels for a DataLoader.

    Each label is returned with a leading singleton dimension so targets
    have shape (1, ...).
    """
    def __init__(self, data, labels):
        self.data = data
        self.labels = labels

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        image = self.data[index]
        target = np.expand_dims(self.labels[index], axis=0)
        return image, target
def create_train_val_cal_loaders(images, labels, batch_size, continuous=False, seed=42):
    '''
    Create the training, validation and calibration sets given input lesions and associated labels.
    Labels are index-aligned with images; both are shuffled with the same permutation.
    If the labels are continuous you need to pass the flag as TRUE otherwise they won't be normalised.
    :param images: array of lesion volumes, indexable along the first axis
    :param labels: array of deficit labels, same length as images
    :param batch_size: batch size used for all three DataLoaders
    :param continuous: if True, labels are min-max scaled to [0, 1] then z-scored
    :param seed: seed for both numpy and the stdlib random module
    :return: (train_loader, val_loader, cal_loader) DataLoaders over a 90/5/5 split
    '''
    # Currently chooses training, validation and calibration randomly
    # You will most likely need to proper sampling to ensure Positive/Negative ratios that are decent
    # This is data dependent though, so you have to choose
    np.random.seed(seed)
    random.seed(seed)
    # Shuffle data randomly -- REMEMBER! BE SMART WITH YOUR SAMPLING OF TRAIN, VAL, CAL THIS IS JUST AN EXAMPLE
    indices = [i for i in range(len(images))]
    np.random.shuffle(indices)
    images = images[indices]
    labels = labels[indices]
    if continuous:
        # Put them in the 0-1 range -- OPTIONAL but recommended
        labels = labels - labels.min()
        labels = labels / labels.max()
        # Gotta z-score normalise the labels
        labels = (labels - labels.mean()) / labels.std()
    # 90/5/5 split - no test set, this is inference
    train_l = int(0.9 * len(images))
    val_l = int(0.05 * len(images))
    # NOTE(review): cal_l is computed but never used — the calibration set is
    # simply "everything after train + val", which also absorbs rounding leftovers.
    cal_l = int(0.05 * len(images))
    train_data = images[:train_l]
    train_labels = labels[:train_l]
    val_data = images[train_l:(train_l+val_l)]
    val_labels = labels[train_l:(train_l+val_l)]
    cal_data = images[(train_l + val_l):]
    cal_labels = labels[(train_l + val_l):]
    '''
    Num workers = 0 because this DataLoader you can fit all the images and labels into RAM
    Lesion-deficit datasets are usually small, so this should be possible
    '''
    dataset = DeficitDataset(data=train_data, labels=train_labels)
    train_loader = DataLoader(dataset, batch_size=batch_size, drop_last=False,
                              shuffle=True, num_workers=0, pin_memory=True)
    val_dataset = DeficitDataset(data=val_data, labels=val_labels)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, drop_last=False,
                            shuffle=True, num_workers=0, pin_memory=True)
    cal_dataset = DeficitDataset(data=cal_data, labels=cal_labels)
    cal_loader = DataLoader(cal_dataset, batch_size=batch_size, drop_last=False,
                            shuffle=True, num_workers=0, pin_memory=True)
    return train_loader, val_loader, cal_loader
def viz_functional_parcellation(mask, grid_acc, v_idx, v, template_brain, cmap='autumn',
vmax=None, vmin=None):
if v_idx == 0:
template_slice = template_brain[:, :, v]
template_slice = np.pad(template_slice, ((6, 6), (0, 0)),
constant_values=0)
elif v_idx == 1:
template_slice = template_brain[v, :, :]
template_slice = np.pad(template_slice, ((0, 0), (6, 6)),
constant_values=0)
else:
template_slice = template_brain[:, v, :]
template_slice = np.pad(template_slice, ((6, 6), (6, 6)),
constant_values=0)
template_slice = np.rot90(template_slice, k=1)
grid_acc.imshow(template_slice, cmap='gray')
if v_idx == 0:
mask = mask[:, :, v]
mask = np.pad(mask, ((6, 6), (0, 0)), constant_values=0)
mask = np.rot90(mask, k=1)
elif v_idx == 1:
mask = mask[v, :, :]
mask = np.pad(mask, ((0, 0), (6, 6)), constant_values=0)
mask = np.rot90(mask, k=1)
else:
mask = mask[:, v, :]
mask = np.pad(mask, ((6, 6), (6, 6)), constant_values=0)
mask = np.rot90(mask, k=1)
if cmap != 'jet':
mask = (mask > 0).astype(np.uint8)
mask = np.ma.masked_where(mask == 0, mask)
if not vmax:
grid_acc.imshow(mask, cmap=cmap, alpha=0.6)
else:
grid_acc.imshow(mask, cmap=cmap, alpha=0.6, vmin=vmin, vmax=vmax)
| 4,837 | 35.104478 | 111 | py |
vae_lesion_deficit | vae_lesion_deficit-main/model.py | import math
import torch
import torch.nn as nn
import torch.distributions as D
import torch.nn.functional as F
# Define two globals
bce_fn = nn.BCELoss(reduction='none')
Tensor = torch.cuda.FloatTensor
def add_coords(x, just_coords=False):
    '''
    Uber CoordConv coordinate augmentation extended to 3D, as used on the input.
    For an input of shape (B, C, dim_z, dim_y, dim_x) it builds three
    coordinate channels whose value at voxel (z, y, x) is:
        ch0: (y + z) / (dim_x - 1)
        ch1: (z + x) / (dim_y - 1)
        ch2: (x + y) / (dim_z - 1)
    :param x: 5-D input tensor
    :param just_coords: if True, return only the three coordinate channels
    :return: x with the coordinate channels concatenated (or the channels alone)
    '''
    batch, _, dim_z, dim_y, dim_x = x.shape

    # Per-axis index vectors, broadcastable over the (z, y, x) volume.
    z_idx = torch.arange(dim_z).float().view(1, 1, dim_z, 1, 1)
    y_idx = torch.arange(dim_y).float().view(1, 1, 1, dim_y, 1)
    x_idx = torch.arange(dim_x).float().view(1, 1, 1, 1, dim_x)

    full = (batch, 1, dim_z, dim_y, dim_x)
    xx_channel = ((y_idx + z_idx) / (dim_x - 1)).expand(full).to(x.device)
    yy_channel = ((z_idx + x_idx) / (dim_y - 1)).expand(full).to(x.device)
    zz_channel = ((x_idx + y_idx) / (dim_z - 1)).expand(full).to(x.device)

    if just_coords:
        return torch.cat([xx_channel, yy_channel, zz_channel], dim=1)
    return torch.cat([x, xx_channel, yy_channel, zz_channel], dim=1)
class SBlock(nn.Module):
    """Conv3d -> BatchNorm3d -> GELU block with optional 2x down/up-sampling.

    Kernel sizes 3 and 5 use "same" padding (1 and 2); any other kernel size
    gets padding 3. With `add_coords=True` the three CoordConv coordinate
    channels are prepended to the input, so the conv sees `in_planes + 3`
    channels. Downsampling is average pooling; upsampling is nearest-neighbour.
    """

    # "Same" padding for the supported kernel sizes; anything else -> 3.
    _PAD_FOR_KS = {3: 1, 5: 2}

    def __init__(self, in_planes, planes, downsample=False, ks=3, stride=1, upsample=False, add_coords=False):
        super(SBlock, self).__init__()
        self.downsample = downsample
        self.upsample = upsample
        pad = self._PAD_FOR_KS.get(ks, 3)
        self.add_coords = add_coords
        conv_in = in_planes + 3 if add_coords else in_planes
        # Attribute names (c1, upsample_layer) are kept for state_dict compatibility.
        self.c1 = nn.Sequential(nn.Conv3d(conv_in, planes, kernel_size=ks, stride=stride,
                                          padding=pad),
                                nn.BatchNorm3d(planes),
                                nn.GELU())
        self.upsample_layer = nn.Upsample(scale_factor=2, mode='nearest')

    def forward(self, x):
        """Run the conv block, then the optional pool / upsample stages."""
        out = self.c1(add_coords(x) if self.add_coords else x)
        if self.downsample:
            out = F.avg_pool3d(out, kernel_size=2, stride=2)
        if self.upsample:
            out = self.upsample_layer(out)
        return out
class VAE(nn.Module):
    def __init__(self, input_size, sd=16, z_dim=20, out_chans=1, in_chans=1):
        '''
        This is the VAE model that does the lesion deficit mapping inference. It does two tasks with a single latent.
        First it produces the lesion-deficit map. Second it produces a reconstruction of the lesions.
        Both of these are necessary because we are modelling the joint distribution P(X,Y)
        There are many architectural improvements that will probably help get better accuracy, but this is a simple
        architecture that works even with little data. The more data you have, the more you might want to replace
        the Encoder and Decoder with something more complicated. Or even use a VDVAE
        Adding coordinates helps as well, but by default the models doesn't add them
        :param input_size: cubic side length of the input (log2 is taken, so a power of two is expected)
        :param sd: channel width of the first encoder block (doubles per layer)
        :param z_dim: total latent size; split in half between the two decoders
        :param out_chans: output channels of the inference decoder
        :param in_chans: input channels seen by the encoder
        '''
        super(VAE, self).__init__()
        self.sd = sd
        self.z_dim = z_dim
        # Each decoder only sees half of the latent (see forward()).
        self.half_z = z_dim // 2
        # Each layer reduces by a factor of 2, how many layers we need to get to latent space 2**3
        self.num_layers = int(math.log2(input_size)) - 1
        '''
        Encoder -- You'll probably need to tweak this to get the best results, GPU memory usage, etc.
        '''
        self.encoder_layers = nn.ModuleList()
        enc_sd = self.sd
        for l in range(self.num_layers):
            self.encoder_layers.append(SBlock(in_chans, enc_sd, downsample=True))
            in_chans = enc_sd
            # Width doubles each layer except the last (so enc_sd ends at the final width).
            if l < self.num_layers - 1:
                enc_sd *= 2
        # These are the dimensions of a fully connected latent at the end of the encoder
        # TODO: might not need to always be 2 cubed
        self.spatial_dims = input_size // (2 ** self.num_layers)
        self.dense_dims = self.spatial_dims ** 3 * (enc_sd)
        '''
        Parameters of the latent space
        '''
        self.mu = nn.Linear(self.dense_dims, z_dim)
        self.logvar = nn.Linear(self.dense_dims, z_dim)
        '''
        Decoders for the inference maps and lesion reconstructions
        '''
        # Both decoders start from half_z features: each consumes its own half of z.
        self.decoder_inference = nn.ModuleList()
        self.decoder_reconstruction = nn.ModuleList()
        self.decoder_inference.append(nn.Sequential(nn.Linear(self.half_z, self.dense_dims),
                                                    nn.GELU()))
        self.decoder_reconstruction.append(nn.Sequential(nn.Linear(self.half_z, self.dense_dims),
                                                         nn.GELU()))
        dec_sd = enc_sd
        for l in range(self.num_layers):
            self.decoder_inference.append(SBlock(dec_sd, dec_sd // 2, upsample=True))
            self.decoder_reconstruction.append(SBlock(dec_sd, dec_sd // 2, upsample=True))
            dec_sd = dec_sd // 2
        # Finish both decoders
        self.decoder_inference.append(
            nn.Sequential(nn.Conv3d(dec_sd, int(dec_sd / 2), kernel_size=3, stride=1, padding=1),
                          nn.GELU(),
                          nn.Conv3d(int(dec_sd / 2), out_chans, kernel_size=1, stride=1, padding=0)
                          )
        )
        self.decoder_reconstruction.append(
            nn.Sequential(nn.Conv3d(dec_sd, int(dec_sd / 2), kernel_size=3, stride=1, padding=1),
                          nn.GELU(),
                          nn.Conv3d(int(dec_sd / 2), 1, kernel_size=1, stride=1, padding=0)
                          )
        )
    def sampling(self, mu, log_var):
        '''
        Sample your latent from z ~ N(mean, scale) via the reparameterisation trick
        :param mu: latent mean, shape (batch, z_dim)
        :param log_var: latent log-variance, same shape
        :return: a sample mu + std * eps
        '''
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)
    def encoder(self, x):
        """Run all encoder blocks, flatten, and return (mu, logvar)."""
        for enc_layer in self.encoder_layers:
            x = enc_layer(x)
        x = x.view(-1, self.dense_dims)
        return self.mu(x), self.logvar(x)
    def decoder(self, x):
        """Inference-map decoder: (batch, half_z) latent -> volumetric map."""
        x = self.decoder_inference[0](x)
        # Reshape the dense activation back to a small volume before upsampling.
        x = x.view(x.size(0), -1, self.spatial_dims, self.spatial_dims, self.spatial_dims)
        for dec_layer in self.decoder_inference[1:]:
            x = dec_layer(x)
        return x
    def rdecoder(self, x):
        """Lesion-reconstruction decoder: (batch, half_z) latent -> volume logits."""
        x = self.decoder_reconstruction[0](x)
        x = x.view(x.size(0), -1, self.spatial_dims, self.spatial_dims, self.spatial_dims)
        for dec_layer in self.decoder_reconstruction[1:]:
            x = dec_layer(x)
        return x
    def forward(self, x, y):
        """Encode x, split the latent in half, and decode both heads.

        Returns (inference map, lesion reconstruction logits, per-sample KL).
        Note: `y` is unused here — the label is expected to already be part of x.
        """
        mu, log_var = self.encoder(x)
        z = self.sampling(mu, log_var)
        # First half drives the inference map, second half the reconstruction.
        mask_z = z[:, :self.half_z]
        recon_z = z[:, self.half_z:]
        # Closed-form KL divergence of N(mu, exp(log_var)) from N(0, 1), summed over z.
        kl = torch.sum(0.5 * (-log_var + torch.exp(log_var) + mu ** 2 - 1), dim=1)
        return self.decoder(mask_z), self.rdecoder(recon_z), kl
class ModelWrapper(nn.Module):
    def __init__(self, input_size, z_dim=128, start_dims=16, continuous=False):
        '''
        A model wrapper around the VAE
        :param input_size: cubic side length of the input volumes
        :param z_dim: total latent size of the wrapped VAE (split in half internally)
        :param start_dims: channel width of the VAE's first encoder block
        :param continuous: True for continuous deficit scores (MSE), False for binary labels (BCE)
        '''
        super().__init__()
        self.z_dim = z_dim
        self.start_dims = start_dims
        # 5 input channels - X, the coordinates, and Y
        # 2 output channels - The mean and the variance of the inference maps
        self.mask_model = VAE(input_size,
                              sd=start_dims,
                              z_dim=z_dim,
                              out_chans=2,
                              in_chans=5)
        self.continuous = continuous
        print(f'CONTINUOUS MODEL: {self.continuous}')
    def forward(self, x, y, val=False, provided_mask=None, provided_scale=None, t=0.5, calibrate=False, lesion_threshold=None):
        '''
        If doing validation you will want to use the generated inference map to gauge the accuracy of the
        predictions
        :param x: lesion volumes, shape (batch, 1, h, w, d)
        :param y: deficit labels, shape (batch, 1)
        :param val: use `provided_mask`/`provided_scale` instead of this batch's maps
        :param provided_mask: precomputed mean inference map (validation)
        :param provided_scale: precomputed scale inference map (validation)
        :param t: quantile used to threshold the mean map when calibrating
        :param calibrate: threshold the mean map at quantile `t` before prediction
        :param lesion_threshold: if set, binarise the reconstruction at this value
            and predict from it instead of the original lesion
        :return: dict with the averaged maps, losses and accuracy
        '''
        b, c, h, w, d = x.shape
        # Add coordinates to the lesion
        coord_x = add_coords(x)
        # Add the label as a volume
        my = y.view(-1, 1, 1, 1, 1).repeat(1, 1, h, w, d)
        coord_x = torch.cat([coord_x, my], dim=1)
        if val:
            # If doing validation use the masks calculated from the training data
            # Do a forward pass still so we can evaluate reconstruction quality and KL
            masks, recons, kl_m = self.mask_model(coord_x, y)
            preds_mean = provided_mask
            preds_scale = provided_scale
        else:
            masks, recons, kl_m = self.mask_model(coord_x, y)
            preds_mean = masks[:, 0].view(-1, 1, h, w, d)
            preds_scale = masks[:, 1].view(-1, 1, h, w, d)
        if calibrate:
            # If calibrating predictions, we want to find a thresholding quantile that achieves the best accuracy!
            flat_preds_a = preds_mean.view(x.size(0), -1)
            qt = torch.quantile(flat_preds_a, t, dim=1).view(-1, 1, 1, 1, 1)
            preds_mean = (preds_mean > qt) * preds_mean
        # The three outputs of our network -> Reconstructed lesion, Mean inference map and STD variance map
        recons = torch.sigmoid(recons)
        # If a lesion threshold is provided, binarise the reconstruction according to this and calculate the predictive loss with it
        # rather than with the original lesion
        if lesion_threshold:
            bin_lesion = (recons > lesion_threshold)
            logits = torch.mean(bin_lesion * preds_mean, dim=(-4, -3, -2, -1)).view(-1, 1)
            # Standard deviation is currently between 0 and 1, but it can be larger or smaller
            scale = torch.mean(bin_lesion * preds_scale, dim=(-4, -3, -2, -1)).view(-1, 1).exp()
        else:
            logits = torch.mean(x * preds_mean, dim=(-4, -3, -2, -1)).view(-1, 1)
            # Standard deviation is currently between 0 and 1, but it can be larger or smaller
            scale = torch.mean(x * preds_scale, dim=(-4, -3, -2, -1)).view(-1, 1).exp()
        '''
        Calculate log P(Y|X,M), i.e. the log-likelihood of our inference objective
        '''
        if self.continuous:
            # mask_ll = - D.Normal(logits, scale + 1e-5).log_prob(y).mean()
            mask_ll = torch.mean((logits - y) ** 2)
        else:
            # Don't use STD on binary case because Bernoulli has no variance -> Beta distributions work well
            probabilities = torch.sigmoid(logits)
            mask_ll = bce_fn(probabilities, y).mean()
        '''
        Calculate log P(X|M), i.e. the log likelihood of our lesions
        '''
        recon_ll = torch.sum(bce_fn(recons, x), dim=(-3, -2, -1)).mean()
        preds = torch.mean(preds_mean, dim=0).view(1, 1, h, w, d)
        mask_scale = torch.mean(preds_scale, dim=0).view(1, 1, h, w, d)
        # Calculate the accuracy of the predictions. If it is continuous, this is just MSE
        if self.continuous:
            acc = mask_ll
        else:
            quant_preds = (probabilities > 0.5).to(torch.float32)
            acc = torch.mean(torch.eq(quant_preds, y).float())
        '''
        The final loss is log P(Y| X, M) + log P(X|M) + D_KL[Q(M|X,Y) || P(M)]
        '''
        loss = mask_ll + recon_ll + kl_m.mean()
        ret_dict = dict(mean_mask=preds,
                        mask_scale=mask_scale,
                        mask_ll=mask_ll.mean(),
                        kl=kl_m.mean(),
                        loss=loss, acc=acc,
                        recon_ll=recon_ll.mean()
                        )
        return ret_dict
    def sample_masks(self, num_samples=400):
        '''
        Use this to sample the mean and STD masks from the latent space prior
        :param num_samples: number of prior samples averaged over
        :return: (mean_mask, scale_mask)
        '''
        # BUGFIX: the VAE splits its latent in half (mask half / reconstruction
        # half), so decoder() expects z_dim // 2 features; sampling the full
        # z_dim crashed with a shape mismatch in the decoder's first Linear.
        z = torch.randn(num_samples, self.z_dim // 2).type(Tensor)
        preds = self.mask_model.decoder(z)
        # NOTE(review): dim=(0, 1) also averages over the first spatial axis of
        # preds[:, 0] (shape: samples, h, w, d) — confirm dim=0 was not intended.
        mean_mask = torch.mean(preds[:, 0], dim=(0, 1))
        scale_mask = torch.mean(preds[:, 1], dim=(0, 1))
        return mean_mask, scale_mask
| 13,659 | 36.322404 | 132 | py |
vae_lesion_deficit | vae_lesion_deficit-main/train.py | import numpy as np
import os
import argparse
import torch
import torch.optim as optim
import datetime
import torch as tc
from model import ModelWrapper
from utils import create_train_val_cal_loaders
Tensor = torch.cuda.FloatTensor
def count_parameters(model):
    """Total number of trainable (requires_grad) parameters in `model`."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def train_vdvae(config, images, labels):
    """Train the lesion-deficit VAE, keep the best checkpoint, then calibrate it.

    Requires CUDA (device cuda:0 and the module-level cuda `Tensor` type).
    `config` must provide: input_size, z_dim, start_dims, continuous, epochs,
    batch_size, lr and wd (the last two feed the Adamax optimiser).
    Side effects: writes checkpoints to pretrained/ and maps to vae_masks/.
    """
    device = torch.device(f"cuda:0")
    best_epoch = 0
    # These are the directories to store trained models and vae masks
    if not os.path.exists('pretrained'):
        os.makedirs('pretrained')
    if not os.path.exists('vae_masks'):
        os.makedirs('vae_masks')
    # Get the time stamp (used to name all artefacts of this run)
    ft = "%Y_%m_%d_%H_%M_%S"
    timestamp = datetime.datetime.now().strftime(ft)
    model = ModelWrapper(config['input_size'],
                         z_dim=config['z_dim'],
                         start_dims=config['start_dims'],
                         continuous=config['continuous']).to(device)
    num_epochs = config['epochs']
    train_loader, val_loader, cal_loader = create_train_val_cal_loaders(images, labels,
                                                                        batch_size=config['batch_size'],
                                                                        continuous=config['continuous'])
    # Other optimisers work as well, Adamax is quite stable though
    optimizer = optim.Adamax(model.parameters(), weight_decay=config['wd'], lr=config['lr'])
    print('NUM PARAMS: {}'.format(count_parameters(model)))
    print(f'NUM EPOCHS: {num_epochs}')
    best_loss = 1e30
    best_acc = 0
    best_lk = 1e30
    global_step = 0
    for epoch in range(num_epochs):
        model.zero_grad()
        train_acc = 0
        # The trackers for the mean and scale of the inference map
        vae_mask = np.zeros((config['input_size'], config['input_size'], config['input_size']))
        vae_scale = np.zeros((config['input_size'], config['input_size'], config['input_size']))
        for (x, y) in train_loader:
            optimizer.zero_grad()
            x = x.type(Tensor).to(device)
            y = y.type(Tensor).to(device)
            ret_dict = model(x, y)
            loss = ret_dict['loss'].mean()
            loss.backward()
            optimizer.step()
            # Accumulate batch-mean maps; divided by the batch count below
            vae_mask += np.squeeze(ret_dict['mean_mask'].cpu().data.numpy())
            vae_scale += np.squeeze(ret_dict['mask_scale'].cpu().data.numpy())
            train_acc += 1
            global_step += 1
        vae_mask = vae_mask / train_acc
        val_mask = tc.from_numpy(vae_mask).type(Tensor).to(device).view(1, 1,
                                                                        config['input_size'],
                                                                        config['input_size'],
                                                                        config['input_size'])
        vae_scale = vae_scale / train_acc
        val_scale = tc.from_numpy(vae_scale).type(Tensor).to(device).view(1, 1,
                                                                          config['input_size'],
                                                                          config['input_size'],
                                                                          config['input_size'])
        val_acc = 0
        accuracy_acc = 0
        loss_acc = 0
        likelihood_acc = 0
        kld_acc = 0
        recon_acc = 0
        # Validation uses the epoch-averaged training maps (val=True)
        with torch.no_grad():
            for (x, y) in val_loader:
                x = x.type(Tensor).to(device)
                y = y.type(Tensor).to(device)
                ret_dict = model(x, y,
                                 provided_mask=val_mask,
                                 provided_scale=val_scale,
                                 val=True)
                loss_acc += ret_dict['loss'].mean().item()
                val_acc += 1
                likelihood_acc += ret_dict['mask_ll'].item()
                accuracy_acc += ret_dict['acc'].item()
                kld_acc += ret_dict['kl'].item()
                recon_acc += ret_dict['recon_ll'].item()
        loss = loss_acc / val_acc
        lk = likelihood_acc / val_acc
        acc = round(accuracy_acc / val_acc, 4)
        kl = round(kld_acc / val_acc, 3)
        rec = recon_acc / val_acc
        print(f'Epoch: {epoch}, mask likelihood: {lk}, KL: {kl}, accuracy: {acc}, recon likelihood: {rec}')
        # Model selection is on the validation mask likelihood only
        if lk < best_lk:
            best_loss = loss
            best_lk = lk
            best_acc = acc
            # NOTE(review): best_recon is assigned but never read afterwards
            best_recon = recon_acc
            best_epoch = epoch
            torch.save(model, f"pretrained/{timestamp}.pth")
            np.save(f'vae_masks/{timestamp}.npy', vae_mask)
            np.save(f'vae_masks/{timestamp}_scale.npy', vae_scale)
        if epoch % 10 == 0:
            print(f'Best acc: {best_acc}, likelihood: {best_lk}, epoch: {best_epoch}')
    # NOTE(review): this prints best_loss under the "likelihood" label
    print(f'Best acc: {best_acc}, likelihood: {best_loss}, epoch: {best_epoch}')
    print('TRAINING DONE, CALIBRATING THE BEST MODEL')
    model = torch.load(f"pretrained/{timestamp}.pth")
    model.eval()
    vae_mask = np.load(f'vae_masks/{timestamp}.npy')
    best_threshold = 0
    best_likelihood = 1e30
    threshold_range = np.linspace(0.95, 0.99, num=20)
    for thresh in threshold_range:
        # NOTE(review): t/bin_res are unused here — the model re-thresholds
        # internally per-sample via calibrate=True and t=thresh
        t = np.quantile(vae_mask, thresh)
        bin_res = (vae_mask > t) * vae_mask
        with torch.no_grad():
            counter = 0
            likelihood = 0
            for (x, y) in cal_loader:
                x = x.type(Tensor).to(device)
                y = y.type(Tensor).to(device)
                ret_dict = model(x, y,
                                 calibrate=True,
                                 t=float(thresh))
                # NOTE(review): mask_ll is accumulated as a tensor (no .item())
                likelihood += ret_dict['mask_ll']
                counter += 1
            likelihood = likelihood / counter
            if likelihood < best_likelihood:
                best_likelihood = likelihood
                best_threshold = thresh
    t = np.quantile(vae_mask, best_threshold)
    thresholded_mask = (vae_mask > t) * vae_mask
    # Save the thresholded mask
    np.save(f'vae_masks/thresholded_{timestamp}.npy', thresholded_mask)
if __name__ == '__main__':
    # Demo entry point: generates random lesions/labels so the pipeline can be
    # smoke-tested. Replace `images` and `labels` with real data for actual use.
    images = np.random.uniform(0, 1, (1000, 32, 32, 32))
    labels = np.random.uniform(0, 1, (1000, 1))
    # This config works pretty well, but context dependent.
    # BUGFIX: 'lr' and 'wd' are read by train_vdvae (Adamax optimiser) and were
    # previously missing from this config, raising a KeyError on startup.
    config = dict(input_size=32,
                  z_dim=128,
                  start_dims=16,
                  continuous=True,
                  epochs=1000,
                  batch_size=500,
                  lr=1e-3,
                  wd=1e-4
                  )
    train_vdvae(config, images, labels)
| 6,905 | 34.234694 | 107 | py |
GCNH | GCNH-main/main.py | """
Perform training and testing of GCNH on the 10 available splits
"""
import torch
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from utils import *
from datetime import datetime
from copy import deepcopy
from scipy.sparse import coo_matrix
from models import GCNH
from tqdm import tqdm
if __name__ == "__main__":
    # Train/evaluate GCNH on the 10 fixed public splits of the chosen dataset.
    args = parse_args()
    cuda = torch.cuda.is_available()
    if args.use_seed:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
    n_nodes, n_classes = get_nodes_classes(args.dataset)
    labeled = None
    # Citation graphs ship their own loader (and a labeled-node subset);
    # the WebKB/Wikipedia/film datasets use the generic txt-based loaders.
    if args.dataset in ['cora', 'pubmed', 'citeseer']:
        adj, features, labels, idx_train, idx_val, idx_test, labeled = load_data_cit(args.dataset, undirected=True)
    else:
        features, labels, idx_train, idx_val, idx_test = load_data(args.dataset)
        adj = load_graph(args.dataset, n_nodes, features, undirected=True)
    print("Train percentage: ", len(idx_train) / (len(idx_train) + len(idx_val) + len(idx_test)))
    print("Eval percentage: ", len(idx_val) / (len(idx_train) + len(idx_val) + len(idx_test)))
    print("Test percentage: ", len(idx_test) / (len(idx_train) + len(idx_val) + len(idx_test)))
    tot_splits = 10
    if args.aggfunc not in ["mean", "sum", "maxpool"]:
        print('Valid aggregation functions are "sum", "mean", "maxpool".\nAggregation function "{}" is not available. Using "sum" instead.'.format(args.aggfunc))
    if args.aggfunc == "mean":
        # Mean aggregation requires to normalize the adjacency matrix
        print("Normalizing adj")
        adj = normalize(adj, False)
    if args.aggfunc == "maxpool":
        # Precomputing this allows for a fast execution of maxpooling aggregation
        coo_m = coo_matrix(adj.numpy())
        row, col = torch.tensor(coo_m.row).long(), torch.tensor(coo_m.col).long()
    else:
        row, col = None, None
    split_acc = []
    for split in range(tot_splits):
        print("Split: ", split)
        # Per-split train/val/test indices (restricted to labeled nodes if any)
        idx_train, idx_val, idx_test = load_idx(split, args.dataset, labeled)
        model = GCNH(nfeat=features.shape[1],
                     nhid=args.nhid,
                     nclass=n_classes,
                     dropout=args.dropout,
                     nlayers=args.nlayers,
                     maxpool=args.aggfunc == "maxpool")
        if cuda:
            print("Using CUDA")
            model.cuda()
            features = features.cuda()
            adj = adj.cuda()
            labels = labels.cuda()
            idx_train = idx_train.cuda()
            idx_test = idx_test.cuda()
            idx_val = idx_val.cuda()
            if args.aggfunc == "maxpool":
                row, col = row.cuda(), col.cuda()
        optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        batch_size = args.batch_size
        num_batches = len(idx_train) // batch_size + 1
        print("Number of batches: ", num_batches)
        state_dict_early_model = None
        best_val_acc = 0.0
        # NOTE(review): best_val_loss starts at 0.0, so the val_loss tie-break
        # below can only fire once best_val_loss has been set by an acc improvement
        best_val_loss = 0.0
        t1 = datetime.now()
        if args.verbose:
            epochs = range(args.epochs)
        else:
            epochs = tqdm(range(args.epochs))
        patience_count = 0
        for epoch in epochs:
            if patience_count > args.patience:
                break
            model.train()
            idx = list(range(len(idx_train)))
            np.random.shuffle(idx)
            tot_acc = 0
            tot_loss = 0
            for batch in range(num_batches):
                optimizer.zero_grad()
                cur_idx = idx_train[idx[batch * batch_size: batch * batch_size + batch_size]]
                # For each batch, forward the whole graph but compute loss only on nodes in current batch
                output = model(features, adj, cur_idx=cur_idx, verbose=False,row=row,col=col)
                train_loss = F.nll_loss(output, labels[cur_idx])
                train_acc = accuracy(output, labels[cur_idx])
                train_loss.backward()
                optimizer.step()
                tot_loss += train_loss.detach().cpu().numpy()
                tot_acc += train_acc
            # Validation for each epoch
            model.eval()
            with torch.no_grad():
                output = model(features, adj, cur_idx=idx_val, verbose=False,row=row,col=col)
                val_loss = F.nll_loss(output, labels[idx_val])
                val_acc = accuracy(output, labels[idx_val])
                if args.verbose:
                    print(
                        "Epoch {:05d} | Train Loss {:.4f} | Train Acc {:.4f} | Val Loss {:.4f} | Val Acc {:.4f}".format(
                            epoch, train_loss.item(), train_acc, val_loss, val_acc))
                # Early stopping: keep the model with best val acc (loss breaks ties)
                if val_acc >= best_val_acc and (val_acc > best_val_acc or val_loss < best_val_loss):
                    best_val_acc = val_acc.cpu()
                    best_val_loss = val_loss.detach().cpu()
                    state_dict_early_model = deepcopy(model.state_dict())
                    patience_count = 0
                else:
                    patience_count += 1
        # Perform test
        with torch.no_grad():
            print("Testing")
            model.load_state_dict(state_dict_early_model)
            model.eval()
            output = model(features, adj, cur_idx=idx_test, verbose=True,row=row,col=col)
            acc_test = accuracy(output, labels[idx_test])
            t2 = datetime.now()
            split_acc.append(acc_test.item())
            print("Test_acc" + ":" + str(acc_test))
            print("Time: ", (t2-t1).total_seconds())
    split_acc = np.array(split_acc)
    print("Average acc: ", split_acc.mean())
GCNH | GCNH-main/utils.py | import scipy.sparse as sp
import torch
import numpy as np
import pickle as pkl
import sys
import networkx as nx
from dataset import CustomDataset
import argparse
import random
from os import path as path
"""
READ ARGUMENTS
"""
def parse_boolean(value):
    """Interpret a CLI string as a boolean; any unrecognised value means False."""
    return value.lower() in ("true", "yes", "y", "1", "t")
def parse_args():
    """ Parse arguments for a GCNH training run (run details + hyperparameters). """
    parse = argparse.ArgumentParser()
    ## Run details
    # NOTE(review): the default model name is "GATH" although this repo trains GCNH — confirm intended
    parse.add_argument("--model", help="model to train and test", type=str, default="GATH")
    parse.add_argument("-d", "--dataset", help="dataset", type=str, default="cornell")
    parse.add_argument('--model_name', type=str, help='Name of model used', default="Empty")
    parse.add_argument('--verbose', type=parse_boolean, default=False, help='Whether to display training losses')
    parse.add_argument('--hom_syn', type=str, default="h0.00-r1", help='Homophily level for synthetic dataset')
    parse.add_argument('--use_seed', type=parse_boolean, default=True, help='Whether to use seed')
    parse.add_argument('--seed', type=int, default=112, help='Seed')
    parse.add_argument('--splits', type=int, default=0, help='Dataset split') ## Fix this later
    parse.add_argument('--aggfunc', type=str, default="sum", help='Neighbor aggregation function: one of sum, mean or maxpool')
    ## Hyperparameters
    parse.add_argument('--epochs', type=int, default=100, help='Number of epochs')
    parse.add_argument('--patience', type=int, default=1000, help='Patience')
    # Default batch size is effectively "full batch" for all supported graphs
    parse.add_argument('--batch_size', type=int, default=100000, help='Batch size')
    parse.add_argument('--nhid', type=int, default=16, help='Hidden size')
    parse.add_argument('--dropout', type=float, default=0.0, help='Dropout rate')
    parse.add_argument('--nlayers', type=int, default=1, help='Number of layers')
    parse.add_argument('--lr', type=float, default=5e-3, help='Learning rate')
    parse.add_argument('--weight_decay', type=float, default=5e-3, help='Weight decay')
    args = parse.parse_args()
    return args
"""
GET INFORMATION FOR SELECTED MODEL
"""
def get_nodes_classes(dataset):
    """Return (num_nodes, num_classes) for a supported dataset name.

    Prints a warning and returns (None, None) for unknown datasets.
    """
    # BUGFIX: 'pubmed' was missing although main.py routes it through
    # load_data_cit, so a supported dataset yielded (None, None) and crashed
    # downstream. Pubmed: 19,717 nodes, 3 classes (Planetoid statistics).
    nodes = {"cornell": 183, "texas": 183, "wisconsin": 251, "film": 7600,
             "chameleon": 2277, "squirrel": 5201, "cora": 2708,
             "citeseer": 3327, "pubmed": 19717}
    classes = {"cornell": 5, "texas": 5, "wisconsin": 5, "film": 5,
               "chameleon": 5, "squirrel": 5, "cora": 7,
               "citeseer": 6, "pubmed": 3}
    if dataset not in nodes:
        print("Dataset is not present!")
        return None, None
    return nodes[dataset], classes[dataset]
def accuracy(output, labels):
    """Fraction of rows in `output` whose argmax matches `labels` (as a tensor)."""
    predictions = output.max(1)[1].type_as(labels)
    hits = predictions.eq(labels).double().sum()
    return hits / len(labels)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    coo = sparse_mx.tocoo().astype(np.float32)
    index_array = np.vstack((coo.row, coo.col)).astype(np.int64)
    return torch.sparse.FloatTensor(torch.from_numpy(index_array),
                                    torch.from_numpy(coo.data),
                                    torch.Size(coo.shape))
def normalize(adj, is_sparse=False):
    """Normalize an adjacency matrix.

    Sparse input: symmetric normalization D^{-1/2} A D^{-1/2}, returned as a
    torch sparse tensor (the non-zero pattern must be preserved; asserted).
    Dense input: row normalization A / (rowsum + eps), i.e. mean aggregation.
    """
    if is_sparse:
        adj = adj.coalesce()
        indices = adj.indices()
        values = adj.values()
        # Round-trip through scipy to use its sparse linear algebra
        adj = sp.coo_matrix((values, (indices[0],indices[1])), shape=adj.shape)
        rowsum = np.array(adj.sum(1))
        d_inv_sqrt = np.power(rowsum, -0.5).flatten()
        # Isolated nodes have degree 0 -> 1/sqrt(0) = inf; zero them out
        d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
        d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
        mx = sparse_mx_to_torch_sparse_tensor(adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo())
        # Sanity check: normalization must not reorder or change the sparsity pattern
        assert (mx.coalesce().indices() == indices).all()
        return mx
    else:
        # Small epsilon guards against division by zero for isolated nodes
        d = adj.sum(dim=1) + 1e-6
        adj = adj / d.view([len(d),1])
        return adj
"""
LOAD GRAPHS FROM FILES
"""
def parse_index_file(filename):
    """Read one integer per line from `filename` and return them as a list.

    BUGFIX: the previous version opened the file without ever closing it
    (resource leak); a context manager now guarantees closure.
    """
    with open(filename) as fh:
        return [int(line.strip()) for line in fh]
def load_idx(split, dataset, labeled):
    """Load the fixed 60/20/20 split masks for `dataset` (doesn't work for syn).

    When `labeled` is not None the masks are first restricted to the labeled
    nodes. Returns (idx_train, idx_val, idx_test) as LongTensors of indices.
    """
    masks = np.load("./data/{}/splits/{}_split_0.6_0.2_{}.npz".format(dataset, dataset, split))
    selected = []
    for key in ('train_mask', 'val_mask', 'test_mask'):
        mask = masks[key] if labeled is None else masks[key][labeled]
        selected.append(torch.LongTensor(np.where(mask == 1)[0]))
    return selected[0], selected[1], selected[2]
def load_syn_cora(name):
    """Load the dataset in file `syn-cora/<name>.npz` and build a 50/20/30 split.

    The dataset seed (15) and split seed (155) are fixed so results are
    reproducible across runs. Returns
    (features, labels, adj, idx_train, idx_val, idx_test).
    """
    dataset = CustomDataset(root="syn-cora", name=name, setting="gcn", seed=15)
    adj = dataset.adj  # Access adjacency matrix
    features = dataset.features  # Access node features
    labels = dataset.labels

    node_order = np.arange(features.shape[0])
    random.seed(155)
    random.shuffle(node_order)
    n_train = int(0.5 * len(node_order))
    n_val_end = int(0.7 * len(node_order))

    idx_train = torch.LongTensor(node_order[:n_train])
    idx_val = torch.LongTensor(node_order[n_train:n_val_end])
    idx_test = torch.LongTensor(node_order[n_val_end:])

    labels = torch.LongTensor(labels)
    dense_feats = sp.csr_matrix(features, dtype=np.float32).todense()
    features = torch.FloatTensor(np.array(dense_feats))
    adj = torch.FloatTensor(np.array(adj.todense()))
    return features, labels, adj, idx_train, idx_val, idx_test
def load_data(dataset, split_name=0):
    """Load node features, labels and the fixed 60/20/20 split for `dataset`.

    Reads `./data/<dataset>/out1_node_feature_label.txt` (tab separated:
    node_id, comma-separated features, label) plus the split masks under
    `./data/<dataset>/splits/`. For 'film' the feature field is a list of
    active 1-based indices expanded into a 931-dim multi-hot vector.

    Returns (features, labels, idx_train, idx_val, idx_test).

    BUGFIX: the file handle was never closed and was even shadowed by the
    inner loop variable `f`; it is now managed with a context manager.
    """
    feature_list = []
    label_list = []
    with open('./data/{}/out1_node_feature_label.txt'.format(dataset), 'r') as fh:
        for line in fh.readlines():
            ele = line.strip().split('\t')
            if ele[0] == 'node_id':
                continue  # skip the header row
            feature = ele[1]
            label = int(ele[2])
            if dataset == 'film':
                # Sparse index list -> 931-dim multi-hot vector (1-based indices)
                feature_array = np.zeros([931])
                for idx_str in feature.strip().split(','):
                    feature_array[int(idx_str) - 1] = 1
                feature_list.append(feature_array)
            else:
                feature_list.append(feature.strip().split(','))
            label_list.append(label)
    feature = np.array(feature_list, dtype=float)
    idx = np.load("./data/{}/splits/{}_split_0.6_0.2_{}.npz".format(dataset, dataset, split_name))
    idx_train = torch.LongTensor(np.where(idx['train_mask'] == 1)[0])
    idx_val = torch.LongTensor(np.where(idx['val_mask'] == 1)[0])
    idx_test = torch.LongTensor(np.where(idx['test_mask'] == 1)[0])
    labels = torch.LongTensor(label_list)
    features = sp.csr_matrix(feature, dtype=np.float32)
    features = torch.FloatTensor(np.array(features.todense()))
    return features, labels, idx_train, idx_val, idx_test
def load_graph(dataset, n_nodes, features=None, undirected=False):
    """Build a dense (n_nodes, n_nodes) adjacency tensor from the edge list.

    Reads `./data/<dataset>/out1_graph_edges.txt`. `features` is accepted for
    interface compatibility but unused. With `undirected=True` the matrix is
    symmetrised (elementwise max of A and A^T).
    """
    print('Loading {} dataset...'.format(dataset))
    raw_edges = np.genfromtxt("./data/" + dataset + "/out1_graph_edges.txt", dtype=np.int32)
    edges = np.array(list(raw_edges), dtype=np.int32).reshape(raw_edges.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(n_nodes, n_nodes), dtype=np.float32)
    if undirected:
        adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    return torch.FloatTensor(np.array(adj.todense()))
def load_data_cit(dataset_str, split_name=0, undirected=False):
    """
    Load citation graphs Cora, Citeseer and Pubmed
    Loads input data from gcn/data directory
    ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
        (a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
    ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
    ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
    ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
        object;
    ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.
    All objects above must be saved using python pickle module.
    :param dataset_str: Dataset name
    :return: All data input files loaded (as well the training/test data).
    """
    # Unpickle the seven Planetoid files in a fixed order.
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/{}/ind.{}.{}".format(dataset_str, dataset_str, names[i]), 'rb') as f:
            # latin1 encoding is required to read Python-2 pickles under Python 3.
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    # Test indices are stored shuffled on disk; sort them for positional assignment.
    test_idx_reorder = parse_index_file("data/{}/ind.{}.test.index".format(dataset_str, dataset_str))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended
    # Stack train+test features and undo the on-disk shuffling of the test rows.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    if undirected:
        # Symmetrize by keeping the larger of A[i,j] / A[j,i] in both positions.
        adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    adj = torch.FloatTensor(np.array(adj.todense()))
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    # Convert one-hot label rows to integer class ids; nodes whose one-hot row is
    # all-zero (isolated/unlabeled, e.g. in citeseer) are dropped from `labeled`.
    lab = torch.zeros([adj.shape[0], ], dtype=torch.long)
    labeled = []
    for i in range(adj.shape[0]):
        if len(np.where(labels[i,:] == 1)[0]) != 0:
            lab[i] = int(np.where(labels[i,:] == 1)[0])
            labeled.append(i)
    # Restrict graph, features and labels to the labeled nodes only.
    adj = torch.reshape(adj[labeled, :][:,labeled], [len(labeled), len(labeled)])
    features = features[labeled, :]
    lab = lab[labeled]
    idx = np.load("./data/{}/splits/{}_split_0.6_0.2_{}.npz".format(dataset_str, dataset_str, split_name))
    # Split masks are defined over all nodes; index them down to the labeled subset.
    idx_train = np.where(idx['train_mask'][labeled] == 1)[0]
    idx_test = np.where(idx['test_mask'][labeled] == 1)[0]
    idx_val = np.where(idx['val_mask'][labeled] == 1)[0]
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)
    # NOTE(review): this tensor is never returned (the function returns `lab`);
    # the assignment appears to be dead code — confirm before removing.
    labels = torch.LongTensor(labels)
    features = sp.csr_matrix(features, dtype=np.float32)
    features = torch.FloatTensor(np.array(features.todense()))
    # Only citeseer callers need the surviving-node index list; others get None.
    if dataset_str != "citeseer":
        labeled = None
    return adj, features, lab, idx_train, idx_val, idx_test, labeled
| 12,193 | 38.980328 | 127 | py |
GCNH | GCNH-main/layers.py | """
GCNH Layer
"""
import torch
import torch.nn as nn
from torch.nn.modules.module import Module
from torch_scatter import scatter
class GCNH_layer(Module):
    """Single GCNH layer.

    Combines a transformed center-node embedding with an aggregated
    neighborhood embedding through a learnable scalar gate ``beta``.
    """

    def __init__(self, nfeat, nhid, maxpool):
        """
        Args:
            nfeat: input feature dimensionality.
            nhid: hidden (output) dimensionality.
            maxpool: if True, aggregate neighbor messages with max pooling
                (via torch_scatter); otherwise aggregate with ``adj @ h``
                (sum, or mean when ``adj`` is row-normalized).
        """
        super(GCNH_layer, self).__init__()
        self.nhid = nhid
        self.maxpool = maxpool
        # Two MLPs, one to encode center-node embedding,
        # the other for the neighborhood embedding
        self.MLPfeat = nn.Sequential(
            nn.Linear(nfeat, self.nhid),
            nn.LeakyReLU()
        )
        # Use .apply() so init_weights reaches the Linear submodules; calling
        # init_weights(self.MLPfeat) directly was a no-op because an
        # nn.Sequential never passes the isinstance(nn.Linear) check.
        self.MLPfeat.apply(self.init_weights)
        self.MLPmsg = nn.Sequential(
            nn.Linear(nfeat, self.nhid),
            nn.LeakyReLU()
        )
        self.MLPmsg.apply(self.init_weights)
        # Gate parameter beta; squashed with sigmoid in forward, so the
        # initial mixing weight is 0.5.
        self.beta = nn.Parameter(0.0 * torch.ones(size=(1, 1)), requires_grad=True)

    def init_weights(self, m):
        """Xavier-initialize Linear submodules (intended for Module.apply)."""
        if isinstance(m, nn.Linear):
            # xavier_uniform_ is the in-place, non-deprecated spelling.
            torch.nn.init.xavier_uniform_(m.weight)
            m.bias.data.fill_(0.01)

    def forward(self, feat, adj, cur_idx=None, row=None, col=None):
        """
        feat: feature matrix
        adj: adjacency matrix
        cur_idx: index of nodes in current batch
        row, col: used for maxpool aggregation
        """
        if cur_idx is None:
            cur_idx = range(feat.shape[0])
        # Transform center-node and neighborhood messages
        h = self.MLPfeat(feat)
        z = self.MLPmsg(feat)
        # Aggregate messages, gated by sigmoid(beta)
        beta = torch.sigmoid(self.beta)
        if not self.maxpool:  # sum or mean aggregation via dense matmul
            hp = beta * z + (1 - beta) * torch.matmul(adj, h)
        else:
            # Max-pool neighbor embeddings with torch_scatter over the edge list.
            hh = torch.zeros(adj.shape[0], self.nhid)
            if next(self.parameters()).is_cuda:
                hh = hh.cuda()
            _ = scatter(h[row], col, dim=0, out=hh, reduce="max")
            hp = beta * z + (1 - beta) * hh
        return hp, beta
| 1,967 | 28.373134 | 84 | py |
GCNH | GCNH-main/models.py | """
Define GCNH model
"""
import torch.nn as nn
import torch.nn.functional as F
from layers import GCNH_layer
import torch
from utils import *
class GCNH(nn.Module):
    """GCNH model: a stack of GCNH layers followed by a log-softmax MLP head."""

    def __init__(self, nfeat, nclass, nhid, dropout, nlayers, maxpool):
        """
        Args:
            nfeat: input feature dimensionality.
            nclass: number of output classes.
            nhid: hidden dimensionality of every layer.
            dropout: dropout probability applied after each layer.
            nlayers: number of stacked GCNH layers.
            maxpool: forwarded to each layer's aggregation mode.
        """
        super(GCNH, self).__init__()
        self.nhid = nhid
        self.dropout = dropout
        self.nlayers = nlayers
        # Define layers: the first maps nfeat -> nhid, the rest nhid -> nhid.
        layer_sizes = [nfeat] + [nhid] * (self.nlayers - 1)
        self.layers = nn.ModuleList([GCNH_layer(layer_sizes[i], nhid, maxpool) for i in range(self.nlayers)])
        # MLP for classification
        self.MLPcls = nn.Sequential(
            nn.Linear(self.nhid, nclass),
            nn.LogSoftmax(dim=1)
        )
        # Use .apply() so init_weights reaches the Linear submodule; calling
        # init_weights(self.MLPcls) directly was a no-op because an
        # nn.Sequential never passes the isinstance(nn.Linear) check.
        self.MLPcls.apply(self.init_weights)

    def init_weights(self, m):
        """Xavier-initialize Linear submodules (intended for Module.apply)."""
        if isinstance(m, nn.Linear):
            # xavier_uniform_ is the in-place, non-deprecated spelling.
            torch.nn.init.xavier_uniform_(m.weight)
            m.bias.data.fill_(0.01)

    def forward(self, feat, adj, cur_idx=None, verbose=False, row=None, col=None):
        """
        feat: feature matrix
        adj: adjacency matrix
        cur_idx: index of nodes in current batch
        row, col: used for maxpool aggregation
        """
        if cur_idx is None:
            cur_idx = range(feat.shape[0])
        hp = feat
        for i in range(self.nlayers):
            hp, beta = self.layers[i](hp, adj, cur_idx=cur_idx, row=row, col=col)
            if verbose:
                print("Layer: ", i, " beta: ", beta.item())
            hp = F.dropout(hp, self.dropout, training=self.training)
        # Classify only the nodes of the current batch.
        return self.MLPcls(hp[cur_idx])
| 1,619 | 28.454545 | 109 | py |
GCNH | GCNH-main/main_syn.py | """
Perform training and testing of GCNH on the synthetic dataset
"""
import torch
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from utils import *
import os
from tqdm import tqdm
from copy import deepcopy
from models import GCNH
from scipy.sparse import coo_matrix
if __name__ == "__main__":
    # Train and evaluate GCNH on the synthetic-Cora benchmark, sweeping over
    # eleven homophily levels with three dataset realizations each.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    args = parse_args()
    dataset = "syn"
    n_classes = 5
    cuda = torch.cuda.is_available()
    if args.use_seed:
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)
    for hom_syn in ["h0.00-r", "h0.10-r", "h0.20-r", "h0.30-r", "h0.40-r", "h0.50-r", "h0.60-r", "h0.70-r", "h0.80-r", "h0.90-r", "h1.00-r"]:
        final_acc = []
        b_list = []
        for r in range(1,4): # There are 3 datasets for each homophily level
            print("Loading graph ", hom_syn + str(r))
            features, labels, adj, idx_train, idx_val, idx_test = load_syn_cora(hom_syn + str(r))
            # Row-normalize the adjacency for mean aggregation.
            if args.aggfunc == "mean":
                adj = normalize(adj)
            if args.aggfunc == "maxpool":
                # Precomputing this allows for a fast execution of maxpooling aggregation
                coo_m = coo_matrix(adj.numpy())
                row, col = torch.tensor(coo_m.row).long(), torch.tensor(coo_m.col).long()
            else:
                row, col = None, None
            model = GCNH(nfeat=features.shape[1],
                         nhid=args.nhid,
                         nclass=n_classes,
                         dropout=args.dropout,
                         nlayers=args.nlayers,
                         maxpool=args.aggfunc == "maxpool")
            # Move the model and every tensor to the GPU if one is available.
            if cuda:
                model.cuda()
                features = features.cuda()
                adj = adj.cuda()
                labels = labels.cuda()
                idx_train = idx_train.cuda()
                idx_test = idx_test.cuda()
                idx_val = idx_val.cuda()
                if args.aggfunc == "maxpool":
                    row, col = row.cuda(), col.cuda()
            optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
            n_nodes = adj.shape[0]
            batch_size = args.batch_size
            num_batches = len(idx_train) // batch_size + 1
            state_dict_early_model = None
            best_val_acc = 0.0
            # NOTE(review): best_val_loss starts at 0.0, so the
            # `val_loss < best_val_loss` clause below cannot fire until
            # best_val_acc is first exceeded — confirm float("inf") was not
            # intended here.
            best_val_loss = 0.0
            for epoch in tqdm(range(args.epochs)):
                model.train()
                # Shuffle the training nodes into mini-batches each epoch.
                idx = list(range(len(idx_train)))
                np.random.shuffle(idx)
                for batch in range(num_batches):
                    optimizer.zero_grad()
                    cur_idx = idx_train[idx[batch * batch_size: batch * batch_size + batch_size]]
                    # For each batch, forward the whole graph but compute loss only on nodes in current batch
                    output = model(features, adj, cur_idx=cur_idx, verbose=False,row=row,col=col)
                    train_loss = F.nll_loss(output, labels[cur_idx])
                    train_acc = accuracy(output, labels[cur_idx])
                    train_loss.backward()
                    optimizer.step()
                # Validation for each epoch
                model.eval()
                with torch.no_grad():
                    output = model(features, adj, cur_idx=idx_val, verbose=False,row=row,col=col)
                    val_loss = F.nll_loss(output, labels[idx_val])
                    val_acc = accuracy(output, labels[idx_val])
                    if args.verbose:
                        print(
                            "Epoch {:05d} | Train Loss {:.4f} | Train Acc {:.4f} | Val Loss {:.4f} | Val Acc {:.4f}".format(
                                epoch, train_loss.item(), train_acc, val_loss, val_acc))
                    # Keep a snapshot of the best model by validation accuracy
                    # (ties broken by lower validation loss).
                    if val_acc >= best_val_acc and (val_acc > best_val_acc or val_loss < best_val_loss):
                        best_val_acc = val_acc.cpu()
                        best_val_loss = val_loss.detach().cpu()
                        state_dict_early_model = deepcopy(model.state_dict())
            # Perform test
            with torch.no_grad():
                model.load_state_dict(state_dict_early_model)
                model.eval()
                output = model(features, adj, cur_idx=idx_test, verbose=True,row=row,col=col)
                acc_test = accuracy(output, labels[idx_test])
                final_acc.append(acc_test.detach().cpu().item())
                print("Test_acc" + ":" + str(acc_test.detach().cpu().item()))
        final_acc = np.array(final_acc)
        print("Total accuracy: ", np.mean(final_acc) , " std: ", np.std(final_acc))
| 4,675 | 36.111111 | 141 | py |
merc2020 | merc2020-master/feature_extract.py | """
train code
"""
import os
import tensorflow as tf
import keras
from keras.layers import Dense, Lambda, AveragePooling1D
import numpy as np
from keras import backend as K
from keras.models import load_model, Model
os.environ["CUDA_VISIBLE_DEVICES"]='0'
def attention_pooling(model_input):
    """Attention pooling over a sequence of recurrent states.

    Args:
        model_input: sequential input tensor (batch, time, units).
    Returns:
        The input frames combined by a learned softmax attention
        distribution over time.
    """
    # Collapse the 128 units of each frame to their mean (one scalar per frame).
    mean_per_frame = AveragePooling1D(pool_size=128, data_format='channels_first', padding='valid')(model_input)
    mean_per_frame = Lambda(lambda t: K.squeeze(t, axis=2))(mean_per_frame)
    # (batch, time, units) -> (batch, units, time) for the batched dot product.
    transposed = Lambda(lambda t: K.permute_dimensions(t, [0, 2, 1]))(model_input)
    # Softmax over the 50 frames yields the attention weights.
    weights = Dense(50, activation='softmax', name='attention')(mean_per_frame)
    # Weighted combination of frames: weights . input^T.
    return Lambda(lambda pair: K.batch_dot(pair[0], pair[1], axes=(1, 2)))([weights, transposed])
def inter_model_load(model_path):
    """Load a trained speech model and expose its 'dense_2' layer output."""
    # attention_pooling must be registered so the saved Lambda layers deserialize.
    full_model = load_model(model_path, custom_objects={'attention_pooling': attention_pooling})  # Load best model
    feature_extractor = Model(inputs=full_model.input, outputs=full_model.get_layer('dense_2').output)
    return feature_extractor
def main(data_type):
    """Extract bottleneck ('dense_2') features for one data split.

    Args:
        data_type: split name (train/val/test1/test2/test3); selects the
            input file dataset/speech_<data_type>.npy and names the output.
    """
    # Cap TF at a fraction of GPU memory so the card can be shared.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
    config = tf.ConfigProto(device_count={'GPU': 1, 'CPU': 30}, gpu_options=gpu_options)
    sess = tf.Session(config=config)
    keras.backend.set_session(sess)
    # Load mel-spectrogram numpy
    x = np.load('dataset/' + 'speech_' + data_type + '.npy')
    # Load model
    modelPath = 'model/speech_model_acc_0.3925.hdf5'
    model = inter_model_load(modelPath)
    model.summary()
    # Feature extraction
    feature = model.predict(x, verbose=1, batch_size=256)
    print(np.shape(feature))
    # Write the extracted features next to the inputs, creating the dir on demand.
    if not (os.path.isdir('features')):
        os.makedirs(os.path.join('features'))
    np.save('features/' + 'speech_BN_' + data_type + '.npy', feature)
    print("Finished")
    K.clear_session()
if __name__ == '__main__':
    # Ask which split to featurize; the answer selects dataset/speech_<split>.npy.
    print("train/val/test1/test2/test3:")
    data_type = input()
    main(data_type)
| 2,279 | 28.230769 | 117 | py |
merc2020 | merc2020-master/train.py | """
train code
"""
import os
import tensorflow as tf
import keras
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Conv2D, Dropout, MaxPooling2D, Input, Flatten, Lambda, AveragePooling1D, Activation, TimeDistributed, LSTM, Bidirectional, BatchNormalization
import numpy as np
from keras import backend as K
from keras.models import Model
os.environ["CUDA_VISIBLE_DEVICES"]='0'
def attention_pooling(model_input):
    """Attention pooling over the BLSTM output sequence.

    Args:
        model_input: sequential input tensor (batch, time, units).
    Returns:
        The attention-weighted sum of the input frames.
    """
    # Mean over the 128 recurrent units -> one scalar per frame.
    frame_means = AveragePooling1D(pool_size=128, data_format='channels_first', padding='valid')(model_input)
    frame_means = Lambda(lambda v: K.squeeze(v, axis=2))(frame_means)
    # Swap time/feature axes so batch_dot below contracts over time.
    swapped = Lambda(lambda v: K.permute_dimensions(v, [0, 2, 1]))(model_input)
    # Softmax over the 50 frames produces the attention distribution.
    attn = Dense(50, activation='softmax', name='attention')(frame_means)
    # Contract the attention weights against the (transposed) frames.
    pooled = Lambda(lambda v: K.batch_dot(v[0], v[1], axes=(1, 2)))([attn, swapped])
    return pooled
def speech_base_model():
    """
    Build the baseline speech CNN-BLSTM model.

    Returns:
        model: Keras Model mapping (400, 40, 1) spectrogram patches to a
        7-way softmax over emotion classes.
    """
    def conv_block(x, n_filters):
        # conv (5x5, same padding) -> batch norm -> relu -> 2x2 max pool
        x = Conv2D(n_filters, (5, 5), padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation(activation='relu')(x)
        return MaxPooling2D((2, 2), strides=(2, 2), padding='valid')(x)

    net_in = Input(shape=(400, 40, 1))
    # Three convolutional stages with growing filter counts.
    x = net_in
    for n_filters in (8, 16, 32):
        x = conv_block(x, n_filters)
    # Flatten each remaining time step before the recurrent stage.
    x = TimeDistributed(Flatten())(x)
    # Bi-directional LSTM over time, then attention pooling.
    x = Bidirectional(LSTM(64, return_sequences=True))(x)
    x = attention_pooling(x)
    # Classification head.
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.5)(x)
    net_out = Dense(7, activation='softmax', name='output_layer')(x)
    model = Model(inputs=net_in, outputs=net_out)
    model.summary()
    return model
def main():
    """Train the baseline speech model on pre-extracted mel-spectrograms."""
    # Load Training & Validation data
    _ROOT_PATH = "dataset/"
    x_train = np.load(_ROOT_PATH + "speech_train.npy")
    x_val = np.load(_ROOT_PATH + "speech_val.npy")
    y_train = np.load(_ROOT_PATH + "label_train.npy")
    y_val = np.load(_ROOT_PATH + "label_val.npy")
    # Convert labels to categorical one-hot encoding
    y_train = keras.utils.to_categorical(y_train, num_classes=7)
    y_val = keras.utils.to_categorical(y_val, num_classes=7)
    # Training Parameter setting
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
    config = tf.ConfigProto(device_count={'GPU': 1, 'CPU': 30}, gpu_options=gpu_options)
    sess = tf.Session(config=config)
    keras.backend.set_session(sess)
    # Model build
    model = speech_base_model()
    # Model Check point: the best-by-val_loss model is saved with its val_acc
    # interpolated into the filename.
    model_path = 'model/' + 'speech_model_' + 'acc_{val_acc:.4f}.hdf5'
    checkpoint = ModelCheckpoint(filepath=model_path, monitor='val_loss', verbose=1, save_best_only=True)
    early_stopping = EarlyStopping(monitor='val_acc', min_delta=0.0005, patience=30, verbose=1, mode='auto')
    # Training
    adam = keras.optimizers.Adam(lr = 0.001, beta_1 = 0.9, beta_2 = 0.999, amsgrad=False)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=256, epochs=256, validation_data=(x_val, y_val), verbose=1, callbacks=[early_stopping, checkpoint])
    ### Evaluation (on the validation split; no held-out test set is used here)
    score = model.evaluate(x_val, y_val, batch_size=256)
    print(score)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
if __name__ == '__main__':
    # Entry point: run the full training/evaluation pipeline.
    main()
| 4,570 | 34.434109 | 173 | py |
CLMR | CLMR-master/main.py | import argparse
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from torch.utils.data import DataLoader
# Audio Augmentations
from torchaudio_augmentations import (
RandomApply,
ComposeMany,
RandomResizedCrop,
PolarityInversion,
Noise,
Gain,
HighLowPass,
Delay,
PitchShift,
Reverb,
)
from clmr.data import ContrastiveDataset
from clmr.datasets import get_dataset
from clmr.evaluation import evaluate
from clmr.models import SampleCNN
from clmr.modules import ContrastiveLearning, SupervisedLearning
from clmr.utils import yaml_config_hook
if __name__ == "__main__":
    # CLMR entry point: pre-train the SampleCNN encoder contrastively, or train
    # it fully supervised when --supervised is set.
    parser = argparse.ArgumentParser(description="CLMR")
    parser = Trainer.add_argparse_args(parser)
    # Every key in the YAML config becomes an overridable CLI flag.
    config = yaml_config_hook("./config/config.yaml")
    for k, v in config.items():
        parser.add_argument(f"--{k}", default=v, type=type(v))
    args = parser.parse_args()
    pl.seed_everything(args.seed)
    # ------------
    # data augmentations
    # ------------
    if args.supervised:
        # Supervised training only crops; one view per clip.
        train_transform = [RandomResizedCrop(n_samples=args.audio_length)]
        num_augmented_samples = 1
    else:
        # Contrastive training uses the full stochastic chain; two views per clip.
        train_transform = [
            RandomResizedCrop(n_samples=args.audio_length),
            RandomApply([PolarityInversion()], p=args.transforms_polarity),
            RandomApply([Noise()], p=args.transforms_noise),
            RandomApply([Gain()], p=args.transforms_gain),
            RandomApply(
                [HighLowPass(sample_rate=args.sample_rate)], p=args.transforms_filters
            ),
            RandomApply([Delay(sample_rate=args.sample_rate)], p=args.transforms_delay),
            RandomApply(
                [
                    PitchShift(
                        n_samples=args.audio_length,
                        sample_rate=args.sample_rate,
                    )
                ],
                p=args.transforms_pitch,
            ),
            RandomApply(
                [Reverb(sample_rate=args.sample_rate)], p=args.transforms_reverb
            ),
        ]
        num_augmented_samples = 2
    # ------------
    # dataloaders
    # ------------
    train_dataset = get_dataset(args.dataset, args.dataset_dir, subset="train")
    valid_dataset = get_dataset(args.dataset, args.dataset_dir, subset="valid")
    contrastive_train_dataset = ContrastiveDataset(
        train_dataset,
        input_shape=(1, args.audio_length),
        transform=ComposeMany(
            train_transform, num_augmented_samples=num_augmented_samples
        ),
    )
    contrastive_valid_dataset = ContrastiveDataset(
        valid_dataset,
        input_shape=(1, args.audio_length),
        transform=ComposeMany(
            train_transform, num_augmented_samples=num_augmented_samples
        ),
    )
    train_loader = DataLoader(
        contrastive_train_dataset,
        batch_size=args.batch_size,
        num_workers=args.workers,
        drop_last=True,
        shuffle=True,
    )
    valid_loader = DataLoader(
        contrastive_valid_dataset,
        batch_size=args.batch_size,
        num_workers=args.workers,
        drop_last=True,
        shuffle=False,
    )
    # ------------
    # encoder
    # ------------
    encoder = SampleCNN(
        strides=[3, 3, 3, 3, 3, 3, 3, 3, 3],
        supervised=args.supervised,
        out_dim=train_dataset.n_classes,
    )
    # ------------
    # model
    # ------------
    if args.supervised:
        module = SupervisedLearning(args, encoder, output_dim=train_dataset.n_classes)
    else:
        module = ContrastiveLearning(args, encoder)
    logger = TensorBoardLogger("runs", name="CLMRv2-{}".format(args.dataset))
    if args.checkpoint_path:
        # Resume/evaluate from an existing checkpoint instead of training.
        module = module.load_from_checkpoint(
            args.checkpoint_path, encoder=encoder, output_dim=train_dataset.n_classes
        )
    else:
        # ------------
        # training
        # ------------
        # Early stopping is only meaningful for the supervised objective.
        if args.supervised:
            early_stopping = EarlyStopping(monitor="Valid/loss", patience=20)
        else:
            early_stopping = None
        trainer = Trainer.from_argparse_args(
            args,
            logger=logger,
            sync_batchnorm=True,
            max_epochs=args.max_epochs,
            log_every_n_steps=10,
            check_val_every_n_epoch=1,
            accelerator=args.accelerator,
        )
        trainer.fit(module, train_loader, valid_loader)
    # In the supervised setting, evaluate the trained encoder on the test split.
    if args.supervised:
        test_dataset = get_dataset(args.dataset, args.dataset_dir, subset="test")
        contrastive_test_dataset = ContrastiveDataset(
            test_dataset,
            input_shape=(1, args.audio_length),
            transform=None,
        )
        device = "cuda:0" if args.gpus else "cpu"
        results = evaluate(
            module.encoder,
            None,
            contrastive_test_dataset,
            args.dataset,
            args.audio_length,
            device=device,
        )
        print(results)
| 5,117 | 28.583815 | 88 | py |
CLMR | CLMR-master/export.py | """
This script will extract a pre-trained CLMR PyTorch model to an ONNX model.
"""
import argparse
import os
import torch
from collections import OrderedDict
from copy import deepcopy
from clmr.models import SampleCNN, Identity
from clmr.utils import load_encoder_checkpoint, load_finetuner_checkpoint
def convert_encoder_to_onnx(
    encoder: torch.nn.Module, test_input: torch.Tensor, fp: str
) -> None:
    """Trace ``encoder`` with ``test_input`` and serialize it to ONNX at ``fp``."""
    torch.onnx.export(
        encoder,
        test_input,
        fp,
        verbose=False,
        input_names=["audio"],
        output_names=["representation"],
    )
if __name__ == "__main__":
    # Export a pre-trained CLMR encoder (plus fine-tuned linear head) to ONNX.
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--finetuner_checkpoint_path", type=str, required=True)
    parser.add_argument("--n_classes", type=int, default=50)
    args = parser.parse_args()
    if not os.path.exists(args.checkpoint_path):
        raise FileNotFoundError("That encoder checkpoint does not exist")
    if not os.path.exists(args.finetuner_checkpoint_path):
        raise FileNotFoundError("That linear model checkpoint does not exist")
    # ------------
    # encoder
    # ------------
    encoder = SampleCNN(
        strides=[3, 3, 3, 3, 3, 3, 3, 3, 3],
        supervised=False,
        out_dim=args.n_classes,
    )
    n_features = encoder.fc.in_features  # get dimensions of last fully-connected layer
    state_dict = load_encoder_checkpoint(args.checkpoint_path, args.n_classes)
    encoder.load_state_dict(state_dict)
    encoder.eval()
    # ------------
    # linear model
    # ------------
    # Load the fine-tuned head into the encoder's final fc layer; the saved
    # keys carry a "0." Sequential prefix that must be stripped first.
    state_dict = load_finetuner_checkpoint(args.finetuner_checkpoint_path)
    encoder.fc.load_state_dict(
        OrderedDict({k.replace("0.", ""): v for k, v in state_dict.items()})
    )
    # A second copy exports representations only (classifier head removed).
    encoder_export = deepcopy(encoder)
    # set last fully connected layer to an identity function:
    encoder_export.fc = Identity()
    batch_size = 1
    channels = 1
    audio_length = 59049
    # Dummy input used solely to trace the graph for export.
    test_input = torch.randn(batch_size, 1, audio_length)
    convert_encoder_to_onnx(encoder, test_input, "clmr_sample-cnn.onnx")
    convert_encoder_to_onnx(
        encoder_export, test_input, "clmr_encoder_only_sample-cnn.onnx"
    )
| 2,321 | 28.025 | 87 | py |
CLMR | CLMR-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = "clmr"
DESCRIPTION = "Contrastive Learning of Musical Representations"
URL = "https://github.com/spijkervet/CLMR"
EMAIL = "janne.spijkervet@gmail.com"
AUTHOR = "Janne Spijkervet"
REQUIRES_PYTHON = ">=3.6.0"
VERSION = "0.1.0"
# What packages are required for this module to be executed?
REQUIRED = [
    "torch==1.9.0",
    "torchaudio",
    "simclr",
    "torchaudio-augmentations",
    "pytorch-lightning",
    "soundfile",
    # NOTE: "sklearn" is a deprecated dummy package on PyPI; the actual
    # distribution name for scikit-learn is "scikit-learn".
    "scikit-learn",
    "matplotlib",
]
# What packages are optional?
EXTRAS = {
    # 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
        long_description = "\n" + f.read()
except FileNotFoundError:
    # Fall back to the short description when no README is shipped.
    long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
    project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
    with open(os.path.join(here, project_slug, "__version__.py")) as f:
        exec(f.read(), about)
else:
    about["__version__"] = VERSION
class UploadCommand(Command):
    """Support setup.py upload."""
    description = "Build and publish the package."
    user_options = []  # this command takes no command-line options
    @staticmethod
    def status(s):
        """Prints things in bold."""
        print("\033[1m{0}\033[0m".format(s))
    def initialize_options(self):
        # Required by the setuptools Command interface; nothing to initialize.
        pass
    def finalize_options(self):
        # Required by the setuptools Command interface; nothing to finalize.
        pass
    def run(self):
        """Clean, build, upload to PyPI via twine, then push a git tag."""
        # Remove stale build artifacts so they are not re-uploaded.
        try:
            self.status("Removing previous builds…")
            rmtree(os.path.join(here, "dist"))
        except OSError:
            # No previous dist/ directory — nothing to clean.
            pass
        self.status("Building Source and Wheel (universal) distribution…")
        os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))
        self.status("Uploading the package to PyPI via Twine…")
        os.system("twine upload dist/*")
        # Tag the release with the package version and publish the tag.
        self.status("Pushing git tags…")
        os.system("git tag v{0}".format(about["__version__"]))
        os.system("git push --tags")
        sys.exit()
# Where the magic happens:
setup(
    name=NAME,
    version=about["__version__"],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type="text/markdown",
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    # Ship every package except the test tree.
    packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
    # If your package is a single module, use this instead of 'packages':
    # py_modules=['mypackage'],
    # entry_points={
    #     'console_scripts': ['mycli=mymodule:cli'],
    # },
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    license="MIT",
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
    ],
    # $ setup.py publish support.
    cmdclass={
        "upload": UploadCommand,
    },
)
| 3,919 | 27.405797 | 86 | py |
CLMR | CLMR-master/linear_evaluation.py | import os
import argparse
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from torchaudio_augmentations import Compose, RandomResizedCrop
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
from clmr.datasets import get_dataset
from clmr.data import ContrastiveDataset
from clmr.evaluation import evaluate
from clmr.models import SampleCNN
from clmr.modules import ContrastiveLearning, LinearEvaluation
from clmr.utils import (
yaml_config_hook,
load_encoder_checkpoint,
load_finetuner_checkpoint,
)
if __name__ == "__main__":
    # Linear evaluation: freeze a pre-trained CLMR encoder, extract
    # representations once, then train/evaluate a linear head on them.
    parser = argparse.ArgumentParser(description="SimCLR")
    parser = Trainer.add_argparse_args(parser)
    # Every key in the YAML config becomes an overridable CLI flag.
    config = yaml_config_hook("./config/config.yaml")
    for k, v in config.items():
        parser.add_argument(f"--{k}", default=v, type=type(v))
    args = parser.parse_args()
    pl.seed_everything(args.seed)
    args.accelerator = None
    if not os.path.exists(args.checkpoint_path):
        raise FileNotFoundError("That checkpoint does not exist")
    # Evaluation only crops to the training input length; no other augmentation.
    train_transform = [RandomResizedCrop(n_samples=args.audio_length)]
    # ------------
    # dataloaders
    # ------------
    train_dataset = get_dataset(args.dataset, args.dataset_dir, subset="train")
    valid_dataset = get_dataset(args.dataset, args.dataset_dir, subset="valid")
    test_dataset = get_dataset(args.dataset, args.dataset_dir, subset="test")
    contrastive_train_dataset = ContrastiveDataset(
        train_dataset,
        input_shape=(1, args.audio_length),
        transform=Compose(train_transform),
    )
    contrastive_valid_dataset = ContrastiveDataset(
        valid_dataset,
        input_shape=(1, args.audio_length),
        transform=Compose(train_transform),
    )
    contrastive_test_dataset = ContrastiveDataset(
        test_dataset,
        input_shape=(1, args.audio_length),
        transform=None,
    )
    train_loader = DataLoader(
        contrastive_train_dataset,
        batch_size=args.finetuner_batch_size,
        num_workers=args.workers,
        shuffle=True,
    )
    valid_loader = DataLoader(
        contrastive_valid_dataset,
        batch_size=args.finetuner_batch_size,
        num_workers=args.workers,
        shuffle=False,
    )
    test_loader = DataLoader(
        contrastive_test_dataset,
        batch_size=args.finetuner_batch_size,
        num_workers=args.workers,
        shuffle=False,
    )
    # ------------
    # encoder
    # ------------
    encoder = SampleCNN(
        strides=[3, 3, 3, 3, 3, 3, 3, 3, 3],
        supervised=args.supervised,
        out_dim=train_dataset.n_classes,
    )
    n_features = encoder.fc.in_features  # get dimensions of last fully-connected layer
    state_dict = load_encoder_checkpoint(args.checkpoint_path, train_dataset.n_classes)
    encoder.load_state_dict(state_dict)
    # Freeze the pre-trained encoder; only the linear head is trained below.
    cl = ContrastiveLearning(args, encoder)
    cl.eval()
    cl.freeze()
    module = LinearEvaluation(
        args,
        cl.encoder,
        hidden_dim=n_features,
        output_dim=train_dataset.n_classes,
    )
    # Pre-compute the frozen representations so the linear head trains on
    # cached features rather than re-running the encoder each epoch.
    train_representations_dataset = module.extract_representations(train_loader)
    train_loader = DataLoader(
        train_representations_dataset,
        batch_size=args.batch_size,
        num_workers=args.workers,
        shuffle=True,
    )
    valid_representations_dataset = module.extract_representations(valid_loader)
    valid_loader = DataLoader(
        valid_representations_dataset,
        batch_size=args.batch_size,
        num_workers=args.workers,
        shuffle=False,
    )
    if args.finetuner_checkpoint_path:
        # Reuse an already fine-tuned linear head instead of training one.
        state_dict = load_finetuner_checkpoint(args.finetuner_checkpoint_path)
        module.model.load_state_dict(state_dict)
    else:
        early_stop_callback = EarlyStopping(
            monitor="Valid/loss", patience=10, verbose=False, mode="min"
        )
        trainer = Trainer.from_argparse_args(
            args,
            logger=TensorBoardLogger(
                "runs", name="CLMRv2-eval-{}".format(args.dataset)
            ),
            max_epochs=args.finetuner_max_epochs,
            callbacks=[early_stop_callback],
        )
        trainer.fit(module, train_loader, valid_loader)
    # Final evaluation of encoder + linear head on the test split.
    device = "cuda:0" if args.gpus else "cpu"
    results = evaluate(
        module.encoder,
        module.model,
        contrastive_test_dataset,
        args.dataset,
        args.audio_length,
        device=device,
    )
    print(results)
| 4,555 | 28.393548 | 87 | py |
CLMR | CLMR-master/tests/test_spectogram.py | import unittest
import torchaudio
import torch.nn as nn
from torchaudio_augmentations import *
from clmr.datasets import AUDIO
class TestAudioSet(unittest.TestCase):
    """Checks that a mel-spectrogram can be computed from an augmented clip."""
    # all bundled fixtures are treated as 16 kHz audio
    sample_rate = 16000
    def get_audio_transforms(self, num_samples):
        """Build the stochastic waveform-augmentation chain used in training."""
        transform = Compose(
            [
                RandomResizedCrop(n_samples=num_samples),
                RandomApply([PolarityInversion()], p=0.8),
                RandomApply([Noise(min_snr=0.3, max_snr=0.5)], p=0.3),
                RandomApply([Gain()], p=0.2),
                RandomApply([Delay(sample_rate=self.sample_rate)], p=0.5),
                RandomApply(
                    [PitchShift(n_samples=num_samples, sample_rate=self.sample_rate)],
                    p=0.4,
                ),
                RandomApply([Reverb(sample_rate=self.sample_rate)], p=0.3),
            ]
        )
        return transform
    def test_audioset(self):
        """Augment the bundled clip and verify the resulting spectrogram shape."""
        audio_dataset = AUDIO("tests/data/audioset")
        audio, label = audio_dataset[0]
        # mel-spectrogram parameters
        sample_rate = 22050
        n_fft = 1024
        n_mels = 128
        stype = "magnitude"  # magnitude
        top_db = None  # f_max
        # Crop to one second's worth of samples before computing the spectrogram.
        transform = self.get_audio_transforms(num_samples=sample_rate)
        spec_transform = nn.Sequential(
            torchaudio.transforms.MelSpectrogram(
                sample_rate=sample_rate,
                n_fft=n_fft,
                n_mels=n_mels,
            ),
            torchaudio.transforms.AmplitudeToDB(stype=stype, top_db=top_db),
        )
        audio = transform(audio)
        audio = spec_transform(audio)
        # Expect 128 mel bins over 44 frames for the cropped input.
        assert audio.shape[1] == 128
        assert audio.shape[2] == 44
| 1,659 | 29.740741 | 86 | py |
CLMR | CLMR-master/tests/test_audioset.py | import unittest
import torchaudio
from torchaudio_augmentations import (
Compose,
RandomApply,
RandomResizedCrop,
PolarityInversion,
Noise,
Gain,
Delay,
PitchShift,
Reverb,
)
from clmr.datasets import AUDIO
class TestAudioSet(unittest.TestCase):
    """Smoke test for the AUDIO dataset loader and the augmentation chain."""
    # all bundled fixtures are treated as 16 kHz audio
    sample_rate = 16000
    def get_audio_transforms(self, num_samples):
        """Build the stochastic waveform-augmentation pipeline."""
        augmentations = [
            RandomResizedCrop(n_samples=num_samples),
            RandomApply([PolarityInversion()], p=0.8),
            RandomApply([Noise(min_snr=0.3, max_snr=0.5)], p=0.3),
            RandomApply([Gain()], p=0.2),
            RandomApply([Delay(sample_rate=self.sample_rate)], p=0.5),
            RandomApply(
                [PitchShift(n_samples=num_samples, sample_rate=self.sample_rate)],
                p=0.4,
            ),
            RandomApply([Reverb(sample_rate=self.sample_rate)], p=0.3),
        ]
        return Compose(augmentations)
    def test_audioset(self):
        """Load the bundled clip, augment it, and write the result to disk."""
        dataset = AUDIO("./tests/data/audioset")
        waveform, label = dataset[0]
        # mono clip of known length
        assert waveform.shape[0] == 1
        assert waveform.shape[1] == 93680
        crop_length = self.sample_rate * 5  # the test item is approximately 5.8 seconds.
        pipeline = self.get_audio_transforms(num_samples=crop_length)
        augmented = pipeline(waveform)
        torchaudio.save("augmented_sample.wav", augmented, sample_rate=self.sample_rate)
| 1,500 | 29.632653 | 86 | py |
CLMR | CLMR-master/clmr/data.py | """Wrapper for Torch Dataset class to enable contrastive training
"""
import torch
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio_augmentations import Compose
from typing import Tuple, List
class ContrastiveDataset(Dataset):
    """Dataset wrapper that applies a (pair-producing) transform for contrastive training."""

    def __init__(self, dataset: Dataset, input_shape: List[int], transform: Compose):
        self.dataset = dataset
        self.transform = transform
        self.input_shape = input_shape
        # Indices of items discovered to be shorter than input_shape[1].
        self.ignore_idx = []

    def __getitem__(self, idx) -> Tuple[Tensor, Tensor]:
        # Walk forward past known-bad indices and clips that are too short.
        while True:
            if idx in self.ignore_idx:
                idx += 1
                continue
            audio, label = self.dataset[idx]
            if audio.shape[1] < self.input_shape[1]:
                self.ignore_idx.append(idx)
                idx += 1
                continue
            break
        if self.transform:
            audio = self.transform(audio)
        return audio, label

    def __len__(self) -> int:
        return len(self.dataset)

    def concat_clip(self, n: int, audio_length: float) -> Tensor:
        """Split track *n* into consecutive chunks of ``audio_length`` samples.

        The trailing partial chunk is dropped; chunks are stacked as a batch
        with a singleton channel dimension.
        """
        audio, _ = self.dataset[n]
        chunks = torch.split(audio, audio_length, dim=1)
        batch = torch.cat(chunks[:-1]).unsqueeze(dim=1)
        if self.transform:
            batch = self.transform(batch)
        return batch
| 1,258 | 27.613636 | 85 | py |
CLMR | CLMR-master/clmr/evaluation.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from tqdm import tqdm
from sklearn import metrics
def evaluate(
    encoder: nn.Module,
    finetuned_head: nn.Module,
    test_dataset: Dataset,
    dataset_name: str,
    audio_length: int,
    device,
) -> dict:
    """Evaluate an encoder (plus optional fine-tuned head) on a test set.

    Track-level predictions are obtained by averaging the clip-level outputs
    of each track. Returns ``{"PR-AUC", "ROC-AUC"}`` for the multi-label
    datasets (magnatagatune, msd) and ``{"Accuracy"}`` otherwise.
    """
    encoder = encoder.to(device)
    encoder.eval()
    if finetuned_head is not None:
        finetuned_head = finetuned_head.to(device)
        finetuned_head.eval()

    predictions = []
    ground_truth = []
    with torch.no_grad():
        for idx in tqdm(range(len(test_dataset))):
            _, label = test_dataset[idx]
            batch = test_dataset.concat_clip(idx, audio_length).to(device)
            output = encoder(batch)
            if finetuned_head is not None:
                output = finetuned_head(output)
            # we always return logits, so we need a sigmoid here for multi-label classification
            if dataset_name in ["magnatagatune", "msd"]:
                output = torch.sigmoid(output)
            else:
                output = F.softmax(output, dim=1)
            # Average clip-level outputs into one track-level prediction.
            predictions.append(output.mean(dim=0))
            ground_truth.append(label)

    if dataset_name in ["magnatagatune", "msd"]:
        est = torch.stack(predictions, dim=0).cpu().numpy()
        gt = torch.stack(ground_truth, dim=0).cpu().numpy()
        return {
            "PR-AUC": metrics.average_precision_score(gt, est, average="macro"),
            "ROC-AUC": metrics.roc_auc_score(gt, est, average="macro"),
        }
    est = torch.stack(predictions, dim=0)
    _, est = torch.max(est, 1)  # extract the predicted labels here.
    return {"Accuracy": metrics.accuracy_score(ground_truth, est)}
| 1,921 | 30.508197 | 95 | py |
CLMR | CLMR-master/clmr/modules/callbacks.py | import matplotlib
import matplotlib.pyplot as plt
matplotlib.use("Agg")
from pytorch_lightning.callbacks import Callback
class PlotSpectogramCallback(Callback):
    """Logs a spectrogram figure to the experiment logger when training starts.

    Only active for spectrogram (non-time-domain) inputs.
    """

    def on_train_start(self, trainer, pl_module):
        if pl_module.hparams.time_domain:
            return
        x, y = trainer.train_dataloader.dataset[0]
        fig = plt.figure()
        # First channel on the left; second channel (if any) on the right.
        fig.add_subplot(1, 2, 1)
        plt.imshow(x[0, :])
        if x.shape[0] > 1:
            fig.add_subplot(1, 2, 2)
            plt.imshow(x[1, :])
        trainer.logger.experiment.add_figure(
            "Train/spectogram_sample", fig, global_step=0
        )
        plt.close()
| 725 | 24.928571 | 61 | py |
CLMR | CLMR-master/clmr/modules/linear_evaluation.py | import torch
import torch.nn as nn
import torchmetrics
from copy import deepcopy
from pytorch_lightning import LightningModule
from torch import Tensor
from torch.utils.data import DataLoader, Dataset, TensorDataset
from typing import Tuple
from tqdm import tqdm
class LinearEvaluation(LightningModule):
    """Linear (or shallow-MLP) probe trained on top of a frozen encoder.

    The encoder is only used to extract representations (under ``no_grad``);
    gradients flow through ``self.model`` (the probe head) alone.
    """
    def __init__(self, args, encoder: nn.Module, hidden_dim: int, output_dim: int):
        super().__init__()
        self.save_hyperparameters(args)
        self.encoder = encoder
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        # Probe head: optional one-hidden-layer MLP, otherwise a single linear layer.
        if self.hparams.finetuner_mlp:
            self.model = nn.Sequential(
                nn.Linear(self.hidden_dim, self.hidden_dim),
                nn.ReLU(),
                nn.Linear(self.hidden_dim, self.output_dim),
            )
        else:
            self.model = nn.Sequential(nn.Linear(self.hidden_dim, self.output_dim))
        self.criterion = self.configure_criterion()
        self.accuracy = torchmetrics.Accuracy()
        self.average_precision = torchmetrics.AveragePrecision(pos_label=1)
    def forward(self, x: Tensor, y: Tensor) -> Tuple[Tensor, Tensor]:
        """Return ``(loss, logits)`` for a batch."""
        preds = self._forward_representations(x, y)
        loss = self.criterion(preds, y)
        return loss, preds
    def _forward_representations(self, x: Tensor, y: Tensor) -> Tensor:
        """
        Perform a forward pass using either precomputed representations, or raw
        input data from which the representations still need to be extracted
        with the (frozen) encoder.
        """
        # Heuristic: if the last dimension already equals the representation
        # size, `x` is assumed to be a precomputed representation.
        if x.shape[-1] == self.hidden_dim:
            h0 = x
        else:
            with torch.no_grad():
                h0 = self.encoder(x)
        return self.model(h0)
    def training_step(self, batch, _) -> Tensor:
        x, y = batch
        loss, preds = self.forward(x, y)
        self.log("Train/accuracy", self.accuracy(preds, y))
        # self.log("Train/pr_auc", self.average_precision(preds, y))
        self.log("Train/loss", loss)
        return loss
    def validation_step(self, batch, _) -> Tensor:
        x, y = batch
        loss, preds = self.forward(x, y)
        self.log("Valid/accuracy", self.accuracy(preds, y))
        # self.log("Valid/pr_auc", self.average_precision(preds, y))
        self.log("Valid/loss", loss)
        return loss
    def configure_criterion(self) -> nn.Module:
        # Multi-label tagging datasets use BCE-with-logits; single-label use CE.
        if self.hparams.dataset in ["magnatagatune", "msd"]:
            criterion = nn.BCEWithLogitsLoss()
        else:
            criterion = nn.CrossEntropyLoss()
        return criterion
    def configure_optimizers(self) -> dict:
        optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=self.hparams.finetuner_learning_rate,
            weight_decay=self.hparams.weight_decay,
        )
        # Reduce LR 10x once the validation loss plateaus for 5 epochs.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            mode="min",
            factor=0.1,
            patience=5,
            threshold=0.0001,
            threshold_mode="rel",
            cooldown=0,
            min_lr=0,
            eps=1e-08,
            verbose=False,
        )
        if scheduler:
            return {
                "optimizer": optimizer,
                "lr_scheduler": scheduler,
                "monitor": "Valid/loss",
            }
        else:
            return {"optimizer": optimizer}
    def extract_representations(self, dataloader: DataLoader) -> Dataset:
        """Run the encoder over a dataloader and return a TensorDataset of (representation, label) pairs."""
        representations = []
        ys = []
        for x, y in tqdm(dataloader):
            with torch.no_grad():
                h0 = self.encoder(x)
            representations.append(h0)
            ys.append(y)
        if len(representations) > 1:
            representations = torch.cat(representations, dim=0)
            ys = torch.cat(ys, dim=0)
        else:
            representations = representations[0]
            ys = ys[0]
        tensor_dataset = TensorDataset(representations, ys)
        return tensor_dataset
| 3,975 | 31.590164 | 98 | py |
CLMR | CLMR-master/clmr/modules/supervised_learning.py | import torch
import torchmetrics
import torch.nn as nn
from pytorch_lightning import LightningModule
class SupervisedLearning(LightningModule):
    """Fully supervised baseline: trains the encoder end-to-end for classification.

    Args:
        args: hyperparameter namespace (saved via ``save_hyperparameters``);
            must provide ``dataset`` and ``learning_rate``.
        encoder: backbone network ending in a ``fc`` linear head.
        output_dim: number of target classes/tags.
    """

    def __init__(self, args, encoder: nn.Module, output_dim: int):
        super().__init__()
        self.save_hyperparameters(args)
        self.encoder = encoder
        # Bug fix: the original assigned `self.encoder.fc.out_features = output_dim`,
        # which only mutates the attribute -- the Linear layer's weight matrix
        # keeps its old shape, so the model would still emit the old number of
        # classes. Replacing the head with a fresh Linear actually resizes it.
        self.encoder.fc = nn.Linear(self.encoder.fc.in_features, output_dim)
        self.output_dim = output_dim
        self.model = self.encoder
        self.criterion = self.configure_criterion()
        self.average_precision = torchmetrics.AveragePrecision(pos_label=1)

    def forward(self, x, y):
        """Return ``(loss, logits)`` for a batch."""
        x = x[:, 0, :]  # we only have 1 sample, no augmentations
        preds = self.model(x)
        loss = self.criterion(preds, y)
        return loss, preds

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss, preds = self.forward(x, y)
        self.log("Train/pr_auc", self.average_precision(preds, y))
        self.log("Train/loss", loss)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        loss, preds = self.forward(x, y)
        self.log("Valid/pr_auc", self.average_precision(preds, y))
        self.log("Valid/loss", loss)
        return loss

    def configure_criterion(self):
        # Multi-label tagging uses BCE-with-logits; single-label uses CE.
        if self.hparams.dataset in ["magnatagatune"]:
            criterion = nn.BCEWithLogitsLoss()
        else:
            criterion = nn.CrossEntropyLoss()
        return criterion

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.model.parameters(),
            lr=self.hparams.learning_rate,
            momentum=0.9,
            weight_decay=1e-6,
            nesterov=True,
        )
        # Reduce LR 5x once the validation loss plateaus for 5 epochs.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, mode="min", factor=0.2, patience=5, verbose=True
        )
        if scheduler:
            return {
                "optimizer": optimizer,
                "lr_scheduler": scheduler,
                "monitor": "Valid/loss",
            }
        else:
            return {"optimizer": optimizer}
| 2,082 | 29.632353 | 75 | py |
CLMR | CLMR-master/clmr/modules/contrastive_learning.py | import torch
import torch.nn as nn
from pytorch_lightning import LightningModule
from torch import Tensor
from simclr import SimCLR
from simclr.modules import NT_Xent, LARS
class ContrastiveLearning(LightningModule):
    """SimCLR-style contrastive pre-training module (NT-Xent loss over two augmented views)."""
    def __init__(self, args, encoder: nn.Module):
        super().__init__()
        self.save_hyperparameters(args)
        self.encoder = encoder
        self.n_features = (
            self.encoder.fc.in_features
        )  # get dimensions of last fully-connected layer
        self.model = SimCLR(self.encoder, self.hparams.projection_dim, self.n_features)
        self.criterion = self.configure_criterion()
    def forward(self, x_i: Tensor, x_j: Tensor) -> Tensor:
        """Project both views and return the NT-Xent loss between them."""
        _, _, z_i, z_j = self.model(x_i, x_j)
        loss = self.criterion(z_i, z_j)
        return loss
    def training_step(self, batch, _) -> Tensor:
        x, _ = batch
        # The dataloader stacks the two augmented views along dim 1.
        x_i = x[:, 0, :]
        x_j = x[:, 1, :]
        loss = self.forward(x_i, x_j)
        self.log("Train/loss", loss)
        return loss
    def configure_criterion(self) -> nn.Module:
        # PT lightning aggregates differently in DP mode
        if self.hparams.accelerator == "dp" and self.hparams.gpus:
            batch_size = int(self.hparams.batch_size / self.hparams.gpus)
        else:
            batch_size = self.hparams.batch_size
        criterion = NT_Xent(batch_size, self.hparams.temperature, world_size=1)
        return criterion
    def configure_optimizers(self) -> dict:
        scheduler = None
        if self.hparams.optimizer == "Adam":
            optimizer = torch.optim.Adam(self.model.parameters(), lr=3e-4)
        elif self.hparams.optimizer == "LARS":
            # optimized using LARS with linear learning rate scaling
            # (i.e. LearningRate = 0.3 × BatchSize/256) and weight decay of 10−6.
            learning_rate = 0.3 * self.hparams.batch_size / 256
            optimizer = LARS(
                self.model.parameters(),
                lr=learning_rate,
                weight_decay=self.hparams.weight_decay,
                exclude_from_weight_decay=["batch_normalization", "bias"],
            )
            # "decay the learning rate with the cosine decay schedule without restarts"
            scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                optimizer, self.hparams.max_epochs, eta_min=0, last_epoch=-1
            )
        else:
            raise NotImplementedError
        if scheduler:
            return {"optimizer": optimizer, "lr_scheduler": scheduler}
        else:
            return {"optimizer": optimizer}
| 2,587 | 35.450704 | 87 | py |
CLMR | CLMR-master/clmr/models/sample_cnn.py | import torch
import torch.nn as nn
from .model import Model
class SampleCNN(Model):
    """Sample-level CNN operating directly on raw waveforms.

    Args:
        strides: per-block pooling/stride factors (must match ``hidden``).
        supervised: if True, adds dropout before the final linear head.
        out_dim: output dimensionality of the final linear layer.
    """

    def __init__(self, strides, supervised, out_dim):
        super(SampleCNN, self).__init__()
        self.strides = strides
        self.supervised = supervised
        # Stem block: 1 input channel -> 128 channels, stride-3 convolution.
        blocks = [
            nn.Sequential(
                nn.Conv1d(1, 128, kernel_size=3, stride=3, padding=0),
                nn.BatchNorm1d(128),
                nn.ReLU(),
            )
        ]
        self.hidden = [
            [128, 128],
            [128, 128],
            [128, 256],
            [256, 256],
            [256, 256],
            [256, 256],
            [256, 256],
            [256, 256],
            [256, 512],
        ]
        assert len(self.hidden) == len(
            self.strides
        ), "Number of hidden layers and strides are not equal"
        # One conv/BN/ReLU/max-pool block per (stride, channel-pair) entry.
        for pool, (c_in, c_out) in zip(self.strides, self.hidden):
            blocks.append(
                nn.Sequential(
                    nn.Conv1d(c_in, c_out, kernel_size=pool, stride=1, padding=1),
                    nn.BatchNorm1d(c_out),
                    nn.ReLU(),
                    nn.MaxPool1d(pool, stride=pool),
                )
            )
        # 1 x 512
        blocks.append(
            nn.Sequential(
                nn.Conv1d(512, 512, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm1d(512),
                nn.ReLU(),
            )
        )
        self.sequential = nn.Sequential(*blocks)
        if self.supervised:
            self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(512, out_dim)

    def forward(self, x):
        hidden = self.sequential(x)
        if self.supervised:
            hidden = self.dropout(hidden)
        # Flatten (channels, time) into a single feature vector per example.
        flat = hidden.reshape(x.shape[0], hidden.size(1) * hidden.size(2))
        return self.fc(flat)
| 1,881 | 26.676471 | 84 | py |
CLMR | CLMR-master/clmr/models/sample_cnn_xl.py | import torch
import torch.nn as nn
from .model import Model
class SampleCNNXL(Model):
    """Extra-large SampleCNN variant: channel widths grow up to 2048.

    Args:
        strides: per-block pooling/stride factors (must match ``hidden``).
        supervised: if True, adds dropout before the final linear head.
        out_dim: output dimensionality of the final linear layer.
    """

    def __init__(self, strides, supervised, out_dim):
        # Bug fix: the original called ``super(SampleCNN, self).__init__()``,
        # naming the wrong class (``SampleCNN`` is not defined in this module,
        # so construction raised NameError). It must reference SampleCNNXL.
        super(SampleCNNXL, self).__init__()
        self.strides = strides
        self.supervised = supervised
        # Stem block: 1 input channel -> 128 channels, stride-3 convolution.
        self.sequential = [
            nn.Sequential(
                nn.Conv1d(1, 128, kernel_size=3, stride=3, padding=0),
                nn.BatchNorm1d(128),
                nn.ReLU(),
            )
        ]
        self.hidden = [
            [128, 128],
            [128, 128],
            [128, 256],
            [256, 256],
            [256, 512],
            [512, 512],
            [512, 1024],
            [1024, 1024],
            [1024, 2048],
        ]
        assert len(self.hidden) == len(
            self.strides
        ), "Number of hidden layers and strides are not equal"
        # One conv/BN/ReLU/max-pool block per (stride, channel-pair) entry.
        for stride, (h_in, h_out) in zip(self.strides, self.hidden):
            self.sequential.append(
                nn.Sequential(
                    nn.Conv1d(h_in, h_out, kernel_size=stride, stride=1, padding=1),
                    nn.BatchNorm1d(h_out),
                    nn.ReLU(),
                    nn.MaxPool1d(stride, stride=stride),
                )
            )
        # 1 x 2048 output block.
        self.sequential.append(
            nn.Sequential(
                nn.Conv1d(2048, 2048, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm1d(2048),
                nn.ReLU(),
            )
        )
        self.sequential = nn.Sequential(*self.sequential)
        if self.supervised:
            self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(2048, out_dim)

    def forward(self, x):
        out = self.sequential(x)
        if self.supervised:
            out = self.dropout(out)
        # Flatten (channels, time) into a single feature vector per example.
        out = out.reshape(x.shape[0], out.size(1) * out.size(2))
        logit = self.fc(out)
        return logit
| 1,892 | 26.838235 | 84 | py |
CLMR | CLMR-master/clmr/models/shortchunk_cnn.py | import torch.nn as nn
class ShortChunkCNN_Res(nn.Module):
    """
    Short-chunk CNN architecture with residual connections.
    """

    def __init__(self, n_channels=128, n_classes=50):
        super(ShortChunkCNN_Res, self).__init__()
        self.spec_bn = nn.BatchNorm2d(1)
        # CNN backbone: seven stride-2 residual blocks.
        self.layer1 = Res_2d(1, n_channels, stride=2)
        self.layer2 = Res_2d(n_channels, n_channels, stride=2)
        self.layer3 = Res_2d(n_channels, n_channels * 2, stride=2)
        self.layer4 = Res_2d(n_channels * 2, n_channels * 2, stride=2)
        self.layer5 = Res_2d(n_channels * 2, n_channels * 2, stride=2)
        self.layer6 = Res_2d(n_channels * 2, n_channels * 2, stride=2)
        self.layer7 = Res_2d(n_channels * 2, n_channels * 4, stride=2)
        # Classification head.
        self.dense1 = nn.Linear(n_channels * 4, n_channels * 4)
        self.bn = nn.BatchNorm1d(n_channels * 4)
        self.fc = nn.Linear(n_channels * 4, n_classes)
        self.dropout = nn.Dropout(0.5)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.spec_bn(x)
        # Residual CNN stack.
        for block in (
            self.layer1,
            self.layer2,
            self.layer3,
            self.layer4,
            self.layer5,
            self.layer6,
            self.layer7,
        ):
            x = block(x)
        x = x.squeeze(2)
        # Global max pooling over the remaining time axis.
        if x.size(-1) != 1:
            x = nn.MaxPool1d(x.size(-1))(x)
        x = x.squeeze(2)
        # Dense head: linear -> BN -> ReLU -> dropout -> logits.
        x = self.relu(self.bn(self.dense1(x)))
        x = self.fc(self.dropout(x))
        return x
class Res_2d(nn.Module):
    """2-D residual block: conv-BN-ReLU-conv-BN plus a (possibly projected) skip connection."""

    def __init__(self, input_channels, output_channels, shape=3, stride=2):
        super(Res_2d, self).__init__()
        # Main path.
        self.conv_1 = nn.Conv2d(
            input_channels, output_channels, shape, stride=stride, padding=shape // 2
        )
        self.bn_1 = nn.BatchNorm2d(output_channels)
        self.conv_2 = nn.Conv2d(
            output_channels, output_channels, shape, padding=shape // 2
        )
        self.bn_2 = nn.BatchNorm2d(output_channels)
        # The skip path needs a projection when shape or channel count changes.
        self.diff = (stride != 1) or (input_channels != output_channels)
        if self.diff:
            self.conv_3 = nn.Conv2d(
                input_channels,
                output_channels,
                shape,
                stride=stride,
                padding=shape // 2,
            )
            self.bn_3 = nn.BatchNorm2d(output_channels)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Main path.
        out = self.conv_1(x)
        out = self.relu(self.bn_1(out))
        out = self.bn_2(self.conv_2(out))
        # Skip path (projected when dimensions differ).
        shortcut = self.bn_3(self.conv_3(x)) if self.diff else x
        return self.relu(shortcut + out)
| 2,845 | 28.340206 | 85 | py |
CLMR | CLMR-master/clmr/models/model.py | import torch.nn as nn
import numpy as np
class Model(nn.Module):
    """Base class providing a Kaiming weight-initialization helper for conv layers."""

    def __init__(self):
        super(Model, self).__init__()

    def initialize(self, m):
        """Apply He (fan-in) initialization to Conv1d weights; other module types are untouched."""
        if isinstance(m, (nn.Conv1d)):
            nn.init.kaiming_uniform_(m.weight, mode="fan_in", nonlinearity="relu")
class Identity(nn.Module):
    """No-op module: returns its input unchanged (useful for stripping heads off encoders)."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
| 555 | 22.166667 | 82 | py |
CLMR | CLMR-master/clmr/models/sinc_net.py | import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
import sys
from torch.autograd import Variable
import math
def flip(x, dim):
    """Reverse tensor ``x`` along dimension ``dim`` (negative dims allowed).

    The original implementation reshaped the tensor and index-selected with a
    reversed ``arange`` (with a manual CPU/CUDA dispatch). ``torch.flip`` does
    exactly this, on any device, in one call.
    """
    return torch.flip(x, dims=[dim])
def sinc(band, t_right):
    """Build a symmetric sinc kernel of length ``2 * len(t_right) + 1``.

    The right half is ``sin(2*pi*band*t) / (2*pi*band*t)``, the left half is
    its mirror image, and the centre sample is 1.

    Fix: the original wrapped the centre sample in ``Variable(...).cuda()``,
    which crashed on CPU-only machines (and ``Variable`` is deprecated). The
    centre sample is now created on ``t_right``'s device/dtype, so behavior is
    unchanged on CUDA and the function additionally works on CPU.
    """
    y_right = torch.sin(2 * math.pi * band * t_right) / (2 * math.pi * band * t_right)
    y_left = torch.flip(y_right, [0])
    centre = torch.ones(1, device=t_right.device, dtype=y_right.dtype)
    return torch.cat([y_left, centre, y_right])
class SincConv_fast(nn.Module):
    """Sinc-based convolution
    Parameters
    ----------
    in_channels : `int`
        Number of input channels. Must be 1.
    out_channels : `int`
        Number of filters.
    kernel_size : `int`
        Filter length.
    sample_rate : `int`, optional
        Sample rate. Defaults to 16000.
    Usage
    -----
    See `torch.nn.Conv1d`
    Reference
    ---------
    Mirco Ravanelli, Yoshua Bengio,
    "Speaker Recognition from raw waveform with SincNet".
    https://arxiv.org/abs/1808.00158
    """
    @staticmethod
    def to_mel(hz):
        # Hz -> mel (O'Shaughnessy formula).
        return 2595 * np.log10(1 + hz / 700)
    @staticmethod
    def to_hz(mel):
        # mel -> Hz (inverse of to_mel).
        return 700 * (10 ** (mel / 2595) - 1)
    def __init__(
        self,
        out_channels,
        kernel_size,
        sample_rate=16000,
        in_channels=1,
        stride=1,
        padding=0,
        dilation=1,
        bias=False,
        groups=1,
        min_low_hz=50,
        min_band_hz=50,
    ):
        super(SincConv_fast, self).__init__()
        if in_channels != 1:
            # msg = (f'SincConv only support one input channel '
            #        f'(here, in_channels = {in_channels:d}).')
            msg = (
                "SincConv only support one input channel (here, in_channels = {%i})"
                % (in_channels)
            )
            raise ValueError(msg)
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # Forcing the filters to be odd (i.e, perfectly symmetrics)
        if kernel_size % 2 == 0:
            self.kernel_size = self.kernel_size + 1
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        if bias:
            raise ValueError("SincConv does not support bias.")
        if groups > 1:
            raise ValueError("SincConv does not support groups.")
        self.sample_rate = sample_rate
        self.min_low_hz = min_low_hz
        self.min_band_hz = min_band_hz
        # initialize filterbanks such that they are equally spaced in Mel scale
        low_hz = 30
        high_hz = self.sample_rate / 2 - (self.min_low_hz + self.min_band_hz)
        mel = np.linspace(
            self.to_mel(low_hz), self.to_mel(high_hz), self.out_channels + 1
        )
        hz = self.to_hz(mel)
        # The band edges are the ONLY learnable parameters of this layer.
        # filter lower frequency (out_channels, 1)
        self.low_hz_ = nn.Parameter(torch.Tensor(hz[:-1]).view(-1, 1))
        # filter frequency band (out_channels, 1)
        self.band_hz_ = nn.Parameter(torch.Tensor(np.diff(hz)).view(-1, 1))
        # Hamming window
        # self.window_ = torch.hamming_window(self.kernel_size)
        n_lin = torch.linspace(
            0, (self.kernel_size / 2) - 1, steps=int((self.kernel_size / 2))
        )  # computing only half of the window
        self.window_ = 0.54 - 0.46 * torch.cos(2 * math.pi * n_lin / self.kernel_size)
        # (1, kernel_size/2)
        n = (self.kernel_size - 1) / 2.0
        self.n_ = (
            2 * math.pi * torch.arange(-n, 0).view(1, -1) / self.sample_rate
        )  # Due to symmetry, I only need half of the time axes
    def forward(self, waveforms):
        """
        Parameters
        ----------
        waveforms : `torch.Tensor` (batch_size, 1, n_samples)
            Batch of waveforms.
        Returns
        -------
        features : `torch.Tensor` (batch_size, out_channels, n_samples_out)
            Batch of sinc filters activations.
        """
        # Move the cached half-window and time axis to the input's device.
        self.n_ = self.n_.to(waveforms.device)
        self.window_ = self.window_.to(waveforms.device)
        # Clamp the learned band edges into [min_low_hz, Nyquist].
        low = self.min_low_hz + torch.abs(self.low_hz_)
        high = torch.clamp(
            low + self.min_band_hz + torch.abs(self.band_hz_),
            self.min_low_hz,
            self.sample_rate / 2,
        )
        band = (high - low)[:, 0]
        f_times_t_low = torch.matmul(low, self.n_)
        f_times_t_high = torch.matmul(high, self.n_)
        band_pass_left = (
            (torch.sin(f_times_t_high) - torch.sin(f_times_t_low)) / (self.n_ / 2)
        ) * self.window_  # Equivalent of Eq.4 of the reference paper (SPEAKER RECOGNITION FROM RAW WAVEFORM WITH SINCNET). I just have expanded the sinc and simplified the terms. This way I avoid several useless computations.
        band_pass_center = 2 * band.view(-1, 1)
        # The right half is the mirror of the left half (filters are symmetric).
        band_pass_right = torch.flip(band_pass_left, dims=[1])
        band_pass = torch.cat(
            [band_pass_left, band_pass_center, band_pass_right], dim=1
        )
        # Normalize each filter by twice its bandwidth.
        band_pass = band_pass / (2 * band[:, None])
        self.filters = (band_pass).view(self.out_channels, 1, self.kernel_size)
        return F.conv1d(
            waveforms,
            self.filters,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            bias=None,
            groups=1,
        )
class sinc_conv(nn.Module):
    """Original (slow) SincNet convolution: builds each band-pass filter in a Python loop.

    NOTE(review): ``forward`` hard-codes ``.cuda()``, so this module requires a
    CUDA device; ``SincConv_fast`` above is the vectorized replacement.
    """
    def __init__(self, N_filt, Filt_dim, fs):
        super(sinc_conv, self).__init__()
        # Mel Initialization of the filterbanks
        low_freq_mel = 80
        high_freq_mel = 2595 * np.log10(1 + (fs / 2) / 700)  # Convert Hz to Mel
        mel_points = np.linspace(
            low_freq_mel, high_freq_mel, N_filt
        )  # Equally spaced in Mel scale
        f_cos = 700 * (10 ** (mel_points / 2595) - 1)  # Convert Mel to Hz
        b1 = np.roll(f_cos, 1)
        b2 = np.roll(f_cos, -1)
        b1[0] = 30
        b2[-1] = (fs / 2) - 100
        # Learnable band edges, stored normalized by the sampling frequency.
        self.freq_scale = fs * 1.0
        self.filt_b1 = nn.Parameter(torch.from_numpy(b1 / self.freq_scale))
        self.filt_band = nn.Parameter(torch.from_numpy((b2 - b1) / self.freq_scale))
        self.N_filt = N_filt
        self.Filt_dim = Filt_dim
        self.fs = fs
    def forward(self, x):
        filters = Variable(torch.zeros((self.N_filt, self.Filt_dim))).cuda()
        N = self.Filt_dim
        t_right = Variable(
            torch.linspace(1, (N - 1) / 2, steps=int((N - 1) / 2)) / self.fs
        ).cuda()
        # Enforce a minimum low frequency and minimum bandwidth (normalized Hz).
        min_freq = 50.0
        min_band = 50.0
        filt_beg_freq = torch.abs(self.filt_b1) + min_freq / self.freq_scale
        filt_end_freq = filt_beg_freq + (
            torch.abs(self.filt_band) + min_band / self.freq_scale
        )
        n = torch.linspace(0, N, steps=N)
        # Filter window (hamming)
        window = 0.54 - 0.46 * torch.cos(2 * math.pi * n / N)
        window = Variable(window.float().cuda())
        for i in range(self.N_filt):
            # Band-pass filter = difference of two low-pass sinc filters.
            low_pass1 = (
                2
                * filt_beg_freq[i].float()
                * sinc(filt_beg_freq[i].float() * self.freq_scale, t_right)
            )
            low_pass2 = (
                2
                * filt_end_freq[i].float()
                * sinc(filt_end_freq[i].float() * self.freq_scale, t_right)
            )
            band_pass = low_pass2 - low_pass1
            band_pass = band_pass / torch.max(band_pass)
            filters[i, :] = band_pass.cuda() * window
        out = F.conv1d(x, filters.view(self.N_filt, 1, self.Filt_dim))
        return out
def act_fun(act_type):
    """Map an activation name to a freshly constructed ``torch.nn`` activation module.

    Unknown names return None. "linear" deliberately maps to ``LeakyReLU(1)``:
    initializzed like this, but not used in forward!
    """
    factories = {
        "relu": nn.ReLU,
        "tanh": nn.Tanh,
        "sigmoid": nn.Sigmoid,
        "leaky_relu": lambda: nn.LeakyReLU(0.2),
        "elu": nn.ELU,
        "softmax": lambda: nn.LogSoftmax(dim=1),
        "linear": lambda: nn.LeakyReLU(1),
    }
    factory = factories.get(act_type)
    if factory is not None:
        return factory()
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable scale and shift.

    Normalizes by ``(std + eps)`` using torch's default (unbiased) std,
    matching the original SincNet code (slightly different from nn.LayerNorm).
    """

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(features))  # learnable scale
        self.beta = nn.Parameter(torch.zeros(features))  # learnable shift
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.gamma * normalized + self.beta
class MLP(nn.Module):
    """Configurable multi-layer perceptron from SincNet.

    ``options`` is a dict of per-layer lists: layer sizes (``fc_lay``),
    dropout probabilities (``fc_drop``), layer-/batch-norm flags, and
    activation names (``fc_act``; "linear" means no activation in forward).
    """
    def __init__(self, options):
        super(MLP, self).__init__()
        self.input_dim = int(options["input_dim"])
        self.fc_lay = options["fc_lay"]
        self.fc_drop = options["fc_drop"]
        self.fc_use_batchnorm = options["fc_use_batchnorm"]
        self.fc_use_laynorm = options["fc_use_laynorm"]
        self.fc_use_laynorm_inp = options["fc_use_laynorm_inp"]
        self.fc_use_batchnorm_inp = options["fc_use_batchnorm_inp"]
        self.fc_act = options["fc_act"]
        self.wx = nn.ModuleList([])
        self.bn = nn.ModuleList([])
        self.ln = nn.ModuleList([])
        self.act = nn.ModuleList([])
        self.drop = nn.ModuleList([])
        # input layer normalization
        if self.fc_use_laynorm_inp:
            self.ln0 = LayerNorm(self.input_dim)
        # input batch normalization
        if self.fc_use_batchnorm_inp:
            self.bn0 = nn.BatchNorm1d([self.input_dim], momentum=0.05)
        self.N_fc_lay = len(self.fc_lay)
        current_input = self.input_dim
        # Initialization of hidden layers
        for i in range(self.N_fc_lay):
            # dropout
            self.drop.append(nn.Dropout(p=self.fc_drop[i]))
            # activation
            self.act.append(act_fun(self.fc_act[i]))
            add_bias = True
            # layer norm initialization
            self.ln.append(LayerNorm(self.fc_lay[i]))
            self.bn.append(nn.BatchNorm1d(self.fc_lay[i], momentum=0.05))
            # The bias is redundant when a normalization layer follows the linear map.
            if self.fc_use_laynorm[i] or self.fc_use_batchnorm[i]:
                add_bias = False
            # Linear operations
            self.wx.append(nn.Linear(current_input, self.fc_lay[i], bias=add_bias))
            # weight initialization
            self.wx[i].weight = torch.nn.Parameter(
                torch.Tensor(self.fc_lay[i], current_input).uniform_(
                    -np.sqrt(0.01 / (current_input + self.fc_lay[i])),
                    np.sqrt(0.01 / (current_input + self.fc_lay[i])),
                )
            )
            self.wx[i].bias = torch.nn.Parameter(torch.zeros(self.fc_lay[i]))
            current_input = self.fc_lay[i]
    def forward(self, x):
        # Applying Layer/Batch Norm
        if bool(self.fc_use_laynorm_inp):
            x = self.ln0((x))
        if bool(self.fc_use_batchnorm_inp):
            x = self.bn0((x))
        for i in range(self.N_fc_lay):
            # Each layer: linear -> (layer-norm | batch-norm | none) -> act -> dropout.
            if self.fc_act[i] != "linear":
                if self.fc_use_laynorm[i]:
                    x = self.drop[i](self.act[i](self.ln[i](self.wx[i](x))))
                if self.fc_use_batchnorm[i]:
                    x = self.drop[i](self.act[i](self.bn[i](self.wx[i](x))))
                if (
                    self.fc_use_batchnorm[i] == False
                    and self.fc_use_laynorm[i] == False
                ):
                    x = self.drop[i](self.act[i](self.wx[i](x)))
            else:
                # "linear" layers: no activation function is applied.
                if self.fc_use_laynorm[i]:
                    x = self.drop[i](self.ln[i](self.wx[i](x)))
                if self.fc_use_batchnorm[i]:
                    x = self.drop[i](self.bn[i](self.wx[i](x)))
                if (
                    self.fc_use_batchnorm[i] == False
                    and self.fc_use_laynorm[i] == False
                ):
                    x = self.drop[i](self.wx[i](x))
        return x
class SincNet(nn.Module):
    """SincNet front-end: a SincConv_fast layer followed by standard Conv1d blocks.

    All ``cnn_*`` arguments are per-layer lists (one entry per conv layer);
    ``input_dim`` is the waveform length in samples and ``fs`` the sample rate.
    ``self.out_dim`` exposes the flattened output size for downstream layers.
    """
    def __init__(
        self,
        cnn_N_filt,
        cnn_len_filt,
        cnn_max_pool_len,
        cnn_act,
        cnn_drop,
        cnn_use_laynorm,
        cnn_use_batchnorm,
        cnn_use_laynorm_inp,
        cnn_use_batchnorm_inp,
        input_dim,
        fs,
    ):
        super(SincNet, self).__init__()
        self.cnn_N_filt = cnn_N_filt
        self.cnn_len_filt = cnn_len_filt
        self.cnn_max_pool_len = cnn_max_pool_len
        self.cnn_act = cnn_act
        self.cnn_drop = cnn_drop
        self.cnn_use_laynorm = cnn_use_laynorm
        self.cnn_use_batchnorm = cnn_use_batchnorm
        self.cnn_use_laynorm_inp = cnn_use_laynorm_inp
        self.cnn_use_batchnorm_inp = cnn_use_batchnorm_inp
        self.input_dim = int(input_dim)
        self.fs = fs
        self.N_cnn_lay = len(self.cnn_N_filt)
        self.conv = nn.ModuleList([])
        self.bn = nn.ModuleList([])
        self.ln = nn.ModuleList([])
        self.act = nn.ModuleList([])
        self.drop = nn.ModuleList([])
        # Optional normalization applied directly to the raw input waveform.
        if self.cnn_use_laynorm_inp:
            self.ln0 = LayerNorm(self.input_dim)
        if self.cnn_use_batchnorm_inp:
            self.bn0 = nn.BatchNorm1d([self.input_dim], momentum=0.05)
        current_input = self.input_dim
        for i in range(self.N_cnn_lay):
            N_filt = int(self.cnn_N_filt[i])
            len_filt = int(self.cnn_len_filt[i])
            # dropout
            self.drop.append(nn.Dropout(p=self.cnn_drop[i]))
            # activation
            self.act.append(act_fun(self.cnn_act[i]))
            # layer norm initialization
            self.ln.append(
                LayerNorm(
                    [
                        N_filt,
                        int(
                            (current_input - self.cnn_len_filt[i] + 1)
                            / self.cnn_max_pool_len[i]
                        ),
                    ]
                )
            )
            # NOTE(review): BatchNorm1d's second positional argument is `eps`,
            # so this int lands in `eps` rather than acting as a size; this
            # matches the upstream SincNet code -- confirm before changing.
            self.bn.append(
                nn.BatchNorm1d(
                    N_filt,
                    int(
                        (current_input - self.cnn_len_filt[i] + 1)
                        / self.cnn_max_pool_len[i]
                    ),
                    momentum=0.05,
                )
            )
            # First layer is the sinc-parameterized convolution; the rest are Conv1d.
            if i == 0:
                self.conv.append(
                    SincConv_fast(self.cnn_N_filt[0], self.cnn_len_filt[0], self.fs)
                )
            else:
                self.conv.append(
                    nn.Conv1d(
                        self.cnn_N_filt[i - 1], self.cnn_N_filt[i], self.cnn_len_filt[i]
                    )
                )
            # Output length after 'valid' convolution followed by max pooling.
            current_input = int(
                (current_input - self.cnn_len_filt[i] + 1) / self.cnn_max_pool_len[i]
            )
        self.out_dim = current_input * N_filt
    def forward(self, x):
        batch = x.shape[0]
        seq_len = x.shape[1]
        if bool(self.cnn_use_laynorm_inp):
            x = self.ln0((x))
        if bool(self.cnn_use_batchnorm_inp):
            x = self.bn0((x))
        x = x.view(batch, 1, seq_len)
        for i in range(self.N_cnn_lay):
            # Each layer: conv -> max-pool -> (layer|batch|no) norm -> act -> dropout.
            if self.cnn_use_laynorm[i]:
                # The sinc layer's output is rectified with abs() before pooling.
                if i == 0:
                    x = self.drop[i](
                        self.act[i](
                            self.ln[i](
                                F.max_pool1d(
                                    torch.abs(self.conv[i](x)), self.cnn_max_pool_len[i]
                                )
                            )
                        )
                    )
                else:
                    x = self.drop[i](
                        self.act[i](
                            self.ln[i](
                                F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i])
                            )
                        )
                    )
            if self.cnn_use_batchnorm[i]:
                x = self.drop[i](
                    self.act[i](
                        self.bn[i](
                            F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i])
                        )
                    )
                )
            if self.cnn_use_batchnorm[i] == False and self.cnn_use_laynorm[i] == False:
                x = self.drop[i](
                    self.act[i](F.max_pool1d(self.conv[i](x), self.cnn_max_pool_len[i]))
                )
        x = x.view(batch, -1)
        return x
| 16,565 | 28.902527 | 226 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.