repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
FATE | FATE-master/python/federatedml/nn/backend/utils/data.py | import numpy as np
from torch.utils.data import Dataset as torchDataset
from federatedml.util import LOGGER
from federatedml.nn.dataset.base import Dataset, get_dataset_class
from federatedml.nn.dataset.image import ImageDataset
from federatedml.nn.dataset.table import TableDataset
from federatedml.nn.dataset.graph import GraphDataset
def try_dataset_class(dataset_class, path, param):
    """Attempt to instantiate *dataset_class* with *param* and load *path*.

    Returns the loaded dataset instance on success, or None when either
    construction or loading raises (the failure is logged as a warning).
    """
    try:
        candidate: Dataset = dataset_class(**param)
        candidate.load(path)
    except Exception as e:
        LOGGER.warning('try to load dataset failed, exception :{}'.format(e))
        return None
    return candidate
def load_dataset(dataset_name, data_path_or_dtable, param, dataset_cache: dict):
    """Load (or fetch from cache) a dataset instance.

    When dataset_name is empty, tries Table/Image/Graph datasets in order
    until one loads successfully; otherwise loads the named dataset class.
    Loaded instances are memoized in dataset_cache.
    """
    # cache key: the path string itself, or the Python id() of the dtable
    # NOTE(review): id() of a garbage-collected table can be reused by a new
    # object, which could return a stale cache entry — verify callers keep
    # the dtable alive for the component's lifetime.
    if isinstance(data_path_or_dtable, str):
        cached_id = data_path_or_dtable
    else:
        cached_id = str(id(data_path_or_dtable))

    if cached_id in dataset_cache:
        LOGGER.debug('use cached dataset, cached id {}'.format(cached_id))
        return dataset_cache[cached_id]

    if dataset_name is None or dataset_name == '':
        # automatically match default dataset: first class that loads wins
        LOGGER.info('dataset is not specified, use auto inference')

        for ds_class in [TableDataset, ImageDataset, GraphDataset]:
            dataset_inst = try_dataset_class(
                ds_class, data_path_or_dtable, param=param)
            if dataset_inst is not None:
                break
        if dataset_inst is None:
            raise ValueError(
                'cannot find default dataset that can successfully load data from path {}, '
                'please check the warning message for error details'. format(data_path_or_dtable))
    else:
        # load specified dataset
        dataset_class = get_dataset_class(dataset_name)
        dataset_inst = dataset_class(**param)
        dataset_inst.load(data_path_or_dtable)

    dataset_cache[cached_id] = dataset_inst
    return dataset_inst
def get_ret_predict_table(id_table, pred_table, classes, partitions, computing_session):
    """Wrap local id/prediction lists into distributed tables.

    Note: *classes* is accepted for interface compatibility but is not used here.
    """
    make_table = computing_session.parallelize
    id_dtable = make_table(id_table, partition=partitions, include_key=True)
    pred_dtable = make_table(pred_table, partition=partitions, include_key=True)
    return id_dtable, pred_dtable
def add_match_id(id_table: list, dataset_inst: TableDataset):
    """Fill each instance's inst_id from the match ids recorded by the dataset."""
    assert isinstance(dataset_inst, TableDataset), 'when using match id your dataset must be a Table Dataset'
    match_ids = dataset_inst.match_ids
    for pair in id_table:
        sample_id, instance = pair[0], pair[1]
        instance.inst_id = match_ids[sample_id]
| 2,598 | 35.605634 | 109 | py |
FATE | FATE-master/python/federatedml/nn/backend/utils/common.py | import torch as t
import numpy as np
import tempfile
# module search paths for trainer/dataset classes
ML_PATH = 'federatedml.nn'
LLM_PATH = "fate_llm"
# dict keys used to pack/unpack the homo-NN protobuf model dict
HOMOMODELMETA = "HomoNNMeta"
HOMOMODELPARAM = "HomoNNParam"
def global_seed(seed):
    """Make torch runs reproducible: seed CPU and all CUDA RNGs, pin cuDNN.

    cuda.manual_seed_all is safe without a GPU (lazily applied by torch).
    """
    for seeder in (t.manual_seed, t.cuda.manual_seed_all):
        seeder(seed)
    # force deterministic cuDNN convolution algorithms
    t.backends.cudnn.deterministic = True
def get_homo_model_dict(param, meta):
    """Pack param/meta protobufs into the standard homo-NN model dict."""
    model_dict = {HOMOMODELPARAM: param}
    model_dict[HOMOMODELMETA] = meta
    return model_dict
def get_homo_param_meta(model_dict):
    """Inverse of get_homo_model_dict: return (param, meta), None when absent."""
    param = model_dict.get(HOMOMODELPARAM)
    meta = model_dict.get(HOMOMODELMETA)
    return param, meta
# read model from model bytes
def recover_model_bytes(model_bytes):
    """Deserialize an object previously saved with torch.save as raw bytes.

    The bytes are spooled through an anonymous temporary file because
    torch.load expects a file-like object positioned at the start.
    """
    with tempfile.TemporaryFile() as tmp:
        tmp.write(model_bytes)
        tmp.seek(0)
        recovered = t.load(tmp)
    return recovered
def get_torch_model_bytes(model_dict):
    """Serialize an object with torch.save and return the raw bytes."""
    with tempfile.TemporaryFile() as tmp:
        t.save(model_dict, tmp)
        tmp.seek(0)
        raw = tmp.read()
    return raw
| 968 | 20.065217 | 72 | py |
FATE | FATE-master/python/federatedml/nn/backend/utils/distributed_util.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch.distributed as dist
def is_rank_0():
    """True when this process is rank 0; requires an initialized process group."""
    return 0 == dist.get_rank()
def is_distributed():
    """Whether torch.distributed has an initialized default process group."""
    initialized = dist.is_initialized()
    return initialized
def get_num_workers():
    """World size of the default process group; requires initialization."""
    world_size = dist.get_world_size()
    return world_size
| 815 | 27.137931 | 75 | py |
FATE | FATE-master/python/federatedml/nn/homo/client.py | import json
import torch
import inspect
from fate_arch.computing.non_distributed import LocalData
from fate_arch.computing import is_table
from federatedml.model_base import ModelBase
from federatedml.nn.homo.trainer.trainer_base import get_trainer_class, TrainerBase
from federatedml.nn.backend.utils.data import load_dataset
from federatedml.nn.backend.utils import deepspeed_util
from federatedml.param.homo_nn_param import HomoNNParam
from federatedml.nn.backend.torch import serialization as s
from federatedml.nn.backend.torch.base import FateTorchOptimizer
from federatedml.model_base import MetricMeta
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.nn.homo.trainer.trainer_base import StdReturnFormat
from federatedml.nn.backend.utils.common import global_seed, get_homo_model_dict, get_homo_param_meta, recover_model_bytes, get_torch_model_bytes
from federatedml.callbacks.model_checkpoint import ModelCheckpoint
from federatedml.statistic.data_overview import check_with_inst_id
from federatedml.nn.homo.trainer.trainer_base import ExporterBase
from fate_arch.session import computing_session
from federatedml.nn.backend.utils.data import get_ret_predict_table
from federatedml.nn.backend.utils.data import add_match_id
from federatedml.protobuf.generated.homo_nn_model_param_pb2 import HomoNNParam as HomoNNParamPB
from federatedml.protobuf.generated.homo_nn_model_meta_pb2 import HomoNNMeta as HomoNNMetaPB
class NNModelExporter(ExporterBase):
    """Exporter that converts a trained torch model plus training state into
    the FATE homo-NN protobuf model dict ({param, meta})."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def export_model_dict(
            self,
            model=None,
            optimizer=None,
            model_define=None,
            optimizer_define=None,
            loss_define=None,
            epoch_idx=-1,
            converge_status=False,
            loss_history=None,
            best_epoch=-1,
            local_save_path='',
            extra_data={}):
        """Serialize model/optimizer state and training metadata into protobufs.

        Returns the {param, meta} dict produced by get_homo_model_dict.
        NOTE(review): extra_data={} is a shared mutable default — safe only
        as long as callers never mutate it.
        """
        # serialize model weights only when a real torch module is given
        if issubclass(type(model), torch.nn.Module):
            model_statedict = model.state_dict()
        else:
            model_statedict = None

        opt_state_dict = None
        if optimizer is not None:
            assert isinstance(optimizer, torch.optim.Optimizer), \
                'optimizer must be an instance of torch.optim.Optimizer'
            opt_state_dict = optimizer.state_dict()

        # bundle weights + optimizer state, then serialize via torch.save
        model_status = {
            'model': model_statedict,
            'optimizer': opt_state_dict,
        }
        model_saved_bytes = get_torch_model_bytes(model_status)
        extra_data_bytes = get_torch_model_bytes(extra_data)

        param = HomoNNParamPB()
        meta = HomoNNMetaPB()

        # save param
        param.model_bytes = model_saved_bytes
        param.extra_data_bytes = extra_data_bytes
        param.epoch_idx = epoch_idx
        param.converge_status = converge_status
        param.best_epoch = best_epoch
        param.local_save_path = local_save_path
        if loss_history is None:
            loss_history = []
        param.loss_history.extend(loss_history)

        # save meta: defines are stored as JSON strings in repeated fields
        meta.nn_define.append(json.dumps(model_define))
        meta.optimizer_define.append(json.dumps(optimizer_define))
        meta.loss_func_define.append(json.dumps(loss_define))

        return get_homo_model_dict(param, meta)
class HomoNNClient(ModelBase):
    """Client-side component of FATE Homo-NN.

    Wires FATE job parameters (HomoNNParam) to a trainer, dataset, and torch
    model: builds them in init(), runs federated training in fit(), serves
    predictions in predict(), and handles model (de)serialization.
    """

    def __init__(self):
        super(HomoNNClient, self).__init__()
        self.model_param = HomoNNParam()
        self.trainer = consts.FEDAVG_TRAINER  # trainer class name
        self.trainer_param = {}
        self.dataset_module = None
        self.dataset = None        # dataset class name
        self.dataset_param = {}
        self.torch_seed = None
        self.loss = None           # loss define (json-able dict)
        self.optimizer = None      # optimizer define (json-able dict)
        self.nn_define = None      # model structure define (json-able dict)

        # running variables
        self.trainer_inst = None

        # export model
        self.exporter = NNModelExporter()
        self.model_loaded = False
        self.model = None

        # cache dataset: avoid re-loading the same path/table in fit + predict
        self.cache_dataset = {}

        # dtable partitions
        self.partitions = 4

        # warm start display iter
        self.warm_start_iter = None

        # deepspeed
        self.ds_config = None
        self._ds_stage = -1
        self.model_save_flag = False

    def _init_model(self, param: HomoNNParam):
        # unpack the component parameter object into instance fields
        train_param = param.trainer.to_dict()
        dataset_param = param.dataset.to_dict()
        self.trainer = train_param['trainer_name']
        self.dataset = dataset_param['dataset_name']
        self.trainer_param = train_param['param']
        self.dataset_param = dataset_param['param']
        self.torch_seed = param.torch_seed
        self.nn_define = param.nn_define
        self.loss = param.loss
        self.optimizer = param.optimizer
        self.ds_config = param.ds_config

    def init(self):
        """Build trainer, model, optimizer, loss and warm-start extra data.

        Returns
        -------
        tuple
            (trainer_inst, model, optimizer, loss_fn, extra_data)
        """
        # set random seed
        global_seed(self.torch_seed)

        if self.ds_config:
            deepspeed_util.init_deepspeed_env(self.ds_config)

        # load trainer class
        if self.trainer is None:
            raise ValueError(
                'Trainer is not specified, please specify your trainer')

        trainer_class = get_trainer_class(self.trainer)
        LOGGER.info('trainer class is {}'.format(trainer_class))

        # recover model from model config / or recover from saved model param
        loaded_model_dict = None

        # if has model protobuf, load model config from protobuf
        load_opt_state_dict = False

        if self.model_loaded:

            param, meta = get_homo_param_meta(self.model)
            LOGGER.info('save path is {}'.format(param.local_save_path))
            if param.local_save_path == '':
                LOGGER.info('Load model from model protobuf')
                self.warm_start_iter = param.epoch_idx
                # NOTE(review): param was already dereferenced above, so this
                # None check can never trigger — kept for byte-compatibility
                if param is None or meta is None:
                    raise ValueError(
                        'model protobuf is None, make sure'
                        'that your trainer calls export_model() function to save models')

                if meta.nn_define[0] is None:
                    raise ValueError(
                        'nn_define is None, model protobuf has no nn-define, make sure'
                        'that your trainer calls export_model() function to save models')

                self.nn_define = json.loads(meta.nn_define[0])
                loss = json.loads(meta.loss_func_define[0])
                optimizer = json.loads(meta.optimizer_define[0])
                loaded_model_dict = recover_model_bytes(param.model_bytes)
                extra_data = recover_model_bytes(param.extra_data_bytes)
            else:
                LOGGER.info('Load model from local save path')
                # NOTE(review): the file handle from open() is never closed
                save_dict = torch.load(open(param.local_save_path, 'rb'))
                self.warm_start_iter = save_dict['epoch_idx']
                self.nn_define = save_dict['model_define']
                loss = save_dict['loss_define']
                optimizer = save_dict['optimizer_define']
                loaded_model_dict = save_dict
                extra_data = save_dict['extra_data']

            # keep user-supplied optimizer/loss when they differ from the saved
            # ones; otherwise adopt the saved defines (and reload optimizer state)
            if self.optimizer is not None and optimizer != self.optimizer:
                LOGGER.info('optimizer updated')
            else:
                self.optimizer = optimizer
                load_opt_state_dict = True

            if self.loss is not None and self.loss != loss:
                LOGGER.info('loss updated')
            else:
                self.loss = loss
        else:
            extra_data = {}

        # check key param
        if self.nn_define is None:
            raise ValueError(
                'Model structure is not defined, nn_define is None, please check your param')

        # get model from nn define
        model = s.recover_sequential_from_dict(self.nn_define)
        if loaded_model_dict:
            model.load_state_dict(loaded_model_dict['model'])
            LOGGER.info('load model state dict from check point')

        LOGGER.info('model structure is {}'.format(model))

        # init optimizer (skipped when deepspeed manages optimization)
        if self.optimizer is not None and not self.ds_config:
            optimizer_: FateTorchOptimizer = s.recover_optimizer_from_dict(
                self.optimizer)
            # pass model parameters to optimizer
            optimizer = optimizer_.to_torch_instance(model.parameters())
            if load_opt_state_dict:
                LOGGER.info('load optimizer state dict')
                optimizer.load_state_dict(loaded_model_dict['optimizer'])
            LOGGER.info('optimizer is {}'.format(optimizer))
        else:
            optimizer = None
            LOGGER.info('optimizer is not specified')

        # init loss
        if self.loss is not None:
            loss_fn = s.recover_loss_fn_from_dict(self.loss)
            LOGGER.info('loss function is {}'.format(loss_fn))
        else:
            loss_fn = None
            LOGGER.info('loss function is not specified')

        # init trainer
        trainer_inst: TrainerBase = trainer_class(**self.trainer_param)
        LOGGER.info('trainer class is {}'.format(trainer_class))

        # verify that trainer.train accepts the standard argument set
        trainer_train_args = inspect.getfullargspec(trainer_inst.train).args
        args_format = [
            'self',
            'train_set',
            'validate_set',
            'optimizer',
            'loss',
            'extra_data'
        ]
        if len(trainer_train_args) < 6:
            raise ValueError(
                'Train function of trainer should take 6 arguments :{}, but current trainer.train '
                'only takes {} arguments: {}'.format(
                    args_format, len(trainer_train_args), trainer_train_args))

        trainer_inst.set_nn_config(self.nn_define, self.optimizer, self.loss)
        trainer_inst.fed_mode = True

        if self.ds_config:
            model, optimizer = deepspeed_util.deepspeed_init(model, self.ds_config)
            trainer_inst.enable_deepspeed(is_zero_3=deepspeed_util.is_zero3(self.ds_config))
            if deepspeed_util.is_zero3(self.ds_config):
                model.train()

        return trainer_inst, model, optimizer, loss_fn, extra_data

    def fit(self, train_input, validate_input=None):
        """Run federated training on train_input (DTable or local path)."""
        LOGGER.debug('train input is {}'.format(train_input))

        # train input & validate input are DTables or path str
        if not is_table(train_input):
            if isinstance(train_input, LocalData):
                train_input = train_input.path
                assert train_input is not None, 'input train path is None!'
        if not is_table(validate_input):
            if isinstance(validate_input, LocalData):
                validate_input = validate_input.path
                assert validate_input is not None, 'input validate path is None!'

        # fate loss callback setting
        self.callback_meta(
            "loss",
            "train",
            MetricMeta(
                name="train",
                metric_type="LOSS",
                extra_metas={
                    "unit_name": "epochs"}))

        # set random seed
        global_seed(self.torch_seed)

        self.trainer_inst, model, optimizer, loss_fn, extra_data = self.init()
        self.trainer_inst.set_model(model)
        self.trainer_inst.set_tracker(self.tracker)
        self.trainer_inst.set_model_exporter(self.exporter)

        # load dataset class
        dataset_inst = load_dataset(
            dataset_name=self.dataset,
            data_path_or_dtable=train_input,
            dataset_cache=self.cache_dataset,
            param=self.dataset_param
        )
        # set dataset prefix
        dataset_inst.set_type('train')
        LOGGER.info('train dataset instance is {}'.format(dataset_inst))

        if validate_input:
            val_dataset_inst = load_dataset(
                dataset_name=self.dataset,
                data_path_or_dtable=validate_input,
                dataset_cache=self.cache_dataset,
                param=self.dataset_param
            )
            if id(val_dataset_inst) != id(dataset_inst):
                # NOTE(review): looks like this should be
                # val_dataset_inst.set_type('validate') — the train dataset is
                # re-tagged instead; confirm against upstream
                dataset_inst.set_type('validate')
            LOGGER.info('validate dataset instance is {}'.format(dataset_inst))
        else:
            val_dataset_inst = None

        # display warmstart iter
        if self.component_properties.is_warm_start:
            self.callback_warm_start_init_iter(self.warm_start_iter)

        # set model check point
        self.trainer_inst.set_checkpoint(ModelCheckpoint(self, save_freq=1))
        # training
        self.trainer_inst.train(
            dataset_inst,
            val_dataset_inst,
            optimizer,
            loss_fn,
            extra_data
        )

        # training is done, get exported model
        self.model = self.trainer_inst.get_cached_model()
        self.set_summary(self.trainer_inst.get_summary())

    def predict(self, cpn_input):
        """Run prediction; returns a formatted result table or None when the
        trainer does not produce a standard-format result."""
        with_inst_id = False
        schema = None
        if not is_table(cpn_input):
            if isinstance(cpn_input, LocalData):
                cpn_input = cpn_input.path
                assert cpn_input is not None, 'input path is None!'
        elif is_table(cpn_input):
            with_inst_id = check_with_inst_id(cpn_input)
            schema = cpn_input.schema

        LOGGER.info('running predict')
        if self.trainer_inst is None:
            # init model
            self.trainer_inst, model, optimizer, loss_fn, _ = self.init()
            self.trainer_inst.set_model(model)
            self.trainer_inst.set_tracker(self.tracker)

        dataset_inst = load_dataset(
            dataset_name=self.dataset,
            data_path_or_dtable=cpn_input,
            dataset_cache=self.cache_dataset,
            param=self.dataset_param)

        if not dataset_inst.has_dataset_type():
            dataset_inst.set_type('predict')

        trainer_ret = self.trainer_inst.predict(dataset_inst)
        if trainer_ret is None or not isinstance(trainer_ret, StdReturnFormat):
            LOGGER.info(
                'trainer did not return formatted predicted result, skip predict')
            return None

        id_table, pred_table, classes = trainer_ret()

        if with_inst_id:  # set match id
            add_match_id(id_table=id_table, dataset_inst=dataset_inst)

        id_dtable, pred_dtable = get_ret_predict_table(
            id_table, pred_table, classes, self.partitions, computing_session)
        ret_table = self.predict_score_to_output(
            id_dtable, pred_dtable, classes)
        if schema is not None:
            self.set_predict_data_schema(ret_table, schema)

        return ret_table

    def export_model(self):
        """Return the cached trained model dict, or an empty one if untrained."""
        if self.model is None:
            LOGGER.debug('export an empty model')
            return self.exporter.export_model_dict()  # return an empty model

        return self.model

    def load_model(self, model_dict):
        # keep the raw protobuf dict; init() will unpack it later
        model_dict = list(model_dict["model"].values())[0]
        self.model = model_dict
        self.model_loaded = True

    # override function
    @staticmethod
    def set_predict_data_schema(predict_datas, schemas):
        """Attach the standard prediction-output schema to the result table."""
        if predict_datas is None:
            return predict_datas

        if isinstance(predict_datas, list):
            predict_data = predict_datas[0]
            schema = schemas[0]
        else:
            predict_data = predict_datas
            schema = schemas

        if predict_data is not None:
            predict_data.schema = {
                "header": [
                    "label",
                    "predict_result",
                    "predict_score",
                    "predict_detail",
                    "type",
                ],
                "sid": 'id',
                "content_type": "predict_result"
            }
            # propagate match id column name when present in the input schema
            if schema.get("match_id_name") is not None:
                predict_data.schema["match_id_name"] = schema.get(
                    "match_id_name")
        return predict_data
| 15,960 | 35.861432 | 145 | py |
FATE | FATE-master/python/federatedml/nn/homo/trainer/fedavg_trainer.py | import torch
import torch as t
import torch.distributed as dist
import tqdm
import numpy as np
import transformers
from torch.nn import DataParallel
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorClient as SecureAggClient
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorServer as SecureAggServer
from federatedml.nn.backend.utils import deepspeed_util
from federatedml.nn.backend.utils import distributed_util
from federatedml.nn.dataset.base import Dataset
from federatedml.nn.homo.trainer.trainer_base import TrainerBase
from federatedml.util import LOGGER, consts
from federatedml.optim.convergence import converge_func_factory
class FedAVGTrainer(TrainerBase):
    """
    Parameters
    ----------
    epochs: int >0, epochs to train
    batch_size: int, -1 means full batch
    secure_aggregate: bool, default is True, whether to use secure aggregation. if enabled, will add random number
                      mask to local models. These random number masks will eventually cancel out to get 0.
    weighted_aggregation: bool, whether to add weight to each local model when doing aggregation.
                          if True, according to the origin paper, the weight of a client is n_local / n_global,
                          where n_local is the sample number locally and n_global is the sample number of all clients.
                          if False, simply average these models.
    early_stop: None, 'diff' or 'abs'. if None, disable early stop; if 'diff', use the loss difference between
                two epochs as the early stop condition, if difference < tol, stop training; if 'abs', stop
                training if loss < tol
    tol: float, tol value for early stop
    aggregate_every_n_epoch: None or int. if None, aggregate model at the end of every epoch; if int, aggregate
                             every n epochs.
    cuda: None, int or list of int. if None, use cpu; if int, use the {int} device; if a list of int,
          this trainer will automatically use DataParallel for multi GPU training, with the first index as
          the main device and the output device.
    pin_memory: bool, for pytorch DataLoader
    shuffle: bool, for pytorch DataLoader
    data_loader_worker: int, for pytorch DataLoader, number of workers when loading data
    validation_freqs: None or int. if int, validate your model and send validate results to fate-board every n epochs.
                      if a binary classification task, will use metrics 'auc', 'ks', 'gain', 'lift', 'precision';
                      if a multi classification task, will use metrics 'precision', 'recall', 'accuracy';
                      if a regression task, will use metrics 'mse', 'mae', 'rmse', 'explained_variance', 'r2_score'
    checkpoint_save_freqs: save model every n epochs; if None, will not save checkpoint.
    task_type: str, 'auto', 'binary', 'multi', 'regression',
               this option decides the return format of this trainer, and the evaluation type when running validation.
               if auto, will automatically infer your task type from labels and predict results.
    save_to_local_dir: bool, if True, a dictionary containing the model, optimizer, and metadata will be saved to
                       a local directory: fateflow/jobs/${jobid}/${party}/${party_id}/${your_nn_component}.
                       If set to False, the model will not be saved to the FATE framework in protobuf format.
    """

    def __init__(self, epochs=10, batch_size=512,  # training parameter
                 early_stop=None, tol=0.0001,  # early stop parameters
                 secure_aggregate=True, weighted_aggregation=True, aggregate_every_n_epoch=None,  # federation
                 cuda=None,
                 pin_memory=True, shuffle=True, data_loader_worker=0,  # GPU & dataloader
                 validation_freqs=None,  # validation configuration
                 checkpoint_save_freqs=None,  # checkpoint configuration
                 task_type='auto',  # task type
                 save_to_local_dir=False,  # save model to local path
                 collate_fn=None,
                 collate_fn_params=None
                 ):

        super(FedAVGTrainer, self).__init__()

        # training parameters
        self.epochs = epochs
        self.tol = tol
        self.validation_freq = validation_freqs
        self.save_freq = checkpoint_save_freqs
        self.save_to_local_dir = save_to_local_dir

        self.task_type = task_type.lower()
        task_type_allow = [
            consts.BINARY,
            consts.REGRESSION,
            consts.MULTY,
            consts.CAUSAL_LM,
            consts.SEQ_2_SEQ_LM,
            'auto']
        assert self.task_type in task_type_allow, 'task type must in {}'.format(
            task_type_allow)

        # aggregation param
        self.secure_aggregate = secure_aggregate
        self.weighted_aggregation = weighted_aggregation
        self.aggregate_every_n_epoch = aggregate_every_n_epoch

        # GPU, check cuda setting
        self.cuda = cuda
        self.cuda_main_device = None
        self.data_parallel = False
        self.parallel_model = None

        if not torch.cuda.is_available() and self.cuda is not None:
            raise ValueError('Cuda is not available on this machine')
        if isinstance(self.cuda, int):
            self.cuda_main_device = self.cuda
        elif isinstance(self.cuda, list):
            for i in self.cuda:
                assert isinstance(i, int), 'cuda device must be int, but got {}'.format(self.cuda)
            # first listed device is the main/output device for DataParallel
            self.cuda_main_device = self.cuda[0]
            if len(self.cuda) > 1:
                self.data_parallel = True
                LOGGER.info('Using DataParallel in Pytorch')

        # data loader
        self.batch_size = batch_size
        self.pin_memory = pin_memory
        self.shuffle = shuffle
        self.data_loader_worker = data_loader_worker
        self.data_loader = None
        self.collate_fn = collate_fn
        self.collate_fn_params = collate_fn_params if collate_fn_params is not None else dict()

        self.early_stop = early_stop
        early_stop_type = ['diff', 'abs']
        if early_stop is not None:
            assert early_stop in early_stop_type, 'early stop type must be in {}, bug got {}' \
                .format(early_stop_type, early_stop)

        # communicate suffix
        self.comm_suffix = 'fedavg'

        # check param correctness
        self.check_trainer_param([self.epochs,
                                  self.validation_freq,
                                  self.save_freq,
                                  self.aggregate_every_n_epoch],
                                 ['epochs',
                                  'validation_freq',
                                  'save_freq',
                                  'aggregate_every_n_epoch'],
                                 self.is_pos_int,
                                 '{} is not a positive int')
        self.check_trainer_param([self.secure_aggregate, self.weighted_aggregation, self.pin_memory, self.save_to_local_dir], [
            'secure_aggregate', 'weighted_aggregation', 'pin_memory', 'save_to_local_dir'], self.is_bool, '{} is not a bool')
        self.check_trainer_param(
            [self.tol], ['tol'], self.is_float, '{} is not a float')
    def _init_aggregator(self, train_set):
        """Create the secure-aggregation client (or None in local mode).

        Returns
        -------
        tuple
            (client_agg or None, total number of aggregation rounds)
        """
        # compute round to aggregate
        cur_agg_round = 0  # NOTE(review): unused here; the counter lives in train()
        if self.aggregate_every_n_epoch is not None:
            aggregate_round = self.epochs // self.aggregate_every_n_epoch
        else:
            aggregate_round = self.epochs

        # initialize fed avg client
        if self.fed_mode:
            # aggregation weight: local sample count, or 1.0 for plain averaging
            if self.weighted_aggregation:
                sample_num = len(train_set)
            else:
                sample_num = 1.0

            # in multi-process (distributed) mode only rank 0 talks to the server
            if not distributed_util.is_distributed() or distributed_util.is_rank_0():
                client_agg = SecureAggClient(
                    self.secure_aggregate, aggregate_weight=sample_num, communicate_match_suffix=self.comm_suffix)
            else:
                client_agg = None
        else:
            client_agg = None

        return client_agg, aggregate_round
    def set_model(self, model: t.nn.Module):
        """Attach the torch model, moving it to GPU / wrapping DataParallel as configured."""
        if not issubclass(type(model), t.nn.Module):
            raise ValueError('model must be a subclass of pytorch nn.Module')
        self.model = model
        if self.cuda is not None:
            self.model = self.model.cuda(self.cuda_main_device)
            if self.data_parallel:
                # wrap the original model; outputs are gathered on the main device
                self.parallel_model = DataParallel(model, device_ids=self.cuda, output_device=self.cuda_main_device)
def _select_model(self):
if self.data_parallel:
return self.parallel_model
else:
return self.model
    def train_an_epoch(self, epoch_idx, model, train_set, optimizer, loss):
        """Run one local training epoch and return the sample-averaged loss.

        Handles both the plain-torch path (optimizer.step) and the deepspeed
        path (model.backward / model.step with cross-rank loss sync).
        """
        epoch_loss = 0.0
        batch_idx = 0
        acc_num = 0

        # keep shuffling consistent across distributed workers per epoch
        if isinstance(self.data_loader.sampler, DistributedSampler):
            self.data_loader.sampler.set_epoch(epoch_idx)

        dl = self.data_loader

        # show a progress bar only in local (non-federated) runs
        if not self.fed_mode:
            to_iterate = tqdm.tqdm(dl)
        else:
            to_iterate = dl

        batch_label = None
        for _batch_iter in to_iterate:
            _batch_iter = self._decode(_batch_iter)
            # list batches are (data, label); dict-style batches carry no
            # separate label (e.g. LM tasks) and batch_label stays None
            if isinstance(_batch_iter, list):
                batch_data, batch_label = _batch_iter
            else:
                batch_data = _batch_iter
            """
            if self.task_type in [consts.CAUSAL_LM, consts.SEQ_2_SEQ_LM]:
                batch_data = _batch_iter
            else:
                batch_data, batch_label = _batch_iter
                batch_data = self._decode(batch_data)
                batch_label = self._decode(batch_label)
            """

            if self.cuda is not None or self._enable_deepspeed:
                device = self.cuda_main_device if self.cuda_main_device is not None else self.model.device
                batch_data = self.to_cuda(batch_data, device)
                if batch_label is not None:
                    batch_label = self.to_cuda(batch_label, device)

            if not self._enable_deepspeed:
                optimizer.zero_grad()
            else:
                model.zero_grad()

            pred = model(batch_data)

            # three cases: model-computed loss (HF-style .loss attr),
            # user-supplied loss fn, or error when neither is available
            if not loss and hasattr(pred, "loss"):
                batch_loss = pred.loss
            elif loss is not None:
                if batch_label is None:
                    raise ValueError(
                        "When loss is set, please provide label to calculate loss"
                    )
                if not isinstance(pred, torch.Tensor) and hasattr(pred, "logits"):
                    pred = pred.logits
                batch_loss = loss(pred, batch_label)
            else:
                raise ValueError(
                    'FedAVGTrainer requires a loss function, but got None, please specify loss function in the'
                    ' job configuration')

            if not self._enable_deepspeed:
                batch_loss.backward()
                optimizer.step()
                batch_loss_np = np.array(batch_loss.detach().tolist()) if self.cuda is None \
                    else np.array(batch_loss.cpu().detach().tolist())
                # last batch may be smaller than batch_size; weight accordingly
                if acc_num + self.batch_size > len(train_set):
                    batch_len = len(train_set) - acc_num
                else:
                    batch_len = self.batch_size
                epoch_loss += batch_loss_np * batch_len
            else:
                # deepspeed drives backward/step itself and syncs loss across ranks
                batch_loss = model.backward(batch_loss)
                batch_loss_np = np.array(batch_loss.cpu().detach().tolist())
                model.step()
                batch_loss_np = self._sync_loss(batch_loss_np * self._get_batch_size(batch_data))
                if distributed_util.is_rank_0():
                    epoch_loss += batch_loss_np

            batch_idx += 1
            # LOGGER.info(f"finish epoch={epoch_idx}, batch={batch_idx}")

            if self.fed_mode:
                LOGGER.debug(
                    'epoch {} batch {} finished'.format(epoch_idx, batch_idx))

        # average the weighted sum over the full dataset size
        epoch_loss = epoch_loss / len(train_set)
        return epoch_loss
    def train(
            self,
            train_set: Dataset,
            validate_set: Dataset = None,
            optimizer: t.optim.Optimizer = None,
            loss=None,
            extra_dict={}):
        """Full FedAVG training loop: local epochs + periodic secure aggregation,
        optional validation, checkpointing, and final model save/summary.

        NOTE(review): extra_dict={} is a shared mutable default — safe only as
        long as it is never mutated.
        """
        if optimizer is None:
            raise ValueError(
                'FedAVGTrainer requires an optimizer, but got None, please specify optimizer in the '
                'job configuration')

        # -1 or oversized batch_size means full batch
        if self.batch_size > len(train_set) or self.batch_size == -1:
            self.batch_size = len(train_set)

        # compute round to aggregate
        cur_agg_round = 0
        client_agg, aggregate_round = self._init_aggregator(train_set)

        # running var
        cur_epoch = 0
        loss_history = []
        need_stop = False
        evaluation_summary = {}

        self._get_train_data_loader(train_set)

        # training process
        for i in range(self.epochs):
            cur_epoch = i
            LOGGER.info('epoch is {}'.format(i))
            model = self._select_model()
            epoch_loss = self.train_an_epoch(i, model, train_set, optimizer, loss)
            # only rank 0 reports the loss to fate-board
            if not distributed_util.is_distributed() or distributed_util.is_rank_0():
                self.callback_loss(epoch_loss, i)
                loss_history.append(float(epoch_loss))
                LOGGER.info('epoch loss is {}'.format(epoch_loss))

            # federation process, if running local mode, cancel federation
            if client_agg is not None or distributed_util.is_distributed():
                if not (self.aggregate_every_n_epoch is not None and (i + 1) % self.aggregate_every_n_epoch != 0):

                    # model averaging, only aggregate trainable param
                    if self._deepspeed_zero_3:
                        deepspeed_util.gather_model(self.model)

                    # rank 0 aggregates with the server, then broadcasts to peers
                    if not distributed_util.is_distributed() or distributed_util.is_rank_0():
                        self.model = client_agg.model_aggregation(self.model)
                        if distributed_util.is_distributed() and distributed_util.get_num_workers() > 1:
                            self._share_model()
                    else:
                        self._share_model()

                    # agg loss and get converge status
                    if not distributed_util.is_distributed() or distributed_util.is_rank_0():
                        converge_status = client_agg.loss_aggregation(epoch_loss)
                        cur_agg_round += 1
                        if distributed_util.is_distributed() and distributed_util.get_num_workers() > 1:
                            self._sync_converge_status(converge_status)
                    else:
                        converge_status = self._sync_converge_status()

                    if not distributed_util.is_distributed() or distributed_util.is_rank_0():
                        LOGGER.info(
                            'model averaging finished, aggregate round {}/{}'.format(
                                cur_agg_round, aggregate_round))
                    if converge_status:
                        LOGGER.info('early stop triggered, stop training')
                        need_stop = True

            # validation process
            if self.validation_freq and ((i + 1) % self.validation_freq == 0):
                LOGGER.info('running validation')
                ids_t, pred_t, label_t = self._predict(train_set)
                evaluation_summary = self.evaluation(
                    ids_t,
                    pred_t,
                    label_t,
                    dataset_type='train',
                    epoch_idx=i,
                    task_type=self.task_type)
                if validate_set is not None:
                    ids_v, pred_v, label_v = self._predict(validate_set)
                    # validate summary overwrites the train summary
                    evaluation_summary = self.evaluation(
                        ids_v,
                        pred_v,
                        label_v,
                        dataset_type='validate',
                        epoch_idx=i,
                        task_type=self.task_type)

            # save check point process
            if self.save_freq is not None and ((i + 1) % self.save_freq == 0):
                if self._deepspeed_zero_3:
                    deepspeed_util.gather_model(self.model)
                if not distributed_util.is_distributed() or distributed_util.is_rank_0():
                    # NOTE(review): save_freq condition re-checked redundantly here
                    if self.save_freq is not None and ((i + 1) % self.save_freq == 0):
                        if self.save_to_local_dir:
                            self.local_checkpoint(
                                self.model, i, optimizer, converge_status=need_stop, loss_history=loss_history)
                        else:
                            self.checkpoint(
                                self.model, i, optimizer, converge_status=need_stop, loss_history=loss_history)
                        LOGGER.info('save checkpoint : epoch {}'.format(i))

            # if meet stop condition then stop
            if need_stop:
                break

        # post-process
        if self._deepspeed_zero_3:
            deepspeed_util.gather_model(self.model)

        if not distributed_util.is_distributed() or distributed_util.is_rank_0():
            best_epoch = int(np.array(loss_history).argmin())
            if self.save_to_local_dir:
                self.local_save(model=self.model, optimizer=optimizer, epoch_idx=cur_epoch, loss_history=loss_history,
                                converge_status=need_stop, best_epoch=best_epoch)
            else:
                self.save(model=self.model, optimizer=optimizer, epoch_idx=cur_epoch, loss_history=loss_history,
                          converge_status=need_stop, best_epoch=best_epoch)

            best_epoch = int(np.array(loss_history).argmin())
            self.summary({
                'best_epoch': best_epoch,
                'loss_history': loss_history,
                'need_stop': need_stop,
                'metrics_summary': evaluation_summary
            })
    def _predict(self, dataset: Dataset):
        """Forward the whole dataset in eval mode.

        Returns
        -------
        tuple
            (sample ids, concatenated predictions, concatenated labels)
        """
        pred_result = []

        # switch eval mode
        dataset.eval()
        model = self._select_model()
        model.eval()

        if not dataset.has_sample_ids():
            dataset.init_sid_and_getfunc(prefix=dataset.get_type())

        labels = []
        with torch.no_grad():
            for _batch_iter in DataLoader(
                    dataset, self.batch_size
            ):
                # list batches are (data, label); dict batches carry labels
                # under the "labels" key (HF convention)
                if isinstance(_batch_iter, list):
                    batch_data, batch_label = _batch_iter
                else:
                    batch_label = _batch_iter.pop("labels")
                    batch_data = _batch_iter
                if self.cuda is not None or self._enable_deepspeed:
                    device = self.cuda_main_device if self.cuda_main_device is not None else self.model.device
                    batch_data = self.to_cuda(batch_data, device)

                pred = model(batch_data)

                # unwrap HF-style model outputs
                if not isinstance(pred, torch.Tensor) and hasattr(pred, "logits"):
                    pred = pred.logits
                pred_result.append(pred)
                labels.append(batch_label)

            ret_rs = torch.concat(pred_result, axis=0)
            ret_label = torch.concat(labels, axis=0)

        # switch back to train mode
        dataset.train()
        model.train()

        return dataset.get_sample_ids(), ret_rs, ret_label
    def predict(self, dataset: Dataset):
        """Predict on *dataset*; returns a formatted result in fed mode,
        raw (predictions, labels) locally, or None for LM tasks / non-rank-0."""
        # language-model tasks have no supported prediction path
        if self.task_type in [consts.CAUSAL_LM, consts.SEQ_2_SEQ_LM]:
            LOGGER.warning(f"Not support prediction of task_types={[consts.CAUSAL_LM, consts.SEQ_2_SEQ_LM]}")
            return

        # only rank 0 produces output in distributed mode
        if distributed_util.is_distributed() and not distributed_util.is_rank_0():
            return

        ids, ret_rs, ret_label = self._predict(dataset)

        if self.fed_mode:
            return self.format_predict_result(
                ids, ret_rs, ret_label, task_type=self.task_type)
        else:
            return ret_rs, ret_label
    def server_aggregate_procedure(self, extra_data={}):
        """Server-side FedAVG loop: aggregate client models/losses each round.

        Runs for ``self.epochs`` rounds, aggregating only on rounds selected
        by ``aggregate_every_n_epoch`` (every round when it is None), and
        stops early when the aggregated loss converges.
        """
        # converge status
        check_converge = False
        converge_func = None
        if self.early_stop:
            check_converge = True
            # is_converge is a bound method of the configured convergence checker
            converge_func = converge_func_factory(
                self.early_stop, self.tol).is_converge
            LOGGER.info(
                'check early stop, converge func is {}'.format(converge_func))

        LOGGER.info('server running aggregate procedure')
        server_agg = SecureAggServer(self.secure_aggregate, communicate_match_suffix=self.comm_suffix)

        # aggregate and broadcast models
        for i in range(self.epochs):
            # skip rounds that are not aggregation rounds
            if not (self.aggregate_every_n_epoch is not None and (i + 1) % self.aggregate_every_n_epoch != 0):
                # model aggregate
                server_agg.model_aggregation()
                # loss aggregate
                agg_loss, converge_status = server_agg.loss_aggregation(
                    check_converge=check_converge, converge_func=converge_func)
                self.callback_loss(agg_loss, i)

                # save check point process
                if self.save_freq is not None and ((i + 1) % self.save_freq == 0):
                    self.checkpoint(epoch_idx=i)
                    LOGGER.info('save checkpoint : epoch {}'.format(i))

                # check stop condition
                if converge_status:
                    LOGGER.debug('stop triggered, stop aggregation')
                    break

        LOGGER.info('server aggregation process done')
def _decode(self, data):
if isinstance(data, transformers.tokenization_utils_base.BatchEncoding):
return dict(data)
else:
return data
def _get_batch_size(self, data):
if isinstance(data, list):
return len(data)
elif isinstance(data, dict):
if "input_ids" in data:
return data["input_ids"].shape[0]
else:
for _, value in data.items():
if hasattr(value, "shape"):
return value.shape[0]
raise ValueError("cat not infer batch size from data")
    def _get_collate_fn(self, dataset):
        """Resolve the collate function for the DataLoader.

        Priority: an explicitly configured ``self.collate_fn`` (looked up by
        name on the ``transformers`` package and constructed with the
        dataset's tokenizer plus ``self.collate_fn_params``), then the
        dataset's own ``collate_fn`` attribute, else None.

        Raises:
            ValueError: a collate_fn is configured but the dataset exposes no
                tokenizer to build it with.
        """
        if not self.collate_fn and not hasattr(dataset, "collate_fn"):
            return None
        if self.collate_fn:
            if not hasattr(dataset, "tokenizer"):
                raise ValueError(f"Collate Fn Only Support in task types=[{consts.CAUSAL_LM}, {consts.SEQ_2_SEQ_LM}]")
            # self.collate_fn names a transformers data collator class
            collate_fn = getattr(transformers, self.collate_fn)(dataset.tokenizer, **self.collate_fn_params)
            return collate_fn
        else:
            return dataset.collate_fn
    def _get_train_data_loader(self, train_set):
        """Build ``self.data_loader`` for training.

        Single-worker runs get a plain (optionally shuffled) DataLoader;
        multi-worker distributed runs get a DistributedSampler so each rank
        sees a disjoint shard (the sampler controls ordering, so ``shuffle``
        is not passed in that branch).
        """
        collate_fn = self._get_collate_fn(train_set)

        if not distributed_util.is_distributed() or distributed_util.get_num_workers() <= 1:
            self.data_loader = DataLoader(
                train_set,
                batch_size=self.batch_size,
                pin_memory=self.pin_memory,
                shuffle=self.shuffle,
                num_workers=self.data_loader_worker,
                collate_fn=collate_fn
            )
        else:
            # shard the dataset across ranks
            train_sampler = DistributedSampler(
                train_set,
                num_replicas=dist.get_world_size(),
                rank=dist.get_rank()
            )
            self.data_loader = DataLoader(
                train_set,
                batch_size=self.batch_size,
                pin_memory=self.pin_memory,
                num_workers=self.data_loader_worker,
                collate_fn=collate_fn,
                sampler=train_sampler
            )
    def _share_model(self):
        """Distribute rank-0's trainable parameters to all workers via scatter."""
        if distributed_util.is_rank_0():
            for p in self.model.parameters():
                if p.requires_grad:
                    # rank 0 sends an identical copy of the tensor to every rank
                    scatter_list = [p.data for _ in range(distributed_util.get_num_workers())]
                    dist.scatter(p.data, scatter_list, async_op=False)
        else:
            for p in self.model.parameters():
                if p.requires_grad:
                    # receive the parameter tensor in place from rank 0
                    dist.scatter(p.data, src=0, async_op=False)
    def _sync_converge_status(self, converge_status=None):
        """Broadcast the convergence flag from rank 0 to every worker.

        Rank 0 scatters its ``converge_status``; other ranks receive into a
        placeholder. Returns the flag as a float (0.0/1.0) via ``.item()``.
        """
        if distributed_util.is_rank_0():
            t_status = self.to_cuda(torch.Tensor([converge_status]), self.model.device)
            dist.scatter(t_status, [t_status for _ in range(distributed_util.get_num_workers())], async_op=False)
        else:
            # placeholder tensor, overwritten by the scatter from rank 0
            t_status = self.to_cuda(torch.Tensor([False]), self.model.device)
            dist.scatter(t_status, src=0, async_op=False)
        return t_status[0].item()
    def _sync_loss(self, loss):
        """Gather per-rank losses onto rank 0 and return their sum.

        Single-worker runs return ``loss`` unchanged. In distributed runs,
        rank 0 gathers all rank losses and returns their sum; non-rank-0
        workers only send and fall through, implicitly returning None.
        """
        if distributed_util.get_num_workers() == 1:
            return loss
        loss = self.to_cuda(torch.tensor(loss), self.model.device)
        if distributed_util.is_rank_0():
            loss_list = [torch.zeros_like(loss) for _ in range(distributed_util.get_num_workers())]
            dist.gather(loss, gather_list=loss_list, async_op=False)
            loss_sum = 0
            for _l in loss_list:
                loss_sum += _l.item()
            return loss_sum
        else:
            dist.gather(loss, dst=0, async_op=False)
            # LOGGER.info(f"Loss on rank{dist.get_rank()}={loss}")
| 25,839 | 41.291326 | 146 | py |
FATE | FATE-master/python/federatedml/nn/homo/trainer/trainer_base.py | import os
import abc
import importlib
import torch as t
import numpy as np
from torch.nn import Module
from typing import List
from federatedml.util import consts
from federatedml.util import LOGGER
from federatedml.model_base import serialize_models
from federatedml.nn.backend.utils.common import ML_PATH
from federatedml.feature.instance import Instance
from federatedml.evaluation.evaluation import Evaluation
from federatedml.model_base import Metric, MetricMeta
from federatedml.param import EvaluateParam
class StdReturnFormat(object):
    """Standardized prediction return: id table, score table and class list."""

    def __init__(self, id_table_list, pred_table, classes):
        # attribute names are part of the public surface; callers may read
        # them directly or unpack via __call__()
        self.id = id_table_list
        self.pred_table = pred_table
        self.classes = classes

    def __call__(self,):
        """Unpack as an (id, pred_table, classes) tuple."""
        return self.id, self.pred_table, self.classes
class ExporterBase(object):
    """Interface for exporting a trained model into FATE's model-dict format.

    Subclasses override export_model_dict; this base is a no-op stub.
    """

    def __init__(self, *args, **kwargs):
        pass

    def export_model_dict(self, model=None, optimizer=None, model_define=None, optimizer_define=None, loss_define=None,
                          epoch_idx=-1, converge_status=False, loss_history=None, best_epoch=-1, extra_data={}):
        # stub: subclasses build and return the serializable model dict
        pass
class TrainerBase(object):
def __init__(self, **kwargs):
self._fed_mode = True
self.role = None
self.party_id = None
self.party_id_list = None
self._flowid = None
self._cache_model = None
self._model = None
self._tracker = None
self._model_checkpoint = None
self._exporter = None
self._evaluation_summary = {}
# running status
self._set_model_checkpoint_epoch = set()
# nn config
self.nn_define, self.opt_define, self.loss_define = {}, {}, {}
# ret summary
self._summary = {}
# deepspeed enabled
self._enable_deepspeed = False
self._deepspeed_zero_3 = False
@staticmethod
def is_pos_int(val):
return val > 0 and isinstance(val, int)
@staticmethod
def is_float(val):
return isinstance(val, float)
@staticmethod
def is_bool(val):
return isinstance(val, bool)
@staticmethod
def check_trainer_param(
var_list,
name_list,
judge_func,
warning_str,
allow_none=True):
for var, name in zip(var_list, name_list):
if allow_none and var is None:
continue
assert judge_func(var), warning_str.format(name)
@property
def model(self):
if not hasattr(self, '_model'):
raise AttributeError(
'model variable is not initialized, remember to call'
' super(your_class, self).__init__()')
if self._model is None:
raise AttributeError(
'model is not set, use set_model() function to set training model')
return self._model
@model.setter
def model(self, val):
self._model = val
@property
def fed_mode(self):
if not hasattr(self, '_fed_mode'):
raise AttributeError(
'run_local_mode variable is not initialized, remember to call'
' super(your_class, self).__init__()')
return self._fed_mode
@fed_mode.setter
def fed_mode(self, val):
assert isinstance(val, bool), 'fed mode must be a bool'
self._fed_mode = val
def enable_deepspeed(self, is_zero_3=False):
self._enable_deepspeed = True
self._deepspeed_zero_3 = is_zero_3
def local_mode(self):
self.fed_mode = False
def set_nn_config(self, nn_define, optimizer_define, loss_define):
self.nn_define = nn_define
self.opt_define = optimizer_define
self.loss_define = loss_define
def set_tracker(self, tracker):
self._tracker = tracker
def set_checkpoint(self, chkp):
self._model_checkpoint = chkp
def set_party_id_list(self, party_id_list):
self.party_id_list = party_id_list
def set_model_exporter(self, exporter):
assert isinstance(
exporter, ExporterBase), 'exporter is not an instance of ExporterBase'
self._exporter = exporter
def get_cached_model(self):
return self._cache_model
@staticmethod
def task_type_infer(predict_result: t.Tensor, true_label):
# infer task type and classes(of classification task)
predict_result = predict_result.cpu()
true_label = true_label.cpu()
pred_shape = predict_result.shape
with t.no_grad():
if true_label.max() == 1.0 and true_label.min() == 0.0:
return consts.BINARY
if (len(pred_shape) > 1) and (pred_shape[1] > 1):
if t.isclose(
predict_result.sum(
axis=1).cpu(), t.Tensor(
[1.0])).all():
return consts.MULTY
else:
return None
elif (len(pred_shape) == 1) or (pred_shape[1] == 1):
return consts.REGRESSION
return None
def _update_metric_summary(self, metric_dict):
if len(metric_dict) == 0:
return
iter_name = list(metric_dict.keys())[0]
metric_dict = metric_dict[iter_name]
if len(self._evaluation_summary) == 0:
self._evaluation_summary = {namespace: {}
for namespace in metric_dict}
for namespace in metric_dict:
for metric_name in metric_dict[namespace]:
epoch_metric = metric_dict[namespace][metric_name]
if namespace not in self._evaluation_summary:
self._evaluation_summary[namespace] = {}
if metric_name not in self._evaluation_summary[namespace]:
self._evaluation_summary[namespace][metric_name] = []
self._evaluation_summary[namespace][metric_name].append(
epoch_metric)
def get_evaluation_summary(self):
return self._evaluation_summary
def get_summary(self):
return self._summary
"""
User Interfaces
"""
def _local_save(
self,
model,
optimizer,
epoch_idx,
converge_status,
loss_history,
best_epoch,
extra_data,
save_path):
LOGGER.debug('save model to local dir')
if hasattr(model, "enable_save_pretrained") and model.enable_save_pretrained:
model.save_pretrained(save_path)
else:
unwrap_model = TrainerBase.unwrap_model(model)
if hasattr(model, "enable_save_pretrained") and model.enable_save_pretrained:
unwrap_model.save_pretrained(save_path)
else:
model_state_dict = model.state_dict()
model_dict = {
'model': model_state_dict,
'optimizer': optimizer.state_dict(),
'model_define': self.nn_define,
'optimizer_define': self.opt_define,
'loss_define': self.loss_define,
'epoch_idx': epoch_idx,
'converge_status': converge_status,
'loss_history': loss_history,
'best_epoch': best_epoch,
'extra_data': extra_data
}
t.save(model_dict, save_path)
local_save_path = save_path if not self._enable_deepspeed else os.environ[consts.FLOW_MODEL_SYNC_PATH]
model_dict = self._exporter.export_model_dict(model_define=self.nn_define,
optimizer_define=self.opt_define,
loss_define=self.loss_define,
epoch_idx=epoch_idx,
converge_status=converge_status,
loss_history=loss_history,
best_epoch=best_epoch,
extra_data=extra_data,
local_save_path=local_save_path
)
self._cache_model = model_dict
def set_model(self, model: Module):
if not issubclass(type(model), Module):
raise ValueError('model must be a subclass of pytorch nn.Module')
self.model = model
def save(
self,
model=None,
epoch_idx=-1,
optimizer=None,
converge_status=False,
loss_history=None,
best_epoch=-1,
extra_data={}):
assert isinstance(
epoch_idx, int) and epoch_idx >= 0, 'epoch idx must be an int >= 0'
if self._exporter:
LOGGER.debug('save model to fate')
model_dict = self._exporter.export_model_dict(model=model,
optimizer=optimizer,
model_define=self.nn_define,
optimizer_define=self.opt_define,
loss_define=self.loss_define,
epoch_idx=epoch_idx,
converge_status=converge_status,
loss_history=loss_history,
best_epoch=best_epoch,
extra_data=extra_data
)
self._cache_model = model_dict
def checkpoint(
self,
model=None,
epoch_idx=-1,
optimizer=None,
converge_status=False,
loss_history=None,
best_epoch=-1,
extra_data={}):
assert isinstance(
epoch_idx, int) and epoch_idx >= 0, 'epoch idx must be an int >= 0'
"""
if isinstance(TrainerBase.unwrap_model(model), PELLM):
raise ValueError("save checkpoint of Pretrained model should provide local dir")
"""
if self._model_checkpoint:
if self._exporter is None:
raise RuntimeError('exporter is None, cannot save checkpoint')
if epoch_idx in self._set_model_checkpoint_epoch:
LOGGER.info(
'checkpoint at epoch {} set, skip setting checkpoint'.format(epoch_idx))
return
self.save(model=model, epoch_idx=epoch_idx, optimizer=optimizer, converge_status=converge_status,
loss_history=loss_history, best_epoch=best_epoch, extra_data=extra_data)
self._model_checkpoint.add_checkpoint(len(self._set_model_checkpoint_epoch),
to_save_model=serialize_models(self._cache_model)) # step_index, to_save_model
self._set_model_checkpoint_epoch.add(epoch_idx)
LOGGER.info('checkpoint at epoch {} saved'.format(epoch_idx))
def local_save(self,
model=None,
epoch_idx=-1,
optimizer=None,
converge_status=False,
loss_history=None,
best_epoch=-1,
extra_data={}):
assert isinstance(
epoch_idx, int) and epoch_idx >= 0, 'epoch idx must be an int >= 0'
if self._exporter:
# default saving folder is under the job folder
model_name = "model.pkl"
if self._enable_deepspeed:
save_path = os.path.join(os.environ[consts.DEEPSPEED_MODEL_DIR], model_name)
else:
save_path = os.path.abspath(os.path.join('../../../../', model_name))
self._local_save(
model,
optimizer,
epoch_idx,
converge_status,
loss_history,
best_epoch,
extra_data,
save_path)
def local_checkpoint(self,
model=None,
epoch_idx=-1,
optimizer=None,
converge_status=False,
loss_history=None,
best_epoch=-1,
extra_data={}):
if self._exporter:
# default saving folder is under the job folder
model_name = 'checkpoint_{}.pkl'.format(epoch_idx)
if self._enable_deepspeed:
save_path = os.path.join(os.environ[consts.DEEPSPEED_MODEL_DIR], model_name)
else:
save_path = os.path.abspath(os.path.join('../../../../', model_name))
self._local_save(
model,
optimizer,
epoch_idx,
converge_status,
loss_history,
best_epoch,
extra_data,
save_path)
self._model_checkpoint.add_checkpoint(len(self._set_model_checkpoint_epoch),
to_save_model=serialize_models(self._cache_model)) # step_index, to_save_model
self._set_model_checkpoint_epoch.add(epoch_idx)
LOGGER.info('checkpoint at epoch {} saved'.format(epoch_idx))
def format_predict_result(self, sample_ids: List, predict_result: t.Tensor,
true_label: t.Tensor, task_type: str = None):
predict_result = predict_result.cpu().detach()
if task_type == 'auto':
task_type = self.task_type_infer(predict_result, true_label)
if task_type is None:
LOGGER.warning(
'unable to infer predict result type, predict process will be skipped')
return None
classes = None
if task_type == consts.BINARY:
classes = [0, 1]
elif task_type == consts.MULTY:
classes = [i for i in range(predict_result.shape[1])]
true_label = true_label.cpu().detach().flatten().tolist()
if task_type == consts.MULTY:
predict_result = predict_result.tolist()
else:
predict_result = predict_result.flatten().tolist()
id_table = [(id_, Instance(label=l))
for id_, l in zip(sample_ids, true_label)]
score_table = [(id_, pred)
for id_, pred in zip(sample_ids, predict_result)]
return StdReturnFormat(id_table, score_table, classes)
def callback_metric(self, metric_name: str, value: float, metric_type='train', epoch_idx=0):
assert metric_type in [
'train', 'validate'], 'metric_type should be train or validate'
iter_name = 'iteration_{}'.format(epoch_idx)
if self._tracker is not None:
self._tracker.log_metric_data(
metric_type, iter_name, [
Metric(
metric_name, np.round(
value, 6))])
self._tracker.set_metric_meta(
metric_type, iter_name, MetricMeta(
name=metric_name, metric_type='EVALUATION_SUMMARY'))
def callback_loss(self, loss: float, epoch_idx: int):
if self._tracker is not None:
self._tracker.log_metric_data(
metric_name="loss",
metric_namespace="train",
metrics=[Metric(epoch_idx, loss)],
)
def summary(self, summary_dict: dict):
assert isinstance(summary_dict, dict), 'summary must be a dict'
self._summary = summary_dict
def evaluation(self, sample_ids: list, pred_scores: t.Tensor, label: t.Tensor, dataset_type='train',
metric_list=None, epoch_idx=0, task_type=None):
eval_obj = Evaluation()
if task_type == 'auto':
task_type = self.task_type_infer(pred_scores, label)
if task_type is None:
LOGGER.debug('cannot infer task type, return')
return
assert dataset_type in [
'train', 'validate'], 'dataset_type must in ["train", "validate"]'
eval_param = EvaluateParam(eval_type=task_type)
if task_type == consts.BINARY:
eval_param.metrics = ['auc', 'ks']
elif task_type == consts.MULTY:
eval_param.metrics = ['accuracy', 'precision', 'recall']
eval_param.check_single_value_default_metric()
eval_obj._init_model(eval_param)
pred_scores = pred_scores.cpu().detach().numpy()
label = label.cpu().detach().numpy().flatten()
if task_type == consts.REGRESSION or task_type == consts.BINARY:
pred_scores = pred_scores.flatten()
label = label.flatten()
pred_scores = pred_scores.tolist()
label = label.tolist()
assert len(pred_scores) == len(
label), 'the length of predict score != the length of label, pred {} and label {}'.format(len(pred_scores), len(label))
eval_data = []
for id_, s, l in zip(sample_ids, pred_scores, label):
if task_type == consts.REGRESSION:
eval_data.append([id_, (l, s, s)])
if task_type == consts.MULTY:
pred_label = np.argmax(s)
eval_data.append([id_, (l, pred_label, s)])
elif task_type == consts.BINARY:
pred_label = (s > 0.5) + 1
eval_data.append([id_, (l, pred_label, s)])
eval_result = eval_obj.evaluate_metrics(dataset_type, eval_data)
if self._tracker is not None:
eval_obj.set_tracker(self._tracker)
# send result to fate-board
eval_obj.callback_metric_data(
{'iteration_{}'.format(epoch_idx): [eval_result]})
self._update_metric_summary(eval_obj.metric_summaries)
return self._evaluation_summary
def to_cuda(self, var, device=0):
if hasattr(var, 'cuda'):
return var.cuda(device)
elif isinstance(var, tuple) or isinstance(var, list):
ret = tuple(self.to_cuda(i) for i in var)
return ret
elif isinstance(var, dict):
for k in var:
if hasattr(var[k], 'cuda'):
var[k] = var[k].cuda(device)
return var
else:
return var
@abc.abstractmethod
def train(self, train_set, validate_set=None, optimizer=None, loss=None, extra_data={}):
"""
train_set : A Dataset Instance, must be a instance of subclass of Dataset (federatedml.nn.dataset.base),
for example, TableDataset() (from federatedml.nn.dataset.table)
validate_set : A Dataset Instance, but optional must be a instance of subclass of Dataset
(federatedml.nn.dataset.base), for example, TableDataset() (from federatedml.nn.dataset.table)
optimizer : A pytorch optimizer class instance, for example, t.optim.Adam(), t.optim.SGD()
loss : A pytorch Loss class, for example, nn.BECLoss(), nn.CrossEntropyLoss()
"""
pass
@abc.abstractmethod
def predict(self, dataset):
pass
@abc.abstractmethod
def server_aggregate_procedure(self, extra_data={}):
pass
@staticmethod
def unwrap_model(model):
if hasattr(model, "module"):
return TrainerBase.unwrap_model(model.module)
else:
return model
"""
Load Trainer
"""
def get_trainer_class(trainer_module_name: str):
    """Import a trainer module by name and return the last TrainerBase
    subclass defined in it.

    Raises:
        ValueError: the module defines no TrainerBase subclass.
    """
    if trainer_module_name.endswith('.py'):
        trainer_module_name = trainer_module_name.replace('.py', '')
    module_path = '{}.homo.trainer.{}'.format(ML_PATH, trainer_module_name)
    trainer_module = importlib.import_module(module_path)

    found = [
        obj for obj in trainer_module.__dict__.values()
        if isinstance(obj, type) and issubclass(obj, TrainerBase) and obj is not TrainerBase
    ]
    if not found:
        raise ValueError('Did not find any class in {}.py that is the subclass of Trainer class'.
                         format(trainer_module_name))
    # return the last defined trainer
    return found[-1]
| 20,753 | 35.410526 | 131 | py |
FATE | FATE-master/python/federatedml/nn/homo/trainer/fedavg_graph_trainer.py | import torch
import torch as t
import numpy as np
from torch_geometric.loader import NeighborLoader
from federatedml.framework.homo.aggregator.secure_aggregator import SecureAggregatorClient as SecureAggClient
from federatedml.nn.dataset.base import Dataset
from federatedml.nn.homo.trainer.fedavg_trainer import FedAVGTrainer
from federatedml.util import LOGGER
class FedAVGGraphTrainer(FedAVGTrainer):
    """
    FedAVG trainer for graph datasets, sampling mini-batches with PyG's
    NeighborLoader.

    Parameters
    ----------
    epochs: int >0, epochs to train
    batch_size: int, -1 means full batch
    secure_aggregate: bool, default is True, whether to use secure aggregation. if enabled, will add random number
                      mask to local models. These random number masks will eventually cancel out to get 0.
    weighted_aggregation: bool, whether add weight to each local model when doing aggregation.
                          if True, According to origin paper, weight of a client is: n_local / n_global, where n_local
                          is the sample number locally and n_global is the sample number of all clients.
                          if False, simply averaging these models.
    early_stop: None, 'diff' or 'abs'. if None, disable early stop; if 'diff', use the loss difference between
                two epochs as early stop condition, if differences < tol, stop training ; if 'abs', if loss < tol,
                stop training
    tol: float, tol value for early stop
    aggregate_every_n_epoch: None or int. if None, aggregate model on the end of every epoch, if int, aggregate
                             every n epochs.
    cuda: bool, use cuda or not
    pin_memory: bool, for pytorch DataLoader
    shuffle: bool, for pytorch DataLoader
    data_loader_worker: int, for pytorch DataLoader, number of workers when loading data
    validation_freqs: None or int. if int, validate your model and send validate results to fate-board every n epoch.
                      if is binary classification task, will use metrics 'auc', 'ks', 'gain', 'lift', 'precision'
                      if is multi classification task, will use metrics 'precision', 'recall', 'accuracy'
                      if is regression task, will use metrics 'mse', 'mae', 'rmse', 'explained_variance', 'r2_score'
    checkpoint_save_freqs: save model every n epoch, if None, will not save checkpoint.
    task_type: str, 'auto', 'binary', 'multi', 'regression'
               this option decides the return format of this trainer, and the evaluation type when running validation.
               if auto, will automatically infer your task type from labels and predict results.
    num_neighbors: list of int, neighbors sampled per hop by NeighborLoader.
    """

    def __init__(self, epochs=10, batch_size=512,  # training parameter
                 early_stop=None, tol=0.0001,  # early stop parameters
                 secure_aggregate=True, weighted_aggregation=True, aggregate_every_n_epoch=None,  # federation
                 cuda=None, pin_memory=True, shuffle=True, data_loader_worker=0,  # GPU & dataloader
                 validation_freqs=None,  # validation configuration
                 checkpoint_save_freqs=None,  # checkpoint configuration
                 task_type='auto',
                 num_neighbors=[10, 10],
                 ):
        super(FedAVGGraphTrainer, self).__init__(
            epochs=epochs, batch_size=batch_size,  # training parameter
            early_stop=early_stop, tol=tol,  # early stop parameters
            secure_aggregate=secure_aggregate, weighted_aggregation=weighted_aggregation, aggregate_every_n_epoch=aggregate_every_n_epoch,  # federation
            cuda=cuda, pin_memory=pin_memory, shuffle=shuffle, data_loader_worker=data_loader_worker,  # GPU & dataloader
            validation_freqs=validation_freqs,  # validation configuration
            checkpoint_save_freqs=checkpoint_save_freqs,  # checkpoint configuration
            task_type=task_type,
        )
        # separate federation suffix so graph traffic does not collide with
        # the plain fedavg channel
        self.comm_suffix = 'fedavg_graph'
        LOGGER.debug("num_neighbors={}".format(num_neighbors))
        self.num_neighbors = num_neighbors

    def train(
            self,
            train_set: Dataset,
            validate_set: Dataset = None,
            optimizer: t.optim.Optimizer = None,
            loss=None,
            extra_dict={}):
        """Train with FedAVG over NeighborLoader mini-batches.

        Aggregates model & loss with the secure aggregation client every
        aggregation round, optionally validates / checkpoints, and saves the
        final model plus a summary.
        """
        ds = train_set
        if self.cuda:
            self.model = self.model.cuda()

        if optimizer is None:
            raise ValueError(
                'FedAVGGraphTrainer requires an optimizer, but got None, please specify optimizer in the '
                'job configuration')
        if loss is None:
            raise ValueError(
                'FedAVGGraphTrainer requires a loss function, but got None, please specify loss function in the'
                ' job configuration')

        if self.batch_size > len(ds.input_nodes_train) or self.batch_size == -1:
            self.batch_size = len(ds.input_nodes_train)
        dl = NeighborLoader(
            data=ds.data,
            num_neighbors=self.num_neighbors,
            input_nodes=ds.input_nodes_train,
            batch_size=self.batch_size,
            pin_memory=self.pin_memory,
            shuffle=self.shuffle,
            num_workers=self.data_loader_worker)

        # compute round to aggregate
        cur_agg_round = 0
        if self.aggregate_every_n_epoch is not None:
            aggregate_round = self.epochs // self.aggregate_every_n_epoch
        else:
            aggregate_round = self.epochs

        # initialize fed avg client
        if self.fed_mode:
            if self.weighted_aggregation:
                sample_num = len(ds.input_nodes_train)
            else:
                sample_num = 1.0
            client_agg = SecureAggClient(
                True, aggregate_weight=sample_num, communicate_match_suffix=self.comm_suffix)
        else:
            client_agg = None

        # running var
        cur_epoch = 0
        loss_history = []
        need_stop = False
        evaluation_summary = {}
        LOGGER.debug(self.model)

        # training process
        for i in range(self.epochs):
            cur_epoch = i
            LOGGER.info('epoch is {}'.format(i))
            epoch_loss = 0.0
            batch_idx = 0
            acc_num = 0
            for _, batch in enumerate(dl):
                # NeighborLoader batches: the first batch_size nodes are the
                # seed nodes, the rest are sampled neighbors
                label = batch.y[:self.batch_size]
                optimizer.zero_grad()
                pred = self.model(batch.x, batch.edge_index)[:self.batch_size]
                batch_loss = loss(pred, label)
                batch_loss.backward()
                optimizer.step()
                batch_loss_np = batch_loss.detach().numpy(
                ) if not self.cuda else batch_loss.cpu().detach().numpy()
                # weight the last (possibly short) batch by its true size
                if acc_num + self.batch_size > len(ds.input_nodes_train):
                    batch_len = len(ds.input_nodes_train) - acc_num
                else:
                    batch_len = self.batch_size
                epoch_loss += batch_loss_np * batch_len
                batch_idx += 1
                # fix: acc_num was never incremented, so the short-batch
                # weighting above could never trigger (matches FedAVGTrainer)
                acc_num += self.batch_size

                if self.fed_mode:
                    LOGGER.debug(
                        'epoch {} batch {} finished'.format(
                            i, batch_idx))

            # loss compute
            epoch_loss = epoch_loss / len(ds.input_nodes_train)
            self.callback_loss(epoch_loss, i)
            loss_history.append(float(epoch_loss))
            LOGGER.info('epoch loss is {}'.format(epoch_loss))

            # federation process, if running local mode, cancel federation
            if client_agg is not None:
                if not (self.aggregate_every_n_epoch is not None and (i + 1) % self.aggregate_every_n_epoch != 0):
                    # model averaging
                    self.model = client_agg.model_aggregation(self.model)
                    # agg loss and get converge status
                    converge_status = client_agg.loss_aggregation(epoch_loss)
                    cur_agg_round += 1
                    LOGGER.info(
                        'model averaging finished, aggregate round {}/{}'.format(
                            cur_agg_round, aggregate_round))
                    if converge_status:
                        LOGGER.info('early stop triggered, stop training')
                        need_stop = True

            # validation process
            if self.validation_freq and ((i + 1) % self.validation_freq == 0):
                LOGGER.info('running validation')
                ids_t, pred_t, label_t = self._predict(ds, 'train')
                evaluation_summary = self.evaluation(
                    ids_t,
                    pred_t,
                    label_t,
                    dataset_type='train',
                    epoch_idx=i,
                    task_type=self.task_type)
                if ds.input_nodes_vali is not None:
                    ids_v, pred_v, label_v = self._predict(ds, 'vali')
                    evaluation_summary = self.evaluation(
                        ids_v,
                        pred_v,
                        label_v,
                        dataset_type='validate',
                        epoch_idx=i,
                        task_type=self.task_type)

            # save check point process
            if self.save_freq is not None and ((i + 1) % self.save_freq == 0):
                # fix: previous positional call passed i as `model` and
                # self.model as `epoch_idx`, failing checkpoint()'s int assert
                self.checkpoint(
                    model=self.model, epoch_idx=i, optimizer=optimizer,
                    converge_status=need_stop, loss_history=loss_history)
                LOGGER.info('save checkpoint : epoch {}'.format(i))

            # if meet stop condition then stop
            if need_stop:
                break

        # post-process
        best_epoch = int(np.array(loss_history).argmin())
        self.save(model=self.model, optimizer=optimizer, epoch_idx=cur_epoch, loss_history=loss_history,
                  converge_status=need_stop, best_epoch=best_epoch)
        self.summary({
            'best_epoch': best_epoch,
            'loss_history': loss_history,
            'need_stop': need_stop,
            'metrics_summary': evaluation_summary
        })

    def _predict(self, dataset: Dataset, which_ds='train'):
        """Forward pass over the chosen node split ('train'/'vali'/'test').

        Returns (sample_ids, prediction tensor, label tensor).
        """
        pred_result = []

        # switch eval mode
        dataset.eval()
        self.model.eval()

        if not dataset.has_sample_ids():
            dataset.init_sid_and_getfunc(prefix=dataset.get_type())

        if which_ds == 'train':
            input_nodes = dataset.input_nodes_train
        elif which_ds == 'vali':
            input_nodes = dataset.input_nodes_vali
        elif which_ds == 'test':
            input_nodes = dataset.input_nodes_test
        else:
            # fix: message previously read "Nnknown dataset to predict!"
            raise ValueError("Unknown dataset to predict!")

        dl = NeighborLoader(
            data=dataset.data,
            num_neighbors=self.num_neighbors,
            input_nodes=input_nodes,
            batch_size=self.batch_size,
            pin_memory=self.pin_memory,
            num_workers=self.data_loader_worker)

        labels = []
        with torch.no_grad():
            for _, batch in enumerate(dl):
                label = batch.y[:self.batch_size]
                pred = self.model(batch.x, batch.edge_index)[:self.batch_size]
                pred_result.append(pred)
                labels.append(label)

        ret_rs = torch.concat(pred_result, axis=0)
        ret_label = torch.concat(labels, axis=0)

        # switch back to train mode
        dataset.train()
        self.model.train()
        LOGGER.debug(dataset.get_sample_ids())
        LOGGER.debug(ret_rs)
        LOGGER.debug(ret_label)
        return dataset.get_sample_ids(), ret_rs, ret_label
| 11,560 | 41.977695 | 152 | py |
FATE | FATE-master/python/federatedml/nn/hetero/host.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from torch.utils.data import DataLoader
from federatedml.framework.hetero.procedure import batch_generator
from federatedml.nn.hetero.base import HeteroNNBase
from federatedml.nn.hetero.model import HeteroNNHostModel
from federatedml.param.hetero_nn_param import HeteroNNParam as NNParameter
from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import HeteroNNMeta
from federatedml.protobuf.generated.hetero_nn_model_param_pb2 import HeteroNNParam
from federatedml.util import consts, LOGGER
# dict keys under which the host packs its meta/param protobufs on export
MODELMETA = "HeteroNNHostMeta"
MODELPARAM = "HeteroNNHostParam"
class HeteroNNHost(HeteroNNBase):
    """Host-side component of hetero (vertical) federated NN training.

    The host owns features only (no labels); per-batch interaction with the
    guest happens inside HeteroNNHostModel.train/predict.
    """

    def __init__(self):
        super(HeteroNNHost, self).__init__()

        self.batch_generator = batch_generator.Host()
        self.model = None

        self.role = consts.HOST
        self.input_shape = None
        # fallback table partition count; overwritten by the input table's
        # own partitioning in fit()
        self.default_table_partitions = 4

    def _init_model(self, hetero_nn_param):
        super(HeteroNNHost, self)._init_model(hetero_nn_param)

    def export_model(self):
        """Export meta/param protobufs; returns None in cross-validation mode."""
        if self.need_cv:
            return None
        model = {MODELMETA: self._get_model_meta(),
                 MODELPARAM: self._get_model_param()}

        return model

    def load_model(self, model_dict):
        """Restore the host model from an exported model dict."""
        model_dict = list(model_dict["model"].values())[0]
        param = model_dict.get(MODELPARAM)
        meta = model_dict.get(MODELMETA)
        if self.hetero_nn_param is None:
            # loading outside a configured job: fall back to default params
            self.hetero_nn_param = NNParameter()
            self.hetero_nn_param.check()
            self.predict_param = self.hetero_nn_param.predict_param
        self._build_model()
        self._restore_model_meta(meta)
        self._restore_model_param(param)

    def _build_model(self):
        self.model = HeteroNNHostModel(self.hetero_nn_param, self.flowid)
        self.model.set_transfer_variable(self.transfer_variable)
        self.model.set_partition(self.default_table_partitions)

    def predict(self, data_inst):
        """Run host-side predict; results flow to the guest, nothing is returned."""
        ds = self.prepare_dataset(data_inst, data_type='predict')
        batch_size = len(ds) if self.batch_size == -1 else self.batch_size
        for batch_data in DataLoader(ds, batch_size=batch_size):
            # ignore label if the dataset offers label
            # NOTE(review): DataLoader usually yields lists rather than
            # tuples for (data, label) batches — confirm this guard fires
            if isinstance(batch_data, tuple) and len(batch_data) > 1:
                batch_data = batch_data[0]
            self.model.predict(batch_data)

    def fit(self, data_inst, validate_data=None):
        """Train jointly with the guest; stops on callbacks or guest-side convergence."""
        if hasattr(
                data_inst,
                'partitions') and data_inst.partitions is not None:
            self.default_table_partitions = data_inst.partitions
            LOGGER.debug(
                'reset default partitions is {}'.format(
                    self.default_table_partitions))

        train_ds = self.prepare_dataset(data_inst, data_type='train')
        if validate_data is not None:
            val_ds = self.prepare_dataset(validate_data, data_type='validate')
        else:
            val_ds = None

        self.callback_list.on_train_begin(train_ds, val_ds)

        if not self.component_properties.is_warm_start:
            self._build_model()
            epoch_offset = 0
        else:
            # warm start: resume epochs after the previously trained ones
            self.callback_warm_start_init_iter(self.history_iter_epoch)
            epoch_offset = self.history_iter_epoch + 1

        batch_size = len(train_ds) if self.batch_size == - \
            1 else self.batch_size

        for cur_epoch in range(epoch_offset, epoch_offset + self.epochs):
            self.iter_epoch = cur_epoch
            for batch_idx, batch_data in enumerate(
                    DataLoader(train_ds, batch_size=batch_size)):
                self.model.train(batch_data, cur_epoch, batch_idx)

            self.callback_list.on_epoch_end(cur_epoch)
            if self.callback_variables.stop_training:
                LOGGER.debug('early stopping triggered')
                break

            # convergence decision is made by the guest and synced here
            is_converge = self.transfer_variable.is_converge.get(
                idx=0, suffix=(cur_epoch,))
            if is_converge:
                LOGGER.debug(
                    "Training process is converged in epoch {}".format(cur_epoch))
                break

        self.callback_list.on_train_end()

    def _get_model_meta(self):
        """Pack training meta (batch size, module, nn meta) into a protobuf."""
        model_meta = HeteroNNMeta()
        model_meta.batch_size = self.batch_size
        model_meta.hetero_nn_model_meta.CopyFrom(
            self.model.get_hetero_nn_model_meta())
        model_meta.module = 'HeteroNN'
        return model_meta

    def _get_model_param(self):
        """Pack trained weights, header and best iteration into a protobuf."""
        model_param = HeteroNNParam()
        model_param.iter_epoch = self.iter_epoch
        model_param.header.extend(self._header)
        model_param.hetero_nn_model_param.CopyFrom(
            self.model.get_hetero_nn_model_param())
        model_param.best_iteration = self.callback_variables.best_iteration

        return model_param
| 5,442 | 36.027211 | 82 | py |
FATE | FATE-master/python/federatedml/nn/hetero/guest.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import torch
from torch.utils.data import DataLoader
from fate_arch.computing._util import is_table
from fate_arch.session import computing_session as session
from federatedml.feature.instance import Instance
from federatedml.framework.hetero.procedure import batch_generator
from federatedml.model_base import Metric
from federatedml.model_base import MetricMeta
from federatedml.nn.hetero.base import HeteroNNBase
from federatedml.nn.hetero.model import HeteroNNGuestModel
from federatedml.optim.convergence import converge_func_factory
from federatedml.param.evaluation_param import EvaluateParam
from federatedml.param.hetero_nn_param import HeteroNNParam as NNParameter
from federatedml.protobuf.generated.hetero_nn_model_meta_pb2 import HeteroNNMeta
from federatedml.protobuf.generated.hetero_nn_model_param_pb2 import HeteroNNParam
from federatedml.util import consts, LOGGER
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.nn.dataset.table import TableDataset
from federatedml.statistic.data_overview import check_with_inst_id
from federatedml.nn.backend.utils.data import add_match_id
MODELMETA = "HeteroNNGuestMeta"
MODELPARAM = "HeteroNNGuestParam"
class HeteroNNGuest(HeteroNNBase):
    """Guest party of hetero (vertical) federated neural network training.

    The guest holds the labels, trains the top model plus its bottom model,
    drives the epoch loop, computes the loss, and broadcasts the convergence
    decision to all hosts each epoch.
    """

    def __init__(self):
        super(HeteroNNGuest, self).__init__()
        self.task_type = None                     # classification / regression
        self.converge_func = None                 # convergence checker built in _init_model
        self.batch_generator = batch_generator.Guest()
        self.data_keys = []
        self.label_dict = {}
        self.model = None                         # HeteroNNGuestModel, built lazily
        self.role = consts.GUEST
        self.history_loss = []
        self.input_shape = None
        self._summary_buf = {"history_loss": [],
                             "is_converged": False,
                             "best_iteration": -1}
        self.dataset_cache_dict = {}
        self.default_table_partitions = 4         # fallback partition count for output tables

    def _init_model(self, hetero_nn_param):
        """Read task type and build the convergence function from the param object."""
        super(HeteroNNGuest, self)._init_model(hetero_nn_param)
        self.task_type = hetero_nn_param.task_type
        self.converge_func = converge_func_factory(self.early_stop, self.tol)

    def _build_model(self):
        """Instantiate the guest model and wire in transfer variables / partitions."""
        self.model = HeteroNNGuestModel(
            self.hetero_nn_param, self.component_properties, self.flowid)
        self.model.set_transfer_variable(self.transfer_variable)
        self.model.set_partition(self.default_table_partitions)

    def _set_loss_callback_info(self):
        """Register the LOSS metric meta so the board can plot train loss per iter."""
        self.callback_meta("loss",
                           "train",
                           MetricMeta(name="train",
                                      metric_type="LOSS",
                                      extra_metas={"unit_name": "iters"}))

    @staticmethod
    def _disable_sample_weight(dataset):
        # currently not support sample weight
        if isinstance(dataset, TableDataset):
            dataset.with_sample_weight = False

    def fit(self, data_inst, validate_data=None):
        """Train the guest side: run epochs, track loss, and broadcast convergence.

        :param data_inst: training data (labels live here on the guest)
        :param validate_data: optional validation data
        """
        # adopt the input table's partition count for downstream tables
        if hasattr(
                data_inst,
                'partitions') and data_inst.partitions is not None:
            self.default_table_partitions = data_inst.partitions
            LOGGER.debug(
                'reset default partitions is {}'.format(
                    self.default_table_partitions))
        train_ds = self.prepare_dataset(
            data_inst, data_type='train', check_label=True)
        train_ds.train()  # set dataset to train mode
        self._disable_sample_weight(train_ds)
        if validate_data is not None:
            val_ds = self.prepare_dataset(validate_data, data_type='validate')
            val_ds.train()  # set dataset to train mode
            self._disable_sample_weight(val_ds)
        else:
            val_ds = None
        self.callback_list.on_train_begin(train_ds, val_ds)
        # collect data from table to form data loader
        if not self.component_properties.is_warm_start:
            self._build_model()
            epoch_offset = 0
        else:
            self.callback_warm_start_init_iter(self.history_iter_epoch)
            epoch_offset = self.history_iter_epoch + 1
        # set label number
        self.model.set_label_num(self.label_num)
        if len(train_ds) == 0:
            self.model.set_empty()
        self._set_loss_callback_info()
        # batch_size == -1 means full-batch training
        batch_size = len(train_ds) if self.batch_size == - \
            1 else self.batch_size
        data_loader = DataLoader(
            train_ds,
            batch_size=batch_size,
            num_workers=4)
        for cur_epoch in range(epoch_offset, self.epochs + epoch_offset):
            self.iter_epoch = cur_epoch
            LOGGER.debug("cur epoch is {}".format(cur_epoch))
            self.callback_list.on_epoch_begin(cur_epoch)
            epoch_loss = 0
            acc_sample_num = 0
            for batch_idx, (batch_data, batch_label) in enumerate(data_loader):
                batch_loss = self.model.train(
                    batch_data, batch_label, cur_epoch, batch_idx)
                # the tail batch may hold fewer samples than batch_size;
                # weight its loss by its true length
                if acc_sample_num + batch_size > len(train_ds):
                    batch_len = len(train_ds) - acc_sample_num
                else:
                    batch_len = batch_size
                acc_sample_num += batch_size
                epoch_loss += batch_loss * batch_len
            epoch_loss = epoch_loss / len(train_ds)
            LOGGER.debug("epoch {} loss is {}".format(cur_epoch, epoch_loss))
            self.callback_metric("loss",
                                 "train",
                                 [Metric(cur_epoch, epoch_loss)])
            self.history_loss.append(epoch_loss)
            self.callback_list.on_epoch_end(cur_epoch)
            if self.callback_variables.stop_training:
                LOGGER.debug('early stopping triggered')
                break
            if self.hetero_nn_param.selector_param.method:
                # when use selective bp, loss converge will be disabled
                is_converge = False
            else:
                is_converge = self.converge_func.is_converge(epoch_loss)
            self._summary_buf["is_converged"] = is_converge
            # broadcast the convergence decision to every host
            self.transfer_variable.is_converge.remote(is_converge,
                                                      role=consts.HOST,
                                                      idx=-1,
                                                      suffix=(cur_epoch,))
            if is_converge:
                LOGGER.debug(
                    "Training process is converged in epoch {}".format(cur_epoch))
                break
        self.callback_list.on_train_end()
        self.set_summary(self._get_model_summary())

    @assert_io_num_rows_equal
    def predict(self, data_inst):
        """Predict on the guest side and format scores into the output table.

        Handles regression, binary and multi-class output shapes separately.
        """
        with_match_id = False
        if is_table(data_inst):
            with_match_id = check_with_inst_id(data_inst)
        ds = self.prepare_dataset(data_inst, data_type='predict')
        ds.eval()  # set dataset to eval mode
        self._disable_sample_weight(ds)
        keys = ds.get_sample_ids()
        batch_size = len(ds) if self.batch_size == -1 else self.batch_size
        dl = DataLoader(ds, batch_size=batch_size)
        preds = []
        labels = []
        for batch_data, batch_label in dl:
            batch_pred = self.model.predict(batch_data)
            preds.append(batch_pred)
            labels.append(batch_label)
        preds = np.concatenate(preds, axis=0)
        labels = torch.concat(labels, dim=0).cpu().numpy().flatten().tolist()
        # rebuild an id -> Instance(label) table aligned with the dataset order
        id_table = [(id_, Instance(label=l)) for id_, l in zip(keys, labels)]
        if with_match_id:
            add_match_id(id_table, ds.ds)  # ds is wrap shuffle dataset here
        data_inst = session.parallelize(
            id_table,
            partition=self.default_table_partitions,
            include_key=True)
        if self.task_type == consts.REGRESSION:
            preds = preds.flatten().tolist()
            preds = [float(pred) for pred in preds]
            predict_tb = session.parallelize(zip(keys, preds), include_key=True,
                                             partition=self.default_table_partitions)
            result = self.predict_score_to_output(data_inst, predict_tb)
        else:
            if self.label_num > 2:
                # multi-class: one score list per sample
                preds = preds.tolist()
                preds = [list(map(float, pred)) for pred in preds]
                predict_tb = session.parallelize(zip(keys, preds), include_key=True,
                                                 partition=self.default_table_partitions)
                result = self.predict_score_to_output(
                    data_inst, predict_tb, classes=list(range(self.label_num)))
            else:
                # binary: single positive-class score, thresholded
                preds = preds.flatten().tolist()
                preds = [float(pred) for pred in preds]
                predict_tb = session.parallelize(zip(keys, preds), include_key=True,
                                                 partition=self.default_table_partitions)
                threshold = self.predict_param.threshold
                result = self.predict_score_to_output(
                    data_inst, predict_tb, classes=[
                        0, 1], threshold=threshold)
        return result

    def export_model(self):
        """Export meta + param protobufs; skipped entirely under cross-validation."""
        if self.need_cv:
            return None
        model = {MODELMETA: self._get_model_meta(),
                 MODELPARAM: self._get_model_param()}
        return model

    def load_model(self, model_dict):
        """Restore the guest model from a previously exported model dict."""
        model_dict = list(model_dict["model"].values())[0]
        param = model_dict.get(MODELPARAM)
        meta = model_dict.get(MODELMETA)
        if self.hetero_nn_param is None:
            # loading without a fresh param config: fall back to defaults
            self.hetero_nn_param = NNParameter()
            self.hetero_nn_param.check()
            self.predict_param = self.hetero_nn_param.predict_param
        self._build_model()
        self._restore_model_meta(meta)
        self._restore_model_param(param)

    def _get_model_summary(self):
        """Collect loss history / convergence / validation metrics for the summary."""
        self._summary_buf["history_loss"] = self.history_loss
        if self.callback_variables.validation_summary:
            self._summary_buf["validation_metrics"] = self.callback_variables.validation_summary
        """
        if self.validation_strategy:
            validation_summary = self.validation_strategy.summary()
            if validation_summary:
                self._summary_buf["validation_metrics"] = validation_summary
        """
        return self._summary_buf

    def _get_model_meta(self):
        """Build the protobuf meta for the guest-side model."""
        model_meta = HeteroNNMeta()
        model_meta.task_type = self.task_type
        model_meta.module = 'HeteroNN'
        model_meta.batch_size = self.batch_size
        model_meta.epochs = self.epochs
        model_meta.early_stop = self.early_stop
        model_meta.tol = self.tol
        model_meta.hetero_nn_model_meta.CopyFrom(
            self.model.get_hetero_nn_model_meta())
        return model_meta

    def _get_model_param(self):
        """Build the protobuf param holding the guest-side model state and loss history."""
        model_param = HeteroNNParam()
        model_param.iter_epoch = self.iter_epoch
        model_param.hetero_nn_model_param.CopyFrom(
            self.model.get_hetero_nn_model_param())
        model_param.num_label = self.label_num
        model_param.best_iteration = self.callback_variables.best_iteration
        model_param.header.extend(self._header)
        for loss in self.history_loss:
            model_param.history_loss.append(loss)
        return model_param

    def get_metrics_param(self):
        """Return the evaluation param matching the task type / label count."""
        if self.task_type == consts.CLASSIFICATION:
            if self.label_num == 2:
                return EvaluateParam(eval_type="binary",
                                     pos_label=1, metrics=self.metrics)
            else:
                return EvaluateParam(eval_type="multi", metrics=self.metrics)
        else:
            return EvaluateParam(eval_type="regression", metrics=self.metrics)

    def _restore_model_param(self, param):
        """Restore guest-specific state (label count) on top of the base restore."""
        super(HeteroNNGuest, self)._restore_model_param(param)
        self.label_num = param.num_label
| 12,612 | 37.571865 | 96 | py |
FATE | FATE-master/python/federatedml/nn/hetero/nn_component/bottom_model.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch as t
import numpy as np
from federatedml.util import LOGGER
from federatedml.nn.hetero.nn_component.torch_model import TorchNNModel
class BottomModel(object):
    """Wrapper around a TorchNNModel acting as a party's bottom (feature) model.

    Supports an optional "selective backpropagation" mode in which forward
    inputs are cached and only the samples selected by the guest are trained.
    """

    def __init__(self, optimizer, layer_config):
        self._model: TorchNNModel = TorchNNModel(nn_define=layer_config, optimizer_define=optimizer,
                                                 loss_fn_define=None)
        self.do_backward_select_strategy = False
        self.x = []           # last forward input (kept for selective bp)
        self.x_cached = []    # accumulated selected samples awaiting a full batch
        self.batch_size = None

    def set_backward_select_strategy(self):
        self.do_backward_select_strategy = True

    def set_batch(self, batch_size):
        self.batch_size = batch_size

    def train_mode(self, mode):
        self._model.train_mode(mode)

    def forward(self, x):
        """Forward the batch; in selective-bp mode use grad-free predict and cache x."""
        LOGGER.debug("bottom model start to forward propagation")
        self.x = x
        if self.do_backward_select_strategy:
            # selective bp needs fancy indexing on x later, so it must be array-like
            if not isinstance(x, (np.ndarray, t.Tensor)):
                raise ValueError(
                    'When using selective bp, data from dataset must be a ndarray or a torch tensor, but got {}'.format(
                        type(x)))
            return self._model.predict(x)
        return self._model.forward(x)

    def backward(self, x, error, selective_ids):
        """Backprop the received error; in selective-bp mode train on cached samples."""
        LOGGER.debug("bottom model start to backward propagation")
        if not self.do_backward_select_strategy:
            self._model.backward(error)
            LOGGER.debug('bottom model update parameters:')
            return
        # stash the newly selected samples from the last forward pass
        if selective_ids:
            picked = self.x[selective_ids]
            if len(self.x_cached) == 0:
                self.x_cached = picked
            else:
                self.x_cached = np.vstack((self.x_cached, picked))
        if len(error) == 0:
            return
        # train on one batch worth of cached samples
        batch_x = self.x_cached[: self.batch_size]
        self.x_cached = self.x_cached[self.batch_size:]
        self._model.train((batch_x, error))
        LOGGER.debug('bottom model update parameters:')

    def predict(self, x):
        return self._model.predict(x)

    def export_model(self):
        return self._model.export_model()

    def restore_model(self, model_bytes):
        self._model = self._model.restore_model(model_bytes)

    def __repr__(self):
        return 'bottom model contains {}'.format(self._model.__repr__())
| 3,119 | 32.913043 | 120 | py |
FATE | FATE-master/python/federatedml/nn/hetero/nn_component/top_model.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import torch
from federatedml.nn.hetero.nn_component.torch_model import TorchNNModel
from federatedml.nn.hetero.protection_enhance.coae import train_an_autoencoder_confuser, CoAE, coae_label_reformat, \
CrossEntropy
from federatedml.util import LOGGER
class TopModel(object):
    """Guest-side top model: consumes merged interactive output and labels.

    Optionally wraps labels through a CoAE (confusional autoencoder) to hide
    true labels, and supports selective backpropagation via a selector.
    """

    def __init__(self, loss, optimizer, layer_config, coae_config, label_num):
        self.coae = None
        self.coae_config = coae_config
        self.label_num = label_num
        LOGGER.debug('label num is {}'.format(self.label_num))
        self._model: TorchNNModel = TorchNNModel(nn_define=layer_config, optimizer_define=optimizer,
                                                 loss_fn_define=loss)
        self.label_reformat = None
        # CoAE works on soft (one-hot style) labels, so force CE loss and
        # install the one-hot reformatter
        if self.coae_config:
            self._model.loss_fn = CrossEntropy()
        if self.coae_config:
            self.label_reformat = coae_label_reformat
        self.batch_size = None
        self.selector = None
        # caches for selective bp: samples accumulate until a full batch exists
        self.batch_data_cached_X = []
        self.batch_data_cached_y = []

    def set_backward_selector_strategy(self, selector):
        """Enable selective backpropagation with the given sample selector."""
        self.selector = selector

    def set_batch(self, batch_size):
        self.batch_size = batch_size

    def train_mode(self, mode):
        self._model.train_mode(mode)

    def train_and_get_backward_gradient(self, x, y):
        """Train on (x, y) and return (selected ids, input gradient, loss).

        In selector mode, per-sample losses decide which samples are cached;
        a train step only happens once a full batch of selected samples exists.
        """
        LOGGER.debug("top model start to forward propagation")
        selective_id = []
        input_gradient = []
        # transform label format
        if self.label_reformat:
            y = self.label_reformat(y, label_num=self.label_num)
        # train an auto-encoder confuser
        if self.coae_config and self.coae is None:
            LOGGER.debug('training coae encoder')
            self.coae: CoAE = train_an_autoencoder_confuser(y.shape[1], self.coae_config.epoch,
                                                            self.coae_config.lambda1, self.coae_config.lambda2,
                                                            self.coae_config.lr, self.coae_config.verbose)
        # make fake soft label
        if self.coae:
            # transform labels to fake labels
            y = self.coae.encode(y).detach().numpy()
            LOGGER.debug('fake labels are {}'.format(y))
        # run selector
        if self.selector:
            # when run selective bp, need to convert y to numpy format
            if isinstance(y, torch.Tensor):
                y = y.cpu().numpy()
            losses = self._model.get_forward_loss_from_input(x, y)
            loss = sum(losses) / len(losses)
            selective_strategy = self.selector.select_batch_sample(losses)
            for idx, select in enumerate(selective_strategy):
                if select:
                    selective_id.append(idx)
                    self.batch_data_cached_X.append(x[idx])
                    self.batch_data_cached_y.append(y[idx])
            # only train once a full batch of selected samples is available
            if len(self.batch_data_cached_X) >= self.batch_size:
                data = (np.array(self.batch_data_cached_X[: self.batch_size]),
                        np.array(self.batch_data_cached_y[: self.batch_size]))
                input_gradient = self._model.get_input_gradients(data[0], data[1])[
                    0]
                self._model.train(data)
                self.batch_data_cached_X = self.batch_data_cached_X[self.batch_size:]
                self.batch_data_cached_y = self.batch_data_cached_y[self.batch_size:]
        else:
            input_gradient = self._model.get_input_gradients(x, y)[0]
            self._model.train((x, y))
            loss = self._model.get_loss()[0]
        return selective_id, input_gradient, loss

    def predict(self, input_data):
        """Predict; with CoAE, decode fake outputs back to real label space."""
        output_data = self._model.predict(input_data)
        if self.coae:
            real_output = self.coae.decode(output_data).detach().numpy()
            # binary case: keep only the positive-class column
            if real_output.shape[1] == 2:
                real_output = real_output[::, 1].reshape((-1, 1))
            return real_output
        else:
            return output_data

    def export_coae(self):
        """Serialize the CoAE model to bytes, or None if CoAE is unused."""
        if self.coae:
            model_bytes = TorchNNModel.get_model_bytes(self.coae)
            return model_bytes
        else:
            return None

    def restore_coae(self, model_bytes):
        """Restore the CoAE model from serialized bytes (no-op if empty)."""
        if model_bytes is not None and len(model_bytes) > 0:
            coae = TorchNNModel.recover_model_bytes(model_bytes)
            self.coae = coae

    def export_model(self):
        return self._model.export_model()

    def restore_model(self, model_bytes):
        self._model = self._model.restore_model(model_bytes)

    def __repr__(self):
        return 'top model contains {}'.format(self._model.__repr__())
| 5,348 | 35.636986 | 117 | py |
FATE | FATE-master/python/federatedml/nn/hetero/nn_component/torch_model.py | import numpy as np
import tempfile
from federatedml.util import LOGGER
try: # for the situation that torch is not installed, but other modules still can be used
import torch
import torch as t
import copy
from types import SimpleNamespace
from torch import autograd
from federatedml.nn.backend.torch import serialization as s
from federatedml.nn.backend.torch.base import FateTorchOptimizer
from federatedml.nn.backend.torch.nn import CrossEntropyLoss
from federatedml.nn.backend.torch import optim
except ImportError:
pass
def backward_loss(z, backward_error):
    """Pseudo-loss whose gradient w.r.t. z is exactly backward_error.

    Used so a bottom model can backprop an error tensor received from the
    interactive layer without a real loss function.
    """
    weighted = z * backward_error
    return weighted.sum()
class TorchNNModel(object):
    """Wrapper managing a torch sequential model, its optimizer and loss.

    Handles (de)serialization of FATE nn/optimizer/loss define dicts, forward
    caching for split (bottom-model) backprop, and model byte export/restore.
    """

    def __init__(self, nn_define: dict, optimizer_define: dict = None, loss_fn_define: dict = None, cuda=False):
        self.cuda = cuda
        self.double_model = False
        if self.cuda and not t.cuda.is_available():
            raise ValueError(
                'this machine dose not support cuda, cuda.is_available() is False')
        self.optimizer_define = optimizer_define
        self.nn_define = nn_define
        self.loss_fn_define = loss_fn_define
        self.loss_history = []
        self.model, self.opt_inst, self.loss_fn = self.init(
            self.nn_define, self.optimizer_define, self.loss_fn_define)
        # holds the forward output tensor (with grad) between forward()/backward()
        self.fw_cached = None

    def to_tensor(self, x: np.ndarray):
        """Convert ndarray to tensor, moving to GPU when cuda is enabled."""
        if isinstance(x, np.ndarray):
            x = t.from_numpy(x)
        if self.cuda:
            return x.cuda()
        else:
            return x

    def label_convert(self, y, loss_fn):
        """Convert labels to the dtype/shape the given loss function expects."""
        # pytorch CE loss require 1D-int64-tensor
        if isinstance(loss_fn, CrossEntropyLoss):
            return t.Tensor(y).flatten().type(
                t.int64).flatten()  # accept 1-D array
        else:
            return t.Tensor(y).type(t.float)

    def init(self, nn_define: dict, optimizer_define: dict = None, loss_fn_define: dict = None):
        """Recover (model, optimizer instance, loss fn) from FATE define dicts."""
        model = s.recover_sequential_from_dict(nn_define)
        if self.cuda:
            model = model.cuda()
        if optimizer_define is None:  # default optimizer
            optimizer = optim.SGD(lr=0.01)
        else:
            optimizer: FateTorchOptimizer = s.recover_optimizer_from_dict(optimizer_define)
        opt_inst = optimizer.to_torch_instance(model.parameters())

        if loss_fn_define is None:
            loss_fn = backward_loss
        else:
            loss_fn = s.recover_loss_fn_from_dict(loss_fn_define)

        if self.double_model:
            # bug fix: cast the freshly built local model. The original code
            # referenced self.model here, which does not exist yet on the
            # first call from __init__ and would raise AttributeError.
            model = model.type(t.float64)
        return model, opt_inst, loss_fn

    def print_parameters(self):
        LOGGER.debug(
            'model parameter is {}'.format(
                list(
                    self.model.parameters())))

    def __repr__(self):
        return self.model.__repr__() + '\n' + self.opt_inst.__repr__() + \
            '\n' + str(self.loss_fn)

    def train_mode(self, mode):
        self.model.train(mode)

    def train(self, data_x_and_y):
        """Run one optimizer step on a (x, y) tuple; return the loss value."""
        x, y = data_x_and_y  # this is a tuple
        self.opt_inst.zero_grad()
        yt = self.to_tensor(y)
        xt = self.to_tensor(x)
        out = self.model(xt)
        loss = self.loss_fn(out, yt)
        loss.backward()
        loss_val = loss.cpu().detach().numpy()
        self.loss_history.append(loss_val)
        self.opt_inst.step()
        return loss_val

    def forward(self, x):
        """Forward pass that caches the grad-carrying output for a later backward().

        Especially for bottom models in split learning.
        """
        x = self.to_tensor(x)
        out = self.model(x)
        if self.fw_cached is not None:
            raise ValueError('fed cached should be None when forward')
        self.fw_cached = out
        return out.cpu().detach().numpy()

    def backward(self, error):
        """Backprop a received error tensor through the cached forward output."""
        self.opt_inst.zero_grad()
        error = self.to_tensor(error)
        loss = self.loss_fn(self.fw_cached, error)
        loss.backward()
        self.fw_cached = None
        self.opt_inst.step()

    def predict(self, x):
        """Grad-free forward pass, returned as a numpy array."""
        with torch.no_grad():
            return self.model(self.to_tensor(x)).cpu().detach().numpy()

    def get_forward_loss_from_input(self, x, y, reduction='none'):
        """Compute per-sample (or reduced) losses without updating the model."""
        with torch.no_grad():
            default_reduction = self.loss_fn.reduction
            self.loss_fn.reduction = reduction
            yt = self.to_tensor(y)
            xt = self.to_tensor(x)
            loss = self.loss_fn(self.model(xt), yt)
            self.loss_fn.reduction = default_reduction
            # bug fix: move to CPU before .numpy() so cuda=True does not crash
            return list(map(float, loss.detach().cpu().numpy()))

    def get_input_gradients(self, x, y):
        """Return d(loss)/d(input) as a numpy array (model weights untouched)."""
        yt = self.to_tensor(y)
        xt = self.to_tensor(x).requires_grad_(True)
        fw = self.model(xt)
        loss = self.loss_fn(fw, yt)
        grad = autograd.grad(loss, xt)
        # bug fix: move to CPU before .numpy() so cuda=True does not crash
        return [grad[0].detach().cpu().numpy()]

    def get_loss(self):
        return [self.loss_history[-1]]

    @staticmethod
    def get_model_bytes(model):
        """Serialize a torch model to bytes via torch.save."""
        with tempfile.TemporaryFile() as f:
            torch.save(model, f)
            f.seek(0)
            return f.read()

    @staticmethod
    def recover_model_bytes(model_bytes):
        """Deserialize a torch model previously produced by get_model_bytes."""
        with tempfile.TemporaryFile() as f:
            f.write(model_bytes)
            f.seek(0)
            model = torch.load(f)
        return model

    @staticmethod
    def get_model_save_dict(model: t.nn.Module, model_define, optimizer: t.optim.Optimizer, optimizer_define,
                            loss_define):
        """Serialize model + optimizer state dicts together with their define dicts."""
        with tempfile.TemporaryFile() as f:
            save_dict = {
                'nn_define': model_define,
                'model': model.state_dict(),
                'optimizer_define': optimizer_define,
                'optimizer': optimizer.state_dict(),
                'loss_define': loss_define
            }
            torch.save(save_dict, f)
            f.seek(0)
            return f.read()

    @staticmethod
    def recover_model_save_dict(model_bytes):
        """Inverse of get_model_save_dict."""
        with tempfile.TemporaryFile() as f:
            f.write(model_bytes)
            f.seek(0)
            save_dict = torch.load(f)
        return save_dict

    def restore_model(self, model_bytes):
        """Load model (and, if the define matches, optimizer) state from bytes."""
        save_dict = self.recover_model_save_dict(model_bytes)
        self.nn_define = save_dict['nn_define']
        opt_define = save_dict['optimizer_define']
        # optimizer can be updated
        # old define == new define, load state dict
        if opt_define == self.optimizer_define:
            opt_inst: t.optim.Optimizer = self.opt_inst
            opt_inst.load_state_dict(save_dict['optimizer'])
        # load state dict
        self.model.load_state_dict(save_dict['model'])
        return self

    def export_model(self):
        return self.get_model_save_dict(
            self.model,
            self.nn_define,
            self.opt_inst,
            self.optimizer_define,
            self.loss_fn_define)
| 6,909 | 30.697248 | 112 | py |
FATE | FATE-master/python/federatedml/nn/hetero/protection_enhance/coae.py | from federatedml.util import LOGGER
from federatedml.util import consts
try:
import torch
import torch as t
from torch import nn
from torch.nn import Module
from torch.nn import functional as F
except ImportError:
Module = object
def entropy(tensor):
    """Shannon entropy (base 2) of a probability tensor, summed over all entries."""
    return -(tensor * tensor.log2()).sum()
def cross_entropy(p2, p1, reduction='mean'):
    """Cross entropy of predicted distribution p2 against target distribution p1.

    :param reduction: 'mean' | 'sum' | 'none' (per-sample vector)
    """
    p2 = p2 + consts.FLOAT_ZERO  # to avoid nan
    assert p2.shape == p1.shape
    per_sample = -t.sum(p1 * t.log(p2), dim=1)
    if reduction == 'none':
        return per_sample
    if reduction == 'mean':
        return t.mean(per_sample)
    if reduction == 'sum':
        return -t.sum(p1 * t.log(p2))
    raise ValueError('unknown reduction')
def cross_entropy_for_one_hot(pred, target, reduce="mean"):
    """Cross entropy between raw logits `pred` and one-hot targets `target`."""
    per_sample = torch.sum(- target * F.log_softmax(pred, dim=-1), 1)
    if reduce == "mean":
        return torch.mean(per_sample)
    if reduce == "sum":
        return torch.sum(per_sample)
    raise Exception("Does not support reduce [{}]".format(reduce))
def coae_loss(
        label,
        fake_label,
        reconstruct_label,
        lambda_1=10,
        lambda_2=2,
        verbose=False):
    """CoAE objective: good reconstruction, confusing fake labels, high entropy.

    Minimizes CE(reconstruct, label) while rewarding both distance of the fake
    labels from the true labels (weight lambda_1) and their entropy (lambda_2).
    """
    ce_reconstruct = cross_entropy(reconstruct_label, label)
    ce_fake = cross_entropy(fake_label, label)
    loss_a = ce_reconstruct - lambda_1 * ce_fake
    loss_b = entropy(fake_label)
    if verbose:
        LOGGER.debug('loss a is {} {}'.format(ce_reconstruct, ce_fake))
        LOGGER.debug('loss b is {}'.format(loss_b))
    return loss_a - lambda_2 * loss_b
class CrossEntropy(object):
    """Callable wrapper around cross_entropy with a fixed reduction mode."""

    def __init__(self, reduction='mean'):
        self.reduction = reduction

    def __call__(self, p2, p1):
        return cross_entropy(p2, p1, reduction=self.reduction)
class CoAE(Module):
    """Confusional autoencoder: encoder maps labels to fake soft labels,
    decoder recovers the original label distribution."""

    def __init__(self, input_dim=2, encode_dim=None):
        super(CoAE, self).__init__()
        self.d = input_dim
        if encode_dim is None:
            encode_dim = (6 * input_dim) ** 2
        self.encoder = self._make_mlp(input_dim, encode_dim)
        self.decoder = self._make_mlp(input_dim, encode_dim)

    @staticmethod
    def _make_mlp(io_dim, hidden_dim):
        # two-layer MLP ending in a softmax so outputs are distributions
        return nn.Sequential(
            nn.Linear(io_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, io_dim),
            nn.Softmax(dim=1),
        )

    def encode(self, x):
        return self.encoder(t.Tensor(x))

    def decode(self, fake_labels):
        return self.decoder(t.Tensor(fake_labels))

    def forward(self, x):
        z = self.encoder(t.Tensor(x))
        return self.decoder(z), z
def train_an_autoencoder_confuser(
        label_num,
        epoch=50,
        lambda1=1,
        lambda2=2,
        lr=0.001,
        verbose=False):
    """Train and return a CoAE confuser on the identity (one-hot) label matrix.

    :param label_num: number of label classes (input/output dim of the CoAE)
    :param epoch: training iterations
    :param lambda1, lambda2: loss weights passed through to coae_loss
    :param lr: Adam learning rate
    :param verbose: log per-step loss terms and final label mappings
    """
    confuser = CoAE(label_num)
    labels = torch.eye(label_num)
    optimizer = torch.optim.Adam(confuser.parameters(), lr=lr)
    for _ in range(epoch):
        optimizer.zero_grad()
        fake = confuser.encode(labels)
        recon = confuser.decode(fake)
        step_loss = coae_loss(
            labels,
            fake,
            recon,
            lambda1,
            lambda2,
            verbose=verbose)
        step_loss.backward()
        optimizer.step()
    if verbose:
        LOGGER.debug(
            'origin labels {}, fake labels {}, reconstruct labels {}'.format(
                labels, confuser.encode(labels).detach().numpy(), confuser.decode(
                    confuser.encode(labels)).detach().numpy()))
    return confuser
def coae_label_reformat(labels, label_num):
    """Convert integer class labels to a one-hot numpy matrix for CoAE training."""
    LOGGER.debug('label shape is {}'.format(labels.shape))
    if label_num == 1:  # regression:
        raise ValueError('label num ==1, regression task not support COAE')
    flat = t.Tensor(labels).flatten().type(t.int64)
    return nn.functional.one_hot(flat, label_num).numpy()
if __name__ == '__main__':
    # Manual smoke test: train a 2-label confuser with verbose loss logging.
    coae = train_an_autoencoder_confuser(
        2,
        epoch=1000,
        verbose=True,
        lambda1=2.0,
        lambda2=1.0,
        lr=0.02)
| 4,246 | 25.710692 | 79 | py |
FATE | FATE-master/python/federatedml/nn/hetero/interactive/he_interactive_layer.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pickle
import numpy as np
import torch
from torch import autograd
from federatedml.nn.hetero.interactive.base import InteractiveLayerGuest, InteractiveLayerHost
from federatedml.nn.hetero.nn_component.torch_model import backward_loss
from federatedml.nn.backend.torch.interactive import InteractiveLayer
from federatedml.nn.backend.torch.serialization import recover_sequential_from_dict
from federatedml.util.fixpoint_solver import FixedPointEncoder
from federatedml.protobuf.generated.hetero_nn_model_param_pb2 import InteractiveLayerParam
from federatedml.secureprotol import PaillierEncrypt
from federatedml.util import consts, LOGGER
from federatedml.nn.hetero.interactive.utils.numpy_layer import NumpyDenseLayerGuest, NumpyDenseLayerHost
from federatedml.secureprotol.paillier_tensor import PaillierTensor
from federatedml.nn.hetero.nn_component.torch_model import TorchNNModel
from federatedml.transfer_variable.base_transfer_variable import BaseTransferVariables
from fate_arch.session import computing_session as session
from federatedml.nn.backend.utils.rng import RandomNumberGenerator
# NOTE(review): debug flag copied into HEInteractiveLayerGuest.plaintext —
# presumably True skips Paillier encryption in the interactive layer; confirm
# against the forward/backward code before flipping.
PLAINTEXT = False
class HEInteractiveTransferVariable(BaseTransferVariables):
    """Transfer variables exchanged by guest and host in the HE interactive layer."""

    def __init__(self, flowid=0):
        super().__init__(flowid)
        # (name, src roles, dst roles) — creation order preserved from the original
        _specs = [
            ('decrypted_guest_forward', ['host'], ['guest']),
            ('decrypted_guest_weight_gradient', ['host'], ['guest']),
            ('encrypted_acc_noise', ['host'], ['guest']),
            ('encrypted_guest_forward', ['guest'], ['host']),
            ('encrypted_guest_weight_gradient', ['guest'], ['host']),
            ('encrypted_host_forward', ['host'], ['guest']),
            ('host_backward', ['guest'], ['host']),
            ('selective_info', ['guest'], ['host']),
            ('drop_out_info', ['guest'], ['host']),
            ('drop_out_table', ['guest'], ['host']),
            ('interactive_layer_output_unit', ['guest'], ['host']),
        ]
        for _name, _src, _dst in _specs:
            setattr(self, _name,
                    self._create_variable(name=_name, src=_src, dst=_dst))
class DropOut(object):
    """Inverted dropout over interactive-layer activations.

    The same boolean mask is applied in forward and backward; masks can also
    be exported as a distributed table, and in selective-bp mode the masks of
    selected samples are cached for later backward passes.
    """

    def __init__(self, rate, noise_shape):
        self._keep_rate = rate            # probability of keeping a unit
        self._noise_shape = noise_shape   # (batch_size, layer_width)
        self._batch_size = noise_shape[0]
        self._mask = None
        self._partition = None
        self._mask_table = None
        self._select_mask_table = None    # cached masks of selected samples
        self._do_backward_select = False
        self._mask_table_cache = {}

    def forward(self, X):
        """Apply the current mask with inverted-dropout scaling."""
        mask = self._mask if X.shape == self._mask.shape else self._mask[: len(X)]
        return X * mask / self._keep_rate

    def backward(self, grad):
        """Mask the incoming gradient; in selective mode consume cached masks."""
        if self._do_backward_select:
            n = grad.shape[0]
            self._mask = self._select_mask_table[: n]
            self._select_mask_table = self._select_mask_table[n:]
            mask = self._mask
        else:
            mask = self._mask if grad.shape == self._mask.shape else self._mask[: grad.shape[0]]
        return grad * mask / self._keep_rate

    def generate_mask(self):
        """Draw a fresh Bernoulli(keep_rate) mask of the configured noise shape."""
        self._mask = np.random.uniform(
            low=0, high=1, size=self._noise_shape) < self._keep_rate

    def generate_mask_table(self, shape):
        """Export the (possibly trimmed) mask as a distributed table."""
        # tail batches may hold fewer samples than batch_size, so trim to `shape`
        mask = self._mask if shape == self._noise_shape else self._mask[0: shape[0]]
        return session.parallelize(
            mask, include_key=False, partition=self._partition)

    def set_partition(self, partition):
        self._partition = partition

    def select_backward_sample(self, select_ids):
        """Append the masks of the selected samples to the cache."""
        picked = self._mask[np.array(select_ids)]
        if self._select_mask_table is None:
            self._select_mask_table = picked
        else:
            self._select_mask_table = np.vstack(
                (self._select_mask_table, picked))

    def do_backward_select_strategy(self):
        self._do_backward_select = True
class HEInteractiveLayerGuest(InteractiveLayerGuest):
    def __init__(self, params=None, layer_config=None, host_num=1):
        """Guest side of the HE-protected interactive layer.

        :param params: hetero NN params (interactive_layer_lr, floating_point_precision, ...)
        :param layer_config: define dict of the interactive layer
        :param host_num: number of participating hosts
        """
        super(HEInteractiveLayerGuest, self).__init__(params)
        # transfer var
        self.host_num = host_num
        self.layer_config = layer_config
        self.transfer_variable = HEInteractiveTransferVariable()
        self.plaintext = PLAINTEXT
        # NOTE(review): duplicate assignment of layer_config below (harmless)
        self.layer_config = layer_config
        self.host_input_shapes = []
        self.rng_generator = RandomNumberGenerator()
        self.learning_rate = params.interactive_layer_lr
        # cached tensor
        self.guest_tensor = None
        self.host_tensors = None
        self.dense_output_data_require_grad = None
        self.activation_out_require_grad = None
        # model
        self.model: InteractiveLayer = None
        self.guest_model = None
        self.host_model_list = []
        self.batch_size = None
        self.partitions = 0
        self.do_backward_select_strategy = False
        self.optimizer = None
        # drop out
        self.drop_out_initiated = False
        self.drop_out = None
        self.drop_out_keep_rate = None
        # optional fixed-point encoding of floats before encryption
        self.fixed_point_encoder = None if params.floating_point_precision is None else FixedPointEncoder(
            2 ** params.floating_point_precision)
        self.send_output_unit = False
        # float64
        self.float64 = False
"""
Init functions
"""
def set_flow_id(self, flow_id):
self.transfer_variable.set_flowid(flow_id)
def set_backward_select_strategy(self):
self.do_backward_select_strategy = True
def set_batch(self, batch_size):
self.batch_size = batch_size
def set_partition(self, partition):
self.partitions = partition
def _build_model(self):
if self.model is None:
raise ValueError('torch interactive model is not initialized!')
for i in range(self.host_num):
host_model = NumpyDenseLayerHost()
host_model.build(self.model.host_model[i])
host_model.set_learning_rate(self.learning_rate)
self.host_model_list.append(host_model)
self.guest_model = NumpyDenseLayerGuest()
self.guest_model.build(self.model.guest_model)
self.guest_model.set_learning_rate(self.learning_rate)
if self.do_backward_select_strategy:
self.guest_model.set_backward_selective_strategy()
self.guest_model.set_batch(self.batch_size)
for host_model in self.host_model_list:
host_model.set_backward_selective_strategy()
host_model.set_batch(self.batch_size)
"""
Drop out functions
"""
def init_drop_out_parameter(self):
if isinstance(self.model.param_dict['dropout'], float):
self.drop_out_keep_rate = 1 - self.model.param_dict['dropout']
else:
self.drop_out_keep_rate = -1
self.transfer_variable.drop_out_info.remote(
self.drop_out_keep_rate, idx=-1, suffix=('dropout_rate', ))
self.drop_out_initiated = True
def _create_drop_out(self, shape):
if self.drop_out_keep_rate and self.drop_out_keep_rate != 1 and self.drop_out_keep_rate > 0:
if not self.drop_out:
self.drop_out = DropOut(
noise_shape=shape, rate=self.drop_out_keep_rate)
self.drop_out.set_partition(self.partitions)
if self.do_backward_select_strategy:
self.drop_out.do_backward_select_strategy()
self.drop_out.generate_mask()
@staticmethod
def expand_columns(tensor, keep_array):
shape = keep_array.shape
tensor = np.reshape(tensor, (tensor.size,))
keep = np.reshape(keep_array, (keep_array.size,))
ret_tensor = []
idx = 0
for x in keep:
if x == 0:
ret_tensor.append(0)
else:
ret_tensor.append(tensor[idx])
idx += 1
return np.reshape(np.array(ret_tensor), shape)
"""
Plaintext forward/backward, these interfaces are for testing
"""
def plaintext_forward(self, guest_input, epoch=0, batch=0, train=True):
if self.model is None:
self.model = recover_sequential_from_dict(self.layer_config)[0]
if self.float64:
self.model.type(torch.float64)
if self.optimizer is None:
self.optimizer = torch.optim.SGD(
params=self.model.parameters(), lr=self.learning_rate)
if train:
self.model.train()
else:
self.model.eval()
with torch.no_grad():
guest_tensor = torch.from_numpy(guest_input)
host_inputs = self.get_forward_from_host(
epoch, batch, train, idx=-1)
host_tensors = [torch.from_numpy(arr) for arr in host_inputs]
interactive_out = self.model(guest_tensor, host_tensors)
self.guest_tensor = guest_tensor
self.host_tensors = host_tensors
return interactive_out.cpu().detach().numpy()
def plaintext_backward(self, output_gradient, epoch, batch):
# compute input gradient
self.guest_tensor: torch.Tensor = self.guest_tensor.requires_grad_(True)
for tensor in self.host_tensors:
tensor.requires_grad_(True)
out = self.model(self.guest_tensor, self.host_tensors)
loss = backward_loss(out, torch.from_numpy(output_gradient))
backward_list = [self.guest_tensor]
backward_list.extend(self.host_tensors)
ret_grad = autograd.grad(loss, backward_list)
# update model
self.guest_tensor: torch.Tensor = self.guest_tensor.requires_grad_(False)
for tensor in self.host_tensors:
tensor.requires_grad_(False)
self.optimizer.zero_grad()
out = self.model(self.guest_tensor, self.host_tensors)
loss = backward_loss(out, torch.from_numpy(output_gradient))
loss.backward()
self.optimizer.step()
self.guest_tensor, self.host_tensors = None, None
for idx, host_grad in enumerate(ret_grad[1:]):
self.send_host_backward_to_host(host_grad, epoch, batch, idx=idx)
return ret_grad[0]
"""
Activation forward & backward
"""
def activation_forward(self, dense_out, with_grad=True):
if with_grad:
if (self.dense_output_data_require_grad is not None) or (
self.activation_out_require_grad is not None):
raise ValueError(
'torch forward error, related required grad tensors are not freed')
self.dense_output_data_require_grad = dense_out.requires_grad_(
True)
activation_out_ = self.model.activation(
self.dense_output_data_require_grad)
self.activation_out_require_grad = activation_out_
else:
with torch.no_grad():
activation_out_ = self.model.activation(dense_out)
return activation_out_.cpu().detach().numpy()
def activation_backward(self, output_gradients):
if self.activation_out_require_grad is None and self.dense_output_data_require_grad is None:
raise ValueError('related grad is None, cannot compute backward')
loss = backward_loss(
self.activation_out_require_grad,
torch.Tensor(output_gradients))
activation_backward_grad = torch.autograd.grad(
loss, self.dense_output_data_require_grad)
self.activation_out_require_grad = None
self.dense_output_data_require_grad = None
return activation_backward_grad[0].cpu().detach().numpy()
"""
Forward & Backward
"""
def print_log(self, descr, epoch, batch, train):
if train:
LOGGER.info("{} epoch {} batch {}"
"".format(descr, epoch, batch))
else:
LOGGER.info("predicting, {} pred iteration {} batch {}"
"".format(descr, epoch, batch))
def forward_interactive(
self,
encrypted_host_input,
epoch,
batch,
train=True):
self.print_log(
'get encrypted dense output of host model of',
epoch,
batch,
train)
mask_table_list = []
guest_nosies = []
host_idx = 0
for model, host_bottom_input in zip(
self.host_model_list, encrypted_host_input):
encrypted_fw = model(host_bottom_input, self.fixed_point_encoder)
mask_table = None
if train:
self._create_drop_out(encrypted_fw.shape)
if self.drop_out:
mask_table = self.drop_out.generate_mask_table(
encrypted_fw.shape)
if mask_table:
encrypted_fw = encrypted_fw.select_columns(mask_table)
mask_table_list.append(mask_table)
guest_forward_noise = self.rng_generator.fast_generate_random_number(
encrypted_fw.shape, encrypted_fw.partitions, keep_table=mask_table)
if self.fixed_point_encoder:
encrypted_fw += guest_forward_noise.encode(
self.fixed_point_encoder)
else:
encrypted_fw += guest_forward_noise
guest_nosies.append(guest_forward_noise)
self.send_guest_encrypted_forward_output_with_noise_to_host(
encrypted_fw.get_obj(), epoch, batch, idx=host_idx)
if mask_table:
self.send_interactive_layer_drop_out_table(
mask_table, epoch, batch, idx=host_idx)
host_idx += 1
# get list from hosts
decrypted_dense_outputs = self.get_guest_decrypted_forward_from_host(
epoch, batch, idx=-1)
merge_output = None
for idx, (outputs, noise) in enumerate(
zip(decrypted_dense_outputs, guest_nosies)):
out = PaillierTensor(outputs) - noise
if len(mask_table_list) != 0:
out = PaillierTensor(
out.get_obj().join(
mask_table_list[idx],
self.expand_columns))
if merge_output is None:
merge_output = out
else:
merge_output = merge_output + out
return merge_output
def forward(self, x, epoch: int, batch: int, train: bool = True, **kwargs):
self.print_log(
'interactive layer running forward propagation',
epoch,
batch,
train)
if self.plaintext:
return self.plaintext_forward(x, epoch, batch, train)
if self.model is None:
self.model = recover_sequential_from_dict(self.layer_config)[0]
LOGGER.debug('interactive model is {}'.format(self.model))
# for multi host cases
LOGGER.debug(
'host num is {}, len host model {}'.format(
self.host_num, len(
self.model.host_model)))
assert self.host_num == len(self.model.host_model), 'host number is {}, but host linear layer number is {},' \
'please check your interactive configuration, make sure' \
' that host layer number equals to host number' \
.format(self.host_num, len(self.model.host_model))
if self.float64:
self.model.type(torch.float64)
if train and not self.drop_out_initiated:
self.init_drop_out_parameter()
host_inputs = self.get_forward_from_host(epoch, batch, train, idx=-1)
host_bottom_inputs_tensor = []
host_input_shapes = []
for i in host_inputs:
pt = PaillierTensor(i)
host_bottom_inputs_tensor.append(pt)
host_input_shapes.append(pt.shape[1])
self.model.lazy_to_linear(x.shape[1], host_dims=host_input_shapes)
self.host_input_shapes = host_input_shapes
if self.guest_model is None:
LOGGER.info("building interactive layers' training model")
self._build_model()
if not self.partitions:
self.partitions = host_bottom_inputs_tensor[0].partitions
if not self.send_output_unit:
self.send_output_unit = True
for idx in range(self.host_num):
self.send_interactive_layer_output_unit(
self.host_model_list[idx].output_shape[0], idx=idx)
guest_output = self.guest_model(x)
host_output = self.forward_interactive(
host_bottom_inputs_tensor, epoch, batch, train)
if guest_output is not None:
dense_output_data = host_output + \
PaillierTensor(guest_output, partitions=self.partitions)
else:
dense_output_data = host_output
self.print_log(
"start to get interactive layer's activation output of",
epoch,
batch,
train)
if self.float64: # result after encrypt calculation is float 64
dense_out = torch.from_numpy(dense_output_data.numpy())
else:
dense_out = torch.Tensor(
dense_output_data.numpy()) # convert to float32
if self.do_backward_select_strategy:
for h in self.host_model_list:
h.activation_input = dense_out.cpu().detach().numpy()
# if is not backward strategy, can compute grad directly
if not train or self.do_backward_select_strategy:
with_grad = False
else:
with_grad = True
activation_out = self.activation_forward(
dense_out, with_grad=with_grad)
if train and self.drop_out:
return self.drop_out.forward(activation_out)
return activation_out
def backward_interactive(
self,
host_model,
activation_gradient,
epoch,
batch,
host_idx):
LOGGER.info(
"get encrypted weight gradient of epoch {} batch {}".format(
epoch, batch))
encrypted_weight_gradient = host_model.get_weight_gradient(
activation_gradient, encoder=self.fixed_point_encoder)
if self.fixed_point_encoder:
encrypted_weight_gradient = self.fixed_point_encoder.decode(
encrypted_weight_gradient)
noise_w = self.rng_generator.generate_random_number(
encrypted_weight_gradient.shape)
self.transfer_variable.encrypted_guest_weight_gradient.remote(
encrypted_weight_gradient +
noise_w,
role=consts.HOST,
idx=host_idx,
suffix=(
epoch,
batch,
))
LOGGER.info(
"get decrypted weight graident of epoch {} batch {}".format(
epoch, batch))
decrypted_weight_gradient = self.transfer_variable.decrypted_guest_weight_gradient.get(
idx=host_idx, suffix=(epoch, batch,))
decrypted_weight_gradient -= noise_w
encrypted_acc_noise = self.get_encrypted_acc_noise_from_host(
epoch, batch, idx=host_idx)
return decrypted_weight_gradient, encrypted_acc_noise
def backward(self, error, epoch: int, batch: int, selective_ids=None):
if self.plaintext:
return self.plaintext_backward(error, epoch, batch)
if selective_ids:
for host_model in self.host_model_list:
host_model.select_backward_sample(selective_ids)
self.guest_model.select_backward_sample(selective_ids)
if self.drop_out:
self.drop_out.select_backward_sample(selective_ids)
if self.do_backward_select_strategy:
# send to all host
self.send_backward_select_info(
selective_ids, len(error), epoch, batch, -1)
if len(error) > 0:
LOGGER.debug(
"interactive layer start backward propagation of epoch {} batch {}".format(
epoch, batch))
if not self.do_backward_select_strategy:
activation_gradient = self.activation_backward(error)
else:
act_input = self.host_model_list[0].get_selective_activation_input(
)
_ = self.activation_forward(torch.from_numpy(act_input), True)
activation_gradient = self.activation_backward(error)
if self.drop_out:
activation_gradient = self.drop_out.backward(
activation_gradient)
LOGGER.debug(
"interactive layer update guest weight of epoch {} batch {}".format(
epoch, batch))
# update guest model
guest_input_gradient = self.update_guest(activation_gradient)
LOGGER.debug('update host model weights')
for idx, host_model in enumerate(self.host_model_list):
# update host models
host_weight_gradient, acc_noise = self.backward_interactive(
host_model, activation_gradient, epoch, batch, host_idx=idx)
host_input_gradient = self.update_host(
host_model, activation_gradient, host_weight_gradient, acc_noise)
self.send_host_backward_to_host(
host_input_gradient.get_obj(), epoch, batch, idx=idx)
return guest_input_gradient
else:
return []
"""
Model update
"""
def update_guest(self, activation_gradient):
input_gradient = self.guest_model.get_input_gradient(
activation_gradient)
weight_gradient = self.guest_model.get_weight_gradient(
activation_gradient)
self.guest_model.update_weight(weight_gradient)
self.guest_model.update_bias(activation_gradient)
return input_gradient
def update_host(
self,
host_model,
activation_gradient,
weight_gradient,
acc_noise):
activation_gradient_tensor = PaillierTensor(
activation_gradient, partitions=self.partitions)
input_gradient = host_model.get_input_gradient(
activation_gradient_tensor, acc_noise, encoder=self.fixed_point_encoder)
host_model.update_weight(weight_gradient)
host_model.update_bias(activation_gradient)
return input_gradient
"""
Communication functions
"""
def send_interactive_layer_output_unit(self, shape, idx=0):
self.transfer_variable.interactive_layer_output_unit.remote(
shape, role=consts.HOST, idx=idx)
def send_backward_select_info(
self,
selective_ids,
gradient_len,
epoch,
batch,
idx):
self.transfer_variable.selective_info.remote(
(selective_ids, gradient_len), role=consts.HOST, idx=idx, suffix=(
epoch, batch,))
def send_host_backward_to_host(self, host_error, epoch, batch, idx):
self.transfer_variable.host_backward.remote(host_error,
role=consts.HOST,
idx=idx,
suffix=(epoch, batch,))
def get_forward_from_host(self, epoch, batch, train, idx=0):
return self.transfer_variable.encrypted_host_forward.get(
idx=idx, suffix=(epoch, batch, train))
def send_guest_encrypted_forward_output_with_noise_to_host(
self, encrypted_guest_forward_with_noise, epoch, batch, idx):
return self.transfer_variable.encrypted_guest_forward.remote(
encrypted_guest_forward_with_noise,
role=consts.HOST,
idx=idx,
suffix=(
epoch,
batch,
))
def send_interactive_layer_drop_out_table(
self, mask_table, epoch, batch, idx):
return self.transfer_variable.drop_out_table.remote(
mask_table, role=consts.HOST, idx=idx, suffix=(epoch, batch,))
def get_guest_decrypted_forward_from_host(self, epoch, batch, idx=0):
return self.transfer_variable.decrypted_guest_forward.get(
idx=idx, suffix=(epoch, batch,))
def get_encrypted_acc_noise_from_host(self, epoch, batch, idx=0):
return self.transfer_variable.encrypted_acc_noise.get(
idx=idx, suffix=(epoch, batch,))
"""
Model IO
"""
def transfer_np_model_to_torch_interactive_layer(self):
self.model = self.model.cpu()
if self.guest_model is not None:
guest_weight = self.guest_model.get_weight()
model: torch.nn.Linear = self.model.guest_model
model.weight.data.copy_(torch.Tensor(guest_weight))
if self.guest_model.bias is not None:
model.bias.data.copy_(torch.Tensor(self.guest_model.bias))
for host_np_model, torch_model in zip(
self.host_model_list, self.model.host_model):
host_weight = host_np_model.get_weight()
torch_model.weight.data.copy_(torch.Tensor(host_weight))
if host_np_model.bias is not None:
torch_model.bias.data.copy_(torch.Tensor(torch_model.bias))
def export_model(self):
self.transfer_np_model_to_torch_interactive_layer()
interactive_layer_param = InteractiveLayerParam()
interactive_layer_param.interactive_guest_saved_model_bytes = TorchNNModel.get_model_bytes(
self.model)
interactive_layer_param.host_input_shape.extend(self.host_input_shapes)
return interactive_layer_param
def restore_model(self, interactive_layer_param):
self.host_input_shapes = list(interactive_layer_param.host_input_shape)
self.model = TorchNNModel.recover_model_bytes(
interactive_layer_param.interactive_guest_saved_model_bytes)
self._build_model()
class HEInteractiveLayerHost(InteractiveLayerHost):
    """
    Host side of the Paillier-HE interactive layer.

    The host owns the Paillier key pair: it encrypts its bottom-model output
    for the guest, decrypts the guest's noise-masked dense output, and keeps
    an accumulated noise matrix (acc_noise) that hides its true weights from
    the guest during backward.
    """
    def __init__(self, params):
        super(HEInteractiveLayerHost, self).__init__(params)
        self.plaintext = PLAINTEXT
        # accumulated noise added to the host weights; grows every backward
        self.acc_noise = None
        self.learning_rate = params.interactive_layer_lr
        self.encrypter = self.generate_encrypter(params)
        self.transfer_variable = HEInteractiveTransferVariable()
        self.partitions = 1
        self.input_shape = None
        self.output_unit = None
        self.rng_generator = RandomNumberGenerator()
        self.do_backward_select_strategy = False
        self.drop_out_init = False
        self.drop_out_keep_rate = None
        self.fixed_point_encoder = None if params.floating_point_precision is None else FixedPointEncoder(
            2 ** params.floating_point_precision)
        self.mask_table = None
    """
    Init
    """
    def set_transfer_variable(self, transfer_variable):
        self.transfer_variable = transfer_variable
    def set_partition(self, partition):
        self.partitions = partition
    def set_backward_select_strategy(self):
        self.do_backward_select_strategy = True
    """
    Forward & Backward
    """
    def plaintext_forward(self, host_input, epoch, batch, train):
        # testing-only path: ship the raw (unencrypted) bottom output
        self.send_forward_to_guest(host_input, epoch, batch, train)
    def plaintext_backward(self, epoch, batch):
        # testing-only path: receive the raw input gradient
        return self.get_host_backward_from_guest(epoch, batch)
    def forward(self, host_input, epoch=0, batch=0, train=True, **kwargs):
        """Encrypt the bottom output for the guest, then decrypt the guest's
        noise-masked dense output and return it with acc-noise compensation.

        Returns None; all results travel through transfer variables.
        """
        if self.plaintext:
            self.plaintext_forward(host_input, epoch, batch, train)
            return
        if train and not self.drop_out_init:
            # first training batch: learn the guest's dropout keep rate
            self.drop_out_init = True
            self.drop_out_keep_rate = self.transfer_variable.drop_out_info.get(
                0, role=consts.GUEST, suffix=('dropout_rate', ))
            if self.drop_out_keep_rate == -1:
                self.drop_out_keep_rate = None
        LOGGER.info(
            "forward propagation: encrypt host_bottom_output of epoch {} batch {}".format(
                epoch, batch))
        host_input = PaillierTensor(host_input, partitions=self.partitions)
        encrypted_host_input = host_input.encrypt(self.encrypter)
        self.send_forward_to_guest(
            encrypted_host_input.get_obj(), epoch, batch, train)
        encrypted_guest_forward = PaillierTensor(
            self.get_guest_encrypted_forward_from_guest(epoch, batch))
        decrypted_guest_forward = encrypted_guest_forward.decrypt(
            self.encrypter)
        if self.fixed_point_encoder:
            decrypted_guest_forward = decrypted_guest_forward.decode(
                self.fixed_point_encoder)
        if self.input_shape is None:
            # first batch: record dims and fetch output width from the guest
            self.input_shape = host_input.shape[1]
            self.output_unit = self.get_interactive_layer_output_unit()
        if self.acc_noise is None:
            self.acc_noise = np.zeros((self.input_shape, self.output_unit))
        mask_table = None
        if train and self.drop_out_keep_rate and self.drop_out_keep_rate < 1:
            mask_table = self.get_interactive_layer_drop_out_table(
                epoch, batch)
        if mask_table:
            # add x * acc_noise only on the columns the guest kept
            decrypted_guest_forward_with_noise = decrypted_guest_forward + \
                (host_input * self.acc_noise).select_columns(mask_table)
            self.mask_table = mask_table
        else:
            noise_part = (host_input * self.acc_noise)
            decrypted_guest_forward_with_noise = decrypted_guest_forward + noise_part
        self.send_decrypted_guest_forward_with_noise_to_guest(
            decrypted_guest_forward_with_noise.get_obj(), epoch, batch)
    def backward(self, epoch, batch):
        """Decrypt the guest's masked weight gradient, re-mask it with fresh
        noise (accumulated into acc_noise), and return the decrypted input
        gradient for the host bottom model plus any selective ids."""
        if self.plaintext:
            return self.plaintext_backward(epoch, batch), []
        do_backward = True
        selective_ids = []
        if self.do_backward_select_strategy:
            selective_ids, do_backward = self.send_backward_select_info(
                epoch, batch)
        if not do_backward:
            return [], selective_ids
        encrypted_guest_weight_gradient = self.get_guest_encrypted_weight_gradient_from_guest(
            epoch, batch)
        LOGGER.info(
            "decrypt weight gradient of epoch {} batch {}".format(
                epoch, batch))
        decrypted_guest_weight_gradient = self.encrypter.recursive_decrypt(
            encrypted_guest_weight_gradient)
        noise_weight_gradient = self.rng_generator.generate_random_number(
            (self.input_shape, self.output_unit))
        # divide by lr so that after the guest applies lr * gradient the
        # injected noise equals noise_weight_gradient exactly
        decrypted_guest_weight_gradient += noise_weight_gradient / self.learning_rate
        self.send_guest_decrypted_weight_gradient_to_guest(
            decrypted_guest_weight_gradient, epoch, batch)
        LOGGER.info(
            "encrypt acc_noise of epoch {} batch {}".format(
                epoch, batch))
        encrypted_acc_noise = self.encrypter.recursive_encrypt(self.acc_noise)
        self.send_encrypted_acc_noise_to_guest(
            encrypted_acc_noise, epoch, batch)
        self.acc_noise += noise_weight_gradient
        host_input_gradient = PaillierTensor(
            self.get_host_backward_from_guest(epoch, batch))
        host_input_gradient = host_input_gradient.decrypt(self.encrypter)
        if self.fixed_point_encoder:
            host_input_gradient = host_input_gradient.decode(
                self.fixed_point_encoder).numpy()
        else:
            host_input_gradient = host_input_gradient.numpy()
        return host_input_gradient, selective_ids
    """
    Communication Function
    """
    # NOTE(review): despite the "send_" name, this method *receives* the
    # selective-backward info from the guest.
    def send_backward_select_info(self, epoch, batch):
        selective_ids, do_backward = self.transfer_variable.selective_info.get(
            idx=0, suffix=(epoch, batch,))
        return selective_ids, do_backward
    def send_encrypted_acc_noise_to_guest(
            self, encrypted_acc_noise, epoch, batch):
        self.transfer_variable.encrypted_acc_noise.remote(encrypted_acc_noise,
                                                          idx=0,
                                                          role=consts.GUEST,
                                                          suffix=(epoch, batch,))
    def get_interactive_layer_output_unit(self):
        return self.transfer_variable.interactive_layer_output_unit.get(idx=0)
    def get_guest_encrypted_weight_gradient_from_guest(self, epoch, batch):
        encrypted_guest_weight_gradient = self.transfer_variable.encrypted_guest_weight_gradient.get(
            idx=0, suffix=(epoch, batch,))
        return encrypted_guest_weight_gradient
    def get_interactive_layer_drop_out_table(self, epoch, batch):
        return self.transfer_variable.drop_out_table.get(
            idx=0, suffix=(epoch, batch,))
    def send_forward_to_guest(self, encrypted_host_input, epoch, batch, train):
        self.transfer_variable.encrypted_host_forward.remote(
            encrypted_host_input, idx=0, role=consts.GUEST, suffix=(epoch, batch, train))
    def send_guest_decrypted_weight_gradient_to_guest(
            self, decrypted_guest_weight_gradient, epoch, batch):
        self.transfer_variable.decrypted_guest_weight_gradient.remote(
            decrypted_guest_weight_gradient, idx=0, role=consts.GUEST, suffix=(epoch, batch,))
    def get_host_backward_from_guest(self, epoch, batch):
        host_backward = self.transfer_variable.host_backward.get(
            idx=0, suffix=(epoch, batch,))
        return host_backward
    def get_guest_encrypted_forward_from_guest(self, epoch, batch):
        encrypted_guest_forward = self.transfer_variable.encrypted_guest_forward.get(
            idx=0, suffix=(epoch, batch,))
        return encrypted_guest_forward
    def send_decrypted_guest_forward_with_noise_to_guest(
            self, decrypted_guest_forward_with_noise, epoch, batch):
        self.transfer_variable.decrypted_guest_forward.remote(
            decrypted_guest_forward_with_noise,
            idx=0,
            role=consts.GUEST,
            suffix=(
                epoch,
                batch,
            ))
    """
    Encrypter
    """
    def generate_encrypter(self, param):
        # only Paillier is supported; key length comes from the param object
        LOGGER.info("generate encrypter")
        if param.encrypt_param.method.lower() == consts.PAILLIER.lower():
            encrypter = PaillierEncrypt()
            encrypter.generate_key(param.encrypt_param.key_length)
        else:
            raise NotImplementedError("encrypt method not supported yet!!!")
        return encrypter
    """
    Model IO
    """
    def export_model(self):
        # only acc_noise needs persisting on the host side
        interactive_layer_param = InteractiveLayerParam()
        interactive_layer_param.acc_noise = pickle.dumps(self.acc_noise)
        return interactive_layer_param
    def restore_model(self, interactive_layer_param):
        self.acc_noise = pickle.loads(interactive_layer_param.acc_noise)
| 36,890 | 37.071207 | 122 | py |
FATE | FATE-master/python/federatedml/nn/hetero/interactive/utils/numpy_layer.py | import torch
import numpy as np
from federatedml.util import consts
from federatedml.secureprotol.paillier_tensor import PaillierTensor
class NumpyDenseLayer(object):
    """
    Base class for numpy mirrors of a torch Linear layer, designed so that
    forward/backward math is compatible with Paillier tensor computation.
    Weights are stored transposed (in_features x out_features).
    """

    def __init__(self):
        self.input = None
        self.model_weight = None
        self.model_shape = None
        self.bias = None
        self.lr = 1.0
        self.role = None
        self.is_empty_model = False
        # caches used by the selective-backward strategy
        self.activation_input = None
        self.input_cached = np.array([])
        self.activation_cached = np.array([])
        self.do_backward_selective_strategy = False
        self.batch_size = None

    def set_backward_selective_strategy(self):
        """Enable caching of selected samples for backward."""
        self.do_backward_selective_strategy = True

    def set_batch(self, batch_size):
        self.batch_size = batch_size

    def build(self, torch_linear: torch.nn.Linear):
        """Copy weight/bias out of a torch Linear; a None layer marks this
        model as empty (only legal on the guest side)."""
        if torch_linear is None:
            if self.role == "host":
                raise ValueError("host input is empty!")
            self.is_empty_model = True
            return
        assert isinstance(
            torch_linear, torch.nn.Linear), 'must use a torch Linear to build this class, but got {}' .format(torch_linear)
        # transpose so forward is x @ model_weight
        self.model_weight = torch_linear.weight.cpu().detach().numpy().transpose()
        if torch_linear.bias is not None:
            self.bias = torch_linear.bias.cpu().detach().numpy()

    def export_model(self):
        if self.is_empty_model:
            return "".encode()
        return [self.model_weight]

    def get_selective_activation_input(self):
        """Pop the first batch_size cached activation inputs."""
        self.activation_input = self.activation_cached[: self.batch_size]
        self.activation_cached = self.activation_cached[self.batch_size:]
        return self.activation_input

    def get_weight(self):
        # back to torch's (out_features x in_features) orientation
        return self.model_weight.transpose()

    def get_bias(self):
        return self.bias

    def set_learning_rate(self, lr):
        self.lr = lr

    # subclasses implement the actual math
    def forward(self, x, **kwargs):
        pass

    def get_weight_gradient(self, delta):
        pass

    def restore_model(self, model_bytes):
        pass

    def update_weight(self, delta):
        pass

    def update_bias(self, delta):
        pass

    @property
    def empty(self):
        return self.is_empty_model

    @property
    def output_shape(self):
        return self.model_weight.shape[1:]

    def __repr__(self):
        return 'model weights: {}, model bias {}'.format(
            self.model_weight, self.bias)

    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)
class NumpyDenseLayerGuest(NumpyDenseLayer):
    """Guest-side numpy dense layer operating on plaintext numpy arrays."""

    def __init__(self):
        super(NumpyDenseLayerGuest, self).__init__()
        self.role = consts.GUEST

    def forward(self, x):
        """Return x @ W (+ bias); None when this guest has no model."""
        if self.empty:
            return None
        self.input = x
        out = x @ self.model_weight
        if self.bias is not None:
            out += self.bias
        return out

    def select_backward_sample(self, selective_ids):
        """Append the selected rows of the last input to the cache."""
        picked = self.input[selective_ids]
        if self.input_cached.shape[0] == 0:
            self.input_cached = picked
        else:
            self.input_cached = np.vstack((self.input_cached, picked))

    def get_input_gradient(self, delta):
        if self.empty:
            return None
        return delta @ self.model_weight.T

    def get_weight_gradient(self, delta):
        if self.empty:
            return None
        if self.do_backward_selective_strategy:
            # consume one batch worth of cached inputs
            self.input = self.input_cached[: self.batch_size]
            self.input_cached = self.input_cached[self.batch_size:]
        return delta.T @ self.input

    def update_weight(self, delta):
        if self.empty:
            return None
        self.model_weight -= self.lr * delta.T

    def update_bias(self, delta):
        if self.bias is not None:
            self.bias -= np.sum(delta, axis=0) * self.lr
class NumpyDenseLayerHost(NumpyDenseLayer):
    """
    Host-side numpy dense layer; can directly compute the forward pass on a
    PaillierTensor (encrypted input), unlike the guest variant.
    """
    def __init__(self):
        super(NumpyDenseLayerHost, self).__init__()
        self.role = consts.HOST
    def select_backward_sample(self, selective_ids):
        # Append the selected encrypted rows to input_cached, re-keying them
        # to consecutive offsets after the rows already cached, and cache the
        # matching plaintext activation inputs alongside.
        cached_shape = self.input_cached.shape[0]
        offsets = [i + cached_shape for i in range(len(selective_ids))]
        id_map = dict(zip(selective_ids, offsets))
        if cached_shape == 0:
            self.input_cached = (
                self.input.get_obj()
                .filter(lambda k, v: k in id_map)
                .map(lambda k, v: (id_map[k], v))
            )
            self.input_cached = PaillierTensor(self.input_cached)
            self.activation_cached = self.activation_input[selective_ids]
        else:
            selective_input = (
                self.input.get_obj()
                .filter(lambda k, v: k in id_map)
                .map(lambda k, v: (id_map[k], v))
            )
            self.input_cached = PaillierTensor(
                self.input_cached.get_obj().union(selective_input)
            )
            self.activation_cached = np.vstack(
                (self.activation_cached, self.activation_input[selective_ids])
            )
    def forward(self, x, encoder=None):
        # x is a PaillierTensor; optionally fixed-point-encode the plaintext
        # weight/bias before the homomorphic multiply/add.
        self.input = x
        if encoder is not None:
            output = x * encoder.encode(self.model_weight)
        else:
            output = x * self.model_weight
        if self.bias is not None:
            if encoder is not None:
                output += encoder.encode(self.bias)
            else:
                output += self.bias
        return output
    def get_input_gradient(self, delta, acc_noise, encoder=None):
        # gradient w.r.t. the host input; acc_noise compensates the noise the
        # host has accumulated on its weights
        if not encoder:
            error = delta * self.model_weight.T + delta * acc_noise.T
        else:
            error = delta.encode(encoder) * (self.model_weight + acc_noise).T
        return error
    def get_weight_gradient(self, delta, encoder=None):
        if self.do_backward_selective_strategy:
            # consume one batch worth of cached encrypted rows, shifting the
            # remaining keys down by batch_size
            batch_size = self.batch_size
            self.input = PaillierTensor(
                self.input_cached.get_obj().filter(lambda k, v: k < batch_size)
            )
            self.input_cached = PaillierTensor(
                self.input_cached.get_obj()
                .filter(lambda k, v: k >= batch_size)
                .map(lambda k, v: (k - batch_size, v))
            )
        if encoder:
            delta_w = self.input.fast_matmul_2d(encoder.encode(delta))
        else:
            delta_w = self.input.fast_matmul_2d(delta)
        return delta_w
    def update_weight(self, delta):
        self.model_weight -= delta * self.lr
    def update_bias(self, delta):
        if self.bias is not None:
            self.bias -= np.sum(delta, axis=0) * self.lr
| 6,956 | 27.62963 | 123 | py |
FATE | FATE-master/python/federatedml/nn/loss/cross_entropy.py | import torch as t
from federatedml.util import consts
from torch.nn.functional import one_hot
def cross_entropy(p2, p1, reduction='mean'):
    """Cross entropy between predicted probabilities p2 and targets p1.

    reduction: 'sum', 'mean' (per-sample sums averaged), or 'none'
    (per-sample losses). Raises ValueError for anything else.
    """
    p2 = p2 + consts.FLOAT_ZERO  # keep log() away from exact zero -> nan
    assert p2.shape == p1.shape
    weighted_log = p1 * t.log(p2)
    if reduction == 'sum':
        return -t.sum(weighted_log)
    if reduction == 'mean':
        return -t.mean(t.sum(weighted_log, dim=1))
    if reduction == 'none':
        return -t.sum(weighted_log, dim=1)
    raise ValueError('unknown reduction')
class CrossEntropyLoss(t.nn.Module):
    """
    A CrossEntropy Loss that will not compute Softmax
    (inputs are assumed to already be probabilities).
    """

    def __init__(self, reduction='mean'):
        super(CrossEntropyLoss, self).__init__()
        self.reduction = reduction

    def forward(self, pred, label):
        # one-hot encode integer labels to match pred's shape
        target = one_hot(label.flatten())
        return cross_entropy(pred, target, self.reduction)
| 913 | 25.114286 | 66 | py |
FATE | FATE-master/python/federatedml/nn/loss/weighted_loss.py | import torch as t
from torch.nn import BCELoss
class WeightedBCE(t.nn.Module):
    """Binary cross-entropy with per-sample weights.

    forward(pred, (label, weights)) returns
    sum(weights * bce(pred, label)) / sum(weights).
    """

    def __init__(self) -> None:
        super().__init__()
        # reduction='none' keeps per-element losses so they can be weighted;
        # the legacy `reduce=False` argument is deprecated in torch.
        self.loss_fn = BCELoss(reduction='none')

    def forward(self, pred, label_and_weight):
        label, weights = label_and_weight
        losses = self.loss_fn(pred, label)
        losses = losses * weights
        # weighted average, normalized by total weight
        loss_val = losses.sum() / weights.sum()
        return loss_val
| 425 | 24.058824 | 47 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/homo_logistic_regression/homo_lr_client.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import torch as t
from fate_arch.computing._util import is_table
from federatedml.linear_model.coordinated_linear_model.logistic_regression.\
homo_logistic_regression.homo_lr_base import HomoLRBase
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.nn.dataset.table import TableDataset
from federatedml.nn.homo.trainer.trainer_base import ExporterBase
from federatedml.nn.homo.trainer.fedavg_trainer import FedAVGTrainer
from federatedml.callbacks.model_checkpoint import ModelCheckpoint
from federatedml.callbacks.validation_strategy import ValidationStrategy
from federatedml.protobuf.generated import lr_model_param_pb2
from federatedml.model_base import MetricMeta
from fate_arch.session import computing_session
from federatedml.nn.backend.utils.data import get_ret_predict_table, add_match_id
from federatedml.nn.loss.weighted_loss import WeightedBCE
from federatedml.statistic.data_overview import check_with_inst_id
def linear_weight_to_torch(model_weights):
    """Convert FATE LinearModelWeights into torch Sequential(Linear, Sigmoid).

    The linear layer maps coef_ (shape [n_features]) to a single logit; the
    intercept becomes the layer bias when fit_intercept is set.
    """
    weights = model_weights.coef_
    bias = None
    use_bias = False
    if model_weights.fit_intercept:
        bias = model_weights.intercept_
        use_bias = True
    torch_linear_layer = t.nn.Linear(
        in_features=weights.shape[0], out_features=1, bias=use_bias)
    LOGGER.debug('weights are {}, bias is {}'.format(weights, bias))
    torch_linear_layer.weight.data.copy_(t.Tensor(weights))
    if use_bias:
        torch_linear_layer.bias.data.copy_(t.Tensor([bias]))
    torch_model = t.nn.Sequential(
        torch_linear_layer,
        t.nn.Sigmoid()
    )
    return torch_model
def torch_to_linear_weight(model_weights, torch_model):
    """Write the torch linear layer's parameters back into *model_weights*.

    Stores the flattened weight vector (with the bias appended when an
    intercept is fitted) as a plain python list on ``model_weights._weights``.
    """
    linear = torch_model[0]
    coef = linear.weight.detach().numpy().flatten()
    if model_weights.fit_intercept:
        bias = linear.bias.detach().numpy().flatten()
        coef = np.concatenate([coef, bias])
    model_weights._weights = coef.tolist()
class WrappedOptAndScheduler(object):
    """Bundle a torch optimizer and its LR scheduler behind one interface.

    ``step`` advances both objects, so callers that only know about an
    optimizer still get learning-rate decay applied every step.
    """

    def __init__(self, opt, scheduler):
        self.opt = opt
        self.scheduler = scheduler

    def zero_grad(self):
        self.opt.zero_grad()

    def step(self):
        self.opt.step()
        self.scheduler.step()

    def state_dict(self):
        return self.opt.state_dict()

    def restep(self, n):
        # replay n empty optimization steps, e.g. to fast-forward the
        # scheduler state when resuming from a checkpoint / warm start
        for _ in range(n):
            self.zero_grad()
            self.step()
class HomoLRClientExporter(ExporterBase):
    """Exporter that turns a trained torch linear model back into the
    protobuf param/meta pair expected by FATE's Homo-LR framework.
    """

    def __init__(self, header, homo_lr_meta, model_weights, param_name, meta_name, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.header = header
        self.homo_lr_meta = homo_lr_meta
        self.model_weights = model_weights
        self.param_name = param_name
        self.meta_name = meta_name

    def export_model_dict(
            self,
            model=None,
            optimizer=None,
            model_define=None,
            optimizer_define=None,
            loss_define=None,
            epoch_idx=None,
            converge_status=None,
            loss_history=None,
            best_epoch=None,
            extra_data={}):
        """Sync torch parameters into the LR weights and build the protobuf dict."""
        torch_to_linear_weight(self.model_weights, model)
        # map each feature name to its (float) coefficient
        weight_dict = {
            header_name: float(self.model_weights.coef_[idx])
            for idx, header_name in enumerate(self.header)
        }
        param = lr_model_param_pb2.LRModelParam(
            iters=epoch_idx,
            loss_history=loss_history,
            is_converged=converge_status,
            weight=weight_dict,
            intercept=self.model_weights.intercept_,
            header=self.header,
            best_iteration=best_epoch)
        return {self.param_name: param, self.meta_name: self.homo_lr_meta}
class HomoLRClient(HomoLRBase):
    """Client-side (guest/host) Homo Logistic Regression.

    Wraps a one-layer torch model (Linear + Sigmoid) and trains it with the
    FedAVG trainer, keeping the result synchronized with FATE's
    ``LinearModelWeights`` so protobuf export and warm start keep working.
    """

    def __init__(self):
        super(HomoLRClient, self).__init__()
        self.loss_history = []
        self.role = consts.GUEST
        self.dataset_cache = {}  # id(table) -> TableDataset, avoids reloading
        self.trainer = None
        self.best_iteration = -1
        # check point
        self.save_freq = None
        self.model_checkpoint = None

    def _init_model(self, params):
        super()._init_model(params)

    def get_dataset(self, data):
        """Wrap a FATE table in a TableDataset, caching per table object."""
        if id(data) in self.dataset_cache:
            return self.dataset_cache[id(data)]

        if is_table(data):
            dataset = TableDataset()
            dataset.load(data)
            self.dataset_cache[id(data)] = dataset
            return dataset
        else:
            raise RuntimeError('unknown data type {}'.format(data))

    def init(self, dataset: TableDataset, partitions):
        """Build the torch model, optimizer, loss and FedAVG trainer.

        Returns
        -------
        tuple
            (trainer, torch_model, wrapped_optimizer, loss)
        """
        torch_model = linear_weight_to_torch(self.model_weights)
        LOGGER.debug('torch model is {}, parameters are {} dataset {}'.format(
            torch_model, list(torch_model.parameters()), dataset))

        # batch_size == -1 means full-batch training
        batch_size = len(dataset) if self.batch_size == -1 else self.batch_size
        optimizer, scheduler = self.get_torch_optimizer(
            torch_model, self.model_param)
        wrap_optimizer = WrappedOptAndScheduler(optimizer, scheduler)
        LOGGER.debug('init optimizer statedict is {}'.format(wrap_optimizer.state_dict()))

        if dataset.with_sample_weight:
            loss = WeightedBCE()
        else:
            loss = t.nn.BCELoss()

        # 'weight_diff' early stop is handled by the LR framework itself,
        # so it is not forwarded to the trainer
        early_stop = None
        if self.early_stop != 'weight_diff':
            early_stop = self.early_stop
        trainer = FedAVGTrainer(
            epochs=self.max_iter,
            batch_size=batch_size,
            data_loader_worker=partitions,
            secure_aggregate=True,
            aggregate_every_n_epoch=self.aggregate_iters,
            validation_freqs=self.validation_freqs,
            task_type='binary',
            checkpoint_save_freqs=self.save_freq,
            early_stop=early_stop,
            shuffle=False,
            tol=self.tol)
        if not self.callback_one_vs_rest:
            trainer.set_tracker(self.tracker)
        trainer.set_model(torch_model)
        trainer.set_model_exporter(
            HomoLRClientExporter(
                header=self.header,
                homo_lr_meta=self._get_meta(),
                model_weights=self.model_weights,
                meta_name=self.model_meta_name,
                param_name=self.model_param_name))
        trainer.set_checkpoint(self.model_checkpoint)

        return trainer, torch_model, wrap_optimizer, loss

    def get_model_summary(self, is_converged, best_iteration, loss_history, eval_summary):
        """Assemble the component summary dict from training results."""
        header = self.header
        if header is None:
            return {}
        weight_dict, intercept_ = self.get_weight_intercept_dict(header)
        summary = {"coef": weight_dict,
                   "intercept": intercept_,
                   "is_converged": is_converged,
                   "best_iteration": best_iteration,
                   "local_loss_history": loss_history,
                   "validation_metrics": eval_summary
                   }
        return summary

    def fit_binary(self, data_instances, validate_data=None):
        """Train the binary LR model with the FedAVG trainer.

        Parameters
        ----------
        data_instances : Table
            training data
        validate_data : Table, optional
            validation data
        """
        # pick up checkpoint / validation settings from registered callbacks
        for callback_cpn in self.callback_list.callback_list:
            if isinstance(callback_cpn, ModelCheckpoint):
                self.save_freq = callback_cpn.save_freq
                self.model_checkpoint = callback_cpn
            elif isinstance(callback_cpn, ValidationStrategy):
                self.validation_freqs = callback_cpn.validation_freqs

        train_set = self.get_dataset(data_instances)
        train_set.set_type('train')
        if validate_data is not None:
            val_set = self.get_dataset(validate_data)
            val_set.set_type('validate')
        else:
            val_set = None

        if not self.component_properties.is_warm_start:
            self.model_weights = self._init_model_variables(data_instances)
        else:
            LOGGER.debug('callback warm start, iter {}'.format(self.n_iter_))
            self.callback_warm_start_init_iter(self.n_iter_ + 1)

        # fate loss callback setting
        LOGGER.debug('need one vs rest {}'.format(self.need_one_vs_rest))
        if not self.callback_one_vs_rest:  # ovr does not display loss
            self.callback_meta(
                "loss",
                "train",
                MetricMeta(
                    name="train",
                    metric_type="LOSS",
                    extra_metas={
                        "unit_name": "epochs"}))

        self.trainer, torch_model, wrap_optimizer, loss = self.init(
            train_set, data_instances.partitions)

        if self.component_properties.is_warm_start:
            # fast-forward optimizer/scheduler state to the resumed iteration
            wrap_optimizer.restep(self.n_iter_ + 1)

        self.trainer.train(train_set, val_set, loss=loss,
                           optimizer=wrap_optimizer)

        # sync trained torch parameters back into the FATE model weights
        torch_to_linear_weight(self.model_weights, torch_model)
        eval_summary = self.trainer.get_evaluation_summary()
        summary = self.trainer.get_summary()
        self.is_converged, self.best_iteration, self.loss_history = summary[
            'need_stop'], summary['best_epoch'], summary['loss_history']
        self.n_iter_ = len(self.loss_history) - 1
        # BUGFIX: arguments were previously passed as (best_iteration,
        # loss_history, is_converged, ...), which silently swapped the
        # summary fields; pass them in the declared parameter order.
        self.set_summary(self.get_model_summary(
            self.is_converged, self.best_iteration, self.loss_history, eval_summary))

    @assert_io_num_rows_equal
    def predict(self, data_instances):
        """Predict scores/labels for *data_instances*; returns a result table."""
        self._abnormal_detection(data_instances)
        self.init_schema(data_instances)
        data_instances = self.align_data_header(data_instances, self.header)
        with_inst_id = check_with_inst_id(data_instances)
        dataset = self.get_dataset(data_instances)

        if self.need_one_vs_rest:
            predict_result = self.one_vs_rest_obj.predict(data_instances)
            return predict_result

        dataset.set_type('predict')
        if self.trainer is None:
            # predicting with a loaded model: build the trainer on the fly
            self.trainer, torch_model, wrap_optimizer, loss = self.init(
                dataset, data_instances.partitions)
        trainer_ret = self.trainer.predict(dataset)
        id_table, pred_table, classes = trainer_ret()
        if with_inst_id:
            add_match_id(id_table=id_table, dataset_inst=dataset)
        id_dtable, pred_dtable = get_ret_predict_table(
            id_table, pred_table, classes, data_instances.partitions, computing_session)
        ret_table = self.predict_score_to_output(
            id_dtable, pred_dtable, classes)
        return ret_table
| 11,511 | 35.087774 | 106 | py |
FATE | FATE-master/python/federatedml/linear_model/coordinated_linear_model/logistic_regression/homo_logistic_regression/homo_lr_base.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import torch as t
import math
from torch.optim.lr_scheduler import LambdaLR
from federatedml.linear_model.linear_model_weight import LinearModelWeights
from federatedml.linear_model.coordinated_linear_model.logistic_regression.base_logistic_regression import BaseLogisticRegression
from federatedml.optim import activation
from federatedml.param.logistic_regression_param import HomoLogisticParam
from federatedml.protobuf.generated import lr_model_meta_pb2
from federatedml.secureprotol import PaillierEncrypt
from federatedml.statistic import data_overview
from federatedml.transfer_variable.transfer_class.homo_lr_transfer_variable import HomoLRTransferVariable
from federatedml.util import LOGGER
from federatedml.util import consts
from federatedml.util import fate_operator
class HomoLRBase(BaseLogisticRegression):
    """Shared base for Homo-LR components.

    Holds model/meta naming, builds torch optimizers with LR-decay
    schedulers from FATE parameters, and dispatches ``fit`` between plain
    binary training and one-vs-rest for multi-class labels.
    """

    def __init__(self):
        super(HomoLRBase, self).__init__()
        self.model_name = 'HomoLogisticRegression'
        self.model_param_name = 'HomoLogisticRegressionParam'
        self.model_meta_name = 'HomoLogisticRegressionMeta'
        self.mode = consts.HOMO
        self.model_param = HomoLogisticParam()
        self.aggregator = None
        self.param = None

    def get_torch_optimizer(self, torch_model: t.nn.Module, param: HomoLogisticParam):
        """Build ``(optimizer, LambdaLR scheduler)`` for *torch_model*.

        The scheduler multiplies the base lr by ``1 / (1 + epoch * decay)``,
        or ``1 / sqrt(1 + epoch * decay)`` when ``decay_sqrt`` is set.

        Raises
        ------
        AttributeError
            if any required optimizer parameter is missing on *param*
        NotImplementedError
            for unsupported optimizer types (including 'sqn')
        """
        try:
            learning_rate = param.learning_rate
            alpha = param.alpha  # L2 penalty weight
            decay = param.decay
            decay_sqrt = param.decay_sqrt
            if not decay_sqrt:
                def decay_func(epoch): return 1 / (1 + epoch * decay)
            else:
                def decay_func(epoch): return 1 / math.sqrt(1 + epoch * decay)
        except AttributeError:
            raise AttributeError("Optimizer parameters has not been totally set")
        optimizer_type = param.optimizer
        if optimizer_type == 'sgd':
            opt = t.optim.SGD(params=torch_model.parameters(), lr=learning_rate, weight_decay=alpha)
        elif optimizer_type == 'nesterov_momentum_sgd':
            opt = t.optim.SGD(
                params=torch_model.parameters(),
                nesterov=True,
                momentum=0.9,
                lr=learning_rate,
                weight_decay=alpha)
        elif optimizer_type == 'rmsprop':
            opt = t.optim.RMSprop(params=torch_model.parameters(), alpha=0.99, lr=learning_rate, weight_decay=alpha)
        elif optimizer_type == 'adam':
            opt = t.optim.Adam(params=torch_model.parameters(), lr=learning_rate, weight_decay=alpha)
        elif optimizer_type == 'adagrad':
            opt = t.optim.Adagrad(params=torch_model.parameters(), lr=learning_rate, weight_decay=alpha)
        else:
            if optimizer_type == 'sqn':
                raise NotImplementedError("Sqn optimizer is not supported in Homo-LR")
            raise NotImplementedError("Optimize method cannot be recognized: {}".format(optimizer_type))
        scheduler = LambdaLR(opt, lr_lambda=decay_func)
        return opt, scheduler

    def _init_model(self, params):
        # cache the raw parameter object and the aggregation interval
        super(HomoLRBase, self)._init_model(params)
        self.transfer_variable = HomoLRTransferVariable()
        # self.aggregator.register_aggregator(self.transfer_variable)
        self.param = params
        self.aggregate_iters = params.aggregate_iters

    @property
    def use_loss(self):
        # loss is not tracked when convergence is judged by weight difference
        if self.model_param.early_stop == 'weight_diff':
            return False
        return True

    def fit(self, data_instances, validate_data=None):
        """Dispatch training: one-vs-rest for >2 classes, else binary fit."""
        classes = self.one_vs_rest_obj.get_data_classes(data_instances)
        if self.role == consts.ARBITER:
            self._server_check_data()
        else:
            self._client_check_data(data_instances)
        if len(classes) > 2:
            self.need_one_vs_rest = True
            self.need_call_back_loss = False
            self.one_vs_rest_fit(train_data=data_instances, validate_data=validate_data)
            if self.header is None:
                self.header = self.one_vs_rest_obj.header
        else:
            self.need_one_vs_rest = False
            self.fit_binary(data_instances, validate_data)

    def fit_binary(self, data_instances, validate_data):
        # implemented by the concrete client/server subclasses
        raise NotImplementedError("Should not called here")

    def _client_check_data(self, data_instances):
        """Run guest/host-side sanity checks on the input table."""
        self._abnormal_detection(data_instances)
        self.check_abnormal_values(data_instances)
        self.init_schema(data_instances)
        # Support multi-class now
        """
        num_classes, classes_ = ClassifyLabelChecker.validate_label(data_instances)
        aligned_label, new_label_mapping = HomoLabelEncoderClient().label_alignment(classes_)
        if len(aligned_label) > 2:
            raise ValueError("Homo LR support binary classification only now")
        elif len(aligned_label) <= 1:
            raise ValueError("Number of classes should be equal to 2")
        """

    def _server_check_data(self):
        # arbiter side has no data to validate
        # HomoLabelEncoderArbiter().label_alignment()
        pass

    def classify(self, predict_wx, threshold):
        """
        convert a probability table into a predicted class table.
        """
        # predict_wx = self.compute_wx(data_instances, self.model_weights.coef_, self.model_weights.intercept_)
        def predict(x):
            # sigmoid maps the linear score to a probability; label is 1
            # when the probability exceeds the threshold
            prob = activation.sigmoid(x)
            pred_label = 1 if prob > threshold else 0
            return prob, pred_label
        predict_table = predict_wx.mapValues(predict)
        return predict_table

    def _init_model_variables(self, data_instances):
        """Initialize LinearModelWeights from the feature shape of the data."""
        model_shape = data_overview.get_features_shape(data_instances)
        LOGGER.info("Initialized model shape is {}".format(model_shape))
        w = self.initializer.init_model(model_shape, init_params=self.init_param_obj,
                                        data_instance=data_instances)
        model_weights = LinearModelWeights(w, fit_intercept=self.fit_intercept)
        return model_weights

    def _compute_loss(self, data_instances, prev_round_weights):
        """Compute the mean regularized loss over *data_instances* and record it."""
        f = functools.partial(self.gradient_operator.compute_loss,
                              coef=self.model_weights.coef_,
                              intercept=self.model_weights.intercept_)
        loss = data_instances.applyPartitions(f).reduce(fate_operator.reduce_add)
        if self.use_proximal:  # use additional proximal term
            loss_norm = self.optimizer.loss_norm(self.model_weights,
                                                 prev_round_weights)
        else:
            loss_norm = self.optimizer.loss_norm(self.model_weights)
        if loss_norm is not None:
            loss += loss_norm
        loss /= data_instances.count()
        if self.need_call_back_loss:
            self.callback_loss(self.n_iter_, loss)
        self.loss_history.append(loss)
        return loss

    def _get_meta(self):
        """Build the protobuf meta object describing the training configuration."""
        meta_protobuf_obj = lr_model_meta_pb2.LRModelMeta(penalty=self.model_param.penalty,
                                                          tol=self.model_param.tol,
                                                          alpha=self.alpha,
                                                          optimizer=self.model_param.optimizer,
                                                          batch_size=self.batch_size,
                                                          learning_rate=self.model_param.learning_rate,
                                                          max_iter=self.max_iter,
                                                          early_stop=self.model_param.early_stop,
                                                          fit_intercept=self.fit_intercept,
                                                          module='HomoLR',
                                                          need_one_vs_rest=self.need_one_vs_rest)
        return meta_protobuf_obj
| 8,574 | 42.090452 | 129 | py |
FATE | FATE-master/python/federatedml/util/consts.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# federation party roles
ARBITER = 'arbiter'
HOST = 'host'
GUEST = 'guest'
MODEL_AGG = "model_agg"
GRAD_AGG = "grad_agg"
# task types
BINARY = 'binary'
MULTY = 'multi'  # NOTE(review): name looks misspelled ('MULTI'); kept for compatibility
CLASSIFICATION = "classification"
REGRESSION = 'regression'
CLUSTERING = 'clustering'
CAUSAL_LM = "causal_ml"  # NOTE(review): value is "causal_ml" (not "causal_lm") — kept as-is, other code may depend on it
SEQ_2_SEQ_LM = "seq_2_seq_lm"
ONE_VS_REST = 'one_vs_rest'
# encryption / obfuscation methods
PAILLIER = 'Paillier'
PAILLIER_IPCL = 'IPCL'
RANDOM_PADS = "RandomPads"
NONE = "None"
AFFINE = 'Affine'
ITERATIVEAFFINE = 'IterativeAffine'
RANDOM_ITERATIVEAFFINE = 'RandomIterativeAffine'
L1_PENALTY = 'L1'
L2_PENALTY = 'L2'
# numeric tolerances
FLOAT_ZERO = 1e-8
OVERFLOW_THRESHOLD = 1e8
OT_HAUCK = 'OT_Hauck'
CE_PH = 'CommutativeEncryptionPohligHellman'
XOR = 'xor'
AES = 'aes'
PARAM_MAXDEPTH = 5
MAX_CLASSNUM = 1000
MIN_BATCH_SIZE = 10
SPARSE_VECTOR = "SparseVector"
# federation modes
HETERO = "hetero"
HOMO = "homo"
# intersect / PSI protocols
RAW = "raw"
RSA = "rsa"
DH = "dh"
ECDH = "ecdh"
# evaluation
AUC = "auc"
KS = "ks"
LIFT = "lift"
GAIN = "gain"
PRECISION = "precision"
RECALL = "recall"
ACCURACY = "accuracy"
EXPLAINED_VARIANCE = "explained_variance"
MEAN_ABSOLUTE_ERROR = "mean_absolute_error"
MEAN_SQUARED_ERROR = "mean_squared_error"
MEAN_SQUARED_LOG_ERROR = "mean_squared_log_error"
MEDIAN_ABSOLUTE_ERROR = "median_absolute_error"
R2_SCORE = "r2_score"
ROOT_MEAN_SQUARED_ERROR = "root_mean_squared_error"
ROC = "roc"
F1_SCORE = 'f1_score'
CONFUSION_MAT = 'confusion_mat'
PSI = 'psi'
VIF = 'vif'
PEARSON = 'pearson'
FEATURE_IMPORTANCE = 'feature_importance'
QUANTILE_PR = 'quantile_pr'
JACCARD_SIMILARITY_SCORE = 'jaccard_similarity_score'
FOWLKES_MALLOWS_SCORE = 'fowlkes_mallows_score'
ADJUSTED_RAND_SCORE = 'adjusted_rand_score'
DAVIES_BOULDIN_INDEX = 'davies_bouldin_index'
DISTANCE_MEASURE = 'distance_measure'
CONTINGENCY_MATRIX = 'contingency_matrix'
# evaluation alias metric
ALL_METRIC_NAME = [AUC, KS, LIFT, GAIN, PRECISION, RECALL, ACCURACY, EXPLAINED_VARIANCE, MEAN_ABSOLUTE_ERROR,
                   MEAN_SQUARED_ERROR, MEAN_SQUARED_LOG_ERROR, MEDIAN_ABSOLUTE_ERROR, R2_SCORE, ROOT_MEAN_SQUARED_ERROR,
                   ROC, F1_SCORE, CONFUSION_MAT, PSI, QUANTILE_PR, JACCARD_SIMILARITY_SCORE, FOWLKES_MALLOWS_SCORE,
                   ADJUSTED_RAND_SCORE, DAVIES_BOULDIN_INDEX, DISTANCE_MEASURE, CONTINGENCY_MATRIX]
# maps common short names to the canonical metric name above
ALIAS = {
    ('l1', 'mae', 'regression_l1'): MEAN_ABSOLUTE_ERROR,
    ('l2', 'mse', 'regression_l2', 'regression'): MEAN_SQUARED_ERROR,
    ('l2_root', 'rmse'): ROOT_MEAN_SQUARED_ERROR,
    ('msle', ): MEAN_SQUARED_LOG_ERROR,
    ('r2', ): R2_SCORE,
    ('acc', ): ACCURACY,
    ('DBI', ): DAVIES_BOULDIN_INDEX,
    ('FMI', ): FOWLKES_MALLOWS_SCORE,
    ('RI', ): ADJUSTED_RAND_SCORE,
    ('jaccard', ): JACCARD_SIMILARITY_SCORE
}
# default evaluation metrics
DEFAULT_BINARY_METRIC = [AUC, KS]
DEFAULT_REGRESSION_METRIC = [ROOT_MEAN_SQUARED_ERROR, MEAN_ABSOLUTE_ERROR]
DEFAULT_MULTI_METRIC = [ACCURACY, PRECISION, RECALL]
DEFAULT_CLUSTER_METRIC = [DAVIES_BOULDIN_INDEX]
# allowed metrics for different tasks
ALL_BINARY_METRICS = [
    AUC,
    KS,
    LIFT,
    GAIN,
    ACCURACY,
    PRECISION,
    RECALL,
    ROC,
    CONFUSION_MAT,
    PSI,
    F1_SCORE,
    QUANTILE_PR
]
ALL_REGRESSION_METRICS = [
    EXPLAINED_VARIANCE,
    MEAN_ABSOLUTE_ERROR,
    MEAN_SQUARED_ERROR,
    MEDIAN_ABSOLUTE_ERROR,
    R2_SCORE,
    ROOT_MEAN_SQUARED_ERROR
]
ALL_MULTI_METRICS = [
    ACCURACY,
    PRECISION,
    RECALL
]
ALL_CLUSTER_METRICS = [
    JACCARD_SIMILARITY_SCORE,
    FOWLKES_MALLOWS_SCORE,
    ADJUSTED_RAND_SCORE,
    DAVIES_BOULDIN_INDEX,
    DISTANCE_MEASURE,
    CONTINGENCY_MATRIX
]
# single value metrics
REGRESSION_SINGLE_VALUE_METRICS = [
    EXPLAINED_VARIANCE,
    MEAN_ABSOLUTE_ERROR,
    MEAN_SQUARED_ERROR,
    MEAN_SQUARED_LOG_ERROR,
    MEDIAN_ABSOLUTE_ERROR,
    R2_SCORE,
    ROOT_MEAN_SQUARED_ERROR,
]
BINARY_SINGLE_VALUE_METRIC = [
    AUC,
    KS
]
MULTI_SINGLE_VALUE_METRIC = [
    PRECISION,
    RECALL,
    ACCURACY
]
CLUSTER_SINGLE_VALUE_METRIC = [
    JACCARD_SIMILARITY_SCORE,
    FOWLKES_MALLOWS_SCORE,
    ADJUSTED_RAND_SCORE,
    DAVIES_BOULDIN_INDEX
]
# workflow
TRAIN_DATA = "train_data"
TEST_DATA = "test_data"
# initialize method
RANDOM_NORMAL = "random_normal"
RANDOM_UNIFORM = 'random_uniform'
ONES = 'ones'
ZEROS = 'zeros'
CONST = 'const'
# decision tree
MAX_SPLIT_NODES = 2 ** 16
MAX_SPLITINFO_TO_COMPUTE = 2 ** 10
NORMAL_TREE = 'normal'
COMPLETE_SECURE_TREE = 'complete_secure'
STD_TREE = 'std'
MIX_TREE = 'mix'
LAYERED_TREE = 'layered'
SINGLE_OUTPUT = 'single_output'
MULTI_OUTPUT = 'multi_output'
HOST_LOCAL = 'hostLocal'
TRAIN_EVALUATE = 'train_evaluate'
VALIDATE_EVALUATE = 'validate_evaluate'
# Feature engineering
G_BIN_NUM = 10
DEFAULT_COMPRESS_THRESHOLD = 10000
DEFAULT_HEAD_SIZE = 10000
DEFAULT_RELATIVE_ERROR = 1e-4
ONE_HOT_LIMIT = 1024  # max number of distinct values accepted for one-hot encoding
PERCENTAGE_VALUE_LIMIT = 0.1
SECURE_AGG_AMPLIFY_FACTOR = 1000
QUANTILE = 'quantile'
BUCKET = 'bucket'
OPTIMAL = 'optimal'
VIRTUAL_SUMMARY = 'virtual_summary'
RECURSIVE_QUERY = 'recursive_query'
# Feature selection methods
UNIQUE_VALUE = 'unique_value'
IV_VALUE_THRES = 'iv_value_thres'
IV_PERCENTILE = 'iv_percentile'
IV_TOP_K = 'iv_top_k'
COEFFICIENT_OF_VARIATION_VALUE_THRES = 'coefficient_of_variation_value_thres'
# COEFFICIENT_OF_VARIATION_PERCENTILE = 'coefficient_of_variation_percentile'
OUTLIER_COLS = 'outlier_cols'
MANUALLY_FILTER = 'manually'
PERCENTAGE_VALUE = 'percentage_value'
IV_FILTER = 'iv_filter'
STATISTIC_FILTER = 'statistic_filter'
PSI_FILTER = 'psi_filter'
VIF_FILTER = 'vif_filter'
CORRELATION_FILTER = 'correlation_filter'
SECUREBOOST = 'sbt'
HETERO_SBT_FILTER = 'hetero_sbt_filter'
HOMO_SBT_FILTER = 'homo_sbt_filter'
HETERO_FAST_SBT_FILTER = 'hetero_fast_sbt_filter'
IV = 'iv'
# Selection Pre-model
STATISTIC_MODEL = 'statistic_model'
BINNING_MODEL = 'binning_model'
# imputer
MIN = 'min'
MAX = 'max'
MEAN = 'mean'
DESIGNATED = 'designated'
STR = 'str'
FLOAT = 'float'
INT = 'int'
ORIGIN = 'origin'
MEDIAN = 'median'
# min_max_scaler
NORMAL = 'normal'
CAP = 'cap'
MINMAXSCALE = 'min_max_scale'
STANDARDSCALE = 'standard_scale'
ALL = 'all'
COL = 'col'
# intersection cache
PHONE = 'phone'
IMEI = 'imei'
MD5 = 'md5'
SHA1 = 'sha1'
SHA224 = 'sha224'
SHA256 = 'sha256'
SHA384 = 'sha384'
SHA512 = 'sha512'
SM3 = 'sm3'
INTERSECT_CACHE_TAG = 'Za'
SHARE_INFO_COL_NAME = "share_info"
# statistics
COUNT = 'count'
STANDARD_DEVIATION = 'stddev'
SUMMARY = 'summary'
DESCRIBE = 'describe'
SUM = 'sum'
COVARIANCE = 'cov'
CORRELATION = 'corr'
VARIANCE = 'variance'
COEFFICIENT_OF_VARIATION = 'coefficient_of_variance'
MISSING_COUNT = "missing_count"
MISSING_RATIO = "missing_ratio"
SKEWNESS = 'skewness'
KURTOSIS = 'kurtosis'
# adapters model name
HOMO_SBT = 'homo_sbt'
HETERO_SBT = 'hetero_sbt'
HETERO_FAST_SBT = 'hetero_fast_sbt'
HETERO_FAST_SBT_MIX = 'hetero_fast_sbt_mix'
HETERO_FAST_SBT_LAYERED = 'hetero_fast_sbt_layered'
# tree protobuf model name
HETERO_SBT_GUEST_MODEL = 'HeteroSecureBoostingTreeGuest'
HETERO_SBT_HOST_MODEL = 'HeteroSecureBoostingTreeHost'
HETERO_FAST_SBT_GUEST_MODEL = "HeteroFastSecureBoostingTreeGuest"
HETERO_FAST_SBT_HOST_MODEL = "HeteroFastSecureBoostingTreeHost"
HOMO_SBT_GUEST_MODEL = "HomoSecureBoostingTreeGuest"
HOMO_SBT_HOST_MODEL = "HomoSecureBoostingTreeHost"
# tree decimal round to prevent float error
TREE_DECIMAL_ROUND = 10
# homm sbt backend
MEMORY_BACKEND = 'memory'
DISTRIBUTED_BACKEND = 'distributed'
# column_expand
MANUAL = 'manual'
# scorecard
CREDIT = 'credit'
# sample weight
BALANCED = 'balanced'
# min/max base fraction
MIN_BASE_FRACTION = 0.01
MAX_BASE_FRACTION = 0.99
MAX_SAMPLE_OUTPUT_LIMIT = 10 ** 6
# Hetero NN Selective BP Strategy
SELECTIVE_SIZE = 1024
# intersect join methods
INNER_JOIN = "inner_join"
LEFT_JOIN = "left_join"
DEFAULT_KEY_LENGTH = 1024
MIN_HASH_FUNC_COUNT = 4
MAX_HASH_FUNC_COUNT = 32
EINI_TREE_COMPLEXITY = 1000000000
pytorch_backend = 'pytorch'
keras_backend = 'keras'
CURVE25519 = 'curve25519'
# HOMO NN Framework
FEDAVG_TRAINER = 'fedavg_trainer'
# DEEPSPEED
DEEPSPEED_MODEL_DIR = "EGGROLL_CONTAINER_MODELS_DIR"
FLOW_MODEL_SYNC_PATH = "MODEL_PATH"
# positive unlabeled
PROBABILITY = "probability"
QUANTITY = "quantity"
PROPORTION = "proportion"
DISTRIBUTION = "distribution"
| 8,740 | 22.882514 | 120 | py |
FATE | FATE-master/python/federatedml/transfer_learning/hetero_ftl/ftl_dataloder.py | import numpy as np
import tensorflow as tf
from federatedml.util import LOGGER
class FTLDataLoader(tf.keras.utils.Sequence):
    """Keras ``Sequence`` that serves samples for hetero-FTL.

    On the guest side both overlapping and non-overlapping samples are
    loaded (overlap rows first); on the host side only the overlapping
    samples are used. ``self.x`` / ``self.y`` are dense numpy arrays whose
    row order is: overlap samples, then (guest only) non-overlap samples.
    """

    def __init__(self, non_overlap_samples, overlap_samples, batch_size, guest_side=True):
        """
        Parameters
        ----------
        non_overlap_samples : Table
            samples owned only by this party (used on the guest side only)
        overlap_samples : Table
            intersected samples shared by both parties
        batch_size : int
            mini-batch size
        guest_side : bool
            whether this loader runs on the guest party
        """
        self.batch_size = batch_size
        self.guest_side = guest_side
        self._overlap_index = []
        self._non_overlap_index = []
        if guest_side:
            self.size = non_overlap_samples.count() + overlap_samples.count()
        else:
            self.size = overlap_samples.count()
        _, one_data = overlap_samples.first()
        self.y_shape = (1,)
        self.x_shape = one_data.features.shape
        self.x = np.zeros((self.size, *self.x_shape))
        self.y = np.zeros((self.size, *self.y_shape))
        index = 0
        self._overlap_keys = []
        self._non_overlap_keys = []
        # overlap samples are stored first, so their row indexes are [0, overlap_num)
        for k, inst in overlap_samples.collect():
            self._overlap_keys.append(k)
            self.x[index] = inst.features
            if guest_side:
                self.y[index] = inst.label
            index += 1
        if self.guest_side:
            for k, inst in non_overlap_samples.collect():
                self._non_overlap_keys.append(k)
                self.x[index] = inst.features
                if guest_side:
                    self.y[index] = inst.label
                index += 1
        if guest_side:
            self._overlap_index = np.array(list(range(0, overlap_samples.count())))
            self._non_overlap_index = np.array(list(range(overlap_samples.count(), self.size)))
        else:
            self._overlap_index = list(range(len(self.x)))

    def get_overlap_indexes(self):
        return self._overlap_index

    def get_non_overlap_indexes(self):
        return self._non_overlap_index

    def get_batch_indexes(self, batch_index):
        """Return the [start, end) row range of the given batch."""
        start = self.batch_size * batch_index
        end = self.batch_size * (batch_index + 1)
        return start, end

    def get_relative_overlap_index(self, batch_index):
        # positions of overlap samples inside this batch, relative to the batch start
        start, end = self.get_batch_indexes(batch_index)
        return self._overlap_index[(self._overlap_index >= start) & (self._overlap_index < end)] % self.batch_size

    def get_overlap_x(self):
        return self.x[self._overlap_index]

    def get_overlap_y(self):
        return self.y[self._overlap_index]

    def get_overlap_keys(self):
        return self._overlap_keys

    def get_non_overlap_keys(self):
        return self._non_overlap_keys

    def __getitem__(self, index):
        start, end = self.get_batch_indexes(index)
        if self.guest_side:
            return self.x[start: end], self.y[start: end]
        else:
            return self.x[start: end]

    def __len__(self):
        # number of batches, last one may be partial
        return int(np.ceil(self.size / float(self.batch_size)))

    def get_idx(self):
        # BUGFIX: previously returned self._keys, an attribute that is never
        # defined anywhere in this class, so calling this always raised
        # AttributeError. Return all sample keys in the same order as the
        # rows of self.x (overlap keys first).
        return self._overlap_keys + self._non_overlap_keys

    def data_basic_info(self):
        return 'total sample num is {}, overlap sample num is {}, non_overlap sample is {},'\
               'x_shape is {}'.format(self.size, len(self._overlap_index), len(self._non_overlap_index),
                                      self.x_shape)
| 3,111 | 31.416667 | 114 | py |
FATE | FATE-master/python/federatedml/transfer_learning/hetero_ftl/ftl_base.py | import copy
import json
import functools
import numpy as np
from federatedml.util import LOGGER
from federatedml.transfer_learning.hetero_ftl.backend.nn_model import get_nn_builder
from federatedml.model_base import ModelBase
from federatedml.param.ftl_param import FTLParam
from federatedml.transfer_learning.hetero_ftl.backend.tf_keras.nn_model import KerasNNModel
from federatedml.util.classify_label_checker import ClassifyLabelChecker
from federatedml.transfer_variable.transfer_class.ftl_transfer_variable import FTLTransferVariable
from federatedml.transfer_learning.hetero_ftl.ftl_dataloder import FTLDataLoader
from federatedml.transfer_learning.hetero_ftl.backend.tf_keras.data_generator import KerasSequenceDataConverter
from federatedml.nn.backend.utils import rng as random_number_generator
from federatedml.secureprotol import PaillierEncrypt
from federatedml.util import consts
from federatedml.secureprotol.paillier_tensor import PaillierTensor
from federatedml.protobuf.generated.ftl_model_param_pb2 import FTLModelParam
from federatedml.protobuf.generated.ftl_model_meta_pb2 import FTLModelMeta, FTLPredictParam, FTLOptimizerParam
class FTL(ModelBase):
    def __init__(self):
        super(FTL, self).__init__()

        # input para (populated from FTLParam in _init_model)
        self.nn_define = None
        self.alpha = None
        self.tol = None
        self.learning_rate = None
        self.n_iter_no_change = None
        self.validation_freqs = None
        self.early_stopping_rounds = None
        self.use_first_metric_only = None
        self.optimizer = None
        self.intersect_param = None
        self.config_type = 'keras'
        self.comm_eff = None
        self.local_round = 1  # local updates per communication round (comm-efficient mode)

        # runtime variable
        self.verbose = False
        self.nn: KerasNNModel = None  # bottom model, built in initialize_nn
        self.nn_builder = None
        self.model_param = FTLParam()
        self.x_shape = None
        self.input_dim = None
        self.data_num = 0
        self.overlap_num = 0
        self.transfer_variable = FTLTransferVariable()
        self.data_convertor = KerasSequenceDataConverter()
        self.mode = 'plain'  # 'plain' or encrypted mode
        self.encrypter = None
        self.partitions = 16  # partition count for PaillierTensor tables
        self.batch_size = None
        self.epochs = None
        self.store_header = None  # header of input data table
        self.model_float_type = np.float32  # dtype of nn weights, see get_model_float_type
        self.cache_dataloader = {}
        self.validation_strategy = None
    def _init_model(self, param: FTLParam):
        """Copy user parameters onto the component and build the encrypter."""
        self.nn_define = param.nn_define
        self.alpha = param.alpha
        self.tol = param.tol
        self.n_iter_no_change = param.n_iter_no_change
        self.validation_freqs = param.validation_freqs
        self.optimizer = param.optimizer
        self.intersect_param = param.intersect_param
        self.batch_size = param.batch_size
        self.epochs = param.epochs
        self.mode = param.mode
        self.comm_eff = param.communication_efficient
        self.local_round = param.local_round

        assert 'learning_rate' in self.optimizer.kwargs, 'optimizer setting must contain learning_rate'
        self.learning_rate = self.optimizer.kwargs['learning_rate']

        # local_round > 1 only makes sense in communication-efficient mode
        if not self.comm_eff:
            self.local_round = 1
            LOGGER.debug('communication efficient mode is not enabled, local_round set as 1')

        self.encrypter = self.generate_encrypter(param)
        self.predict_param = param.predict_param
        self.rng_generator = random_number_generator.RandomNumberGenerator()
@staticmethod
def debug_data_inst(data_inst):
collect_data = list(data_inst.collect())
LOGGER.debug('showing Table')
for d in collect_data:
LOGGER.debug('key {} id {}, features {} label {}'.format(d[0], d[1].inst_id, d[1].features, d[1].label))
@staticmethod
def reset_label(inst, mapping):
new_inst = copy.deepcopy(inst)
new_inst.label = mapping[new_inst.label]
return new_inst
@staticmethod
def check_label(data_inst):
"""
check label. FTL only supports binary classification, and labels should be 1 or -1
"""
LOGGER.debug('checking label')
label_checker = ClassifyLabelChecker()
num_class, class_set = label_checker.validate_label(data_inst)
if num_class != 2:
raise ValueError(
'ftl only support binary classification, however {} labels are provided.'.format(num_class))
if 1 in class_set and -1 in class_set:
return data_inst
else:
soreted_class_set = sorted(list(class_set))
new_label_mapping = {soreted_class_set[1]: 1, soreted_class_set[0]: -1}
reset_label = functools.partial(FTL.reset_label, mapping=new_label_mapping)
new_table = data_inst.mapValues(reset_label)
new_table.schema = copy.deepcopy(data_inst.schema)
return new_table
def generate_encrypter(self, param) -> PaillierEncrypt:
LOGGER.info("generate encrypter")
if param.encrypt_param.method.lower() == consts.PAILLIER.lower():
encrypter = PaillierEncrypt()
encrypter.generate_key(param.encrypt_param.key_length)
else:
raise NotImplementedError("encrypt method not supported yet!!!")
return encrypter
def encrypt_tensor(self, components, return_dtable=True):
"""
transform numpy array into Paillier tensor and encrypt
"""
encrypted_tensors = []
for comp in components:
encrypted_tensor = PaillierTensor(comp, partitions=self.partitions)
if return_dtable:
encrypted_tensors.append(encrypted_tensor.encrypt(self.encrypter).get_obj())
else:
encrypted_tensors.append(encrypted_tensor.encrypt(self.encrypter))
return encrypted_tensors
def learning_rate_decay(self, learning_rate, epoch):
"""
learning_rate decay
"""
return learning_rate * 1 / np.sqrt(epoch + 1)
def sync_stop_flag(self, num_round, stop_flag=None):
"""
stop flag for n_iter_no_change
"""
LOGGER.info("sync stop flag, boosting round is {}".format(num_round))
if self.role == consts.GUEST:
self.transfer_variable.stop_flag.remote(stop_flag,
role=consts.HOST,
idx=-1,
suffix=(num_round,))
elif self.role == consts.HOST:
return self.transfer_variable.stop_flag.get(idx=0, suffix=(num_round, ))
def prepare_data(self, intersect_obj, data_inst, guest_side=False):
"""
find intersect ids and prepare dataloader
"""
if guest_side:
data_inst = self.check_label(data_inst)
overlap_samples = intersect_obj.run_intersect(data_inst) # find intersect ids
overlap_samples = intersect_obj.get_value_from_data(overlap_samples, data_inst)
non_overlap_samples = data_inst.subtractByKey(overlap_samples)
LOGGER.debug('num of overlap/non-overlap sampels: {}/{}'.format(overlap_samples.count(),
non_overlap_samples.count()))
if overlap_samples.count() == 0:
raise ValueError('no overlap samples')
if guest_side and non_overlap_samples == 0:
raise ValueError('overlap samples are required in guest side')
self.store_header = data_inst.schema['header']
LOGGER.debug('data inst header is {}'.format(self.store_header))
LOGGER.debug('has {} overlap samples'.format(overlap_samples.count()))
batch_size = self.batch_size
if self.batch_size == -1:
batch_size = data_inst.count() + 1 # make sure larger than sample number
data_loader = FTLDataLoader(non_overlap_samples=non_overlap_samples,
batch_size=batch_size, overlap_samples=overlap_samples, guest_side=guest_side)
LOGGER.debug("data details are :{}".format(data_loader.data_basic_info()))
return data_loader, data_loader.x_shape, data_inst.count(), len(data_loader.get_overlap_indexes())
def get_model_float_type(self, nn):
weights = nn.get_trainable_weights()
self.model_float_type = weights[0].dtype
def initialize_nn(self, input_shape):
"""
initializing nn weights
"""
loss = "keep_predict_loss"
self.nn_builder = get_nn_builder(config_type=self.config_type)
self.nn = self.nn_builder(loss=loss, nn_define=self.nn_define, optimizer=self.optimizer, metrics=None,
input_shape=input_shape)
self.get_model_float_type(self.nn)
LOGGER.debug('printing nn layers structure')
for layer in self.nn._model.layers:
LOGGER.debug('input shape {}, output shape {}'.format(layer.input_shape, layer.output_shape))
def generate_mask(self, shape):
"""
generate random number mask
"""
return self.rng_generator.generate_random_number(shape)
def _batch_gradient_update(self, X, grads):
"""
compute and update gradients for all samples
"""
data = self.data_convertor.convert_data(X, grads)
self.nn.train(data)
def _get_mini_batch_gradient(self, X_batch, backward_grads_batch):
"""
compute gradient for a mini batch
"""
X_batch = X_batch.astype(self.model_float_type)
backward_grads_batch = backward_grads_batch.astype(self.model_float_type)
grads = self.nn.get_weight_gradients(X_batch, backward_grads_batch)
return grads
    def update_nn_weights(self, backward_grads, data_loader: FTLDataLoader, epoch_idx, decay=False):
        """
        Update bottom nn weights from per-sample backward gradients.

        Weight gradients are computed batch by batch, summed over all
        batches, then applied in a single apply_gradients call.

        Parameters
        ----------
        backward_grads : per-sample gradients aligned with data_loader.x
        data_loader : FTLDataLoader providing batch indexes and features
        epoch_idx : int, current epoch (used for logging and lr decay)
        decay : bool, apply learning-rate decay before the update
        """
        LOGGER.debug('updating grads at epoch {}'.format(epoch_idx))
        assert len(data_loader.x) == len(backward_grads)
        weight_grads = []
        for i in range(len(data_loader)):
            start, end = data_loader.get_batch_indexes(i)
            batch_x = data_loader.x[start: end]
            batch_grads = backward_grads[start: end]
            batch_weight_grads = self._get_mini_batch_gradient(batch_x, batch_grads)
            if len(weight_grads) == 0:
                # first batch initializes the accumulators
                weight_grads.extend(batch_weight_grads)
            else:
                # accumulate in place; assumes gradients are mutable arrays
                for w, bw in zip(weight_grads, batch_weight_grads):
                    w += bw
        if decay:
            new_learning_rate = self.learning_rate_decay(self.learning_rate, epoch_idx)
            self.nn.set_learning_rate(new_learning_rate)
            LOGGER.debug('epoch {} optimizer details are {}'.format(epoch_idx, self.nn.export_optimizer_config()))
        self.nn.apply_gradients(weight_grads)
def export_nn(self):
return self.nn.export_model()
@staticmethod
def get_dataset_key(data_inst):
return id(data_inst)
    def get_model_meta(self):
        """Assemble the FTLModelMeta protobuf describing training configuration."""
        model_meta = FTLModelMeta()
        model_meta.config_type = self.config_type
        # nn structure and optimizer kwargs are persisted as JSON strings
        model_meta.nn_define = json.dumps(self.nn_define)
        model_meta.batch_size = self.batch_size
        model_meta.epochs = self.epochs
        model_meta.tol = self.tol
        model_meta.input_dim = self.input_dim
        predict_param = FTLPredictParam()
        optimizer_param = FTLOptimizerParam()
        optimizer_param.optimizer = self.optimizer.optimizer
        optimizer_param.kwargs = json.dumps(self.optimizer.kwargs)
        model_meta.optimizer_param.CopyFrom(optimizer_param)
        model_meta.predict_param.CopyFrom(predict_param)
        return model_meta
    def get_model_param(self):
        """Assemble the FTLModelParam protobuf holding model bytes and data header."""
        model_param = FTLModelParam()
        model_bytes = self.nn.export_model()
        model_param.model_bytes = model_bytes
        model_param.header.extend(list(self.store_header))
        return model_param
    def set_model_meta(self, model_meta):
        """Restore training configuration from FTLModelMeta and rebuild the nn."""
        self.config_type = model_meta.config_type
        self.nn_define = json.loads(model_meta.nn_define)
        self.batch_size = model_meta.batch_size
        self.epochs = model_meta.epochs
        self.tol = model_meta.tol
        # start from the default optimizer config, then overwrite with stored values
        self.optimizer = FTLParam()._parse_optimizer(FTLParam().optimizer)
        self.input_dim = model_meta.input_dim
        self.optimizer.optimizer = model_meta.optimizer_param.optimizer
        self.optimizer.kwargs = json.loads(model_meta.optimizer_param.kwargs)
        self.initialize_nn((self.input_dim,))
    def set_model_param(self, model_param):
        """Restore nn weights and the stored data header from FTLModelParam."""
        self.nn.restore_model(model_param.model_bytes)
        self.store_header = list(model_param.header)
        LOGGER.debug('stored header load, is {}'.format(self.store_header))
| 12,882 | 37.804217 | 116 | py |
FATE | FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/nn_model.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from federatedml.framework.weights import Weights
class NNModel(object):
    """Abstract interface of a trainable nn model backend."""

    def get_model_weights(self) -> Weights:
        """Return the current trainable weights."""
        pass

    def set_model_weights(self, weights: Weights):
        """Overwrite the trainable weights."""
        pass

    def export_model(self):
        """Serialize the model for persistence."""
        pass

    def load_model(self):
        """Restore the model from its persisted form."""
        pass

    def train(self, data, **kwargs):
        """Run training on the given data."""
        pass

    def predict(self, data, **kwargs):
        """Run inference on the given data."""
        pass

    def evaluate(self, data, **kwargs):
        """Compute evaluation metrics on the given data."""
        pass

    def modify(self, func: typing.Callable[[Weights], Weights]) -> Weights:
        """Apply ``func`` to the current weights, install the result, and
        return the weights as they were *before* modification."""
        before = self.get_model_weights()
        after = func(before)
        self.set_model_weights(after)
        return before
class DataConverter(object):
    """Abstract converter that turns raw data into backend-specific batches."""
    def convert(self, data, *args, **kwargs):
        # Subclasses convert `data` into the format their backend consumes.
        pass
def get_nn_builder(config_type):
    """Return the model-builder function for the given backend config type.

    Only the "keras" backend is supported; any other value raises ValueError.
    """
    if config_type != "keras":
        raise ValueError(f"{config_type} is not supported")
    from federatedml.transfer_learning.hetero_ftl.backend.tf_keras.nn_model import build_keras
    return build_keras
| 1,611 | 25 | 98 | py |
FATE | FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/data_generator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
import numpy as np
class KerasSequenceData(tf.keras.utils.Sequence):
    """Single-batch Keras Sequence wrapping a feature array and optional labels."""

    def __init__(self, X, y=None):
        if X.shape[0] == 0:
            raise ValueError("Data is empty!")
        self.X = X
        # substitute an all-zero placeholder when no labels are supplied
        self.y = np.zeros(X.shape[0]) if y is None else y

    def __len__(self):
        # the whole dataset is exposed as a single batch
        return 1

    def __getitem__(self, idx):
        return self.X, self.y
class KerasSequenceDataConverter(object):
    """Factory that wraps raw arrays into a KerasSequenceData instance."""

    @classmethod
    def convert_data(cls, x=None, y=None):
        sequence = KerasSequenceData(x, y)
        return sequence
| 1,238 | 25.361702 | 75 | py |
FATE | FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/losses.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.keras.losses import *
from tensorflow.python.keras import backend as K
from tensorflow.python.framework import ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.losses.keep_predict_loss')
def keep_predict_loss(y_true, y_pred):
    # Pseudo-loss: the element-wise product of y_true and y_pred summed to a
    # scalar. NOTE(review): presumably used so that the gradient w.r.t.
    # y_pred equals y_true, letting externally computed gradients flow into
    # the model — confirm against FTL callers.
    y_pred = ops.convert_to_tensor(y_pred)
    return K.sum(y_true * y_pred)
| 1,019 | 33 | 75 | py |
FATE | FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/nn_model.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import io
import json
import os
import uuid
import zipfile
import numpy as np
import tensorflow as tf
from federatedml.framework.weights import OrderDictWeights, Weights
from federatedml.transfer_learning.hetero_ftl.backend.tf_keras import losses
from federatedml.transfer_learning.hetero_ftl.backend.nn_model import DataConverter, NNModel
def _zip_dir_as_bytes(path):
with io.BytesIO() as io_bytes:
with zipfile.ZipFile(io_bytes, "w", zipfile.ZIP_DEFLATED) as zipper:
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
full_path = os.path.join(root, name)
relative_path = os.path.relpath(full_path, path)
zipper.write(filename=full_path, arcname=relative_path)
for name in dirs:
full_path = os.path.join(root, name)
relative_path = os.path.relpath(full_path, path)
zipper.write(filename=full_path, arcname=relative_path)
zip_bytes = io_bytes.getvalue()
return zip_bytes
def _modify_model_input_shape(nn_struct, input_shape):
if not input_shape:
return json.dumps(nn_struct)
if isinstance(input_shape, int):
input_shape = [input_shape]
else:
input_shape = list(input_shape)
struct = copy.deepcopy(nn_struct)
if (
not struct.get("config")
or not struct["config"].get("layers")
or not struct["config"]["layers"][0].get("config")
):
return json.dumps(struct)
if struct["config"]["layers"][0].get("config"):
struct["config"]["layers"][0]["config"]["batch_input_shape"] = [
None,
*input_shape,
]
return json.dumps(struct)
else:
return json.dump(struct)
def build_keras(nn_define, loss, optimizer, metrics, **kwargs):
    """Build and compile a KerasNNModel from a JSON-style nn definition.

    `kwargs` may carry `input_shape`, which overrides the first layer's
    batch_input_shape before the model is instantiated.
    """
    nn_define_json = _modify_model_input_shape(
        nn_define, kwargs.get("input_shape", None)
    )
    model = tf.keras.models.model_from_json(nn_define_json, custom_objects={})
    keras_model = KerasNNModel(model)
    keras_model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    return keras_model
class KerasNNModel(NNModel):
    """tf.keras implementation of the NNModel interface used by hetero-FTL.

    Wraps a compiled keras model and exposes weight access, gradient
    computation, training, and zip-based (de)serialization.
    """

    def __init__(self, model):
        self._model: tf.keras.Sequential = model
        # name -> variable map of the trainable weights, kept for
        # OrderDictWeights boxing/unboxing
        self._trainable_weights = {v.name: v for v in self._model.trainable_weights}
        self._loss = None       # loss history recorded by the last train() call
        self._loss_fn = None    # resolved loss callable

    def compile(self, loss, optimizer, metrics):
        """Compile the wrapped model; `loss` is a name resolved from the
        local losses module, `optimizer` carries .optimizer and .kwargs."""
        optimizer_instance = getattr(tf.keras.optimizers, optimizer.optimizer)(
            **optimizer.kwargs
        )
        self._loss_fn = getattr(losses, loss)
        self._model.compile(
            optimizer=optimizer_instance, loss=self._loss_fn, metrics=metrics
        )

    def get_model_weights(self) -> OrderDictWeights:
        """Box the trainable weights for the federation framework."""
        return OrderDictWeights(self._trainable_weights)

    def set_model_weights(self, weights: Weights):
        """Assign boxed weights back into the model's variables by name."""
        unboxed = weights.unboxed
        for name, v in self._trainable_weights.items():
            v.assign(unboxed[name])

    def get_layer_by_index(self, layer_idx):
        return self._model.layers[layer_idx]

    def set_layer_weights_by_index(self, layer_idx, weights):
        self._model.layers[layer_idx].set_weights(weights)

    def get_input_gradients(self, X, y):
        """Return d(loss)/d(X) as a single-element list of numpy arrays."""
        with tf.GradientTape() as tape:
            X = tf.constant(X)
            y = tf.constant(y)
            tape.watch(X)  # X is not a variable, so it must be watched explicitly
            loss = self._loss_fn(y, self._model(X))
        return [tape.gradient(loss, X).numpy()]

    def get_trainable_gradients(self, X, y):
        return self._get_gradients(X, y, self._trainable_weights)

    def apply_gradients(self, grads):
        """Apply externally computed gradients via the compiled optimizer."""
        self._model.optimizer.apply_gradients(
            zip(grads, self._model.trainable_variables)
        )

    def get_weight_gradients(self, X, y):
        return self._get_gradients(X, y, self._model.trainable_variables)

    def get_trainable_weights(self):
        """Return trainable weights as plain numpy arrays."""
        return [w.numpy() for w in self._model.trainable_weights]

    def get_loss(self):
        return self._loss

    def get_forward_loss_from_input(self, X, y):
        """Run a forward pass and return the scalar loss as a numpy value."""
        loss = self._loss_fn(tf.constant(y), self._model(X))
        return loss.numpy()

    def _get_gradients(self, X, y, variable):
        """Compute d(loss)/d(variable); always returns a list of numpy arrays."""
        with tf.GradientTape() as tape:
            y = tf.constant(y)
            loss = self._loss_fn(y, self._model(X))
        g = tape.gradient(loss, variable)
        if isinstance(g, list):
            return [t.numpy() for t in g]
        else:
            return [g.numpy()]

    def set_learning_rate(self, learning_rate):
        self._model.optimizer.learning_rate.assign(learning_rate)

    def train(self, data: tf.keras.utils.Sequence, **kwargs):
        """Fit on `data`; `aggregate_every_n_epoch` (if present) sets the
        local epoch count. Returns the number of processed batches."""
        epochs = 1
        left_kwargs = copy.deepcopy(kwargs)
        if "aggregate_every_n_epoch" in kwargs:
            epochs = kwargs["aggregate_every_n_epoch"]
            del left_kwargs["aggregate_every_n_epoch"]
        left_kwargs["callbacks"] = [tf.keras.callbacks.History()]
        self._model.fit(x=data, epochs=epochs, verbose=1, shuffle=True, **left_kwargs)
        self._loss = left_kwargs["callbacks"][0].history["loss"]
        return epochs * len(data)

    def evaluate(self, data: tf.keras.utils.Sequence, **kwargs):
        """Evaluate on `data` and return a {metric_name: value} dict."""
        names = self._model.metrics_names
        values = self._model.evaluate(x=data, verbose=1)
        if not isinstance(values, list):
            values = [values]
        return dict(zip(names, values))

    def predict(self, data: tf.keras.utils.Sequence, **kwargs):
        return self._model.predict(data)

    def export_model(self):
        """Save the model to a temp SavedModel dir and return it zipped as bytes."""
        model_base = "./saved_model"
        if not os.path.exists(model_base):
            os.mkdir(model_base)
        model_path = f"{model_base}/{uuid.uuid1()}"
        os.mkdir(model_path)
        self._model.save(model_path)
        model_bytes = _zip_dir_as_bytes(model_path)
        return model_bytes

    @staticmethod
    def restore_model(
        model_bytes,
    ):  # todo: restore optimizer to support incremental learning
        """Unzip serialized model bytes to a temp dir and load a new KerasNNModel."""
        model_base = "./restore_model"
        if not os.path.exists(model_base):
            os.mkdir(model_base)
        model_path = f"{model_base}/{uuid.uuid1()}"
        os.mkdir(model_path)
        with io.BytesIO(model_bytes) as bytes_io:
            with zipfile.ZipFile(bytes_io, "r", zipfile.ZIP_DEFLATED) as f:
                f.extractall(model_path)
        # add custom objects so the custom loss resolves on load
        from federatedml.transfer_learning.hetero_ftl.backend.tf_keras.losses import keep_predict_loss
        tf.keras.utils.get_custom_objects().update(
            {"keep_predict_loss": keep_predict_loss}
        )
        model = tf.keras.models.load_model(f"{model_path}")
        return KerasNNModel(model)

    def export_optimizer_config(self):
        return self._model.optimizer.get_config()
class KerasSequenceData(tf.keras.utils.Sequence):
    """Keras Sequence built from a FATE data_instances table.

    Labels are mapped through `label_mapping`; with `encode_label` they are
    one-hot encoded (multi-class only), otherwise stored as a single value.
    """

    def get_shape(self):
        """Return (feature shape, label shape)."""
        return self.x_shape, self.y_shape

    def __init__(self, data_instances, batch_size, encode_label, label_mapping):
        self.size = data_instances.count()
        if self.size <= 0:
            raise ValueError("empty data")
        # infer the feature shape from an arbitrary first instance
        _, one_data = data_instances.first()
        self.x_shape = one_data.features.shape
        num_label = len(label_mapping)
        print(label_mapping)  # NOTE(review): debug leftover printed to stdout
        if encode_label:
            # one-hot encoding path: only valid for more than two classes
            if num_label > 2:
                self.y_shape = (num_label,)
                self.x = np.zeros((self.size, *self.x_shape))
                self.y = np.zeros((self.size, *self.y_shape))
                index = 0
                self._keys = []
                for k, inst in data_instances.collect():
                    self._keys.append(k)
                    self.x[index] = inst.features
                    self.y[index][label_mapping[inst.label]] = 1
                    index += 1
            else:
                raise ValueError(f"num_label is {num_label}")
        else:
            # plain label path: labels stored as a single mapped value
            if num_label >= 2:
                self.y_shape = (1,)
            else:
                raise ValueError(f"num_label is {num_label}")
            self.x = np.zeros((self.size, *self.x_shape))
            self.y = np.zeros((self.size, *self.y_shape))
            index = 0
            self._keys = []
            for k, inst in data_instances.collect():
                self._keys.append(k)
                self.x[index] = inst.features
                self.y[index] = label_mapping[inst.label]
                index += 1
        # non-positive batch_size means "use the whole dataset as one batch"
        self.batch_size = batch_size if batch_size > 0 else self.size

    def __getitem__(self, index):
        """Gets batch at position `index`.

        # Arguments
            index: position of the batch in the Sequence.

        # Returns
            A batch
        """
        start = self.batch_size * index
        end = self.batch_size * (index + 1)
        return self.x[start:end], self.y[start:end]

    def __len__(self):
        """Number of batch in the Sequence.

        # Returns
            The number of batches in the Sequence.
        """
        return int(np.ceil(self.size / float(self.batch_size)))

    def get_keys(self):
        # sample keys, in the same order as rows of self.x / self.y
        return self._keys
class KerasSequenceDataConverter(DataConverter):
    """DataConverter producing KerasSequenceData batches."""

    def convert(self, data, *args, **kwargs):
        sequence = KerasSequenceData(data, *args, **kwargs)
        return sequence
| 9,949 | 33.548611 | 102 | py |
FATE | FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/data_generator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
import numpy as np
class KerasSequenceData(tf.keras.utils.Sequence):
    """Single-batch Keras Sequence wrapping a feature array and optional labels."""

    def __init__(self, X, y=None):
        if X.shape[0] == 0:
            raise ValueError("Data is empty!")
        self.X = X
        # substitute an all-zero placeholder when no labels are supplied
        self.y = np.zeros(X.shape[0]) if y is None else y

    def __len__(self):
        # the whole dataset is exposed as a single batch
        return 1

    def __getitem__(self, idx):
        return self.X, self.y
class KerasSequenceDataConverter(object):
    """Factory that wraps raw arrays into a KerasSequenceData instance."""

    @classmethod
    def convert_data(cls, x=None, y=None):
        sequence = KerasSequenceData(x, y)
        return sequence
| 1,238 | 25.361702 | 75 | py |
FATE | FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/layers/pooling.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.python.keras import layers
def _build_maxpooling1d(pool_size=2,
                        strides=None,
                        padding='valid',
                        data_format=None,
                        **kwargs):
    """Build a keras MaxPooling1D layer from keyword configuration."""
    return layers.pooling.MaxPooling1D(
        pool_size=pool_size, strides=strides,
        padding=padding, data_format=data_format, **kwargs)
def _build_maxpooling2d(pool_size=(2, 2),
                        strides=None,
                        padding='valid',
                        data_format=None,
                        **kwargs):
    """Build a keras MaxPooling2D layer from keyword configuration."""
    return layers.pooling.MaxPooling2D(
        pool_size=pool_size, strides=strides,
        padding=padding, data_format=data_format, **kwargs)
def _build_maxpooling3d(pool_size=(2, 2, 2),
                        strides=None,
                        padding='valid',
                        data_format=None,
                        **kwargs):
    """Build a keras MaxPooling3D layer from keyword configuration."""
    return layers.pooling.MaxPooling3D(
        pool_size=pool_size, strides=strides,
        padding=padding, data_format=data_format, **kwargs)
def _build_averagepooling1d(pool_size=2,
                            strides=None,
                            padding='valid',
                            data_format=None,
                            **kwargs):
    """Build a keras AveragePooling1D layer from keyword configuration."""
    return layers.pooling.AveragePooling1D(
        pool_size=pool_size, strides=strides,
        padding=padding, data_format=data_format, **kwargs)
def _build_averagepooling2d(pool_size=(2, 2),
                            strides=None,
                            padding='valid',
                            data_format=None,
                            **kwargs):
    """Build a keras AveragePooling2D layer from keyword configuration."""
    return layers.pooling.AveragePooling2D(
        pool_size=pool_size, strides=strides,
        padding=padding, data_format=data_format, **kwargs)
def _build_averagepooling3d(pool_size=(2, 2, 2),
                            strides=None,
                            padding='valid',
                            data_format=None,
                            **kwargs):
    """Build a keras AveragePooling3D layer from keyword configuration."""
    return layers.pooling.AveragePooling3D(
        pool_size=pool_size, strides=strides,
        padding=padding, data_format=data_format, **kwargs)
# Global pooling layers take no extra configuration, so their builders are
# plain aliases.
# NOTE(review): these alias the *unbound* `__init__`, which when called
# requires a `self` argument and returns None rather than a layer instance;
# they probably should alias the classes themselves — confirm call sites.
_build_global_averagepooling1d = layers.pooling.GlobalAveragePooling1D.__init__
_build_global_averagepooling2d = layers.pooling.GlobalAveragePooling2D.__init__
_build_global_averagepooling3d = layers.pooling.GlobalAveragePooling3D.__init__
_build_global_maxpooling1d = layers.pooling.GlobalMaxPooling1D.__init__
_build_global_maxpooling2d = layers.pooling.GlobalMaxPooling2D.__init__
_build_global_maxpooling3d = layers.pooling.GlobalMaxPooling3D.__init__
| 4,149 | 39.291262 | 79 | py |
FATE | FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/layers/baisc.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.python.keras import layers
from .util import _get_initializer
def _build_dense(units, activation, use_bias=True, kernel_initializer="glorot_uniform",
                 bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None, seed=None, **kwargs):
    """Build a keras Dense layer; `seed` is threaded into both initializers."""
    dense_kwargs = dict(
        units=units,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=_get_initializer(kernel_initializer, seed),
        bias_initializer=_get_initializer(bias_initializer, seed),
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint)
    dense_kwargs.update(kwargs)
    return layers.Dense(**dense_kwargs)
def _build_dropout(rate, noise_shape=None, seed=None, **kwargs):
    """Build a keras Dropout layer."""
    layer = layers.Dropout(rate, noise_shape=noise_shape, seed=seed, **kwargs)
    return layer
def _build_flatten(data_format=None, **kwargs):
    """Build a keras Flatten layer."""
    layer = layers.Flatten(data_format=data_format, **kwargs)
    return layer
| 1,881 | 43.809524 | 117 | py |
FATE | FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/layers/util.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.python.keras import initializers
def _get_initializer(initializer, seed):
    """Return `initializer`, seeded with `seed` when possible.

    If `initializer` names a keras initializer class whose instances carry
    a `seed` attribute, a seeded instance is returned; otherwise the
    argument is passed through unchanged.
    """
    # BUGFIX: the original `if not seed` also discarded the valid seed
    # value 0; only a missing (None) seed should skip seeding.
    if seed is None:
        return initializer
    initializer_class = getattr(initializers, initializer, None)
    if initializer_class:
        initializer_instance = initializer_class()
        if hasattr(initializer_instance, "seed"):
            initializer_instance.seed = seed
        return initializer_instance
    return initializer
| 1,052 | 30.909091 | 75 | py |
FATE | FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/layers/conv.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.python.keras import layers
def _build_conv1d(filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1,
                  activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros',
                  kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None,
                  bias_constraint=None, **kwargs):
    """Build a keras Conv1D layer from keyword configuration."""
    conv_kwargs = dict(
        filters=filters, kernel_size=kernel_size, strides=strides,
        padding=padding, data_format=data_format, dilation_rate=dilation_rate,
        activation=activation, use_bias=use_bias,
        kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint, bias_constraint=bias_constraint)
    conv_kwargs.update(kwargs)
    return layers.convolutional.Conv1D(**conv_kwargs)
def _build_conv2d(filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last',
                  dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform',
                  bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                  kernel_constraint=None, bias_constraint=None, **kwargs):
    """Build a keras Conv2D layer from keyword configuration."""
    conv_kwargs = dict(
        filters=filters, kernel_size=kernel_size, strides=strides,
        padding=padding, data_format=data_format, dilation_rate=dilation_rate,
        activation=activation, use_bias=use_bias,
        kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint, bias_constraint=bias_constraint)
    conv_kwargs.update(kwargs)
    return layers.convolutional.Conv2D(**conv_kwargs)
def _build_conv3d(filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format='channels_last',
                  dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform',
                  bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                  kernel_constraint=None, bias_constraint=None, **kwargs):
    """Build a keras Conv3D layer from keyword configuration."""
    conv_kwargs = dict(
        filters=filters, kernel_size=kernel_size, strides=strides,
        padding=padding, data_format=data_format, dilation_rate=dilation_rate,
        activation=activation, use_bias=use_bias,
        kernel_initializer=kernel_initializer, bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint, bias_constraint=bias_constraint)
    conv_kwargs.update(kwargs)
    return layers.convolutional.Conv3D(**conv_kwargs)
| 3,710 | 41.655172 | 118 | py |
FATE | FATE-master/python/federatedml/transfer_learning/hetero_ftl/test/test_ftl_modules.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
from federatedml.util import consts
from federatedml.nn.homo_nn.nn_model import get_nn_builder
import json
from federatedml.param.ftl_param import FTLParam
from numpy import array
from fate_arch.session import computing_session as session
import pandas as pd
from federatedml.nn.hetero_nn.backend.tf_keras.data_generator import KerasSequenceDataConverter
from federatedml.transfer_learning.hetero_ftl.ftl_guest import FTLGuest
from federatedml.transfer_learning.hetero_ftl.ftl_host import FTLHost
from federatedml.transfer_learning.hetero_ftl.ftl_base import FTL
from federatedml.param.ftl_param import FTLParam
from federatedml.feature.instance import Instance
import json
class TestFTL(unittest.TestCase):
    """Unit tests for FTL guest/host model initialization and label checking."""

    def setUp(self):
        # table operations below require a local computing session
        session.init('test', 0)

    def test_guest_model_init(self):
        # guest side: build the bottom keras model from a JSON nn definition
        model = FTLGuest()
        param = FTLParam(
            nn_define=json.loads('{"class_name": "Sequential", "config": {"name": "sequential_1", "layers": [{"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "batch_input_shape": [null, 32], "dtype": "float32", "units": 64, "activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "RandomNormal", "config": {"mean": 0.0, "stddev": 1.0, "seed": 100, "dtype": "float32"}}, "bias_initializer": {"class_name": "Constant", "config": {"value": 0, "dtype": "float32"}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.2.4-tf", "backend": "tensorflow"}')
        )
        param.check()
        model._init_model(param)
        model.initialize_nn(input_shape=100)
        print(model.nn.get_trainable_weights())

    def test_host_model_init(self):
        # host side: same nn definition as the guest test
        model = FTLHost()
        param = FTLParam(
            nn_define=json.loads('{"class_name": "Sequential", "config": {"name": "sequential_1", "layers": [{"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "batch_input_shape": [null, 32], "dtype": "float32", "units": 64, "activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "RandomNormal", "config": {"mean": 0.0, "stddev": 1.0, "seed": 100, "dtype": "float32"}}, "bias_initializer": {"class_name": "Constant", "config": {"value": 0, "dtype": "float32"}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.2.4-tf", "backend": "tensorflow"}')
        )
        param.check()
        model._init_model(param)
        model.initialize_nn(input_shape=100)
        print(model.nn.get_trainable_weights())

    def test_label_reset(self):
        # build 100 instances labelled -1 and 100 labelled 1
        # (labels are set after append, which works because the same
        # object is referenced from the list)
        l = []
        for i in range(100):
            inst = Instance()
            inst.features = np.random.random(20)
            l.append(inst)
            inst.label = -1
        for i in range(100):
            inst = Instance()
            inst.features = np.random.random(20)
            l.append(inst)
            inst.label = 1
        table = session.parallelize(l, partition=4, include_key=False)
        rs = FTL().check_label(table)
        new_label = [i[1].label for i in list(rs.collect())]
        print(new_label)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 3,947 | 44.906977 | 705 | py |
FATE | FATE-master/python/federatedml/secureprotol/encrypt.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import hashlib
from collections import Iterable
import numpy as np
from federatedml.util import LOGGER
from Cryptodome import Random
from Cryptodome.PublicKey import RSA
from federatedml.feature.instance import Instance
from federatedml.secureprotol import gmpy_math
from federatedml.secureprotol.fate_paillier import PaillierKeypair
from federatedml.secureprotol.fate_paillier import PaillierEncryptedNumber
from federatedml.secureprotol.random import RandomPads
try:
from ipcl_python import PaillierKeypair as IpclPaillierKeypair
except ImportError:
pass
_TORCH_VALID = False
try:
import torch
_TORCH_VALID = True
except ImportError:
pass
class Encrypt(object):
    """Base class for encryption schemes: key management plus element-wise
    and recursive encrypt/decrypt over lists, arrays, and tables.

    NOTE(review): `Iterable` used below is imported from `collections`
    at the top of the file, which breaks on Python >= 3.10; it should
    come from `collections.abc` — confirm before upgrading interpreters.
    """

    def __init__(self):
        # subclasses populate these via generate_key()/setters
        self.public_key = None
        self.privacy_key = None

    def generate_key(self, n_length=0):
        """Generate a fresh key pair; n_length is the key size in bits."""
        pass

    def set_public_key(self, public_key):
        pass

    def get_public_key(self):
        pass

    def set_privacy_key(self, privacy_key):
        pass

    def get_privacy_key(self):
        pass

    def encrypt(self, value):
        """Encrypt a single value."""
        pass

    def decrypt(self, value):
        """Decrypt a single value."""
        pass

    def raw_encrypt(self, value):
        """Encrypt without encoding/obfuscation (scheme-specific)."""
        pass

    def raw_decrypt(self, value):
        """Decrypt the raw ciphertext form (scheme-specific)."""
        pass

    def encrypt_list(self, values):
        """Encrypt each element of a flat list."""
        result = [self.encrypt(msg) for msg in values]
        return result

    def decrypt_list(self, values):
        """Decrypt each element of a flat list."""
        result = [self.decrypt(msg) for msg in values]
        return result

    def distribute_decrypt(self, X):
        """Recursively decrypt every value of a distributed table."""
        decrypt_table = X.mapValues(lambda x: self.recursive_decrypt(x))
        return decrypt_table

    def distribute_encrypt(self, X):
        """Recursively encrypt every value of a distributed table."""
        encrypt_table = X.mapValues(lambda x: self.recursive_encrypt(x))
        return encrypt_table

    def distribute_raw_decrypt(self, X):
        return X.mapValues(lambda x: self.recursive_raw_decrypt(x))

    def distribute_raw_encrypt(self, X):
        return X.mapValues(lambda x: self.recursive_raw_encrypt(x))

    def _recursive_func(self, obj, func):
        """Apply `func` element-wise, preserving ndarray shape and
        rebuilding other iterables with their own type."""
        if isinstance(obj, np.ndarray):
            if len(obj.shape) == 1:
                return np.reshape([func(val) for val in obj], obj.shape)
            else:
                # recurse per sub-array, then restore the original shape
                return np.reshape(
                    [self._recursive_func(o, func) for o in obj], obj.shape
                )
        elif isinstance(obj, Iterable):
            # rebuild the container with the same type (list, tuple, ...)
            return type(obj)(
                self._recursive_func(o, func) if isinstance(o, Iterable) else func(o)
                for o in obj
            )
        else:
            return func(obj)

    def recursive_encrypt(self, X):
        return self._recursive_func(X, self.encrypt)

    def recursive_decrypt(self, X):
        return self._recursive_func(X, self.decrypt)

    def recursive_raw_encrypt(self, X):
        return self._recursive_func(X, self.raw_encrypt)

    def recursive_raw_decrypt(self, X):
        return self._recursive_func(X, self.raw_decrypt)
class RsaEncrypt(Encrypt):
    """Textbook RSA on integers (modular exponentiation, no padding).

    Uses CRT acceleration for encryption when p and q are available.
    """

    def __init__(self):
        super(RsaEncrypt, self).__init__()
        self.e = None  # public exponent
        self.d = None  # private exponent
        self.n = None  # modulus
        self.p = None  # prime factor of n
        self.q = None  # prime factor of n

    def generate_key(self, rsa_bit=1024):
        """Generate an RSA key pair of `rsa_bit` bits."""
        random_generator = Random.new().read
        rsa = RSA.generate(rsa_bit, random_generator)
        self.e = rsa.e
        self.d = rsa.d
        self.n = rsa.n
        self.p = rsa.p
        self.q = rsa.q

    def get_key_pair(self):
        return self.e, self.d, self.n, self.p, self.q

    def set_public_key(self, public_key):
        # public_key is a dict with keys "e" and "n"
        self.e = public_key["e"]
        self.n = public_key["n"]

    def get_public_key(self):
        return self.e, self.n

    def set_privacy_key(self, privacy_key):
        # privacy_key is a dict with keys "d" and "n"
        self.d = privacy_key["d"]
        self.n = privacy_key["n"]

    def get_privacy_key(self):
        return self.d, self.n

    def encrypt(self, value):
        """Return value^e mod n (CRT-accelerated when p and q are known);
        None if the public key is not set."""
        if self.e is not None and self.n is not None and self.p is not None and self.q is not None:
            cp, cq = gmpy_math.crt_coefficient(self.p, self.q)
            return gmpy_math.powmod_crt(value, self.e, self.n, self.p, self.q, cp, cq)
        if self.e is not None and self.n is not None:
            return gmpy_math.powmod(value, self.e, self.n)
        else:
            return None

    def decrypt(self, value):
        """Return value^d mod n; None if the private key is not set."""
        if self.d is not None and self.n is not None:
            return gmpy_math.powmod(value, self.d, self.n)
        else:
            return None
class PaillierEncrypt(Encrypt):
    """Paillier additively-homomorphic encryption scheme."""

    def __init__(self):
        super(PaillierEncrypt, self).__init__()

    def generate_key(self, n_length=1024):
        """Create a (public, private) Paillier key pair with modulus size *n_length*."""
        keypair = PaillierKeypair.generate_keypair(n_length=n_length)
        self.public_key, self.privacy_key = keypair

    def get_key_pair(self):
        return self.public_key, self.privacy_key

    def set_public_key(self, public_key):
        self.public_key = public_key

    def get_public_key(self):
        return self.public_key

    def set_privacy_key(self, privacy_key):
        self.privacy_key = privacy_key

    def get_privacy_key(self):
        return self.privacy_key

    def encrypt(self, value):
        """Encrypt *value*; returns None when no public key has been set."""
        return None if self.public_key is None else self.public_key.encrypt(value)

    def decrypt(self, value):
        """Decrypt *value*; returns None when no private key has been set."""
        return None if self.privacy_key is None else self.privacy_key.decrypt(value)

    def raw_encrypt(self, plaintext, exponent=0):
        """Encrypt without the obfuscation step, wrapping the raw ciphertext
        into a PaillierEncryptedNumber carrying *exponent*.
        """
        raw_cipher = self.public_key.raw_encrypt(plaintext)
        return PaillierEncryptedNumber(
            public_key=self.public_key, ciphertext=raw_cipher, exponent=exponent
        )

    def raw_decrypt(self, ciphertext):
        """Decrypt a PaillierEncryptedNumber without fixed-point decoding."""
        return self.privacy_key.raw_decrypt(ciphertext.ciphertext())

    def recursive_raw_encrypt(self, X, exponent=0):
        encrypt_fn = functools.partial(self.raw_encrypt, exponent=exponent)
        return self._recursive_func(X, encrypt_fn)
class IpclPaillierEncrypt(Encrypt):
    """
    Paillier encryption backed by the Intel Paillier Cryptosystem Library (IPCL).
    """

    def __init__(self):
        super(IpclPaillierEncrypt, self).__init__()

    def generate_key(self, n_length=1024):
        """Create an IPCL (public, private) key pair with modulus size *n_length*."""
        keypair = IpclPaillierKeypair.generate_keypair(n_length=n_length)
        self.public_key, self.privacy_key = keypair

    def get_key_pair(self):
        return self.public_key, self.privacy_key

    def set_public_key(self, public_key):
        self.public_key = public_key

    def get_public_key(self):
        return self.public_key

    def set_privacy_key(self, privacy_key):
        self.privacy_key = privacy_key

    def get_privacy_key(self):
        return self.privacy_key

    def encrypt(self, value):
        """Encrypt *value* (scalar or whole list); None without a public key."""
        if self.public_key is None:
            return None
        return self.public_key.encrypt(value)

    def decrypt(self, value):
        """Decrypt *value*; None without a private key."""
        if self.privacy_key is None:
            return None
        return self.privacy_key.decrypt(value)

    def raw_encrypt(self, plaintext, exponent=0):
        """
        Encrypt without applying obfuscator.
        Returns:
            (PaillierEncryptedNumber from `ipcl_python`): one ciphertext
        """
        return self.public_key.raw_encrypt(plaintext)

    def raw_decrypt(self, ciphertext):
        """
        Decrypt without constructing `FixedPointNumber`.
        Returns:
            (int or list): raw value(s)
        """
        return self.privacy_key.raw_decrypt(ciphertext)

    def encrypt_list(self, values):
        """Pack and encrypt a whole list of raw values into one single ciphertext.
        Returns:
            (PaillierEncryptedNumber from `ipcl_python`): all in one single ciphertext
        """
        return self.encrypt(values)

    def decrypt_list(self, values):
        """
        Decrypt input values.
        A list or 1-d numpy array is decrypted element-wise via the parent
        class. Otherwise *values* is a 0-d numpy array holding one packed
        ciphertext of multiple raw values: fetch it with ``item(0)`` and
        decrypt it in one go.
        Returns:
            (list): a list of raw values
        """
        if np.ndim(values) >= 1:
            return super().decrypt_list(values)
        return self.decrypt(values.item(0))

    def recursive_raw_encrypt(self, X, exponent=0):
        # exponent is accepted for interface parity with PaillierEncrypt but
        # IPCL's raw_encrypt ignores it.
        encrypt_fn = functools.partial(self.raw_encrypt, exponent=exponent)
        return self._recursive_func(X, encrypt_fn)
class PadsCipher(Encrypt):
    """Additive-mask ("zero-sum pads") cipher for secure aggregation.

    Every party adds one pairwise random pad per peer, seeded by keys
    exchanged with that peer. The pad's sign depends on the uuid ordering
    (positive when the peer's uuid is smaller than ours), so when all
    parties' masked values are summed the pads cancel out.
    """

    def __init__(self):
        super().__init__()
        self._uuid = None
        self._rands = None
        # Raw 32-bit seeds shared with each peer; filled by set_exchanged_keys.
        # (Previously left uninitialized, which made encrypt_table raise
        # AttributeError when called before set_exchanged_keys.)
        self._seeds = None
        self._amplify_factor = 1

    def set_self_uuid(self, uuid):
        self._uuid = uuid

    def set_amplify_factor(self, factor):
        self._amplify_factor = factor

    def set_exchanged_keys(self, keys):
        """Store the seeds exchanged with every other party (own uuid excluded)."""
        self._seeds = {
            uid: v & 0xFFFFFFFF for uid, v in keys.items() if uid != self._uuid
        }
        self._rands = {
            uid: RandomPads(v & 0xFFFFFFFF)
            for uid, v in keys.items()
            if uid != self._uuid
        }

    def _pad_array(self, arr, rands, amplify_factor):
        """Add one signed pad per peer to a numpy array."""
        ret = arr
        for uid, rand in rands.items():
            if uid > self._uuid:
                ret = rand.add_rand_pads(ret, 1.0 * amplify_factor)
            else:
                ret = rand.add_rand_pads(ret, -1.0 * amplify_factor)
        return ret

    def _pad_scalar(self, value, rands, amplify_factor):
        """Add one signed pad per peer to a plain numeric value."""
        ret = value
        for uid, rand in rands.items():
            if uid > self._uuid:
                ret += rand.rand(1)[0] * amplify_factor
            else:
                ret -= rand.rand(1)[0] * amplify_factor
        return ret

    def encrypt(self, value):
        """Mask *value* with the pairwise pads.

        Supports numpy arrays, torch tensors (padded through a numpy
        round-trip) and plain scalars.
        """
        if isinstance(value, np.ndarray):
            return self._pad_array(value, self._rands, self._amplify_factor)
        if _TORCH_VALID and isinstance(value, torch.Tensor):
            masked = self._pad_array(value.numpy(), self._rands, self._amplify_factor)
            return torch.Tensor(masked)
        return self._pad_scalar(value, self._rands, self._amplify_factor)

    def encrypt_table(self, table):
        """Mask every value of a distributed table.

        Per-key pads are derived from the exchanged seeds shifted by an md5
        hash of the key, so each row is masked with an independent stream
        that all parties can reproduce.
        """
        def _pad(key, value, seeds, amplify_factor):
            hash_key = int(hashlib.md5(f"{key}".encode("ascii")).hexdigest(), 16)
            # derive per-key pads from the shared seeds
            rands = {
                uid: RandomPads((hash_key + seed) & 0xFFFFFFFF)
                for uid, seed in seeds.items()
            }
            if isinstance(value, np.ndarray):
                return key, self._pad_array(value, rands, amplify_factor)
            elif isinstance(value, Instance):
                value.features = self._pad_array(value.features, rands, amplify_factor)
                return key, value
            else:
                # bug fix: the original read self._amplify_factor here instead
                # of the bound amplify_factor parameter (same value, but the
                # closure capture was inconsistent with the other branches)
                return key, self._pad_scalar(value, rands, amplify_factor)

        f = functools.partial(
            _pad, seeds=self._seeds, amplify_factor=self._amplify_factor
        )
        return table.map(f)

    def decrypt(self, value):
        """Pads cancel at aggregation time, so decryption is the identity."""
        return value
| 12,519 | 29.990099 | 116 | py |
FATE | FATE-master/python/federatedml/param/ftl_param.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import copy
from federatedml.param.intersect_param import IntersectParam
from types import SimpleNamespace
from federatedml.param.base_param import BaseParam, deprecated_param
from federatedml.util import consts
from federatedml.param.encrypt_param import EncryptParam
from federatedml.param.encrypted_mode_calculation_param import EncryptedModeCalculatorParam
from federatedml.param.predict_param import PredictParam
from federatedml.param.callback_param import CallbackParam
deprecated_param_list = ["validation_freqs", "metrics"]
@deprecated_param(*deprecated_param_list)
class FTLParam(BaseParam):
    def __init__(self, alpha=1, tol=0.000001,
                 n_iter_no_change=False, validation_freqs=None, optimizer={'optimizer': 'Adam', 'learning_rate': 0.01},
                 nn_define={}, epochs=1, intersect_param=IntersectParam(consts.RSA), config_type='keras', batch_size=-1,
                 encrypte_param=EncryptParam(),
                 encrypted_mode_calculator_param=EncryptedModeCalculatorParam(mode="confusion_opt"),
                 predict_param=PredictParam(), mode='plain', communication_efficient=False,
                 local_round=5, callback_param=CallbackParam()):
        """
        Parameters
        ----------
        alpha : float
            a loss coefficient defined in paper, it defines the importance of alignment loss
        tol : float
            loss tolerance
        n_iter_no_change : bool
            check loss convergence or not
        validation_freqs : None or positive integer or container object in python
            Do validation in training process or Not.
            if equals None, will not do validation in train process;
            if equals positive integer, will validate data every validation_freqs epochs passes;
            if container object in python, will validate data if epochs belong to this container.
            e.g. validation_freqs = [10, 15], will validate data when epoch equals to 10 and 15.
            The default value is None, 1 is suggested. You can set it to a number larger than 1 in order to
            speed up training by skipping validation rounds. When it is larger than 1, a number which is
            divisible by "epochs" is recommended, otherwise, you will miss the validation scores
            of last training epoch.
        optimizer : str or dict
            optimizer method, accept following types:
            1. a string, one of "Adadelta", "Adagrad", "Adam", "Adamax", "Nadam", "RMSprop", "SGD"
            2. a dict, with a required key-value pair keyed by "optimizer",
                with optional key-value pairs such as learning rate.
            defaults to "SGD"
        nn_define : dict
            a dict represents the structure of neural network, it can be output by tf-keras
        epochs : int
            epochs num
        intersect_param
            define the intersect method
        config_type : {'tf-keras'}
            config type
        batch_size : int
            batch size when computing transformed feature embedding, -1 use full data.
        encrypte_param
            encrypted param
        encrypted_mode_calculator_param
            encrypted mode calculator param:
        predict_param
            predict param
        mode: {"plain", "encrypted"}
            plain: will not use any encrypt algorithms, data exchanged in plaintext
            encrypted: use paillier to encrypt gradients
        communication_efficient: bool
            will use communication efficient or not. when communication efficient is enabled, FTL model will
            update gradients by several local rounds using intermediate data
        local_round: int
            local update round when using communication efficient
        """
        super(FTLParam, self).__init__()
        self.alpha = alpha
        self.tol = tol
        self.n_iter_no_change = n_iter_no_change
        self.validation_freqs = validation_freqs
        self.optimizer = optimizer
        self.nn_define = nn_define
        self.epochs = epochs
        # mutable sub-params are deep-copied so instances never share state
        self.intersect_param = copy.deepcopy(intersect_param)
        self.config_type = config_type
        self.batch_size = batch_size
        self.encrypted_mode_calculator_param = copy.deepcopy(encrypted_mode_calculator_param)
        self.encrypt_param = copy.deepcopy(encrypte_param)
        self.predict_param = copy.deepcopy(predict_param)
        self.mode = mode
        self.communication_efficient = communication_efficient
        self.local_round = local_round
        self.callback_param = copy.deepcopy(callback_param)

    def check(self):
        """Validate all parameters; raises ValueError/AssertionError on invalid input."""
        # bug fix: collections.Container was removed in Python 3.10;
        # use the collections.abc location instead
        from collections.abc import Container

        self.intersect_param.check()
        self.encrypt_param.check()
        self.encrypted_mode_calculator_param.check()
        self.optimizer = self._parse_optimizer(self.optimizer)
        supported_config_type = ["keras"]
        if self.config_type not in supported_config_type:
            raise ValueError(f"config_type should be one of {supported_config_type}")
        if not isinstance(self.tol, (int, float)):
            raise ValueError("tol should be numeric")
        if not isinstance(self.epochs, int) or self.epochs <= 0:
            raise ValueError("epochs should be a positive integer")
        if self.nn_define and not isinstance(self.nn_define, dict):
            raise ValueError("bottom_nn_define should be a dict defining the structure of neural network")
        if self.batch_size != -1:
            if not isinstance(self.batch_size, int) \
                    or self.batch_size < consts.MIN_BATCH_SIZE:
                raise ValueError(
                    " {} not supported, should be larger than 10 or -1 represent for all data".format(self.batch_size))
        # deprecated-parameter migration: if any legacy param was user-fed,
        # switch callbacks on (mutually exclusive with explicit callback_param)
        for p in deprecated_param_list:
            # if self._warn_to_deprecate_param(p, "", ""):
            if self._deprecated_params_set.get(p):
                if "callback_param" in self.get_user_feeded():
                    raise ValueError(f"{p} and callback param should not be set simultaneously,"
                                     f"{self._deprecated_params_set}, {self.get_user_feeded()}")
                else:
                    self.callback_param.callbacks = ["PerformanceEvaluate"]
                break

        descr = "ftl's"

        if self._warn_to_deprecate_param("validation_freqs", descr, "callback_param's 'validation_freqs'"):
            self.callback_param.validation_freqs = self.validation_freqs

        if self._warn_to_deprecate_param("metrics", descr, "callback_param's 'metrics'"):
            self.callback_param.metrics = self.metrics

        if self.validation_freqs is None:
            pass
        elif isinstance(self.validation_freqs, int):
            if self.validation_freqs < 1:
                raise ValueError("validation_freqs should be larger than 0 when it's integer")
        elif not isinstance(self.validation_freqs, Container):
            raise ValueError("validation_freqs should be None or positive integer or container")

        assert isinstance(self.communication_efficient, bool), 'communication efficient must be a boolean'
        assert self.mode in [
            'encrypted', 'plain'], 'mode options: encrpyted or plain, but {} is offered'.format(
            self.mode)

        self.check_positive_integer(self.epochs, 'epochs')
        self.check_positive_number(self.alpha, 'alpha')
        self.check_positive_integer(self.local_round, 'local round')

    @staticmethod
    def _parse_optimizer(opt):
        """
        Normalize an optimizer spec into SimpleNamespace(optimizer=..., kwargs=...).

        Examples:
            1. "optimize": "SGD"
            2. "optimize": {
                "optimizer": "SGD",
                "learning_rate": 0.05
            }
        """
        kwargs = {}
        if isinstance(opt, str):
            return SimpleNamespace(optimizer=opt, kwargs=kwargs)
        elif isinstance(opt, dict):
            optimizer = opt.get("optimizer", kwargs)
            if not optimizer:
                raise ValueError(f"optimizer config: {opt} invalid")
            kwargs = {k: v for k, v in opt.items() if k != "optimizer"}
            return SimpleNamespace(optimizer=optimizer, kwargs=kwargs)
        else:
            raise ValueError(f"invalid type for optimize: {type(opt)}")
| 8,927 | 44.090909 | 120 | py |
FATE | FATE-master/python/federatedml/param/hetero_nn_param.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from types import SimpleNamespace
from federatedml.param.base_param import BaseParam
from federatedml.param.base_param import deprecated_param
from federatedml.param.callback_param import CallbackParam
from federatedml.param.cross_validation_param import CrossValidationParam
from federatedml.param.encrypt_param import EncryptParam
from federatedml.param.encrypted_mode_calculation_param import EncryptedModeCalculatorParam
from federatedml.param.predict_param import PredictParam
from federatedml.util import consts
class DatasetParam(BaseParam):
    """Parameter holder naming a dataset class together with its keyword args."""

    def __init__(self, dataset_name=None, **kwargs):
        super(DatasetParam, self).__init__()
        self.dataset_name = dataset_name
        # extra keyword arguments forwarded to the dataset constructor
        self.param = kwargs

    def check(self):
        """Validate that dataset_name, when provided, is a string."""
        if self.dataset_name is None:
            return
        self.check_string(self.dataset_name, 'dataset_name')

    def to_dict(self):
        """Serialize this parameter into a plain dict."""
        return {'dataset_name': self.dataset_name, 'param': self.param}
class SelectorParam(object):
    """
    Parameters
    ----------
    method: None or str
        back propagation select method, accept "relative" only, default: None
    selective_size: int
        deque size to use, store the most recent selective_size historical loss, default: 1024
    beta: int
        sample whose selective probability >= power(np.random, beta) will be selected
    min_prob: Numeric
        selective probability is max(min_prob, rank_rate)
    """

    def __init__(self, method=None, beta=1, selective_size=consts.SELECTIVE_SIZE, min_prob=0, random_state=None):
        self.method = method
        self.selective_size = selective_size
        self.beta = beta
        self.min_prob = min_prob
        self.random_state = random_state

    def check(self):
        """Validate types/ranges; raises ValueError on invalid input."""
        # bug fix: original message read 'should be None be "relative"'
        if self.method is not None and self.method not in ["relative"]:
            raise ValueError('selective method should be None or "relative"')

        if not isinstance(self.selective_size, int) or self.selective_size <= 0:
            raise ValueError("selective size should be a positive integer")

        if not isinstance(self.beta, int):
            raise ValueError("beta should be integer")

        if not isinstance(self.min_prob, (float, int)):
            raise ValueError("min_prob should be numeric")
class CoAEConfuserParam(BaseParam):
    """
    A label protect mechanism proposed in paper: "Batch Label Inference and Replacement Attacks in Black-Boxed Vertical Federated Learning"
    paper link: https://arxiv.org/abs/2112.05409
    Convert true labels to fake soft labels by using an auto-encoder.
    Args:
        enable: boolean
            run CoAE or not
        epoch: None or int
            auto-encoder training epochs
        lr: float
            auto-encoder learning rate
        lambda1: float
            parameter to control the difference between true labels and fake soft labels. Larger the parameter,
            autoencoder will give more attention to making true labels and fake soft label different.
        lambda2: float
            parameter to control entropy loss, see original paper for details
        verbose: boolean
            print loss log while training auto encoder
    """

    def __init__(self, enable=False, epoch=50, lr=0.001, lambda1=1.0, lambda2=2.0, verbose=False):
        super(CoAEConfuserParam, self).__init__()
        self.enable = enable
        self.epoch = epoch
        self.lr = lr
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.verbose = verbose

    def check(self):
        """Validate types/ranges; raises ValueError on invalid input."""
        self.check_boolean(self.enable, 'enable')

        if not isinstance(self.epoch, int) or self.epoch <= 0:
            raise ValueError("epoch should be a positive integer")

        # accept ints as well as floats (e.g. lambda1=1 coming from a json
        # config); the previous float-only check rejected integer values
        if not isinstance(self.lr, (int, float)):
            raise ValueError('lr should be a numeric value')

        if not isinstance(self.lambda1, (int, float)):
            raise ValueError('lambda1 should be a numeric value')

        if not isinstance(self.lambda2, (int, float)):
            raise ValueError('lambda2 should be a numeric value')

        self.check_boolean(self.verbose, 'verbose')
@deprecated_param("validation_freqs", "early_stopping_rounds", "metrics", "use_first_metric_only")
class HeteroNNParam(BaseParam):
    """
    Parameters used for Hetero Neural Network.

    Parameters
    ----------
    task_type: str, task type of hetero nn model, one of 'classification', 'regression'.
    bottom_nn_define: a dict represents the structure of bottom neural network.
    interactive_layer_define: a dict represents the structure of interactive layer.
    interactive_layer_lr: float, the learning rate of interactive layer.
    top_nn_define: a dict represents the structure of top neural network.
    optimizer: optimizer method, accept following types:
        1. a string, one of "Adadelta", "Adagrad", "Adam", "Adamax", "Nadam", "RMSprop", "SGD"
        2. a dict, with a required key-value pair keyed by "optimizer",
            with optional key-value pairs such as learning rate.
        defaults to "SGD".
    loss: str, a string to define loss function used
    epochs: int, the maximum iteration for aggregation in training.
    batch_size : int, batch size when updating model.
        -1 means use all data in a batch. i.e. Not to use mini-batch strategy.
        defaults to -1.
    early_stop : str, accept 'diff' only in this version, default: 'diff'
        Method used to judge converge or not.
            a)	diff: Use difference of loss between two iterations to judge whether converge.
    floating_point_precision: None or integer, if not None, means use floating_point_precision-bit to speed up calculation,
        e.g.: convert an x to round(x * 2**floating_point_precision) during Paillier operation, divide
                the result by 2**floating_point_precision in the end.
    callback_param: CallbackParam object
    """

    def __init__(self,
                 task_type='classification',
                 bottom_nn_define=None,
                 top_nn_define=None,
                 interactive_layer_define=None,
                 interactive_layer_lr=0.9,
                 config_type='pytorch',
                 optimizer='SGD',
                 loss=None,
                 epochs=100,
                 batch_size=-1,
                 early_stop="diff",
                 tol=1e-5,
                 seed=100,
                 encrypt_param=EncryptParam(),
                 encrypted_mode_calculator_param=EncryptedModeCalculatorParam(),
                 predict_param=PredictParam(),
                 cv_param=CrossValidationParam(),
                 validation_freqs=None,
                 early_stopping_rounds=None,
                 metrics=None,
                 use_first_metric_only=True,
                 selector_param=SelectorParam(),
                 floating_point_precision=23,
                 callback_param=CallbackParam(),
                 coae_param=CoAEConfuserParam(),
                 dataset=DatasetParam()
                 ):
        super(HeteroNNParam, self).__init__()

        self.task_type = task_type
        self.bottom_nn_define = bottom_nn_define
        self.interactive_layer_define = interactive_layer_define
        self.interactive_layer_lr = interactive_layer_lr
        self.top_nn_define = top_nn_define
        self.batch_size = batch_size
        self.epochs = epochs
        self.early_stop = early_stop
        self.tol = tol
        self.optimizer = optimizer
        self.loss = loss
        self.validation_freqs = validation_freqs
        self.early_stopping_rounds = early_stopping_rounds
        self.metrics = metrics or []
        self.use_first_metric_only = use_first_metric_only

        # Sub-param objects are deep-copied so instances never share the
        # mutable defaults created once at function-definition time.
        # (Previously encrypted_mode_calculator_param, selector_param,
        # coae_param and dataset were stored without copying, so every
        # HeteroNNParam instance shared the same default objects.)
        self.encrypt_param = copy.deepcopy(encrypt_param)
        self.encrypted_model_calculator_param = copy.deepcopy(encrypted_mode_calculator_param)
        self.predict_param = copy.deepcopy(predict_param)
        self.cv_param = copy.deepcopy(cv_param)
        self.selector_param = copy.deepcopy(selector_param)
        self.floating_point_precision = floating_point_precision
        self.callback_param = copy.deepcopy(callback_param)
        self.coae_param = copy.deepcopy(coae_param)
        self.dataset = copy.deepcopy(dataset)
        self.seed = seed
        self.config_type = 'pytorch'  # pytorch only

    def check(self):
        """Validate all parameters; raises ValueError/AssertionError on invalid input."""
        assert isinstance(self.dataset, DatasetParam), 'dataset must be a DatasetParam()'

        self.dataset.check()
        self.check_positive_integer(self.seed, 'seed')

        if self.task_type not in ["classification", "regression"]:
            raise ValueError("config_type should be classification or regression")

        if not isinstance(self.tol, (int, float)):
            raise ValueError("tol should be numeric")

        if not isinstance(self.epochs, int) or self.epochs <= 0:
            raise ValueError("epochs should be a positive integer")

        if self.bottom_nn_define and not isinstance(self.bottom_nn_define, dict):
            raise ValueError("bottom_nn_define should be a dict defining the structure of neural network")

        if self.top_nn_define and not isinstance(self.top_nn_define, dict):
            raise ValueError("top_nn_define should be a dict defining the structure of neural network")

        if self.interactive_layer_define is not None and not isinstance(self.interactive_layer_define, dict):
            raise ValueError(
                "the interactive_layer_define should be a dict defining the structure of interactive layer")

        if self.batch_size != -1:
            if not isinstance(self.batch_size, int) \
                    or self.batch_size < consts.MIN_BATCH_SIZE:
                raise ValueError(
                    " {} not supported, should be larger than 10 or -1 represent for all data".format(self.batch_size))

        if self.early_stop != "diff":
            raise ValueError("early stop should be diff in this version")

        if self.metrics is not None and not isinstance(self.metrics, list):
            raise ValueError("metrics should be a list")

        if self.floating_point_precision is not None and \
                (not isinstance(self.floating_point_precision, int) or
                 self.floating_point_precision < 0 or self.floating_point_precision > 63):
            raise ValueError("floating point precision should be null or a integer between 0 and 63")

        self.encrypt_param.check()
        self.encrypted_model_calculator_param.check()
        self.predict_param.check()
        self.selector_param.check()
        self.coae_param.check()

        descr = "hetero nn param's "

        # NOTE(review): "metrics" is declared deprecated by the decorator but
        # is absent from this migration loop — confirm whether it should also
        # switch callbacks on, as it does in other param classes.
        for p in ["early_stopping_rounds", "validation_freqs",
                  "use_first_metric_only"]:
            if self._deprecated_params_set.get(p):
                if "callback_param" in self.get_user_feeded():
                    raise ValueError(f"{p} and callback param should not be set simultaneously,"
                                     f"{self._deprecated_params_set}, {self.get_user_feeded()}")
                else:
                    self.callback_param.callbacks = ["PerformanceEvaluate"]
                break

        if self._warn_to_deprecate_param("validation_freqs", descr, "callback_param's 'validation_freqs'"):
            self.callback_param.validation_freqs = self.validation_freqs

        if self._warn_to_deprecate_param("early_stopping_rounds", descr, "callback_param's 'early_stopping_rounds'"):
            self.callback_param.early_stopping_rounds = self.early_stopping_rounds

        if self._warn_to_deprecate_param("metrics", descr, "callback_param's 'metrics'"):
            if self.metrics:
                self.callback_param.metrics = self.metrics

        if self._warn_to_deprecate_param("use_first_metric_only", descr, "callback_param's 'use_first_metric_only'"):
            self.callback_param.use_first_metric_only = self.use_first_metric_only
| 12,596 | 41.557432 | 139 | py |
FATE | FATE-master/python/federatedml/param/boosting_param.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.param.base_param import BaseParam, deprecated_param
from federatedml.param.encrypt_param import EncryptParam
from federatedml.param.encrypted_mode_calculation_param import EncryptedModeCalculatorParam
from federatedml.param.cross_validation_param import CrossValidationParam
from federatedml.param.predict_param import PredictParam
from federatedml.param.callback_param import CallbackParam
from federatedml.util import consts, LOGGER
import copy
import collections
# Deprecated parameter names (superseded by callback_param) for hetero / homo boosting.
hetero_deprecated_param_list = ["early_stopping_rounds", "validation_freqs", "metrics", "use_first_metric_only"]
homo_deprecated_param_list = ["validation_freqs", "metrics"]
class ObjectiveParam(BaseParam):
    """
    Define objective parameters that used in federated ml.

    Parameters
    ----------
    objective : {None, 'cross_entropy', 'lse', 'lae', 'log_cosh', 'tweedie', 'fair', 'huber'}
        None in host's config, should be str in guest'config.
        when task_type is classification, only support 'cross_entropy',
        other 6 types support in regression task

    params : None or list
        should be non empty list when objective is 'tweedie','fair','huber',
        first element of list shoulf be a float-number large than 0.0 when objective is 'fair', 'huber',
        first element of list should be a float-number in [1.0, 2.0) when objective is 'tweedie'
    """

    def __init__(self, objective='cross_entropy', params=None):
        self.objective = objective
        self.params = params

    def check(self, task_type=None):
        """Validate objective/params against *task_type*; raises ValueError on invalid input."""
        if self.objective is None:
            return True

        descr = "objective param's"

        LOGGER.debug('check objective {}'.format(self.objective))

        if task_type not in [consts.CLASSIFICATION, consts.REGRESSION]:
            self.objective = self.check_and_change_lower(self.objective,
                                                         ["cross_entropy", "lse", "lae", "huber", "fair",
                                                          "log_cosh", "tweedie"],
                                                         descr)

        if task_type == consts.CLASSIFICATION:
            if self.objective != "cross_entropy":
                raise ValueError("objective param's objective {} not supported".format(self.objective))

        elif task_type == consts.REGRESSION:
            self.objective = self.check_and_change_lower(self.objective,
                                                         ["lse", "lae", "huber", "fair", "log_cosh", "tweedie"],
                                                         descr)

        params = self.params
        if self.objective in ["huber", "fair", "tweedie"]:
            if type(params).__name__ != 'list' or len(params) < 1:
                raise ValueError(
                    "objective param's params {} not supported, should be non-empty list".format(params))

            if type(params[0]).__name__ not in ["float", "int", "long"]:
                raise ValueError("objective param's params[0] {} not supported".format(self.params[0]))

            if self.objective == 'tweedie':
                if params[0] < 1 or params[0] >= 2:
                    raise ValueError("in tweedie regression, objective params[0] should betweend [1, 2)")

            # bug fix: original condition was `self.objective == 'fair' or 'huber'`,
            # which is always truthy ('huber' is a non-empty string); check
            # membership instead so the positivity test only applies to fair/huber
            if self.objective in ('fair', 'huber'):
                if params[0] <= 0.0:
                    raise ValueError("in {} regression, objective params[0] should greater than 0.0".format(
                        self.objective))
        return True
class DecisionTreeParam(BaseParam):
"""
Define decision tree parameters that used in federated ml.
Parameters
----------
criterion_method : {"xgboost"}, default: "xgboost"
the criterion function to use
criterion_params: list or dict
should be non empty and elements are float-numbers,
if a list is offered, the first one is l2 regularization value, and the second one is
l1 regularization value.
if a dict is offered, make sure it contains key 'l1', and 'l2'.
l1, l2 regularization values are non-negative floats.
default: [0.1, 0] or {'l1':0, 'l2':0,1}
max_depth: positive integer
the max depth of a decision tree, default: 3
min_sample_split: int
least quantity of nodes to split, default: 2
min_impurity_split: float
least gain of a single split need to reach, default: 1e-3
min_child_weight: float
sum of hessian needed in child nodes. default is 0
min_leaf_node: int
when samples no more than min_leaf_node, it becomes a leave, default: 1
max_split_nodes: positive integer
we will use no more than max_split_nodes to
parallel finding their splits in a batch, for memory consideration. default is 65536
feature_importance_type: {'split', 'gain'}
if is 'split', feature_importances calculate by feature split times,
if is 'gain', feature_importances calculate by feature split gain.
default: 'split'
Due to the safety concern, we adjust training strategy of Hetero-SBT in FATE-1.8,
When running Hetero-SBT, this parameter is now abandoned.
In Hetero-SBT of FATE-1.8, guest side will compute split, gain of local features,
and receive anonymous feature importance results from hosts. Hosts will compute split
importance of local features.
use_missing: bool, accepted True, False only, default: False
use missing value in training process or not.
zero_as_missing: bool
regard 0 as missing value or not,
will be use only if use_missing=True, default: False
deterministic: bool
ensure stability when computing histogram. Set this to true to ensure stable result when using
same data and same parameter. But it may slow down computation.
"""
def __init__(self, criterion_method="xgboost", criterion_params=[0.1, 0], max_depth=3,
min_sample_split=2, min_impurity_split=1e-3, min_leaf_node=1,
max_split_nodes=consts.MAX_SPLIT_NODES, feature_importance_type='split',
n_iter_no_change=True, tol=0.001, min_child_weight=0,
use_missing=False, zero_as_missing=False, deterministic=False):
super(DecisionTreeParam, self).__init__()
self.criterion_method = criterion_method
self.criterion_params = criterion_params
self.max_depth = max_depth
self.min_sample_split = min_sample_split
self.min_impurity_split = min_impurity_split
self.min_leaf_node = min_leaf_node
self.min_child_weight = min_child_weight
self.max_split_nodes = max_split_nodes
self.feature_importance_type = feature_importance_type
self.n_iter_no_change = n_iter_no_change
self.tol = tol
self.use_missing = use_missing
self.zero_as_missing = zero_as_missing
self.deterministic = deterministic
def check(self):
descr = "decision tree param"
self.criterion_method = self.check_and_change_lower(self.criterion_method,
["xgboost"],
descr)
if len(self.criterion_params) == 0:
raise ValueError("decisition tree param's criterio_params should be non empty")
if isinstance(self.criterion_params, list):
assert len(self.criterion_params) == 2, 'length of criterion_param should be 2: l1, l2 regularization ' \
'values are needed'
self.check_nonnegative_number(self.criterion_params[0], 'l2 reg value')
self.check_nonnegative_number(self.criterion_params[1], 'l1 reg value')
elif isinstance(self.criterion_params, dict):
assert 'l1' in self.criterion_params and 'l2' in self.criterion_params, 'l1 and l2 keys are needed in ' \
'criterion_params dict'
self.criterion_params = [self.criterion_params['l2'], self.criterion_params['l1']]
else:
raise ValueError('criterion_params should be a dict or a list contains l1, l2 reg value')
if type(self.max_depth).__name__ not in ["int", "long"]:
raise ValueError("decision tree param's max_depth {} not supported, should be integer".format(
self.max_depth))
if self.max_depth < 1:
raise ValueError("decision tree param's max_depth should be positive integer, no less than 1")
if type(self.min_sample_split).__name__ not in ["int", "long"]:
raise ValueError("decision tree param's min_sample_split {} not supported, should be integer".format(
self.min_sample_split))
if type(self.min_impurity_split).__name__ not in ["int", "long", "float"]:
raise ValueError("decision tree param's min_impurity_split {} not supported, should be numeric".format(
self.min_impurity_split))
if type(self.min_leaf_node).__name__ not in ["int", "long"]:
raise ValueError("decision tree param's min_leaf_node {} not supported, should be integer".format(
self.min_leaf_node))
if type(self.max_split_nodes).__name__ not in ["int", "long"] or self.max_split_nodes < 1:
raise ValueError("decision tree param's max_split_nodes {} not supported, " +
"should be positive integer between 1 and {}".format(self.max_split_nodes,
consts.MAX_SPLIT_NODES))
if type(self.n_iter_no_change).__name__ != "bool":
raise ValueError("decision tree param's n_iter_no_change {} not supported, should be bool type".format(
self.n_iter_no_change))
if type(self.tol).__name__ not in ["float", "int", "long"]:
raise ValueError("decision tree param's tol {} not supported, should be numeric".format(self.tol))
self.feature_importance_type = self.check_and_change_lower(self.feature_importance_type,
["split", "gain"],
descr)
self.check_nonnegative_number(self.min_child_weight, 'min_child_weight')
self.check_boolean(self.deterministic, 'deterministic')
return True
class BoostingParam(BaseParam):
    """
    Basic parameter for Boosting Algorithms

    Parameters
    ----------
    task_type : {'classification', 'regression'}, default: 'classification'
        task type
    objective_param : ObjectiveParam Object, default: ObjectiveParam()
        objective param
    learning_rate : float, int or long
        the learning rate of secure boost. default: 0.3
    num_trees : int or float
        the max number of boosting round. default: 5
    subsample_feature_rate : float
        a float-number in [0, 1], default: 1.0
    n_iter_no_change : bool,
        when True and residual error less than tol, tree building process will stop. default: True
    bin_num: positive integer greater than 1
        bin number use in quantile. default: 32
    validation_freqs: None or positive integer or container object in python
        Do validation in training process or Not.
        if equals None, will not do validation in train process;
        if equals positive integer, will validate data every validation_freqs epochs passes;
        if container object in python, will validate data if epochs belong to this container.
        e.g. validation_freqs = [10, 15], will validate data when epoch equals to 10 and 15.
        Default: None
    """

    def __init__(self, task_type=consts.CLASSIFICATION,
                 objective_param=ObjectiveParam(),
                 learning_rate=0.3, num_trees=5, subsample_feature_rate=1, n_iter_no_change=True,
                 tol=0.0001, bin_num=32,
                 predict_param=PredictParam(), cv_param=CrossValidationParam(),
                 validation_freqs=None, metrics=None, random_seed=100,
                 binning_error=consts.DEFAULT_RELATIVE_ERROR):
        super(BoostingParam, self).__init__()

        self.task_type = task_type
        # deep-copy mutable sub-param objects so instances never share state
        self.objective_param = copy.deepcopy(objective_param)
        self.learning_rate = learning_rate
        self.num_trees = num_trees
        self.subsample_feature_rate = subsample_feature_rate
        self.n_iter_no_change = n_iter_no_change
        self.tol = tol
        self.bin_num = bin_num
        self.predict_param = copy.deepcopy(predict_param)
        self.cv_param = copy.deepcopy(cv_param)
        self.validation_freqs = validation_freqs
        self.metrics = metrics
        self.random_seed = random_seed
        self.binning_error = binning_error

    def check(self):
        """
        Validate boosting parameters.

        Returns
        -------
        bool
            True when every parameter is valid.

        Raises
        ------
        ValueError
            If any parameter has an unsupported type or value.
        """
        # FIX: the deprecated alias `collections.Container` was removed in
        # Python 3.10; use the canonical collections.abc.Container instead.
        from collections.abc import Container

        descr = "boosting tree param's"

        if self.task_type not in [consts.CLASSIFICATION, consts.REGRESSION]:
            raise ValueError("boosting_core tree param's task_type {} not supported, should be {} or {}".format(
                self.task_type, consts.CLASSIFICATION, consts.REGRESSION))

        self.objective_param.check(self.task_type)

        if type(self.learning_rate).__name__ not in ["float", "int", "long"]:
            raise ValueError("boosting_core tree param's learning_rate {} not supported, should be numeric".format(
                self.learning_rate))

        if type(self.subsample_feature_rate).__name__ not in ["float", "int", "long"] or \
                self.subsample_feature_rate < 0 or self.subsample_feature_rate > 1:
            raise ValueError(
                "boosting_core tree param's subsample_feature_rate should be a numeric number between 0 and 1")

        if type(self.n_iter_no_change).__name__ != "bool":
            raise ValueError("boosting_core tree param's n_iter_no_change {} not supported, should be bool type".format(
                self.n_iter_no_change))

        if type(self.tol).__name__ not in ["float", "int", "long"]:
            raise ValueError("boosting_core tree param's tol {} not supported, should be numeric".format(self.tol))

        if type(self.bin_num).__name__ not in ["int", "long"] or self.bin_num < 2:
            raise ValueError(
                "boosting_core tree param's bin_num {} not supported, should be positive integer greater than 1".format(
                    self.bin_num))

        if self.validation_freqs is None:
            pass
        elif isinstance(self.validation_freqs, int):
            if self.validation_freqs < 1:
                raise ValueError("validation_freqs should be larger than 0 when it's integer")
        elif not isinstance(self.validation_freqs, Container):
            raise ValueError("validation_freqs should be None or positive integer or container")

        if self.metrics is not None and not isinstance(self.metrics, list):
            raise ValueError("metrics should be a list")

        # raise instead of assert: asserts are stripped under `python -O`
        if self.random_seed is not None:
            if not isinstance(self.random_seed, int) or self.random_seed < 0:
                raise ValueError('random seed must be an integer >= 0')

        self.check_decimal_float(self.binning_error, descr)

        return True
class HeteroBoostingParam(BoostingParam):
    """
    Parameters
    ----------
    encrypt_param : EncodeParam Object
        encrypt method use in secure boost, default: EncryptParam()
    encrypted_mode_calculator_param: EncryptedModeCalculatorParam object
        the calculation mode use in secureboost,
        default: EncryptedModeCalculatorParam()
    """

    def __init__(self, task_type=consts.CLASSIFICATION,
                 objective_param=ObjectiveParam(),
                 learning_rate=0.3, num_trees=5, subsample_feature_rate=1, n_iter_no_change=True,
                 tol=0.0001, encrypt_param=EncryptParam(),
                 bin_num=32,
                 encrypted_mode_calculator_param=EncryptedModeCalculatorParam(),
                 predict_param=PredictParam(), cv_param=CrossValidationParam(),
                 validation_freqs=None, early_stopping_rounds=None, metrics=None, use_first_metric_only=False,
                 random_seed=100, binning_error=consts.DEFAULT_RELATIVE_ERROR):
        # delegate the common boosting options to the base class
        super(HeteroBoostingParam, self).__init__(task_type=task_type,
                                                  objective_param=objective_param,
                                                  learning_rate=learning_rate,
                                                  num_trees=num_trees,
                                                  subsample_feature_rate=subsample_feature_rate,
                                                  n_iter_no_change=n_iter_no_change,
                                                  tol=tol,
                                                  bin_num=bin_num,
                                                  predict_param=predict_param,
                                                  cv_param=cv_param,
                                                  validation_freqs=validation_freqs,
                                                  metrics=metrics,
                                                  random_seed=random_seed,
                                                  binning_error=binning_error)
        # hetero-specific: encryption settings and early stopping
        self.encrypt_param = copy.deepcopy(encrypt_param)
        self.encrypted_mode_calculator_param = copy.deepcopy(encrypted_mode_calculator_param)
        self.early_stopping_rounds = early_stopping_rounds
        self.use_first_metric_only = use_first_metric_only

    def check(self):
        """Validate hetero boosting parameters on top of the base checks."""
        super(HeteroBoostingParam, self).check()
        self.encrypted_mode_calculator_param.check()
        self.encrypt_param.check()

        # early_stopping_rounds is only constrained when it is an integer;
        # None (disabled) is left untouched
        if isinstance(self.early_stopping_rounds, int):
            if self.early_stopping_rounds < 1:
                raise ValueError("early stopping rounds should be larger than 0 when it's integer")
            if self.validation_freqs is None:
                raise ValueError("validation freqs must be set when early stopping is enabled")

        if not isinstance(self.use_first_metric_only, bool):
            raise ValueError("use_first_metric_only should be a boolean")

        return True
@deprecated_param(*hetero_deprecated_param_list)
class HeteroSecureBoostParam(HeteroBoostingParam):
    """
    Define boosting tree parameters that used in federated ml.

    Parameters
    ----------
    task_type : {'classification', 'regression'}, default: 'classification'
        task type
    tree_param : DecisionTreeParam Object, default: DecisionTreeParam()
        tree param
    objective_param : ObjectiveParam Object, default: ObjectiveParam()
        objective param
    learning_rate : float, int or long
        the learning rate of secure boost. default: 0.3
    num_trees : int or float
        the max number of trees to build. default: 5
    subsample_feature_rate : float
        a float-number in [0, 1], default: 1.0
    random_seed: int
        seed that controls all random functions
    n_iter_no_change : bool,
        when True and residual error less than tol, tree building process will stop. default: True
    encrypt_param : EncodeParam Object
        encrypt method use in secure boost, default: EncryptParam(), this parameter
        is only for hetero-secureboost
    bin_num: positive integer greater than 1
        bin number use in quantile. default: 32
    encrypted_mode_calculator_param: EncryptedModeCalculatorParam object
        the calculation mode use in secureboost, default: EncryptedModeCalculatorParam(), only for hetero-secureboost
    use_missing: bool
        use missing value in training process or not. default: False
    zero_as_missing: bool
        regard 0 as missing value or not, will be use only if use_missing=True, default: False
    validation_freqs: None or positive integer or container object in python
        Do validation in training process or Not.
        if equals None, will not do validation in train process;
        if equals positive integer, will validate data every validation_freqs epochs passes;
        if container object in python, will validate data if epochs belong to this container.
        e.g. validation_freqs = [10, 15], will validate data when epoch equals to 10 and 15.
        Default: None
        The default value is None, 1 is suggested. You can set it to a number larger than 1 in order to
        speed up training by skipping validation rounds. When it is larger than 1, a number which is
        divisible by "num_trees" is recommended, otherwise, you will miss the validation scores
        of last training iteration.
    early_stopping_rounds: integer larger than 0
        will stop training if one metric of one validation data
        does not improve in last early_stopping_round rounds,
        need to set validation freqs and will check early_stopping every at every validation epoch,
    metrics: list, default: []
        Specify which metrics to be used when performing evaluation during training process.
        If set as empty, default metrics will be used. For regression tasks, default metrics are
        ['root_mean_squared_error', 'mean_absolute_error'], For binary-classification tasks, default metrics
        are ['auc', 'ks']. For multi-classification tasks, default metrics are ['accuracy', 'precision', 'recall']
    use_first_metric_only: bool
        use only the first metric for early stopping
    complete_secure: int, default: 0
        if use complete_secure, when use complete secure, build first 'complete secure' tree using only guest features
    sparse_optimization:
        this parameter is abandoned in FATE-1.7.1
    run_goss: bool
        activate Gradient-based One-Side Sampling, which selects large gradient and small
        gradient samples using top_rate and other_rate.
    top_rate: float, the retain ratio of large gradient data, used when run_goss is True
    other_rate: float, the retain ratio of small gradient data, used when run_goss is True
    cipher_compress_error: This param is now abandoned
    cipher_compress: bool, default is True, use cipher compressing to reduce computation cost and transfer cost
    boosting_strategy: str
        std: standard sbt setting
        mix: alternate using guest/host features to build trees. For example, the first 'tree_num_per_party' trees
        use guest features,
        the second k trees use host features, and so on
        layered: only support 2 party, when running layered mode, first 'host_depth' layer will use host features,
        and then next 'guest_depth' will only use guest features
    work_mode: str
        This parameter has the same function as boosting_strategy, but is deprecated
    tree_num_per_party: int, every party will alternate build 'tree_num_per_party' trees until reach max tree num, this
        param is valid when boosting_strategy is mix
    guest_depth: int, guest will build last guest_depth of a decision tree using guest features, is valid when
        boosting_strategy is layered
    host_depth: int, host will build first host_depth of a decision tree using host features, is valid when
        boosting_strategy is layered
    multi_mode: str, decide which mode to use when running multi-classification task:
        single_output standard gbdt multi-classification strategy
        multi_output every leaf give a multi-dimension predict, using multi_mode can save time
        by learning a model with less trees.
    EINI_inference: bool
        default is False, this option changes the inference algorithm used in predict tasks.
        a secure prediction method that hides decision path to enhance security in the inference
        step. This method is inspired by EINI inference algorithm.
    EINI_random_mask: bool
        default is False
        multiply predict result by a random float number to confuse original predict result. This operation further
        enhances the security of naive EINI algorithm.
    EINI_complexity_check: bool
        default is False
        check the complexity of tree models when running EINI algorithms. Complexity models are easy to hide their
        decision path, while simple tree models are not, therefore if a tree model is too simple, it is not allowed
        to run EINI predict algorithms.
    """

    def __init__(self, tree_param: DecisionTreeParam = DecisionTreeParam(), task_type=consts.CLASSIFICATION,
                 objective_param=ObjectiveParam(),
                 learning_rate=0.3, num_trees=5, subsample_feature_rate=1.0, n_iter_no_change=True,
                 tol=0.0001, encrypt_param=EncryptParam(),
                 bin_num=32,
                 encrypted_mode_calculator_param=EncryptedModeCalculatorParam(),
                 predict_param=PredictParam(), cv_param=CrossValidationParam(),
                 validation_freqs=None, early_stopping_rounds=None, use_missing=False, zero_as_missing=False,
                 complete_secure=0, metrics=None, use_first_metric_only=False, random_seed=100,
                 binning_error=consts.DEFAULT_RELATIVE_ERROR,
                 sparse_optimization=False, run_goss=False, top_rate=0.2, other_rate=0.1,
                 cipher_compress_error=None, cipher_compress=True, new_ver=True, boosting_strategy=consts.STD_TREE,
                 work_mode=None, tree_num_per_party=1, guest_depth=2, host_depth=3, callback_param=CallbackParam(),
                 multi_mode=consts.SINGLE_OUTPUT, EINI_inference=False, EINI_random_mask=False,
                 EINI_complexity_check=False):

        super(HeteroSecureBoostParam, self).__init__(task_type, objective_param, learning_rate, num_trees,
                                                     subsample_feature_rate, n_iter_no_change, tol, encrypt_param,
                                                     bin_num, encrypted_mode_calculator_param, predict_param, cv_param,
                                                     validation_freqs, early_stopping_rounds, metrics=metrics,
                                                     use_first_metric_only=use_first_metric_only,
                                                     random_seed=random_seed,
                                                     binning_error=binning_error)

        # mutable sub-params are deep-copied so instances never share state
        self.tree_param = copy.deepcopy(tree_param)
        self.zero_as_missing = zero_as_missing
        self.use_missing = use_missing
        self.complete_secure = complete_secure
        self.sparse_optimization = sparse_optimization
        self.run_goss = run_goss
        self.top_rate = top_rate
        self.other_rate = other_rate
        self.cipher_compress_error = cipher_compress_error
        self.cipher_compress = cipher_compress
        self.new_ver = new_ver
        self.EINI_inference = EINI_inference
        self.EINI_random_mask = EINI_random_mask
        self.EINI_complexity_check = EINI_complexity_check
        self.boosting_strategy = boosting_strategy
        # work_mode is the deprecated alias of boosting_strategy; resolved in check()
        self.work_mode = work_mode
        self.tree_num_per_party = tree_num_per_party
        self.guest_depth = guest_depth
        self.host_depth = host_depth
        self.callback_param = copy.deepcopy(callback_param)
        self.multi_mode = multi_mode

    def check(self):
        """Validate hetero-SBT options and migrate deprecated parameters into callback_param.

        Returns True on success; raises ValueError (or AssertionError for
        complete_secure) on invalid settings.
        """

        super(HeteroSecureBoostParam, self).check()
        self.tree_param.check()
        if not isinstance(self.use_missing, bool):
            raise ValueError('use missing should be bool type')
        if not isinstance(self.zero_as_missing, bool):
            raise ValueError('zero as missing should be bool type')
        self.check_boolean(self.run_goss, 'run goss')
        self.check_decimal_float(self.top_rate, 'top rate')
        self.check_decimal_float(self.other_rate, 'other rate')
        self.check_positive_number(self.other_rate, 'other_rate')
        self.check_positive_number(self.top_rate, 'top_rate')
        self.check_boolean(self.new_ver, 'code version switcher')
        self.check_boolean(self.cipher_compress, 'cipher compress')
        self.check_boolean(self.EINI_inference, 'eini inference')
        self.check_boolean(self.EINI_random_mask, 'eini random mask')
        self.check_boolean(self.EINI_complexity_check, 'eini complexity check')
        assert isinstance(self.complete_secure,
                          int) and self.complete_secure >= 0, "complete secure should be an int >= 0"

        if self.EINI_inference and self.EINI_random_mask:
            LOGGER.warning('To protect the inference decision path, notice that current setting will multiply'
                           ' predict result by a random number, hence SecureBoost will return confused predict scores'
                           ' that is not the same as the original predict scores')

        # NOTE(review): this warning keys off the deprecated work_mode only; a user
        # setting boosting_strategy=mix directly would not trigger it — confirm intended
        if self.work_mode == consts.MIX_TREE and self.EINI_inference:
            LOGGER.warning('Mix tree mode does not support EINI, use default predict setting')

        # deprecated work_mode, when supplied, overrides boosting_strategy
        if self.work_mode is not None:
            self.boosting_strategy = self.work_mode

        if self.multi_mode not in [consts.SINGLE_OUTPUT, consts.MULTI_OUTPUT]:
            raise ValueError('unsupported multi-classification mode')
        if self.multi_mode == consts.MULTI_OUTPUT:
            # multi-output (MO) trees require standard boosting + cipher compress
            if self.boosting_strategy != consts.STD_TREE:
                raise ValueError('MO trees only works when boosting strategy is std tree')
            if not self.cipher_compress:
                raise ValueError('Mo trees only works when cipher compress is enabled')

        if self.boosting_strategy not in [consts.STD_TREE, consts.LAYERED_TREE, consts.MIX_TREE]:
            raise ValueError('unknown sbt boosting strategy{}'.format(self.boosting_strategy))

        # deprecated callback-related params may not be mixed with the new callback_param
        for p in ["early_stopping_rounds", "validation_freqs", "metrics",
                  "use_first_metric_only"]:
            # if self._warn_to_deprecate_param(p, "", ""):
            if self._deprecated_params_set.get(p):
                if "callback_param" in self.get_user_feeded():
                    raise ValueError(f"{p} and callback param should not be set simultaneously,"
                                     f"{self._deprecated_params_set}, {self.get_user_feeded()}")
                else:
                    # user relied on deprecated params: enable the evaluation callback
                    self.callback_param.callbacks = ["PerformanceEvaluate"]
                break

        descr = "boosting_param's"

        # migrate each deprecated param (with a deprecation warning) into callback_param
        if self._warn_to_deprecate_param("validation_freqs", descr, "callback_param's 'validation_freqs'"):
            self.callback_param.validation_freqs = self.validation_freqs

        if self._warn_to_deprecate_param("early_stopping_rounds", descr, "callback_param's 'early_stopping_rounds'"):
            self.callback_param.early_stopping_rounds = self.early_stopping_rounds

        if self._warn_to_deprecate_param("metrics", descr, "callback_param's 'metrics'"):
            self.callback_param.metrics = self.metrics

        if self._warn_to_deprecate_param("use_first_metric_only", descr, "callback_param's 'use_first_metric_only'"):
            self.callback_param.use_first_metric_only = self.use_first_metric_only

        # GOSS retain ratios must leave some samples unselected
        if self.top_rate + self.other_rate >= 1:
            raise ValueError('sum of top rate and other rate should be smaller than 1')

        return True
@deprecated_param(*homo_deprecated_param_list)
class HomoSecureBoostParam(BoostingParam):
    """
    Parameter for Homo (horizontal) SecureBoost.

    Parameters
    ----------
    backend: {'distributed', 'memory'}
        decides which backend to use when computing histograms for homo-sbt
    """

    def __init__(self, tree_param: DecisionTreeParam = DecisionTreeParam(), task_type=consts.CLASSIFICATION,
                 objective_param=ObjectiveParam(),
                 learning_rate=0.3, num_trees=5, subsample_feature_rate=1, n_iter_no_change=True,
                 tol=0.0001, bin_num=32, predict_param=PredictParam(), cv_param=CrossValidationParam(),
                 validation_freqs=None, use_missing=False, zero_as_missing=False, random_seed=100,
                 binning_error=consts.DEFAULT_RELATIVE_ERROR, backend=consts.DISTRIBUTED_BACKEND,
                 callback_param=CallbackParam(), multi_mode=consts.SINGLE_OUTPUT):
        super(HomoSecureBoostParam, self).__init__(task_type=task_type,
                                                   objective_param=objective_param,
                                                   learning_rate=learning_rate,
                                                   num_trees=num_trees,
                                                   subsample_feature_rate=subsample_feature_rate,
                                                   n_iter_no_change=n_iter_no_change,
                                                   tol=tol,
                                                   bin_num=bin_num,
                                                   predict_param=predict_param,
                                                   cv_param=cv_param,
                                                   validation_freqs=validation_freqs,
                                                   random_seed=random_seed,
                                                   binning_error=binning_error
                                                   )
        self.use_missing = use_missing
        self.zero_as_missing = zero_as_missing
        # deep-copy mutable sub-params so instances never share state
        self.tree_param = copy.deepcopy(tree_param)
        self.backend = backend
        self.callback_param = copy.deepcopy(callback_param)
        self.multi_mode = multi_mode

    def check(self):
        """
        Validate homo-sbt specific options on top of the base boosting checks,
        and migrate the deprecated validation_freqs/metrics parameters into
        callback_param for backward compatibility.

        Returns
        -------
        bool
            True when every parameter is valid.
        """
        super(HomoSecureBoostParam, self).check()
        self.tree_param.check()
        if not isinstance(self.use_missing, bool):
            raise ValueError('use missing should be bool type')
        if not isinstance(self.zero_as_missing, bool):
            raise ValueError('zero as missing should be bool type')
        if self.backend not in [consts.MEMORY_BACKEND, consts.DISTRIBUTED_BACKEND]:
            raise ValueError('unsupported backend')
        # FIX: the original performed this exact membership check twice;
        # the redundant second copy has been removed.
        if self.multi_mode not in [consts.SINGLE_OUTPUT, consts.MULTI_OUTPUT]:
            raise ValueError('unsupported multi-classification mode')

        # deprecated callback-related params may not be mixed with the new callback_param
        for p in ["validation_freqs", "metrics"]:
            # if self._warn_to_deprecate_param(p, "", ""):
            if self._deprecated_params_set.get(p):
                if "callback_param" in self.get_user_feeded():
                    raise ValueError(f"{p} and callback param should not be set simultaneously,"
                                     f"{self._deprecated_params_set}, {self.get_user_feeded()}")
                else:
                    self.callback_param.callbacks = ["PerformanceEvaluate"]
                break

        descr = "boosting_param's"

        if self._warn_to_deprecate_param("validation_freqs", descr, "callback_param's 'validation_freqs'"):
            self.callback_param.validation_freqs = self.validation_freqs

        if self._warn_to_deprecate_param("metrics", descr, "callback_param's 'metrics'"):
            self.callback_param.metrics = self.metrics

        if self.multi_mode == consts.MULTI_OUTPUT:
            if self.task_type == consts.REGRESSION:
                raise ValueError('regression tasks not support multi-output trees')

        return True
| 35,803 | 51.268613 | 130 | py |
FATE | FATE-master/python/federatedml/param/homo_nn_param.py | from federatedml.param.base_param import BaseParam
class TrainerParam(BaseParam):
    """Holds a trainer's name together with its free-form keyword configuration."""

    def __init__(self, trainer_name=None, **kwargs):
        super(TrainerParam, self).__init__()
        self.trainer_name = trainer_name
        self.param = kwargs

    def check(self):
        """Validate that trainer_name, when provided, is a string."""
        if self.trainer_name is None:
            return
        self.check_string(self.trainer_name, 'trainer_name')

    def to_dict(self):
        """Return a plain-dict view: {'trainer_name': ..., 'param': {...}}."""
        return {'trainer_name': self.trainer_name, 'param': self.param}
class DatasetParam(BaseParam):
    """Holds a dataset's name together with its free-form keyword configuration."""

    def __init__(self, dataset_name=None, **kwargs):
        super(DatasetParam, self).__init__()
        self.dataset_name = dataset_name
        self.param = kwargs

    def check(self):
        """Validate that dataset_name, when provided, is a string."""
        if self.dataset_name is None:
            return
        self.check_string(self.dataset_name, 'dataset_name')

    def to_dict(self):
        """Return a plain-dict view: {'dataset_name': ..., 'param': {...}}."""
        return {'dataset_name': self.dataset_name, 'param': self.param}
class HomoNNParam(BaseParam):
    """
    Parameter object for the Homo-NN component.

    Parameters
    ----------
    trainer : TrainerParam
        trainer name and its keyword configuration
    dataset : DatasetParam
        dataset name and its keyword configuration
    torch_seed : int
        global torch random seed, must be an int >= 0. default: 100
    nn_define : dict
        dict defining the model structure
    loss : dict
        loss configuration dict
    optimizer : dict
        optimizer configuration dict
    ds_config : dict
        extra config dict (e.g. deepspeed) — passed through unvalidated
    """

    def __init__(self,
                 trainer: TrainerParam = TrainerParam(),
                 dataset: DatasetParam = DatasetParam(),
                 torch_seed: int = 100,
                 nn_define: dict = None,
                 loss: dict = None,
                 optimizer: dict = None,
                 ds_config: dict = None
                 ):
        super(HomoNNParam, self).__init__()
        from copy import deepcopy  # local import: this module only imports BaseParam at top level

        # FIX: deep-copy the mutable default TrainerParam()/DatasetParam()
        # instances so separate HomoNNParam objects never share state —
        # consistent with the copy.deepcopy convention used by the other
        # *Param classes in federatedml.
        self.trainer = deepcopy(trainer)
        self.dataset = deepcopy(dataset)
        self.torch_seed = torch_seed
        self.nn_define = nn_define
        self.loss = loss
        self.optimizer = optimizer
        self.ds_config = ds_config

    def check(self):
        """Validate all fields; raises ValueError on any invalid setting."""
        # raise instead of assert: asserts are stripped under `python -O`,
        # which would silently disable this validation
        if not isinstance(self.trainer, TrainerParam):
            raise ValueError('trainer must be a TrainerParam()')
        if not isinstance(self.dataset, DatasetParam):
            raise ValueError('dataset must be a DatasetParam()')

        self.trainer.check()
        self.dataset.check()

        # torch seed must be a non-negative int
        if not isinstance(self.torch_seed, int) or self.torch_seed < 0:
            raise ValueError('torch seed should be an int >=0')

        if self.nn_define is not None and not isinstance(self.nn_define, dict):
            raise ValueError('nn define should be a dict defining model structures')
        if self.loss is not None and not isinstance(self.loss, dict):
            raise ValueError('loss parameter should be a loss config dict')
        if self.optimizer is not None and not isinstance(self.optimizer, dict):
            raise ValueError('optimizer parameter should be a config dict')
| 2,502 | 31.506494 | 107 | py |
FATE | FATE-master/python/federatedml/ensemble/basic_algorithms/decision_tree/tree_core/splitter.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
#
################################################################################
# =============================================================================
#
# =============================================================================
import numpy as np
import warnings
import functools
import random
from fate_arch.session import computing_session as session
from fate_arch.common import log
from federatedml.ensemble.basic_algorithms.decision_tree.tree_core.criterion import XgboostCriterion
from federatedml.util import consts
LOGGER = log.getLogger()
class SplitInfo(object):
    """Container describing one candidate (or chosen) tree-node split."""

    def __init__(self, sitename=consts.GUEST, best_fid=None, best_bid=None,
                 sum_grad=0, sum_hess=0, gain=None, missing_dir=1, mask_id=None, sample_count=-1):
        # where this split was computed (guest/host site tag)
        self.sitename = sitename
        # winning feature id / bin id and its gain
        self.best_fid = best_fid
        self.best_bid = best_bid
        self.gain = gain
        # left-child gradient/hessian sums and sample count
        self.sum_grad = sum_grad
        self.sum_hess = sum_hess
        self.sample_count = sample_count
        # 1 sends missing values right, -1 sends them left
        self.missing_dir = missing_dir
        # anonymized split identifier used by the host side
        self.mask_id = mask_id

    def __str__(self):
        template = ('(fid {} bid {}, sum_grad {}, sum_hess {}, gain {}, sitename {}, '
                    'missing dir {}, mask_id {}, sample_count {})\n')
        return template.format(self.best_fid, self.best_bid, self.sum_grad, self.sum_hess,
                               self.gain, self.sitename, self.missing_dir, self.mask_id,
                               self.sample_count)

    def __repr__(self):
        return self.__str__()
class Splitter(object):
def __init__(self, criterion_method, criterion_params=[0, 0], min_impurity_split=1e-2, min_sample_split=2,
min_leaf_node=1, min_child_weight=1):
LOGGER.info("splitter init!")
if not isinstance(criterion_method, str):
raise TypeError("criterion_method type should be str, but %s find" % (type(criterion_method).__name__))
if criterion_method == "xgboost":
if not criterion_params:
self.criterion = XgboostCriterion()
else:
try:
reg_lambda, reg_alpha = 0, 0
if isinstance(criterion_params, list):
reg_lambda = float(criterion_params[0])
reg_alpha = float(criterion_params[1])
self.criterion = XgboostCriterion(reg_lambda=reg_lambda, reg_alpha=reg_alpha)
except BaseException:
warnings.warn("criterion_params' first criterion_params should be numeric")
self.criterion = XgboostCriterion()
self.min_impurity_split = min_impurity_split
self.min_sample_split = min_sample_split
self.min_leaf_node = min_leaf_node
self.min_child_weight = min_child_weight
def _check_min_child_weight(self, l_h, r_h):
if isinstance(l_h, np.ndarray):
l_h, r_h = np.sum(l_h), np.sum(r_h)
rs = l_h >= self.min_child_weight and r_h >= self.min_child_weight
return rs
def _check_sample_num(self, l_cnt, r_cnt):
return l_cnt >= self.min_leaf_node and r_cnt >= self.min_leaf_node
    def find_split_single_histogram_guest(self, histogram, valid_features, sitename, use_missing, zero_as_missing,
                                          reshape_tuple=None):
        """
        Scan one node's histogram on the guest side and return the single best split.

        Parameters
        ----------
        histogram : per-feature list of bins; each bin is a (grad, hess, count)
            triple accumulated from the left, so the last bin of every feature
            holds the node totals. When use_missing is True an extra trailing
            bin belongs to missing-value samples.
        valid_features : bool sequence indexed by feature id; False features are skipped
        sitename : site tag recorded in the returned SplitInfo
        use_missing : bool, whether a dedicated missing-value bin exists
        zero_as_missing : bool; not read here — presumably consumed upstream when
            the histogram is built (TODO confirm)
        reshape_tuple : optional shape applied to histogram before scanning

        Returns
        -------
        SplitInfo
            best_fid/best_bid are None when no split beats min_impurity_split.
        """
        if reshape_tuple:
            histogram = histogram.reshape(reshape_tuple)

        # default values
        best_fid = None
        best_gain = self.min_impurity_split - consts.FLOAT_ZERO
        best_bid = None
        best_sum_grad_l = None
        best_sum_hess_l = None
        missing_bin = 0
        if use_missing:
            missing_bin = 1

        # in default, missing value going to right
        missing_dir = 1

        for fid in range(len(histogram)):

            if valid_features[fid] is False:
                continue
            bin_num = len(histogram[fid])
            if bin_num == 0 + missing_bin:
                continue

            # last bin contains sum values (cumsum from left)
            sum_grad = histogram[fid][bin_num - 1][0]
            sum_hess = histogram[fid][bin_num - 1][1]
            node_cnt = histogram[fid][bin_num - 1][2]

            # node too small to split at all — totals are identical for every
            # feature, so stop scanning entirely
            if node_cnt < self.min_sample_split:
                break

            if node_cnt < 1:  # avoid float error
                break

            # last bin will not participate in split find, so bin_num - 1
            for bid in range(bin_num - missing_bin - 1):

                # left gh (cumulative up to and including this bin)
                sum_grad_l = histogram[fid][bid][0]
                sum_hess_l = histogram[fid][bid][1]
                node_cnt_l = histogram[fid][bid][2]
                # right gh (totals minus left)
                sum_grad_r = sum_grad - sum_grad_l
                sum_hess_r = sum_hess - sum_hess_l
                node_cnt_r = node_cnt - node_cnt_l

                if self._check_min_child_weight(sum_hess_l, sum_hess_r) and self._check_sample_num(node_cnt_l,
                                                                                                  node_cnt_r):

                    gain = self.criterion.split_gain([sum_grad, sum_hess],
                                                     [sum_grad_l, sum_hess_l], [sum_grad_r, sum_hess_r])

                    # FLOAT_ZERO tolerance guards against float noise flipping the winner
                    if gain > self.min_impurity_split and gain > best_gain + consts.FLOAT_ZERO:
                        best_gain = gain
                        best_fid = fid
                        best_bid = bid
                        best_sum_grad_l = sum_grad_l
                        best_sum_hess_l = sum_hess_l
                        missing_dir = 1

                """ missing value handle: dispatch to left child"""
                if use_missing:
                    # add sum of samples with missing features to left
                    # (missing totals = last bin minus last real bin)
                    sum_grad_l += histogram[fid][-1][0] - histogram[fid][-2][0]
                    sum_hess_l += histogram[fid][-1][1] - histogram[fid][-2][1]
                    node_cnt_l += histogram[fid][-1][2] - histogram[fid][-2][2]

                    sum_grad_r -= histogram[fid][-1][0] - histogram[fid][-2][0]
                    sum_hess_r -= histogram[fid][-1][1] - histogram[fid][-2][1]
                    node_cnt_r -= histogram[fid][-1][2] - histogram[fid][-2][2]

                    # if have a better gain value, missing dir is left
                    if self._check_sample_num(node_cnt_l, node_cnt_r) and self._check_min_child_weight(sum_hess_l,
                                                                                                       sum_hess_r):
                        gain = self.criterion.split_gain([sum_grad, sum_hess],
                                                         [sum_grad_l, sum_hess_l], [sum_grad_r, sum_hess_r])

                        if gain > self.min_impurity_split and gain > best_gain + consts.FLOAT_ZERO:
                            best_gain = gain
                            best_fid = fid
                            best_bid = bid
                            best_sum_grad_l = sum_grad_l
                            best_sum_hess_l = sum_hess_l
                            missing_dir = -1

        splitinfo = SplitInfo(sitename=sitename, best_fid=best_fid, best_bid=best_bid,
                              gain=best_gain, sum_grad=best_sum_grad_l, sum_hess=best_sum_hess_l,
                              missing_dir=missing_dir)

        return splitinfo
def find_split(self, histograms, valid_features, partitions=1, sitename=consts.GUEST,
use_missing=False, zero_as_missing=False):
LOGGER.info("splitter find split of raw data")
histogram_table = session.parallelize(histograms, include_key=False, partition=partitions)
splitinfo_table = histogram_table.mapValues(lambda sub_hist:
self.find_split_single_histogram_guest(sub_hist,
valid_features,
sitename,
use_missing,
zero_as_missing))
tree_node_splitinfo = [None for i in range(len(histograms))]
for id, splitinfo in splitinfo_table.collect():
tree_node_splitinfo[id] = splitinfo
return tree_node_splitinfo
    def find_split_single_histogram_host(self, fid_with_histogram, valid_features, sitename, use_missing=False,
                                         zero_as_missing=False):
        """
        Enumerate every legal candidate split of one host-side feature histogram.

        Unlike the guest variant, no gain is computed here — the host returns
        all candidates with their left-side grad/hess sums so the guest (which
        holds the labels) can evaluate them.

        Parameters
        ----------
        fid_with_histogram : (fid, histogram) pair; bins are cumulative
            (grad, hess, count) triples with node totals in the last bin
        valid_features : bool sequence indexed by feature id
        sitename : site tag recorded in each SplitInfo
        use_missing : bool, whether the histogram carries a trailing missing-value bin
        zero_as_missing : bool; not read here — kept for interface symmetry
            with the guest-side variant

        Returns
        -------
        (list of SplitInfo, list of (grad, hess))
            both empty when the feature is invalid or the node is too small.
        """
        node_splitinfo = []
        node_grad_hess = []

        missing_bin = 0
        if use_missing:
            missing_bin = 1

        fid, histogram = fid_with_histogram
        if valid_features[fid] is False:
            return [], []
        bin_num = len(histogram)
        if bin_num == 0:
            return [], []

        # last bin holds the node totals (cumulative sums from the left)
        node_cnt = histogram[bin_num - 1][2]

        if node_cnt < self.min_sample_split:
            return [], []

        # the total bin and the missing bin never act as split points
        for bid in range(bin_num - missing_bin - 1):

            sum_grad_l = histogram[bid][0]
            sum_hess_l = histogram[bid][1]
            node_cnt_l = histogram[bid][2]

            node_cnt_r = node_cnt - node_cnt_l

            if node_cnt_l >= self.min_leaf_node and node_cnt_r >= self.min_leaf_node:
                # candidate with missing values dispatched to the right child
                splitinfo = SplitInfo(sitename=sitename, best_fid=fid,
                                      best_bid=bid, sum_grad=sum_grad_l, sum_hess=sum_hess_l,
                                      missing_dir=1)

                node_splitinfo.append(splitinfo)
                node_grad_hess.append((sum_grad_l, sum_hess_l))

                if use_missing:
                    # same threshold, missing values dispatched to the left child:
                    # fold the missing bin's totals into the left side
                    sum_grad_l += histogram[-1][0] - histogram[-2][0]
                    sum_hess_l += histogram[-1][1] - histogram[-2][1]
                    node_cnt_l += histogram[-1][2] - histogram[-2][2]

                    splitinfo = SplitInfo(sitename=sitename, best_fid=fid,
                                          best_bid=bid, sum_grad=sum_grad_l, sum_hess=sum_hess_l,
                                          missing_dir=-1)

                    node_splitinfo.append(splitinfo)
                    node_grad_hess.append((sum_grad_l, sum_hess_l))

        return node_splitinfo, node_grad_hess
def construct_feature_split_points(self, fid_with_histogram, valid_features, sitename, use_missing,
                                   left_missing_dir, right_missing_dir, mask_id_mapping):
    """Build anonymized split candidates for one host feature.

    Similar to ``find_split_single_histogram_host``, but each candidate is
    tagged with a mask id from ``mask_id_mapping`` instead of the raw
    (fid, bid) pair, and its missing direction is randomly drawn from the
    caller-provided pools (obfuscation of the true -1/+1 direction —
    confirm the decoding convention with the caller).

    :return: ``(feature_split_info, g_h_sum_info)`` where the second element
        is a SplitInfo carrying the node-level grad/hess sums and sample
        count, or ``([], None)`` when the feature is invalid / node too small
    """
    feature_split_info = []

    missing_bin = 0
    if use_missing:
        missing_bin = 1  # trailing bin is the missing-value bucket

    fid, histogram = fid_with_histogram
    if valid_features[fid] is False:
        return [], None
    bin_num = len(histogram)
    if bin_num == 0:
        return [], None

    # cumulative histogram: the last bin holds the per-node totals
    node_cnt = histogram[bin_num - 1][2]
    if node_cnt < self.min_sample_split:
        return [], None

    for bid in range(bin_num - missing_bin - 1):
        sum_grad_l = histogram[bid][0]
        sum_hess_l = histogram[bid][1]
        node_cnt_l = histogram[bid][2]
        node_cnt_r = node_cnt - node_cnt_l
        # anonymized identifier for this (feature, bin) pair
        mask_id = mask_id_mapping[(fid, bid)]
        if self._check_sample_num(node_cnt_l, node_cnt_r):
            missing_dir = np.random.choice(right_missing_dir)
            splitinfo = SplitInfo(sitename=sitename, sum_grad=sum_grad_l, sum_hess=sum_hess_l,
                                  missing_dir=missing_dir, mask_id=mask_id, sample_count=node_cnt_l)  # 1
            feature_split_info.append(splitinfo)
            if use_missing:
                # shift the missing bucket into the left side and emit the
                # left-direction variant of the same split
                sum_grad_l += histogram[-1][0] - histogram[-2][0]
                sum_hess_l += histogram[-1][1] - histogram[-2][1]
                node_cnt_l += histogram[-1][2] - histogram[-2][2]
                missing_dir = np.random.choice(left_missing_dir)
                splitinfo = SplitInfo(sitename=sitename, sum_grad=sum_grad_l, sum_hess=sum_hess_l,
                                      missing_dir=missing_dir, mask_id=mask_id, sample_count=node_cnt_l)  # -1
                feature_split_info.append(splitinfo)

    # split info contains g/h sum and node cnt
    g_sum, h_sum = histogram[-1][0], histogram[-1][1]
    g_h_sum_info = SplitInfo(sum_grad=g_sum, sum_hess=h_sum, sample_count=node_cnt)
    return feature_split_info, g_h_sum_info
def construct_feature_split_points_batches(self, kv_iter, valid_features, sitename,
                                           use_missing, mask_id_mapping, left_missing_dir,
                                           right_missing_dir, batch_size,
                                           cipher_compressor=None,
                                           shuffle_random_seed=None):
    """mapPartitions function: build, shuffle and batch host split candidates.

    ``kv_iter`` yields ((node_id, feature_id), histogram) pairs.  Per node,
    the candidates of every feature in this partition are concatenated,
    optionally shuffled, cut into ``batch_size`` batches and optionally
    packed with ``cipher_compressor``.

    :return: list of (key, value) pairs where key is (node_id, batch_tag)
        and value is either (node_id, compressed_packages) or
        (split_info_list, g_h_sum_info)
    """
    result_list = []
    split_info_dict = {}
    g_h_sum_dict = {}

    partition_key = None
    for key, value in kv_iter:
        nid, fid = key
        if partition_key is None:
            # first (nid, fid) seen here; makes batch keys unique per partition
            partition_key = str((nid, fid))
        split_info_list, g_h_sum_info = self.construct_feature_split_points(value, valid_features, sitename,
                                                                            use_missing,
                                                                            left_missing_dir, right_missing_dir,
                                                                            mask_id_mapping)
        # collect all splitinfo of a node
        if nid not in split_info_dict:
            split_info_dict[nid] = []
        split_info_dict[nid] += split_info_list
        if nid not in g_h_sum_dict:
            if g_h_sum_info is not None:
                g_h_sum_dict[nid] = g_h_sum_info

    # cut split info into batches
    for nid in split_info_dict:
        split_info_list = split_info_dict[nid]
        if len(split_info_list) == 0:
            result_list.append(
                ((nid, partition_key + '-empty'), []))  # add an empty split info list if no split info available
            continue
        if shuffle_random_seed:
            # NOTE(review): re-seeding with a fixed seed presumably keeps the
            # shuffle order reproducible across partitions — confirm intent
            random.seed(shuffle_random_seed)
            random.shuffle(split_info_list)

        # LOGGER.debug('nid {} mask id list {}'.format(nid, shuffle_list))
        LOGGER.debug('split info len is {}'.format(len(split_info_list)))
        batch_start_idx = range(0, len(split_info_list), batch_size)
        batch_idx = 0
        for i in batch_start_idx:
            key = (nid, (partition_key + '-{}'.format(batch_idx)))  # nid, batch_id
            batch_idx += 1
            g_h_sum_info = g_h_sum_dict[nid]
            batch_split_info_list = split_info_list[i: i + batch_size]
            # compress ciphers
            if cipher_compressor is not None:
                compressed_packages = cipher_compressor.compress_split_info(batch_split_info_list, g_h_sum_info)
                result_list.append((key, (nid, compressed_packages)))
            else:
                result_list.append((key, (batch_split_info_list, g_h_sum_info)))

    return result_list
def _find_host_best_splits_map_func(self, value, decrypter, gh_packer=None,
                                    host_sitename=consts.HOST):
    """Pick the best candidate out of one batch of host split infos
    (mapValues function).

    ``value`` is either ``(split_info_list, g_h_sum_info)`` with encrypted
    grad/hess values (decrypted one by one via ``decrypter``), or
    ``(nid, compressed_package)`` when a ``gh_packer`` is supplied (batch
    decompress-and-unpack, with the node g/h sums at the last index).

    :return: ``(best_idx, best_split_info)``; ``best_idx`` is -1 when no
        candidate beats ``min_impurity_split``
    """
    best_gain = self.min_impurity_split - consts.FLOAT_ZERO
    best_idx = -1
    # sentinel result returned when nothing qualifies
    best_split_info = SplitInfo(sitename=host_sitename, best_fid=-1, best_bid=-1, gain=best_gain,
                                mask_id=-1)
    if len(value) == 0:  # this node can not be further split, because split info list is empty
        return best_idx, best_split_info

    if gh_packer is None:
        split_info_list, g_h_info = value
        for split_info in split_info_list:
            split_info.sum_grad, split_info.sum_hess = decrypter.decrypt(split_info.sum_grad), decrypter.decrypt(
                split_info.sum_hess)
        g_sum, h_sum = decrypter.decrypt(g_h_info.sum_grad), decrypter.decrypt(g_h_info.sum_hess)
    else:
        nid, package = value
        split_info_list = gh_packer.decompress_and_unpack(package)
        g_sum, h_sum = split_info_list[-1].sum_grad, split_info_list[-1].sum_hess  # g/h sum is at last index
        split_info_list = split_info_list[:-1]

    for idx, split_info in enumerate(split_info_list):
        # right-child statistics follow from the node totals
        l_g, l_h = split_info.sum_grad, split_info.sum_hess
        r_g, r_h = g_sum - l_g, h_sum - l_h
        gain = self.split_gain(g_sum, h_sum, l_g, l_h, r_g, r_h)
        if self._check_min_child_weight(l_h, r_h) and \
                gain > self.min_impurity_split and gain > best_gain + consts.FLOAT_ZERO:
            new_split_info = SplitInfo(sitename=host_sitename, best_fid=split_info.best_fid,
                                       best_bid=split_info.best_bid, gain=gain,
                                       sum_grad=l_g, sum_hess=l_h, missing_dir=split_info.missing_dir,
                                       mask_id=split_info.mask_id)
            best_gain = gain
            best_idx = idx
            best_split_info = new_split_info

    best_split_info.gain = best_gain
    return best_idx, best_split_info
@staticmethod
def key_sort_func(a, b):
key_1, key_2 = a[0], b[0]
if key_1[0] == key_2[0]:
if key_1[1] > key_2[1]:
return 1
else:
return -1
else:
if key_1[0] > key_2[0]:
return 1
else:
return -1
def find_host_best_split_info(self, host_split_info_table, host_sitename, decrypter, gh_packer=None):
    """Reduce batched host split candidates to one best SplitInfo per node.

    Runs ``_find_host_best_splits_map_func`` over every batch, then keeps,
    per node id, the batch winner with the highest qualifying gain.

    :return: dict mapping node_id -> best SplitInfo (a sentinel with
        best_fid/best_bid == -1 when no batch produced a valid split)
    """
    map_func = functools.partial(self._find_host_best_splits_map_func,
                                 decrypter=decrypter,
                                 host_sitename=host_sitename,
                                 gh_packer=gh_packer
                                 )
    host_feature_best_split_table = host_split_info_table.mapValues(map_func)
    feature_best_splits = list(host_feature_best_split_table.collect())
    # sort by (node_id, batch_key) so the reduction order is deterministic
    sorted_list = sorted(feature_best_splits, key=functools.cmp_to_key(self.key_sort_func))

    node_best_splits = {}
    for key, result in sorted_list:
        node_id, fid = key
        best_idx, split_info = result
        if node_id not in node_best_splits:
            # seed with a sentinel that any valid split will beat
            node_best_splits[node_id] = SplitInfo(sitename=host_sitename, best_bid=-1, best_fid=-1,
                                                  gain=self.min_impurity_split - consts.FLOAT_ZERO)
        if best_idx == -1:
            continue
        elif split_info.gain > self.min_impurity_split and split_info.gain > node_best_splits[node_id].gain \
                + consts.FLOAT_ZERO:
            node_best_splits[node_id] = split_info

    return node_best_splits
def host_prepare_split_points(self, histograms, valid_features, mask_id_mapping, use_missing, left_missing_dir,
                              right_missing_dir, sitename=consts.HOST, batch_size=consts.MAX_SPLITINFO_TO_COMPUTE,
                              cipher_compressor=None, shuffle_random_seed=None):
    """Turn host feature histograms into batched (optionally compressed)
    split-point candidates.

    Binds all search options onto ``construct_feature_split_points_batches``
    and applies it partition-wise over the histogram table.
    """
    LOGGER.info("splitter find split of host")
    LOGGER.debug('missing dir mask dict {}, {}'.format(left_missing_dir, right_missing_dir))

    batch_builder = functools.partial(
        self.construct_feature_split_points_batches,
        valid_features=valid_features,
        sitename=sitename,
        use_missing=use_missing,
        left_missing_dir=left_missing_dir,
        right_missing_dir=right_missing_dir,
        mask_id_mapping=mask_id_mapping,
        batch_size=batch_size,
        cipher_compressor=cipher_compressor,
        shuffle_random_seed=shuffle_random_seed,
    )
    return histograms.mapPartitions(batch_builder, use_previous_behavior=False)
def find_split_host(self, histograms, valid_features, node_map, sitename=consts.HOST,
                    use_missing=False, zero_as_missing=False):
    """Collect every host split candidate, grouped by tree-node index.

    Applies ``find_split_single_histogram_host`` to each (node, feature)
    histogram and regroups the results using ``node_map`` (node id ->
    position in the output lists).

    :return: (list of per-node SplitInfo lists, BigObjectTransfer wrapping
        the matching encrypted grad/hess pairs)
    """
    LOGGER.info("splitter find split of host")
    LOGGER.debug('node map len is {}'.format(len(node_map)))
    tree_node_splitinfo = [[] for i in range(len(node_map))]
    encrypted_node_grad_hess = [[] for i in range(len(node_map))]
    host_splitinfo_table = histograms.mapValues(lambda fid_with_hist:
                                                self.find_split_single_histogram_host(fid_with_hist, valid_features,
                                                                                      sitename,
                                                                                      use_missing,
                                                                                      zero_as_missing))

    # node_id, map it to node index
    for (idx, fid), splitinfo in host_splitinfo_table.collect():
        idx = node_map[idx]
        tree_node_splitinfo[idx].extend(splitinfo[0])
        encrypted_node_grad_hess[idx].extend(splitinfo[1])

    return tree_node_splitinfo, BigObjectTransfer(encrypted_node_grad_hess)
def node_gain(self, grad, hess):
    """Impurity gain of a node given its grad/hess sums (delegates to the criterion)."""
    return self.criterion.node_gain(grad, hess)
def node_weight(self, grad, hess):
    """Leaf output value for a node given its grad/hess sums (delegates to the criterion)."""
    return self.criterion.node_weight(grad, hess)
def split_gain(self, sum_grad, sum_hess, sum_grad_l, sum_hess_l, sum_grad_r, sum_hess_r):
    """Gain of splitting a node (parent stats) into left/right children, per the criterion."""
    gain = self.criterion.split_gain([sum_grad, sum_hess],
                                     [sum_grad_l, sum_hess_l], [sum_grad_r, sum_hess_r])
    return gain
class BigObjectTransfer:
    """Thin holder used to move a large object through the transfer layer."""

    def __init__(self, data):
        # keep a reference only; no copy is made
        self._obj = data

    def get_data(self):
        """Return the wrapped payload unchanged."""
        return self._obj
| 23,387 | 44.063584 | 122 | py |
FATE | FATE-master/python/federatedml/protobuf/homo_model_convert/homo_model_convert.py | #
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import inspect
import os
from federatedml.util import LOGGER
from .component_converter import ComponentConverterBase
SKLEARN_FILENAME = "sklearn.joblib"
PYTORCH_FILENAME = "pytorch.pth"
TF_DIRNAME = "tensorflow_saved_model"
LGB_FILENAME = "lgb.txt"
def _get_component_converter(module_name: str,
                             framework_name: str):
    """Locate the converter able to handle ``module_name`` for a framework.

    Normalizes ``framework_name`` to one of the supported sub-package names,
    then scans every module in that sub-package for subclasses of
    ``ComponentConverterBase`` whose declared target modules include
    ``module_name`` (case-insensitive).

    :return: ``(framework_name, converter_instance)`` or ``(None, None)``
        when no converter matches.
    """
    if framework_name in ["tensorflow", "tf", "tf_keras"]:
        framework_name = "tf_keras"
    elif framework_name in ["pytorch", "torch"]:
        framework_name = "pytorch"
    elif framework_name in ["sklearn", "scikit-learn"]:
        framework_name = "sklearn"
    elif framework_name in ['lightgbm']:
        framework_name = 'lightgbm'

    package_name = "." + framework_name
    parent_package = importlib.import_module(package_name, __package__)
    parent_package_path = os.path.dirname(os.path.realpath(parent_package.__file__))

    for f in os.listdir(parent_package_path):
        if f.startswith('.') or f.startswith('_'):
            continue
        if not f.endswith('.py'):
            continue
        # Fix: the previous code used f.rstrip('.py'), which strips any run
        # of trailing '.', 'p' or 'y' characters (e.g. "copy.py" -> "co"),
        # not the ".py" suffix; slice the 3-char suffix off instead.
        proto_module = importlib.import_module("." + f[:-3], parent_package.__name__)
        for name, obj in inspect.getmembers(proto_module):
            if inspect.isclass(obj) and issubclass(obj, ComponentConverterBase):
                for module in obj.get_target_modules():
                    if module.lower() == module_name.lower():
                        return framework_name, obj()
    return None, None
def get_default_target_framework(model_contents: dict,
                                 module_name: str):
    """
    Returns the name of a supported ML framework based on the
    original FATE model module name and model contents.

    :param model_contents: the model content of the FATE model
    :param module_name: The module name, typically as HomoXXXX.
    :return: the corresponding framework name that this model can be converted to,
             or None for unsupported modules.
    """
    if module_name == "HomoLR":
        return "sklearn"
    if module_name == 'HomoNN':
        # in FATE-1.10 currently support pytorch only
        return "pytorch"
    if module_name.lower() == 'homosecureboost':
        return 'lightgbm'
    LOGGER.debug(
        f"Module {module_name} is not a supported homogeneous model")
    return None
def model_convert(model_contents: dict,
                  module_name: str,
                  framework_name=None):
    """Convert a Homo model component into format of a common ML framework

    :param model_contents: The model dict un-serialized from the model protobuf.
    :param module_name: The module name, typically as HomoXXXX.
    :param framework_name: The wanted framework, e.g. "sklearn", "pytorch", etc.
                           If not specified, the target framework will be chosen
                           automatically.
    :return: the converted framework name and a instance of the model object from
             the specified framework; (None, None) when conversion is impossible.
    """
    # choose the framework automatically when the caller did not specify one
    chosen_framework = framework_name or get_default_target_framework(
        model_contents, module_name)
    if not chosen_framework:
        return None, None

    target_framework, component_converter = _get_component_converter(
        module_name, chosen_framework)
    if not component_converter:
        LOGGER.warn(
            f"Module {module_name} cannot be converted to framework {chosen_framework}")
        return None, None

    LOGGER.info(
        f"Converting {module_name} module to a model of framework {target_framework}")
    return target_framework, component_converter.convert(model_contents)
def _get_model_saver_loader(framework_name: str):
    """Return ``(save_fn, load_fn, default_filename)`` for a framework name.

    Framework packages are imported lazily so only the requested one needs
    to be installed.

    :raises NotImplementedError: for unsupported framework names.
    """
    if framework_name in ["sklearn", "scikit-learn"]:
        import joblib
        return joblib.dump, joblib.load, SKLEARN_FILENAME
    if framework_name in ["pytorch", "torch"]:
        import torch
        return torch.save, torch.load, PYTORCH_FILENAME
    if framework_name in ["tensorflow", "tf", "tf_keras"]:
        import tensorflow
        return tensorflow.saved_model.save, tensorflow.saved_model.load, TF_DIRNAME
    if framework_name in ['lightgbm']:
        from federatedml.protobuf.homo_model_convert.lightgbm.gbdt import save_lgb, load_lgb
        return save_lgb, load_lgb, LGB_FILENAME
    raise NotImplementedError("save method for framework: {} is not implemented"
                              .format(framework_name))
def save_converted_model(model_object,
                         framework_name: str,
                         base_dir: str):
    """Save the model into target destination

    :param model_object: the model object
    :param framework_name: name of the framework of the model
    :param base_dir: the base directory to save the model file
    :return: local file/folder path
    """
    save_fn, _, default_name = _get_model_saver_loader(framework_name)
    target_path = os.path.join(base_dir, default_name)
    save_fn(model_object, target_path)
    LOGGER.info(f"Saved {framework_name} model to {target_path}")
    return target_path
def load_converted_model(framework_name: str,
                         base_dir: str):
    """Load a model from the specified directory previously used to save the converted model

    :param framework_name: name of the framework of the model
    :param base_dir: the base directory to save the model file
    :return: model object of the specified framework
    :raises FileNotFoundError: when no previously-saved model exists there
    """
    _, load_fn, default_name = _get_model_saver_loader(framework_name)
    model_path = os.path.join(base_dir, default_name)
    if not os.path.exists(model_path):
        raise FileNotFoundError(
            "expected file or folder {} doesn't exist".format(model_path))
    return load_fn(model_path)
| 6,574 | 38.136905 | 94 | py |
FATE | FATE-master/python/federatedml/protobuf/homo_model_convert/test/homo_nn_test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import tempfile
import torch as t
from collections import OrderedDict
from federatedml.nn.backend.utils.common import get_torch_model_bytes
from federatedml.protobuf.homo_model_convert.homo_model_convert import model_convert, save_converted_model
from federatedml.protobuf.generated.homo_nn_model_meta_pb2 import HomoNNMeta
from federatedml.protobuf.generated.homo_nn_model_param_pb2 import HomoNNParam
class FakeModule(t.nn.Module):
    """Minimal torch module used as a conversion-test fixture.

    ``forward`` is an identity function that also prints the linear layer,
    exactly as the fixture always behaved.
    """

    def __init__(self):
        super().__init__()
        self.fc = t.nn.Linear(100, 10)
        self.transformer = t.nn.Transformer()

    def forward(self, x):
        print(self.fc)
        return x
class TestHomoNNConverter(unittest.TestCase):
    """Round-trip tests: HomoNN protobuf -> pytorch state dict -> saved .pth file."""

    def _get_param_meta(self, torch_model):
        # wrap a model's state dict into HomoNN param/meta protobuf messages
        param = HomoNNParam()
        meta = HomoNNMeta()
        # save param
        param.model_bytes = get_torch_model_bytes({'model': torch_model.state_dict()})
        return param, meta

    def setUp(self):
        # build several pytorch models of varying complexity together with
        # their serialized protobuf counterparts
        self.param_list = []
        self.meta_list = []
        self.model_list = []

        # generate some pytorch model
        model = t.nn.Sequential(
            t.nn.Linear(10, 10),
            t.nn.ReLU(),
            t.nn.LSTM(input_size=10, hidden_size=10),
            t.nn.Sigmoid()
        )
        self.model_list.append(model)
        param, meta = self._get_param_meta(model)
        self.param_list.append(param)
        self.meta_list.append(meta)

        model = t.nn.Sequential(t.nn.ReLU())
        self.model_list.append(model)
        param, meta = self._get_param_meta(model)
        self.param_list.append(param)
        self.meta_list.append(meta)

        fake_model = FakeModule()
        self.model_list.append(fake_model)
        param, meta = self._get_param_meta(fake_model)
        self.param_list.append(param)
        self.meta_list.append(meta)

    def test_pytorch_converter(self):
        # converting must yield a loadable state dict and produce a .pth file
        for param, meta, origin_model in zip(self.param_list, self.meta_list, self.model_list):
            target_framework, model = self._do_convert(param, meta)
            self.assertTrue(target_framework == "pytorch")
            self.assertTrue(isinstance(model['model'], OrderedDict))  # state dict
            origin_model.load_state_dict(model['model'])  # can load state dict
            with tempfile.TemporaryDirectory() as d:
                dest = save_converted_model(model, target_framework, d)
                self.assertTrue(os.path.isfile(dest))
                self.assertTrue(dest.endswith(".pth"))

    @staticmethod
    def _do_convert(model_param, model_meta):
        # invoke the generic conversion entry point the way FATE flow would
        return model_convert(model_contents={
            'HomoNNParam': model_param,
            'HomoNNMeta': model_meta
        },
            module_name='HomoNN')
if __name__ == '__main__':
    # allow running this test module directly: `python homo_nn_test.py`
    unittest.main()
| 3,476 | 33.425743 | 106 | py |
FATE | FATE-master/python/federatedml/protobuf/homo_model_convert/tf_keras/nn.py | #
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import tempfile
import tensorflow
import zipfile
from ..component_converter import ComponentConverterBase
class NNComponentConverter(ComponentConverterBase):
    """Converts a HomoNN FATE model into a tf.keras model."""

    @staticmethod
    def get_target_modules():
        # FATE module names this converter can handle
        return ['HomoNN']

    def convert(self, model_dict):
        """Rebuild a keras model from the serialized SavedModel bytes.

        Unzips ``saved_model_bytes`` into a scratch directory and loads it
        with the keras loader, falling back to the TF1-style experimental
        loader for older models.

        :param model_dict: dict holding the HomoNN param/meta protobuf objects
        :raises ValueError: if the model was not trained with an nn/keras config
        """
        param_obj = model_dict["HomoNNModelParam"]
        meta_obj = model_dict["HomoNNModelMeta"]
        if meta_obj.params.config_type != "nn" and meta_obj.params.config_type != "keras":
            # Fix: the message previously read meta_obj.config_type, which does
            # not exist (the guard above shows the valid attribute path is
            # meta_obj.params.config_type), so raising would itself crash
            # with an AttributeError instead of the intended ValueError.
            raise ValueError("Invalid config type: {}".format(meta_obj.params.config_type))

        with tempfile.TemporaryDirectory() as tmp_path:
            # unpack the zipped SavedModel payload into the scratch directory
            with io.BytesIO(param_obj.saved_model_bytes) as bytes_io:
                with zipfile.ZipFile(bytes_io, 'r', zipfile.ZIP_DEFLATED) as f:
                    f.extractall(tmp_path)

            try:
                model = tensorflow.keras.models.load_model(tmp_path)
            except Exception as e:
                # older SavedModels need the TF1 experimental loader
                model = tensorflow.compat.v1.keras.experimental.load_from_saved_model(tmp_path)

        return model
| 1,695 | 35.869565 | 103 | py |
FATE | FATE-master/python/federatedml/protobuf/homo_model_convert/pytorch/nn.py | #
# Copyright 2021 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import torch as t
import tempfile
from ..component_converter import ComponentConverterBase
class NNComponentConverter(ComponentConverterBase):
    """Converts a HomoNN FATE model into plain pytorch objects."""

    @staticmethod
    def get_target_modules():
        # FATE module names this converter can handle
        return ['HomoNN']

    def convert(self, model_dict):
        """Deserialize the torch payload stored in the HomoNN param protobuf.

        Writes the raw bytes to a temporary file and hands them to
        ``torch.load``, returning whatever object was saved during training.
        """
        param_obj = model_dict["HomoNNParam"]
        meta_obj = model_dict["HomoNNMeta"]

        if not hasattr(param_obj, 'model_bytes'):
            raise ValueError("Did not find model_bytes in model param protobuf")

        with tempfile.TemporaryFile() as buffer:
            buffer.write(param_obj.model_bytes)
            buffer.seek(0)
            loaded = t.load(buffer)

        return loaded
| 1,278 | 28.744186 | 80 | py |
FATE | FATE-master/python/federatedml/framework/homo/aggregator/secure_aggregator.py | from federatedml.framework.homo.blocks import RandomPaddingCipherClient, RandomPaddingCipherServer, PadsCipher, RandomPaddingCipherTransVar
from federatedml.framework.homo.aggregator.aggregator_base import AggregatorBaseClient, AutoSuffix, AggregatorBaseServer
import numpy as np
from federatedml.framework.weights import Weights, NumpyWeights
from federatedml.util import LOGGER
import torch as t
from typing import Union, List
from fate_arch.computing._util import is_table
from federatedml.util import consts
AGG_TYPE = ['weighted_mean', 'sum', 'mean']
class SecureAggregatorClient(AggregatorBaseClient):
    """Client (guest/host) side of FATE's secure aggregation.

    Scales local models/losses by a normalized aggregation weight, optionally
    masks them with random paddings negotiated via RandomPaddingCipherClient
    (so the server only learns the sum of all clients' contributions), sends
    them to the server, and receives the aggregated results back.
    """

    def __init__(self, secure_aggregate=True, aggregate_type='weighted_mean', aggregate_weight=1.0,
                 communicate_match_suffix=None):
        super(SecureAggregatorClient, self).__init__(
            communicate_match_suffix=communicate_match_suffix)
        self.secure_aggregate = secure_aggregate
        # auto-increasing suffix generators keep each federation round's tag unique
        self.suffix = {
            "local_loss": AutoSuffix("local_loss"),
            "agg_loss": AutoSuffix("agg_loss"),
            "local_model": AutoSuffix("local_model"),
            "agg_model": AutoSuffix("agg_model"),
            "converge_status": AutoSuffix("converge_status")
        }

        # init secure aggregate random padding:
        if self.secure_aggregate:
            self._random_padding_cipher: PadsCipher = RandomPaddingCipherClient(
                trans_var=RandomPaddingCipherTransVar(prefix=communicate_match_suffix)).create_cipher()
            LOGGER.info('initialize secure aggregator done')

        # compute weight
        assert aggregate_type in AGG_TYPE, 'aggregate type must in {}'.format(
            AGG_TYPE)
        if aggregate_type == 'weighted_mean':
            aggregate_weight = aggregate_weight
        elif aggregate_type == 'mean':
            aggregate_weight = 1

        # exchange weights with the server to normalize: local / total
        self.send(aggregate_weight, suffix=('agg_weight', ))
        self._weight = aggregate_weight / \
            self.get(suffix=('agg_weight', ))[0]  # local weight / total weight

        if aggregate_type == 'sum':  # reset _weight
            self._weight = 1

        self._set_table_amplify_factor = False

        LOGGER.debug('aggregate compute weight is {}'.format(self._weight))

    def _process_model(self, model):
        """Turn any supported model object into a weighted (and, when secure
        aggregation is on, padded/encrypted) payload for the server.

        Supported: numpy array / Weights, a FATE distributed table, a torch
        Module (trainable parameters only), a torch Optimizer (param groups),
        or a list of numpy arrays.
        """
        to_agg = None

        if isinstance(model, np.ndarray) or isinstance(model, Weights):
            if isinstance(model, np.ndarray):
                to_agg = NumpyWeights(model * self._weight)
            else:
                to_agg = model * self._weight

            if self.secure_aggregate:
                to_agg: Weights = to_agg.encrypted(
                    self._random_padding_cipher)
            return to_agg

        # is a FATE distributed Table
        elif is_table(model):
            model = model.mapValues(lambda x: x * self._weight)
            if self.secure_aggregate:
                # NOTE(review): _set_table_amplify_factor is never flipped to
                # True, so the amplify factor is re-set on every call — confirm
                # whether that is intended.
                if not self._set_table_amplify_factor:
                    self._random_padding_cipher.set_amplify_factor(
                        consts.SECURE_AGG_AMPLIFY_FACTOR)
                model = self._random_padding_cipher.encrypt_table(model)
            return model

        if isinstance(model, t.nn.Module):
            # only parameters with requires_grad are aggregated
            parameters = list(model.parameters())
            tmp_list = [[np.array(p.cpu().detach().tolist()) for p in parameters if p.requires_grad]]
            LOGGER.debug('Aggregate trainable parameters: {}/{}'.format(len(tmp_list[0]), len(parameters)))
        elif isinstance(model, t.optim.Optimizer):
            tmp_list = [[np.array(p.cpu().detach().tolist()) for p in group["params"]]
                        for group in model.param_groups]
        elif isinstance(model, list):
            for p in model:
                assert isinstance(
                    p, np.ndarray), 'expecting List[np.ndarray], but got {}'.format(p)
            tmp_list = [model]

        if self.secure_aggregate:
            to_agg = [
                [
                    NumpyWeights(
                        arr *
                        self._weight).encrypted(
                        self._random_padding_cipher) for arr in arr_list] for arr_list in tmp_list]
        else:
            to_agg = [[arr * self._weight for arr in arr_list]
                      for arr_list in tmp_list]
        return to_agg

    def _recover_model(self, model, agg_model):
        """Write the aggregated result back into the same shape/object the
        caller originally supplied (mirror of ``_process_model``)."""
        if isinstance(model, np.ndarray):
            return agg_model.unboxed
        elif isinstance(model, Weights):
            return agg_model
        elif is_table(agg_model):
            return agg_model
        else:
            if self.secure_aggregate:
                # paddings cancelled out during server-side summation;
                # unbox back to plain numpy arrays
                agg_model = [[np_weight.unboxed for np_weight in arr_list]
                             for arr_list in agg_model]
            if isinstance(model, t.nn.Module):
                # copy aggregated weights into the trainable parameters in place
                for agg_p, p in zip(agg_model[0], [p for p in model.parameters() if p.requires_grad]):
                    p.data.copy_(t.Tensor(agg_p))
                return model
            elif isinstance(model, t.optim.Optimizer):
                for agg_group, group in zip(agg_model, model.param_groups):
                    for agg_p, p in zip(agg_group, group["params"]):
                        p.data.copy_(t.Tensor(agg_p))
                return model
            else:
                return agg_model

    def send_loss(self, loss, suffix=tuple()):
        """Send this party's (weighted) loss to the server."""
        suffix = self._get_suffix('local_loss', suffix)
        assert isinstance(loss, float) or isinstance(
            loss, np.ndarray), 'illegal loss type {}, loss should be a float or a np array'.format(type(loss))
        self.send(loss * self._weight, suffix)

    def send_model(self,
                   model: Union[np.ndarray,
                                Weights,
                                List[np.ndarray],
                                t.nn.Module,
                                t.optim.Optimizer],
                   suffix=tuple()):
        """Sending model to arbiter for aggregation

        Parameters
        ----------
        model : model can be:
                A numpy array
                A Weight instance(or subclass of Weights), see federatedml.framework.weights
                List of numpy array
                A pytorch model, is the subclass of torch.nn.Module
                A pytorch optimizer, will extract param group from this optimizer as weights to aggregate
        suffix : sending suffix, by default tuple(), can be None or tuple contains str&number. If None, will automatically generate suffix
        """
        suffix = self._get_suffix('local_model', suffix)
        # judge model type
        to_agg_model = self._process_model(model)
        self.send(to_agg_model, suffix)

    def get_aggregated_model(self, suffix=tuple()):
        """Receive the aggregated model payload from the server."""
        suffix = self._get_suffix("agg_model", suffix)
        return self.get(suffix)[0]

    def get_aggregated_loss(self, suffix=tuple()):
        """Receive the aggregated loss from the server."""
        suffix = self._get_suffix("agg_loss", suffix)
        return self.get(suffix)[0]

    def get_converge_status(self, suffix=tuple()):
        """Receive the server-side convergence decision."""
        suffix = self._get_suffix("converge_status", suffix)
        return self.get(suffix)[0]

    def model_aggregation(self, model, suffix=tuple()):
        """One full round: send local model, receive aggregate, restore shape."""
        self.send_model(model, suffix=suffix)
        agg_model = self.get_aggregated_model(suffix=suffix)
        return self._recover_model(model, agg_model)

    def loss_aggregation(self, loss, suffix=tuple()):
        """One full round: send local loss, receive convergence status."""
        self.send_loss(loss, suffix=suffix)
        converge_status = self.get_converge_status(suffix=suffix)
        return converge_status
class SecureAggregatorServer(AggregatorBaseServer):
    """Server (arbiter) side of FATE's secure aggregation.

    Collects clients' weighted (optionally padding-masked) models/losses,
    sums them — the pairwise paddings cancel in the sum — and broadcasts
    the aggregated result and convergence status back.
    """

    def __init__(self, secure_aggregate=True, communicate_match_suffix=None):
        super(SecureAggregatorServer, self).__init__(
            communicate_match_suffix=communicate_match_suffix)
        # auto-increasing suffix generators keep each federation round's tag unique
        self.suffix = {
            "local_loss": AutoSuffix("local_loss"),
            "agg_loss": AutoSuffix("agg_loss"),
            "local_model": AutoSuffix("local_model"),
            "agg_model": AutoSuffix("agg_model"),
            "converge_status": AutoSuffix("converge_status")
        }
        self.secure_aggregate = secure_aggregate
        if self.secure_aggregate:
            # coordinate the clients' pairwise random-padding key exchange
            RandomPaddingCipherServer(trans_var=RandomPaddingCipherTransVar(
                prefix=communicate_match_suffix)).exchange_secret_keys()
            LOGGER.info('initialize secure aggregator done')

        # gather every client's aggregation weight and broadcast the total,
        # letting each client normalize its own weight
        agg_weights = self.collect(suffix=('agg_weight', ))
        sum_weights = 0
        for i in agg_weights:
            sum_weights += i
        self.broadcast(sum_weights, suffix=('agg_weight', ))

    def aggregate_model(self, suffix=None, party_idx=-1):
        """Collect client model payloads for one round and sum them.

        Handles the three payload shapes ``_process_model`` can produce on
        the client: Weights/NumpyWeights, a FATE distributed table, or
        nested lists of arrays/NumpyWeights.
        """
        # get suffix
        # NOTE(review): default here is None while sibling methods default to
        # tuple() — confirm _get_suffix treats both the same.
        suffix = self._get_suffix('local_model', suffix)
        # recv params for aggregation
        models = self.collect(suffix=suffix, party_idx=party_idx)
        agg_result = None

        # Aggregate Weights or Numpy Array
        if isinstance(models[0], Weights):
            agg_result = models[0]
            for w in models[1:]:
                agg_result += w
        # Aggregate Table
        elif is_table(models[0]):
            agg_result = models[0]
            for table in models[1:]:
                agg_result = agg_result.join(table, lambda x1, x2: x1 + x2)
            return agg_result
        # Aggregate numpy groups
        elif isinstance(models[0], list):
            # aggregation
            agg_result = models[0]
            # aggregate numpy model weights from all clients
            for params_group in models[1:]:
                for agg_params, params in zip(
                        agg_result, params_group):
                    for agg_p, p in zip(agg_params, params):
                        # agg_p: NumpyWeights or numpy array
                        agg_p += p

        if agg_result is None:
            raise ValueError(
                'can not aggregate receive model, format is illegal: {}'.format(models))

        return agg_result

    def broadcast_model(self, model, suffix=tuple(), party_idx=-1):
        """Send the aggregated model payload to the clients."""
        suffix = self._get_suffix('agg_model', suffix)
        self.broadcast(model, suffix=suffix, party_idx=party_idx)

    def aggregate_loss(self, suffix=tuple(), party_idx=-1):
        """Collect each client's weighted loss and return the sum."""
        # get loss
        suffix = self._get_suffix('local_loss', suffix)
        losses = self.collect(suffix, party_idx=party_idx)
        # aggregate loss
        total_loss = losses[0]
        for loss in losses[1:]:
            total_loss += loss

        return total_loss

    def broadcast_loss(self, loss_sum, suffix=tuple(), party_idx=-1):
        """Send the aggregated loss to the clients."""
        suffix = self._get_suffix('agg_loss', suffix)
        self.broadcast(loss_sum, suffix=suffix, party_idx=party_idx)

    def model_aggregation(self, suffix=tuple(), party_idx=-1):
        """One full round: collect, aggregate and broadcast models."""
        agg_model = self.aggregate_model(suffix=suffix, party_idx=party_idx)
        self.broadcast_model(agg_model, suffix=suffix, party_idx=party_idx)
        return agg_model

    def broadcast_converge_status(self, converge_status, suffix=tuple(), party_idx=-1):
        """Send the convergence decision to the clients."""
        suffix = self._get_suffix('converge_status', suffix)
        self.broadcast(converge_status, suffix=suffix, party_idx=party_idx)

    def loss_aggregation(self, check_converge=False, converge_func=None, suffix=tuple(), party_idx=-1):
        """One full round: aggregate losses, optionally run the convergence
        check, then broadcast the decision."""
        agg_loss = self.aggregate_loss(suffix=suffix, party_idx=party_idx)
        if check_converge:
            converge_status = converge_func(agg_loss)
        else:
            converge_status = False
        self.broadcast_converge_status(
            converge_status, suffix=suffix, party_idx=party_idx)
        return agg_loss, converge_status
| 11,628 | 39.378472 | 139 | py |
CRL | CRL-main/run_continual.py | import torch
from config import Param
from methods.utils import setup_seed
from methods.manager import Manager
def run(args):
    """Seed all RNGs, print the configuration, and launch continual training."""
    setup_seed(args.seed)
    print("hyper-parameter configurations:")
    print(str(args.__dict__))

    trainer = Manager(args)
    trainer.train(args)
if __name__ == '__main__':
    param = Param()  # There are detailed hyper-parameter configurations.
    args = param.args
    # bind this process to the configured GPU before creating any tensors
    torch.cuda.set_device(args.gpu)
    args.device = torch.device(args.device)
    args.n_gpu = torch.cuda.device_count()
    args.task_name = args.dataname
    # FewRel splits into 8 relations per incremental task; other datasets use 4
    args.rel_per_task = 8 if args.dataname == 'FewRel' else 4
    run(args)
| 653 | 24.153846 | 72 | py |
CRL | CRL-main/methods/utils.py | from dataloaders.data_loader import get_data_loader
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm, trange
import random
class Moment:
    """Representation bank for supervised contrastive replay.

    Caches encoder outputs of the current training data
    (``features``/``labels``) and of the rehearsal memory
    (``mem_features``/``mem_labels``), and computes a supervised
    contrastive loss of a batch against those cached representations.
    """

    def __init__(self, args) -> None:
        self.labels = None            # labels aligned row-wise with self.features
        self.mem_labels = None        # labels aligned row-wise with self.mem_features
        self.memlen = 0               # number of samples in the memory bank
        self.sample_k = 500           # bank samples drawn per loss() call (None = use all)
        self.temperature = args.temp  # softmax temperature of the contrastive loss

    def get_mem_proto(self):
        """Return one normalized prototype (centroid) per class in memory."""
        c = self._compute_centroids_ind()
        return c

    def _compute_centroids_ind(self):
        # per-class mean of memory features, then L2-normalized
        cinds = []
        for x in self.mem_labels:
            if x.item() not in cinds:
                cinds.append(x.item())

        num = len(cinds)
        feats = self.mem_features
        centroids = torch.zeros((num, feats.size(1)), dtype=torch.float32, device=feats.device)
        for i, c in enumerate(cinds):
            # indices of all memory samples belonging to class c
            ind = np.where(self.mem_labels.cpu().numpy() == c)[0]
            centroids[i, :] = F.normalize(feats[ind, :].mean(dim=0), p=2, dim=0)
        return centroids

    def update(self, ind, feature, init=False):
        # overwrite the cached features at the given sample indices
        # (the `init` flag is currently unused)
        self.features[ind] = feature

    def update_mem(self, ind, feature, hidden=None):
        # overwrite cached memory features (and optionally raw hidden states)
        self.mem_features[ind] = feature
        if hidden is not None:
            self.hidden_features[ind] = hidden

    @torch.no_grad()
    def init_moment(self, args, encoder, datasets, is_memory=False):
        """Fill the bank by encoding every sample of ``datasets`` once.

        With ``is_memory=True`` the memory-side buffers (including raw
        hidden states) are populated instead of the current-task buffers.
        """
        encoder.eval()
        datalen = len(datasets)
        if not is_memory:
            self.features = torch.zeros(datalen, args.feat_dim).cuda()
            data_loader = get_data_loader(args, datasets)
            td = tqdm(data_loader)
            lbs = []
            for step, batch_data in enumerate(td):
                labels, tokens, ind = batch_data
                tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
                _, reps = encoder.bert_forward(tokens)
                self.update(ind, reps.detach())
                lbs.append(labels)
            lbs = torch.cat(lbs)
            self.labels = lbs.to(args.device)
        else:
            self.memlen = datalen
            self.mem_features = torch.zeros(datalen, args.feat_dim).cuda()
            self.hidden_features = torch.zeros(datalen, args.encoder_output_size).cuda()
            lbs = []
            data_loader = get_data_loader(args, datasets)
            td = tqdm(data_loader)
            for step, batch_data in enumerate(td):
                labels, tokens, ind = batch_data
                tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
                hidden, reps = encoder.bert_forward(tokens)
                self.update_mem(ind, reps.detach(), hidden.detach())
                lbs.append(labels)
            lbs = torch.cat(lbs)
            self.mem_labels = lbs.to(args.device)

    def loss(self, x, labels, is_mem=False, mapping=None):
        """Supervised contrastive loss of batch ``x`` against the bank.

        :param x: batch representations, shape (n, feat_dim)
        :param labels: class labels of ``x``
        :param is_mem: contrast against the memory bank instead of the
            (sub-sampled) current-task bank
        :param mapping: unused here; kept for interface compatibility
        """
        if is_mem:
            ct_x = self.mem_features
            ct_y = self.mem_labels
        else:
            if self.sample_k is not None:
                # sample some instances
                idx = list(range(len(self.features)))
                if len(idx) > self.sample_k:
                    sample_id = random.sample(idx, self.sample_k)
                else:
                    sample_id = idx
                ct_x = self.features[sample_id]
                ct_y = self.labels[sample_id]
            else:
                ct_x = self.features
                ct_y = self.labels

        device = torch.device("cuda") if x.is_cuda else torch.device("cpu")
        dot_product_tempered = torch.mm(x, ct_x.T) / self.temperature  # n * m
        # Minus max for numerical stability with exponential. Same done in cross entropy. Epsilon added to avoid log(0)
        exp_dot_tempered = (
            torch.exp(dot_product_tempered - torch.max(dot_product_tempered, dim=1, keepdim=True)[0].detach()) + 1e-5
        )
        # mask_combined[i, j] == True iff bank sample j shares x_i's label
        mask_combined = (labels.unsqueeze(1).repeat(1, ct_y.shape[0]) == ct_y).to(device)  # n*m
        cardinality_per_samples = torch.sum(mask_combined, dim=1)

        log_prob = -torch.log(exp_dot_tempered / (torch.sum(exp_dot_tempered, dim=1, keepdim=True)))
        # average negative log-likelihood over each sample's positives
        supervised_contrastive_loss_per_sample = torch.sum(log_prob * mask_combined, dim=1) / cardinality_per_samples
        supervised_contrastive_loss = torch.mean(supervised_contrastive_loss_per_sample)

        return supervised_contrastive_loss
def dot_dist(x1, x2):
    """Pairwise dot-product similarity: row i of x1 against row j of x2."""
    x2_transposed = x2.t()
    return torch.matmul(x1, x2_transposed)
def osdist(x, c):
pairwise_distances_squared = torch.sum(x ** 2, dim=1, keepdim=True) + \
torch.sum(c.t() ** 2, dim=0, keepdim=True) - \
2.0 * torch.matmul(x, c.t())
error_mask = pairwise_distances_squared <= 0.0
pairwise_distances = pairwise_distances_squared.clamp(min=1e-16)#.sqrt()
pairwise_distances = torch.mul(pairwise_distances, ~error_mask)
return pairwise_distances
def setup_seed(seed):
    """Seed the python, numpy and torch RNGs and force deterministic cuDNN."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    # NOTE(review): the trailing "| 5,091 | ..." columns below look like
    # dataset-dump metadata fused onto this source line, not real code.
    torch.backends.cudnn.deterministic = True | 5,091 | 38.78125 | 119 | py |
CRL | CRL-main/methods/model.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from .backbone import Bert_Encoder
class Encoder(nn.Module):
    """BERT encoder plus a 2-layer projection head for contrastive features.

    ``bert_forward`` returns both the raw encoder output and the
    L2-normalized projection.
    """

    def __init__(self, args):
        super().__init__()
        self.encoder = Bert_Encoder(args)
        self.output_size = self.encoder.out_dim
        hidden_dim = self.output_size
        # MLP projection head: hidden -> hidden -> feat_dim
        self.head = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, args.feat_dim)
        )

    def bert_forward(self, x):
        """Return (encoder output, L2-normalized projection) for input ``x``."""
        encoded = self.encoder(x)
        projected = self.head(encoded)
        projected = F.normalize(projected, p=2, dim=1)
        return encoded, projected
| 644 | 27.043478 | 48 | py |
CRL | CRL-main/methods/backbone.py | import torch.nn as nn
import torch
import numpy as np
from transformers import BertModel, BertConfig
class Bert_Encoder(nn.Module):
    """BERT-based sentence encoder with two encoding strategies.

    ``standard`` uses the [CLS] pooled output; ``entity_marker`` concatenates
    the hidden states at the head/tail entity marker positions.
    """
    def __init__(self, config, out_token=False):
        super(Bert_Encoder, self).__init__()
        # load the pretrained BERT backbone onto the GPU
        self.encoder = BertModel.from_pretrained(config.bert_path).cuda()
        self.bert_config = BertConfig.from_pretrained(config.bert_path)
        # the dimension for the final outputs
        self.output_size = config.encoder_output_size
        self.out_dim = self.output_size
        # find which encoding is used
        if config.pattern in ['standard', 'entity_marker']:
            self.pattern = config.pattern
        else:
            raise Exception('Wrong encoding.')
        if self.pattern == 'entity_marker':
            # make room in the embedding table for the added marker tokens
            self.encoder.resize_token_embeddings(config.vocab_size + config.marker_size)
            # two marker representations are concatenated, hence hidden_size*2
            self.linear_transform = nn.Linear(self.bert_config.hidden_size*2, self.output_size, bias=True)
        else:
            self.linear_transform = nn.Linear(self.bert_config.hidden_size, self.output_size, bias=True)
        # NOTE(review): defined but not applied in forward() below
        self.layer_normalization = nn.LayerNorm([self.output_size])
    def get_output_size(self):
        """Return the dimensionality of the final representation."""
        return self.output_size
    def forward(self, inputs):
        # generate representation under a certain encoding strategy
        if self.pattern == 'standard':
            # in the standard mode, the representation is generated according to
            # the representation of the [CLS] mark (pooled output).
            output = self.encoder(inputs)[1]
        else:
            # in the entity_marker mode, the representation is generated from the
            # representations of marks [E11] and [E21] of the head and tail entities.
            e11 = []
            e21 = []
            # for each sample in the batch, acquire the positions of its [E11] and [E21]
            # (token ids 30522 / 30524 are the added marker tokens)
            for i in range(inputs.size()[0]):
                tokens = inputs[i].cpu().numpy()
                e11.append(np.argwhere(tokens == 30522)[0][0])
                e21.append(np.argwhere(tokens == 30524)[0][0])
            # input the sample to BERT
            tokens_output = self.encoder(inputs)[0] # [B,N] --> [B,N,H]
            output = []
            # for each sample in the batch, acquire its representations for [E11] and [E21]
            for i in range(len(e11)):
                if inputs.device.type in ['cuda']:
                    instance_output = torch.index_select(tokens_output, 0, torch.tensor(i).cuda())
                    instance_output = torch.index_select(instance_output, 1, torch.tensor([e11[i], e21[i]]).cuda())
                else:
                    instance_output = torch.index_select(tokens_output, 0, torch.tensor(i))
                    instance_output = torch.index_select(instance_output, 1, torch.tensor([e11[i], e21[i]]))
                output.append(instance_output) # [B,N] --> [B,2,H]
            # for each sample in the batch, concatenate the representations of [E11] and [E21], and reshape
            output = torch.cat(output, dim=0)
            output = output.view(output.size()[0], -1) # [B,N] --> [B,H*2]
        # project to the configured output size (applies to both strategies)
        output = self.linear_transform(output)
        return output | 3,220 | 42.527027 | 115 | py |
CRL | CRL-main/methods/manager.py | from dataloaders.sampler import data_sampler
from dataloaders.data_loader import get_data_loader
from .model import Encoder
from .utils import Moment, dot_dist
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import random
from tqdm import tqdm, trange
from sklearn.cluster import KMeans
from .utils import osdist
class Manager(object):
    """Training manager for continual relation learning with contrastive replay.

    Orchestrates per-task training, memory selection via K-Means, prototype
    computation, knowledge-distillation-based replay, and evaluation.
    """
    def __init__(self, args):
        super().__init__()
        # relation-name <-> id maps; filled from the data sampler in train()
        self.id2rel = None
        self.rel2id = None
    def get_proto(self, args, encoder, mem_set):
        """Return (mean prototype [1,H], all features [N,H]) of a memory set.

        Side effect: appends the labels seen to ``self.lbs``.
        """
        # aggregate the prototype set for further use.
        data_loader = get_data_loader(args, mem_set, False, False, 1)
        features = []
        encoder.eval()
        for step, batch_data in enumerate(data_loader):
            labels, tokens, ind = batch_data
            tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
            with torch.no_grad():
                feature, rep= encoder.bert_forward(tokens)
            features.append(feature)
            self.lbs.append(labels.item())
        features = torch.cat(features, dim=0)
        proto = torch.mean(features, dim=0, keepdim=True)
        return proto, features
    # Use K-Means to select what samples to save, similar to at_least = 0
    def select_data(self, args, encoder, sample_set):
        """Pick up to ``args.num_protos`` representative samples via K-Means.

        Returns (selected samples, their features [K,H], mean feature [H]).
        """
        data_loader = get_data_loader(args, sample_set, shuffle=False, drop_last=False, batch_size=1)
        features = []
        encoder.eval()
        for step, batch_data in enumerate(data_loader):
            labels, tokens, ind = batch_data
            tokens=torch.stack([x.to(args.device) for x in tokens],dim=0)
            with torch.no_grad():
                feature, rp = encoder.bert_forward(tokens)
            features.append(feature.detach().cpu())
        features = np.concatenate(features)
        num_clusters = min(args.num_protos, len(sample_set))
        # fit_transform gives each sample's distance to every cluster center
        distances = KMeans(n_clusters=num_clusters, random_state=0).fit_transform(features)
        mem_set = []
        current_feat = []
        for k in range(num_clusters):
            # keep the sample closest to each cluster center
            sel_index = np.argmin(distances[:, k])
            instance = sample_set[sel_index]
            mem_set.append(instance)
            current_feat.append(features[sel_index])
        current_feat = np.stack(current_feat, axis=0)
        current_feat = torch.from_numpy(current_feat)
        return mem_set, current_feat, current_feat.mean(0)
    def get_optimizer(self, args, encoder):
        """Build the optimizer (currently only Adam) over all encoder params."""
        print('Use {} optim!'.format(args.optim))
        def set_param(module, lr, decay=0):
            # split params by name into decay/no-decay groups;
            # NOTE(review): both groups use weight_decay 0.0 and the sentinel
            # name 'undecay' matches nothing, so the split is effectively a no-op.
            parameters_to_optimize = list(module.named_parameters())
            no_decay = ['undecay']
            parameters_to_optimize = [
                {'params': [p for n, p in parameters_to_optimize
                    if not any(nd in n for nd in no_decay)], 'weight_decay': 0.0, 'lr': lr},
                {'params': [p for n, p in parameters_to_optimize
                    if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, 'lr': lr}
            ]
            return parameters_to_optimize
        params = set_param(encoder, args.learning_rate)
        if args.optim == 'adam':
            pytorch_optim = optim.Adam
        else:
            raise NotImplementedError
        optimizer = pytorch_optim(
            params
        )
        return optimizer
    def train_simple_model(self, args, encoder, training_data, epochs):
        """Train on the current task only, with the contrastive loss."""
        data_loader = get_data_loader(args, training_data, shuffle=True)
        encoder.train()
        optimizer = self.get_optimizer(args, encoder)
        def train_data(data_loader_, name = "", is_mem = False):
            losses = []
            td = tqdm(data_loader_, desc=name)
            for step, batch_data in enumerate(td):
                optimizer.zero_grad()
                labels, tokens, ind = batch_data
                labels = labels.to(args.device)
                tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
                hidden, reps = encoder.bert_forward(tokens)
                loss = self.moment.loss(reps, labels)
                losses.append(loss.item())
                td.set_postfix(loss = np.array(losses).mean())
                loss.backward()
                torch.nn.utils.clip_grad_norm_(encoder.parameters(), args.max_grad_norm)
                optimizer.step()
                # update moment bank with the fresh representations
                if is_mem:
                    self.moment.update_mem(ind, reps.detach())
                else:
                    self.moment.update(ind, reps.detach())
            print(f"{name} loss is {np.array(losses).mean()}")
        for epoch_i in range(epochs):
            train_data(data_loader, "init_train_{}".format(epoch_i), is_mem=False)
    def train_mem_model(self, args, encoder, mem_data, proto_mem, epochs, seen_relations):
        """Replay training on memory: KD against old prototypes + contrastive loss."""
        history_nums = len(seen_relations) - args.rel_per_task
        if len(proto_mem)>0:
            # prototype-to-prototype similarity matrix used as KD target
            proto_mem = F.normalize(proto_mem, p =2, dim=1)
            dist = dot_dist(proto_mem, proto_mem)
            dist = dist.to(args.device)
        mem_loader = get_data_loader(args, mem_data, shuffle=True)
        encoder.train()
        temp_rel2id = [self.rel2id[x] for x in seen_relations]
        map_relid2tempid = {k:v for v,k in enumerate(temp_rel2id)}
        # NOTE(review): this copies map_relid2tempid unchanged; an inverse map
        # ({v: k, ...}) was probably intended — unused below, so harmless.
        map_tempid2relid = {k:v for k, v in map_relid2tempid.items()}
        optimizer = self.get_optimizer(args, encoder)
        def train_data(data_loader_, name = "", is_mem = False):
            losses = []
            kl_losses = []
            td = tqdm(data_loader_, desc=name)
            for step, batch_data in enumerate(td):
                optimizer.zero_grad()
                labels, tokens, ind = batch_data
                labels = labels.to(args.device)
                tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
                zz, reps = encoder.bert_forward(tokens)
                hidden = reps
                # samples whose memory index falls in the "old tasks" region
                need_ratio_compute = ind < history_nums * args.num_protos
                total_need = need_ratio_compute.sum()
                if total_need >0 :
                    # Knowledge Distillation for Relieve Forgetting
                    need_ind = ind[need_ratio_compute]
                    need_labels = labels[need_ratio_compute]
                    temp_labels = [map_relid2tempid[x.item()] for x in need_labels]
                    gold_dist = dist[temp_labels]
                    current_proto = self.moment.get_mem_proto()[:history_nums]
                    this_dist = dot_dist(hidden[need_ratio_compute], current_proto.to(args.device))
                    loss1 = self.kl_div_loss(gold_dist, this_dist, t=args.kl_temp)
                    # backward now, before the contrastive backward, keeping the graph
                    loss1.backward(retain_graph=True)
                else:
                    loss1 = 0.0
                # Contrastive Replay
                cl_loss = self.moment.loss(reps, labels, is_mem=True, mapping=map_relid2tempid)
                if isinstance(loss1, float):
                    kl_losses.append(loss1)
                else:
                    kl_losses.append(loss1.item())
                loss = cl_loss
                # NOTE(review): defensive float branch — skips backward/step when
                # the contrastive loss is a plain float; unclear this can occur.
                if isinstance(loss, float):
                    losses.append(loss)
                    td.set_postfix(loss = np.array(losses).mean(), kl_loss = np.array(kl_losses).mean())
                    # update moment bank
                    if is_mem:
                        self.moment.update_mem(ind, reps.detach(), hidden.detach())
                    else:
                        self.moment.update(ind, reps.detach())
                    continue
                losses.append(loss.item())
                td.set_postfix(loss = np.array(losses).mean(), kl_loss = np.array(kl_losses).mean())
                loss.backward()
                torch.nn.utils.clip_grad_norm_(encoder.parameters(), args.max_grad_norm)
                optimizer.step()
                # update moment bank
                if is_mem:
                    self.moment.update_mem(ind, reps.detach())
                else:
                    self.moment.update(ind, reps.detach())
            print(f"{name} loss is {np.array(losses).mean()}")
        for epoch_i in range(epochs):
            train_data(mem_loader, "memory_train_{}".format(epoch_i), is_mem=True)
    def kl_div_loss(self, x1, x2, t=10):
        """Temperature-scaled KL(softmax(t*x1) || softmax(t*x2)), batch-mean."""
        batch_dist = F.softmax(t * x1, dim=1)
        temp_dist = F.log_softmax(t * x2, dim=1)
        loss = F.kl_div(temp_dist, batch_dist, reduction="batchmean")
        return loss
    @torch.no_grad()
    def evaluate_strict_model(self, args, encoder, test_data, protos4eval, featrues4eval, seen_relations):
        """Nearest-prototype accuracy over ``test_data`` (batch size 1).

        ``featrues4eval`` is accepted but unused.
        """
        data_loader = get_data_loader(args, test_data, batch_size=1)
        encoder.eval()
        n = len(test_data)
        temp_rel2id = [self.rel2id[x] for x in seen_relations]
        map_relid2tempid = {k:v for v,k in enumerate(temp_rel2id)}
        # NOTE(review): identity copy, see the same pattern in train_mem_model
        map_tempid2relid = {k:v for k, v in map_relid2tempid.items()}
        correct = 0
        for step, batch_data in enumerate(data_loader):
            labels, tokens, ind = batch_data
            labels = labels.to(args.device)
            tokens = torch.stack([x.to(args.device) for x in tokens], dim=0)
            hidden, reps = encoder.bert_forward(tokens)
            labels = [map_relid2tempid[x.item()] for x in labels]
            # negative squared distance to prototypes acts as the logit
            logits = -osdist(hidden, protos4eval)
            seen_relation_ids = [self.rel2id[relation] for relation in seen_relations]
            seen_relation_ids = [map_relid2tempid[x] for x in seen_relation_ids]
            seen_sim = logits[:,seen_relation_ids]
            seen_sim = seen_sim.cpu().data.numpy()
            max_smi = np.max(seen_sim,axis=1)
            label_smi = logits[:,labels].cpu().data.numpy()
            # correct when the gold-label logit ties-or-beats the best seen logit
            if label_smi >= max_smi:
                correct += 1
        return correct/n
    def train(self, args):
        """Full continual-learning protocol: repeated rounds over the task stream."""
        # set training batch
        for i in range(args.total_round):
            test_cur = []
            test_total = []
            # set random seed
            random.seed(args.seed+i*100)
            # sampler setup
            sampler = data_sampler(args=args, seed=args.seed+i*100)
            self.id2rel = sampler.id2rel
            self.rel2id = sampler.rel2id
            # encoder setup
            encoder = Encoder(args=args).to(args.device)
            # initialize memory and prototypes
            num_class = len(sampler.id2rel)
            memorized_samples = {}
            # load data and start computation
            history_relation = []
            proto4repaly = []
            for steps, (training_data, valid_data, test_data, current_relations, historic_test_data, seen_relations) in enumerate(sampler):
                print(current_relations)
                # Initial
                train_data_for_initial = []
                for relation in current_relations:
                    history_relation.append(relation)
                    train_data_for_initial += training_data[relation]
                # train model
                # no memory. first train with current task
                self.moment = Moment(args)
                self.moment.init_moment(args, encoder, train_data_for_initial, is_memory=False)
                self.train_simple_model(args, encoder, train_data_for_initial, args.step1_epochs)
                # replay phase (only once some memory exists)
                if len(memorized_samples)>0:
                    # select current task sample
                    for relation in current_relations:
                        memorized_samples[relation], _, _ = self.select_data(args, encoder, training_data[relation])
                    train_data_for_memory = []
                    for relation in history_relation:
                        train_data_for_memory += memorized_samples[relation]
                    self.moment.init_moment(args, encoder, train_data_for_memory, is_memory=True)
                    self.train_mem_model(args, encoder, train_data_for_memory, proto4repaly, args.step2_epochs, seen_relations)
                # (re)select memory and prototypes for the current relations
                feat_mem = []
                proto_mem = []
                for relation in current_relations:
                    memorized_samples[relation], feat, temp_proto = self.select_data(args, encoder, training_data[relation])
                    feat_mem.append(feat)
                    proto_mem.append(temp_proto)
                feat_mem = torch.cat(feat_mem, dim=0)
                temp_proto = torch.stack(proto_mem, dim=0)
                protos4eval = []
                featrues4eval = []
                self.lbs = []
                # prototypes for older relations come from their memory sets
                for relation in history_relation:
                    if relation not in current_relations:
                        protos, featrues = self.get_proto(args, encoder, memorized_samples[relation])
                        protos4eval.append(protos)
                        featrues4eval.append(featrues)
                if protos4eval:
                    protos4eval = torch.cat(protos4eval, dim=0).detach()
                    protos4eval = torch.cat([protos4eval, temp_proto.to(args.device)], dim=0)
                else:
                    protos4eval = temp_proto.to(args.device)
                proto4repaly = protos4eval.clone()
                test_data_1 = []
                for relation in current_relations:
                    test_data_1 += test_data[relation]
                test_data_2 = []
                for relation in seen_relations:
                    test_data_2 += historic_test_data[relation]
                cur_acc = self.evaluate_strict_model(args, encoder, test_data_1, protos4eval, featrues4eval,seen_relations)
                total_acc = self.evaluate_strict_model(args, encoder, test_data_2, protos4eval, featrues4eval,seen_relations)
                print(f'Restart Num {i+1}')
                print(f'task--{steps + 1}:')
                print(f'current test acc:{cur_acc}')
                print(f'history test acc:{total_acc}')
                test_cur.append(cur_acc)
                test_total.append(total_acc)
                print(test_cur)
                print(test_total)
            del self.moment
| 14,339 | 42.98773 | 139 | py |
CRL | CRL-main/dataloaders/data_loader.py | import torch
from torch.utils.data import Dataset, DataLoader
class data_set(Dataset):
    """Wraps a list of samples (dicts with 'relation' and 'tokens' keys).

    ``__getitem__`` returns (sample, position) so the collate function can
    report each sample's index in the underlying list.
    """

    def __init__(self, data, config=None):
        self.data = data
        self.config = config
        self.bert = True

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return (self.data[idx], idx)

    def collate_fn(self, data):
        """Batch a list of (sample, position) pairs into (labels, tokens, indices)."""
        labels = torch.tensor([sample['relation'] for sample, _ in data])
        token_tensors = [torch.tensor(sample['tokens']) for sample, _ in data]
        positions = torch.tensor([position for _, position in data])
        return (labels, token_tensors, positions)
def get_data_loader(config, data, shuffle = False, drop_last = False, batch_size = None):
    """Build a DataLoader over ``data`` using ``data_set``'s collate_fn.

    ``batch_size`` defaults to ``config.batch_size``; either way it is capped
    at ``len(data)`` so a small dataset never yields an empty loader.
    """
    dataset = data_set(data, config)
    if batch_size == None:
        batch_size = min(config.batch_size, len(data))
    else:
        batch_size = min(batch_size, len(data))
    data_loader = DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        pin_memory=True,
        num_workers=config.num_workers,
        collate_fn=dataset.collate_fn,
        drop_last=drop_last)
    # NOTE(review): trailing "| 1,180 | ..." columns below look like
    # dataset-dump metadata fused onto this source line.
    return data_loader | 1,180 | 24.673913 | 89 | py |
wav2letter | wav2letter-main/recipes/lexicon_free/utilities/compute_upper_ppl_convlm.py | """
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Compute upper limit on word perplexity for convlm models
Command (for word) : python3 compute_upper_ppl_convlm.py --model [...] \
--dict [...] --text [...] --model_type word --dataset_type ls
Command (for char) : python3 compute_upper_ppl_convlm.py --model [...] \
--dict [...] --word_dict [...] --text [...] \
--model_type char14B --dataset_type ls
Command (for char) : python3 compute_upper_ppl_convlm.py --model [...] \
--dict [...] --word_dict [...] --text [...] \
--model_type char20B --dataset_type ls
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import numpy
import torch
from convlm_utils import (
EOSIDX,
UNKIDX,
build_token_index_correspondence,
decodeInputText,
load_char_model_14B,
load_char_model_20B,
load_word_model,
)
from fairseq.data import Dictionary
from utils import transform_asg
def compute_ppl_upper_limit_char_convlm(
    model,
    input_charlm,
    charLM_indices_token_dict,
    charLM_token_indices_dict,
    known_words,
):
    """Evaluate a character convlm and report letter/word perplexities.

    Words are delimited by the "|" token; per-word log-probs are accumulated
    separately for words in ``known_words`` vs. out-of-vocabulary words, and
    the end-of-sentence token is counted as a known word.
    """
    sum_logp = 0
    n_words = 0
    sum_logp_known = 0
    n_known_words = 0
    sum_logp_unknown = 0
    n_unknown_words = 0
    n_letters = 0
    for sentence in input_charlm:
        # prepend EOS as the start-of-sequence context
        x = torch.LongTensor([EOSIDX] + sentence).reshape(1, len(sentence) + 1).cuda()
        with torch.no_grad():
            y = model.forward(x)[0]
        logprobs = torch.nn.functional.log_softmax(y, 2).detach().cpu().numpy()[0]
        current_word = ""
        word_ppl = 0.0
        for index, token_id in enumerate(sentence):
            n_letters += 1
            current_word += charLM_indices_token_dict[token_id]
            word_ppl += logprobs[index, token_id]
            if charLM_indices_token_dict[token_id] == "|":
                # word boundary: bank the accumulated word log-prob
                if current_word in known_words:
                    sum_logp_known += word_ppl
                    n_known_words += 1
                else:
                    sum_logp_unknown += word_ppl
                    n_unknown_words += 1
                current_word = ""
                word_ppl = 0
        sum_logp += numpy.sum(logprobs[numpy.arange(len(sentence)), sentence])
        n_words += numpy.sum(numpy.array(sentence) == charLM_token_indices_dict["|"])
        # add eos
        sum_logp += logprobs[-1, EOSIDX]
        n_words += 1
        sum_logp_known += logprobs[-1, EOSIDX]
        n_known_words += 1
        n_letters += 1
    # NOTE(review): letter loss adds sum_logp_unknown on top of sum_logp (which
    # already contains unknown-word mass per the allclose assert below) —
    # presumably intentional weighting; verify against the paper.
    loss_letter = -(sum_logp + sum_logp_unknown) / n_letters
    ppl_word_no_unk = numpy.exp(-sum_logp_known / n_known_words)
    ppl_word_unk = numpy.exp(-sum_logp_unknown / n_unknown_words)
    assert n_known_words + n_unknown_words == n_words, "Error in words counting"
    assert numpy.allclose(sum_logp, sum_logp_known + sum_logp_unknown), "Error in loss"
    ppl_word = numpy.exp(-sum_logp / n_words)
    print(
        "Letter loss: {}, letter perplexity: {}".format(
            loss_letter, numpy.exp(loss_letter)
        )
    )
    print("Upper word perplexity for all words: {}".format(ppl_word))
    print("Upper word perplexity for unknown words: {}".format(ppl_word_unk))
    print(
        "(Reported in the paper) "
        "Upper word perplexity for known words: {}".format(ppl_word_no_unk)
    )
def compute_ppl_upper_limit_word_convlm(model, input_wordlm):
    """Evaluate a word convlm and report word perplexities.

    Log-probs are accumulated separately for in-vocabulary tokens and UNK
    tokens; EOS is scored and counted as a known word for every sentence.
    """
    sum_logp_known = 0
    n_known_words = 0
    sum_logp_unknown = 0
    n_unknown_words = 0
    for sentence in input_wordlm:
        # prepend EOS as the start-of-sequence context
        x = torch.LongTensor([EOSIDX] + sentence).reshape(1, len(sentence) + 1).cuda()
        with torch.no_grad():
            y = model.forward(x)[0]
        # adaptive softmax head produces the normalized log-probs
        logprobs = (
            model.adaptive_softmax.get_log_prob(y, None).detach().cpu().numpy()[0]
        )
        for index, token_id in enumerate(sentence):
            if token_id != UNKIDX:
                sum_logp_known += logprobs[index, token_id]
                n_known_words += 1
            else:
                sum_logp_unknown += logprobs[index, token_id]
                n_unknown_words += 1
        # add eos
        sum_logp_known += logprobs[-1, EOSIDX]
        n_known_words += 1
    ppl_word_no_unk = numpy.exp(-sum_logp_known / n_known_words)
    ppl_word_unk = numpy.exp(-sum_logp_unknown / n_unknown_words)
    ppl_word = numpy.exp(
        -(sum_logp_known + sum_logp_unknown) / (n_known_words + n_unknown_words)
    )
    print("Word perplexity for all words: {}".format(ppl_word))
    print("Word perplexity for unknown words: {}".format(ppl_word_unk))
    print(
        "(Reported in the paper) "
        "Word perplexity for known words: {}".format(ppl_word_no_unk)
    )
if __name__ == "__main__":
    # CLI entry point: pick word vs. char model and run the matching evaluator.
    parser = argparse.ArgumentParser(
        description="Upper limit on word perplexity for convlm models"
    )
    parser.add_argument("--model", help="path to convlm model")
    parser.add_argument("--dict", help="path to convlm dict file in data")
    parser.add_argument(
        "--text", help="file to evaluate, in necessary format for model"
    )
    parser.add_argument("--model_type", help='"word" or "char14B" or "char20B"')
    parser.add_argument("--dataset_type", help='"ls" or "wsj"', default="ls")
    parser.add_argument(
        "--word_dict",
        help="path to convlm word convlm dict file"
        "in data (ignored for word model eval)",
        default=None,
    )
    args = parser.parse_args()
    print("Evaluate file {}".format(args.text))
    token_indices_dict, indices_token_dict = build_token_index_correspondence(args.dict)
    with open(args.text, "r") as f:
        sentences = [line.strip() for line in f]
    input_data = decodeInputText(sentences, token_indices_dict)
    fairseq_dict = Dictionary.load(args.dict)
    if args.model_type == "word":
        model = load_word_model(args.model, fairseq_dict, args.dataset_type)
        compute_ppl_upper_limit_word_convlm(model, input_data)
    else:
        # char model: build the known-word set (ASG-transformed, "|"-terminated)
        with open(args.word_dict, "r") as f:
            known_words = set(
                [transform_asg(line.strip().split(" ")[0]) + "|" for line in f]
            )
        if "14B" in args.model_type:
            model = load_char_model_14B(args.model, fairseq_dict, args.dataset_type)
        else:
            model = load_char_model_20B(args.model, fairseq_dict, args.dataset_type)
        compute_ppl_upper_limit_char_convlm(
            model, input_data, indices_token_dict, token_indices_dict, known_words
        )
| 6,783 | 33.969072 | 88 | py |
wav2letter | wav2letter-main/recipes/lexicon_free/utilities/convlm_utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from fairseq import options
from fairseq.models.fconv import FConvDecoder
# Special symbols and their fixed indices, matching the fairseq dictionary
# layout assumed by build_token_index_correspondence below (regular tokens
# are assigned indices starting at 4).
EOS = '</s>'
UNK = '<unk>'
EOSIDX = 2
UNKIDX = 3
def compute_new_state(model_state):
    """Adapt a saved fairseq checkpoint's state dict for direct model loading.

    Strips the leading component of every parameter name, and renames
    ``1.weight`` to ``2.weight`` inside adaptive-softmax parameters.
    """
    converted = dict()
    for name, tensor in model_state["model"].items():
        stripped = ".".join(name.split(".")[1:])
        if "1.weight" in name and "adaptive" in name:
            stripped = stripped.replace("1.weight", "2.weight")
        converted[stripped] = tensor
    return converted
def load_char_model_20B(pytorch_model_path, fairseq_dict, dataset_type):
    """Load the 20B-parameter character convlm as an eval-mode FConvDecoder.

    ``dataset_type`` is accepted for interface symmetry with load_word_model
    but not used here. The model is placed on the GPU.
    """
    # convolution stack spec (out_channels, kernel[, residual]) per layer
    layer = eval(
        "[(512, 5)] + [(128, 1, 0), (128, 5, 0), (256, 1, 3)] * 3 + "
        "[(256, 1, 0), (256, 5, 0), (512, 1, 3)] * 3 + "
        "[(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3 + "
        "[(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 9 + "
        "[(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]"
    )
    model_state = torch.load(pytorch_model_path)
    convLM_char = FConvDecoder(
        fairseq_dict,
        embed_dim=256,
        out_embed_dim=256,
        max_positions=1024,
        convolutions=layer,
        dropout=0.1,
        share_embed=False,
        attention=False,
        positional_embeddings=False,
        adaptive_softmax_cutoff=None,
        adaptive_softmax_dropout=0,
    ).cuda()
    # remap checkpoint parameter names before loading
    convLM_char.load_state_dict(compute_new_state(model_state))
    convLM_char.eval()
    return convLM_char
def load_char_model_14B(pytorch_model_path, fairseq_dict, dataset_type):
    """Load the 14B-parameter character convlm as an eval-mode FConvDecoder.

    ``dataset_type`` is accepted for interface symmetry with load_word_model
    but not used here. The model is placed on the GPU.
    """
    # convolution stack spec (out_channels, kernel[, residual]) per layer
    layer = eval(
        "[(512, 5)] + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3 + "
        "[(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3 + "
        "[(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6 + "
        "[(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]"
    )
    model_state = torch.load(pytorch_model_path)
    convLM_char = FConvDecoder(
        fairseq_dict,
        embed_dim=128,
        out_embed_dim=128,
        max_positions=1024,
        convolutions=layer,
        dropout=0.1,
        share_embed=False,
        attention=False,
        positional_embeddings=False,
        adaptive_softmax_cutoff=None,
        adaptive_softmax_dropout=0,
    ).cuda()
    # remap checkpoint parameter names before loading
    convLM_char.load_state_dict(compute_new_state(model_state))
    convLM_char.eval()
    return convLM_char
def load_word_model(pytorch_model_path, fairseq_dict, dataset_type):
    """Load the word convlm with adaptive softmax as an eval-mode FConvDecoder.

    ``dataset_type`` ("wsj" or "ls") selects the adaptive-softmax cutoff
    bands; any other value yields an empty cutoff string. The model is
    placed on the GPU.
    """
    # convolution stack spec (out_channels, kernel[, residual]) per layer
    layer = eval(
        "[(512, 5)] + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3 + "
        "[(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3 + "
        "[(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6 + "
        "[(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]"
    )
    model_state = torch.load(pytorch_model_path)
    if dataset_type == "wsj":
        cutoff = "10000,50000,100000"
    elif dataset_type == "ls":
        cutoff = "10000,50000,200000"
    else:
        cutoff = ""
    convLM = FConvDecoder(
        fairseq_dict,
        embed_dim=128,
        out_embed_dim=128,
        max_positions=1024,
        convolutions=layer,
        dropout=0.1,
        share_embed=False,
        attention=False,
        positional_embeddings=False,
        adaptive_softmax_cutoff=(options.eval_str_list(cutoff, type=int)),
        adaptive_softmax_dropout=0,
    ).cuda()
    # remap checkpoint parameter names before loading
    convLM.load_state_dict(compute_new_state(model_state))
    convLM.eval()
    convLM.adaptive_softmax.eval()
    return convLM
def decodeInputText(sentences, token_indices_dict):
    """Map each space-separated token of every sentence to its index.

    Tokens missing from ``token_indices_dict`` are mapped to the UNK index.
    Returns a list of index lists, one per input sentence.
    """
    decoded = []
    for line in sentences:
        indices = [
            token_indices_dict[token]
            if token in token_indices_dict
            else token_indices_dict[UNK]
            for token in line.split(" ")
        ]
        decoded.append(indices)
    return decoded
def build_token_index_correspondence(dict_fname):
    """Read a fairseq dictionary file and build token<->index maps.

    Regular tokens get indices starting at 4 (fairseq reserves the first
    slots); EOS and UNK are added at their fixed indices.
    """
    # follow fairseq
    token_indices_dict = dict()
    indices_token_dict = dict()
    with open(dict_fname, "r") as f:
        for offset, line in enumerate(f):
            symbol = line.strip().split(" ")[0]
            token_indices_dict[symbol] = offset + 4
            indices_token_dict[offset + 4] = symbol
    for special, special_index in ((EOS, EOSIDX), (UNK, UNKIDX)):
        token_indices_dict[special] = special_index
        indices_token_dict[special_index] = special
    return token_indices_dict, indices_token_dict
| 4,597 | 29.052288 | 82 | py |
wav2letter | wav2letter-main/recipes/lexicon_free/utilities/compute_lower_ppl_convlm.py | """
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the MIT-style license found in the
LICENSE file in the root directory of this source tree.
----------
Compute upper and lower limits on word perplexity for convlm models
Command : python3 compute_lower_ppl_convlm.py --model [...] --dict [...] \
--word_model [...] --word_dict [...] \
--text [...] --model_type char14B --dataset_type ls
Replace [...] with appropriate paths
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import numpy
import torch
from convlm_utils import (
EOS,
EOSIDX,
UNKIDX,
build_token_index_correspondence,
decodeInputText,
load_char_model_14B,
load_char_model_20B,
load_word_model,
)
from fairseq.data import Dictionary
from utils import prepare_vocabs_convlm, transform_asg, transform_asg_back
# reusing previous states for some reason is slower than reevaluating the full sentence.
# TODO speedup with batching and using previous state
def compute_word_logprob(model, current_state, target_word, token_index_dict):
    """Log-probability of ``target_word`` given character context ``current_state``.

    For EOS, scores a single token after the context; otherwise sums the
    per-character log-probs of the word's characters. The model is run on
    the GPU for every call (no state reuse — see the module-level comment).
    """
    if target_word == EOS:
        x = torch.LongTensor(current_state).reshape(1, len(current_state)).cuda()
        with torch.no_grad():
            y = model.forward(x)[0]
        logprobs = torch.nn.functional.log_softmax(y, 2).detach().cpu().numpy()[0]
        return logprobs[-1, EOSIDX]
    else:
        additional_state = [token_index_dict[token] for token in list(target_word)]
        with torch.no_grad():
            # feed context + all but the last character; the model's outputs at
            # the final len(additional_state) positions predict the word's chars
            x = (
                torch.LongTensor(current_state + additional_state[:-1])
                .reshape(1, len(current_state) + len(additional_state) - 1)
                .cuda()
            )
            y = model.forward(x)[0]
        logprobs = torch.nn.functional.log_softmax(y, 2).detach().cpu().numpy()[0]
        return numpy.sum(
            logprobs[-len(additional_state) :][
                numpy.arange(len(additional_state)), additional_state
            ]
        )
def compute_denominator(model, current_state, words, token_index_dict):
    """Log-sum-exp of the per-word log-probabilities over ``words``.

    Uses the max-shift trick for numerical stability.
    """
    scores = []
    for word in words:
        scores.append(
            compute_word_logprob(model, current_state, word, token_index_dict)
        )
    assert len(scores) != 0, "Invalid denominator"
    peak = numpy.max(scores)
    shifted = numpy.exp(numpy.array(scores) - peak)
    return peak + numpy.log(numpy.sum(shifted))
def compute_words_model_pdf_mass(
    word_probs, current_state_position, known_words, known_words_decoded
):
    """Return the known words covering 95% of the word model's probability mass.

    Words are taken in decreasing probability order at the given position;
    results are ASG-transformed with a "|" terminator (EOS is kept as-is).
    """
    known_probs = word_probs[current_state_position, known_words_decoded]
    order = numpy.argsort(-known_probs)
    # unk word is not added to this pdf mass, sometimes its prob is huge
    # so take percentile from known word pdf
    total_mass = numpy.sum(known_probs)
    cutoff = numpy.where(numpy.cumsum(known_probs[order]) > 0.95 * total_mass)[0][0]
    selected = known_words[order[:cutoff]]
    return [w if w == EOS else transform_asg(w) + "|" for w in selected]
def compute_ppl_lower_limit(
    model,
    word_model,
    sentences,
    known_words,
    known_words_original,
    known_words_original_decoded,
    indices_token_dict,
    token_indices_dict,
):
    """Print upper and lower word-perplexity limits for a character convlm.

    The upper limit uses raw per-word log-probs; the lower limit renormalizes
    each word's score over a candidate set built from the word model's 95%
    probability mass at that position. Unknown words are counted but skipped.
    """
    n_words = 0
    unk_n_words = 0
    ppl = 0.0
    ppl_lower = 0.0
    n_logging = len(sentences)
    for n, sentence in enumerate(sentences):
        current_state = [EOSIDX]
        current_word = ""
        current_word_state_position = 0
        addition_state = []
        # reconstruct the sentence as words for the word-level model
        wordLM_sentence = (
            "".join([indices_token_dict[idx] for idx in sentence])
            .replace("|", " ")
            .strip()
        )
        wordLM_sentence = [
            transform_asg_back(word) for word in wordLM_sentence.split(" ")
        ]
        # NOTE(review): word_indices_dict is a module-level global created in
        # the __main__ block, not a parameter — this function only works when
        # run through the script entry point.
        wordLM_sentence_decoded = [EOSIDX] + [
            UNKIDX if word not in word_indices_dict else word_indices_dict[word]
            for word in wordLM_sentence
        ]
        with torch.no_grad():
            x = (
                torch.LongTensor(wordLM_sentence_decoded)
                .reshape(1, len(wordLM_sentence_decoded))
                .cuda()
            )
            y = word_model.forward(x)[0]
            # word-model probabilities at every position, used for candidate sets
            words_probs = numpy.exp(
                word_model.adaptive_softmax.get_log_prob(y, None)
                .detach()
                .cpu()
                .numpy()[0]
            )
        for token_idx in sentence:
            current_word += indices_token_dict[token_idx]
            addition_state.append(token_idx)
            if indices_token_dict[token_idx] == "|":
                # word boundary reached
                if current_word in known_words:
                    n_words += 1
                    pdf_mass_words = set(
                        compute_words_model_pdf_mass(
                            words_probs,
                            current_word_state_position,
                            known_words_original,
                            known_words_original_decoded,
                        )
                    )
                    # the gold word must be in the candidate set
                    if current_word not in pdf_mass_words:
                        pdf_mass_words.add(current_word)
                    word_score = compute_word_logprob(
                        model, current_state, current_word, token_indices_dict
                    )
                    ppl += word_score
                    # renormalize over the candidate set for the lower bound
                    ppl_lower += word_score - compute_denominator(
                        model, current_state, pdf_mass_words, token_indices_dict
                    )
                else:
                    unk_n_words += 1
                current_word = ""
                current_state += addition_state
                addition_state = []
                current_word_state_position += 1
        # process eos
        word_score = compute_word_logprob(model, current_state, EOS, token_indices_dict)
        n_words += 1
        ppl += word_score
        pdf_mass_words = set(
            compute_words_model_pdf_mass(
                words_probs,
                current_word_state_position,
                known_words_original,
                known_words_original_decoded,
            )
        )
        if EOS not in pdf_mass_words:
            pdf_mass_words.add(EOS)
        ppl_lower += word_score - compute_denominator(
            model, current_state, pdf_mass_words, token_indices_dict
        )
        if n % 10 == 0:
            print(
                "Evaluated",
                n,
                "sentences among",
                n_logging,
                "upper limit perplexity",
                numpy.exp(-ppl / n_words),
                "lower limit perplexity",
                numpy.exp(-ppl_lower / n_words),
                "number of words",
                n_words,
                flush=True,
            )
    print("Final loss", ppl, "loss lower", ppl_lower)
    print("Upper limit on perplexity:", numpy.exp(-ppl / n_words))
    print("Lower limit on perplexity:", numpy.exp(-ppl_lower / n_words))
    print("Total number of words:", n_words, "unknown words:", unk_n_words)
if __name__ == "__main__":
    # CLI entry point: loads both char and word convlms, then computes the
    # perplexity bounds. Note: word_indices_dict created here is read as a
    # global inside compute_ppl_lower_limit.
    parser = argparse.ArgumentParser(
        description="Upper and lower limits on word perplexity for convlm models"
    )
    parser.add_argument("--model", help="path to convlm model")
    parser.add_argument("--word_model", help="path to convlm model")
    parser.add_argument("--dict", help="path to convlm dict file in data")
    parser.add_argument(
        "--word_dict", help="path to convlm word convlm dict file in data"
    )
    parser.add_argument(
        "--text", help="file to evaluate, in necessary format for model"
    )
    parser.add_argument("--model_type", help='"char14B" or "char20B"')
    parser.add_argument("--dataset_type", help='"ls" or "wsj"', default="ls")
    args = parser.parse_args()
    print("Evaluate file {}".format(args.text))
    token_indices_dict, indices_token_dict = build_token_index_correspondence(args.dict)
    word_indices_dict, indices_word_dict = build_token_index_correspondence(
        args.word_dict
    )
    known_words, known_words_original = prepare_vocabs_convlm(args.word_dict)
    known_words_original_decoded = numpy.array(
        [
            UNKIDX if w not in word_indices_dict else word_indices_dict[w]
            for w in known_words_original
        ]
    )
    with open(args.text, "r") as f:
        sentences = [line.strip() for line in f]
    input_data = decodeInputText(sentences, token_indices_dict)
    fairseq_dict = Dictionary.load(args.dict)
    word_fairseq_dict = Dictionary.load(args.word_dict)
    word_model = load_word_model(args.word_model, word_fairseq_dict, args.dataset_type)
    if "14B" in args.model_type:
        char_model = load_char_model_14B(args.model, fairseq_dict, args.dataset_type)
    else:
        char_model = load_char_model_20B(args.model, fairseq_dict, args.dataset_type)
    compute_ppl_lower_limit(
        char_model,
        word_model,
        input_data,
        known_words,
        known_words_original,
        known_words_original_decoded,
        indices_token_dict,
        token_indices_dict,
    )
| 9,068 | 33.48289 | 88 | py |
wav2letter | wav2letter-main/recipes/utilities/convlm_serializer/save_pytorch_model.py | from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from collections import defaultdict
import torch
def convert(model_state, key, suffix=""):
    """Serialize one parameter tensor to a single wav2letter text line.

    The emitted line is "<name><suffix>.<leaf> <ndim> <dims...> <values...>"
    where <name> drops the leading module prefix of *key*.  3-D conv weights
    whose weight_v has a leading dimension of 1 are re-emitted as linear
    layers: the leading dim is dropped, the remaining dims are reversed and
    the value matrix is transposed.
    """
    tensor = model_state[key]
    parts = key.split(".")
    # Parameter name: strip the first dotted component, keep the leaf.
    name = ".".join(parts[1:-1]) + suffix + "." + parts[-1]

    # Detect 1x-width weight-normalized convolutions; weight_g is checked
    # against its sibling weight_v tensor.
    as_linear = False
    if "conv" in key and len(tensor.shape) == 3:
        if "weight_v" in key:
            as_linear = tensor.shape[0] == 1
        elif "weight_g" in key:
            as_linear = model_state[key.replace("weight_g", "weight_v")].shape[0] == 1

    if as_linear:
        header = (
            str(len(tensor.shape) - 1)
            + " "
            + " ".join(map(str, tensor.shape[1:][::-1]))
        )
        values = tensor.cpu().numpy()[0].T.flatten()
    else:
        header = str(len(tensor.shape)) + " " + " ".join(map(str, tensor.shape))
        values = tensor.cpu().numpy().flatten()
    return name + " " + header + " " + " ".join(map(str, values))
def save_model(pytorch_model_path, dst):
    """Serialize a fairseq convlm checkpoint to wav2letter's text format.

    Loads the ``model`` state dict from *pytorch_model_path* and writes one
    line per parameter (via ``convert``) to *dst*.  "projection" parameters
    are buffered per their second-to-last key component and flushed when the
    iteration moves on to a new third key component.

    NOTE(review): a non-empty ``add_string`` left after the loop is never
    written — if the last group has buffered projections they are dropped;
    verify against real checkpoints.
    """
    model_state = torch.load(pytorch_model_path)
    model_state = model_state["model"]
    add_string = ""  # buffered projection lines waiting to be flushed
    prev_key = ""  # third dot-component of the previously written param key
    with open(dst, "w") as f:
        projections = defaultdict(list)
        for key in model_state:
            print("Process param", key)
            if "version" in key:
                # Bookkeeping entries, not real parameters.
                print("Skip", key)
                continue
            if "projection" in key:
                # Buffer projection lines, keyed by the second-to-last
                # component of the parameter key.
                projections[key.split(".")[-2]].append(
                    convert(model_state, key, "-projection")
                )
            else:
                if prev_key != key.split(".")[2]:
                    # Entering a new group: flush whatever was buffered for
                    # the previous one, then stage this group's projections.
                    if add_string != "":
                        f.write(add_string + "\n")
                        add_string = ""
                    prev_key = key.split(".")[2]
                    if key.split(".")[2] in projections:
                        add_string = "\n".join(projections[key.split(".")[2]])
                f.write(convert(model_state, key) + "\n")
if __name__ == "__main__":
    # CLI: save_pytorch_model.py <path/to/model> <dst>
    print("Converting the model. Usage: save_pytorch_model.py [path/to/model] [dst]")
    path = sys.argv[1]
    dst = sys.argv[2]
    save_model(path, dst)
| 2,325 | 32.710145 | 87 | py |
wav2letter | wav2letter-main/recipes/joint_training_vox_populi/prepare_data/common_voice_to_wav2letter.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
import argparse
import torch
import torchaudio
import string
from tqdm import tqdm
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
from lst_utils import FileInfo, save_lst, get_speakers_list
# Punctuation to strip from transcriptions: ASCII punctuation plus inverted
# marks, but keeping apostrophes and hyphens (word-internal characters).
PUNCTUATION = (string.punctuation + "¡¿").replace("'", "").replace("-", "")
# Typographic dashes/quotes/ellipsis that string.punctuation does not cover.
PUNCTUATION += "–…»“«·—’”„"
def get_size_audio_file(path_file: Path) -> float:
    r"""
    Return the duration of the given audio file, in hours (0 if unreadable).
    """
    try:
        meta = torchaudio.info(str(path_file))[0]
    except RuntimeError:
        # Unreadable/corrupt file: count it as zero-length.
        return 0
    hours = meta.length / (meta.rate * 3600)
    return hours
def to_wav2letterFormat(data: torch.tensor, sr: int) -> torch.tensor:
    r"""
    Convert audio to the mono, 16kHz, [-1, 1]-clamped layout wav2letter needs.
    """
    ndim = len(data.size())
    if ndim == 2:
        # Average all channels down to one mono channel.
        mono = data.mean(dim=0, keepdim=True)
    elif ndim == 1:
        mono = data.view(1, -1)
    else:
        raise ValueError("Invalid tensor format")
    if sr != 16000:
        mono = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)(mono)
    return torch.clamp(mono, min=-1.0, max=1.0)
def get_base_data_from_csv(pathTSV) -> List[Dict[str, str]]:
    """Read a Common Voice .tsv file into a list of record dicts.

    Each record keeps the speaker id (``client_id``), the clip's relative
    path (``path``) and the transcription (``sentence``).
    """
    records = []
    with open(pathTSV, "r", encoding="utf-8") as tsvfile:
        for row in csv.DictReader(tsvfile, dialect="excel-tab"):
            records.append(
                {
                    "speaker_id": row["client_id"],
                    "local_path": row["path"],
                    "text": row["sentence"],
                }
            )
    return records
def norm_text(
    text: str,
    char_set: Set[str],
    replace_set: Optional[Dict[str, str]] = None,
    del_set: Optional[Set[str]] = None,
) -> Tuple[str, bool]:
    """Lower-case and clean *text*, then validate it against a character set.

    Args:
        text: raw transcription.
        char_set: allowed characters (spaces are always allowed).
        replace_set: optional substring -> replacement mapping, applied first.
        del_set: optional characters to delete from the text.

    Returns:
        The normalized text and a flag telling whether every remaining
        non-space character belongs to ``char_set``.
        (Fixed: the annotation previously said ``Tuple[bool, str]`` while the
        function returns ``(text, valid)``.)
    """
    text = text.lower()
    if replace_set is not None:
        for old, new in replace_set.items():
            text = text.replace(old, new)
    if del_set is not None:
        for char_ in del_set:
            text = text.replace(char_, "")
    # Spaces are not validated: only the characters themselves must be known.
    valid = all(c in char_set for c in text.replace(" ", ""))
    return text, valid
def load_letters(path_letter: Path):
    """Read the token file: one token per line, surrounding whitespace stripped."""
    with open(path_letter, "r") as token_file:
        return [line.strip() for line in token_file]
def get_full_audio_data(
    path_dir_audio: Path,
    base_data: List[Dict[str, str]],
    char_set: Set[str],
    replace_set: Optional[Dict[str, str]] = None,
    del_set: Optional[Set[str]] = None,
    file_extension: str = None,
) -> List[FileInfo]:
    """Resolve each tsv record to an audio file and build FileInfo entries.

    Records whose audio file is missing on disk are dropped.  The text is
    normalized with ``norm_text``; its validity flag is not used here.
    """
    collected = []
    for record in tqdm(base_data, total=len(base_data)):
        audio_path = path_dir_audio / record["local_path"]
        if file_extension is not None:
            audio_path = audio_path.with_suffix(file_extension)
        if not audio_path.is_file():
            # No matching clip in the audio directory: skip the record.
            continue
        duration_h = get_size_audio_file(audio_path)
        clean_text, _ = norm_text(
            record["text"], char_set, replace_set=replace_set, del_set=del_set
        )
        collected.append(
            FileInfo(
                size=duration_h,
                path_=audio_path,
                id_=audio_path.stem,
                text=clean_text,
                speaker=record["speaker_id"],
            )
        )
    print(f"{len(collected)} files found out of {len(base_data)}")
    return collected
def convert_audio_data(
    input_list: List[FileInfo], out_dir_audio: Path
) -> List[FileInfo]:
    """Re-encode every input file to mono 16kHz .flac under *out_dir_audio*.

    Returns new FileInfo entries pointing at the converted files; all other
    metadata fields are carried over unchanged.
    """
    out_dir_audio.mkdir(exist_ok=True)
    output = []
    for file_info in tqdm(input_list, total=len(input_list)):
        audio, sr = torchaudio.load(str(file_info.path_))
        # Normalize layout: mono, 16kHz, clamped to [-1, 1].
        audio = to_wav2letterFormat(audio, sr)
        path_out = (out_dir_audio / file_info.path_.name).with_suffix(".flac")
        torchaudio.save(str(path_out), audio, 16000)
        output.append(
            FileInfo(
                size=file_info.size,
                path_=path_out,
                id_=file_info.id_,
                text=file_info.text,
                speaker=file_info.speaker,
            )
        )
    return output
def load_filter(path_filter: Path) -> List[str]:
    """Load the list of file ids to keep: one id per line, stripped."""
    with open(path_filter, "r") as handle:
        return [line.strip() for line in handle]
def filter_data_by_id(input_lst: List[FileInfo], to_filter: List[str]):
    """Keep only the entries of *input_lst* whose id appears in *to_filter*.

    Both lists are sorted in place (``input_lst`` by id), so the returned
    list is ordered by id.
    """
    input_lst.sort(key=lambda item: item.id_)
    to_filter.sort()
    wanted = set(to_filter)
    out = [entry for entry in input_lst if entry.id_ in wanted]
    print(f"{len(out)} files out of {len(to_filter)}")
    return out
def main(args):
    """Build the wav2letter .lst manifest: read tsv, resolve audio, filter, convert."""
    letters = load_letters(Path(args.path_tokens))
    data = get_base_data_from_csv(Path(args.path_tsv))
    audio_data = get_full_audio_data(
        Path(args.path_audio),
        data,
        char_set=set(letters),
        del_set=PUNCTUATION,
        file_extension=args.file_extension,
    )
    # Optionally restrict the output to an explicit list of file ids.
    if args.path_filter is not None:
        filter_ids = load_filter(Path(args.path_filter))
        audio_data = filter_data_by_id(audio_data, filter_ids)
    # Optionally re-encode audio to mono 16kHz flac in a separate directory.
    if args.path_conversion is not None:
        audio_data = convert_audio_data(audio_data, Path(args.path_conversion))
    speakers = get_speakers_list(audio_data)
    print(f"{len(speakers)} speakers found")
    save_lst(audio_data, args.path_output)
if __name__ == "__main__":
    # CLI: build a wav2letter .lst manifest from a Common Voice tsv file.
    parser = argparse.ArgumentParser(
        description="Build the lst input files for common voices datasets"
    )
    parser.add_argument(
        "--path_tsv",
        type=str,
        default="/private/home/mriviere/Common_voices/en/dev.tsv",
        help="Path to the target tsv file",
    )
    parser.add_argument(
        "--path_audio",
        type=str,
        default="/private/home/mriviere/Common_voices/en/clips_16k",
        help="Path to the directory containing the audio data",
    )
    parser.add_argument(
        "--path_output",
        type=str,
        required=True,
        help="Output lst file.",
    )
    parser.add_argument(
        "--path_tokens",
        type=str,
        default="/checkpoint/mriviere/VoxPopuli/segmentation_output/en/en_grapheme.tokens",
        help="Path to the token file",
    )
    parser.add_argument(
        "--path_filter",
        type=str,
        default=None,
        help="If given, path to a file containing the files ids to keep.",
    )
    parser.add_argument(
        "--path_conversion",
        type=str,
        default=None,
        help="If given, path to a directory where the audio should be converted",
    )
    parser.add_argument("--file_extension", type=str, default=".mp3")
    args = parser.parse_args()
    main(args)
| 7,022 | 26.758893 | 91 | py |
wav2letter | wav2letter-main/recipes/sota/2019/lm_analysis/tts_forward.py | # https://github.com/mozilla/TTS/blob/master/notebooks/Benchmark.ipynb - original code which we adapted
import io
import os
import sys
import time
from collections import OrderedDict
import numpy as np
import torch
from localimport import localimport
from matplotlib import pylab as plt
from TTS.layers import *
from TTS.models.tacotron import Tacotron
from TTS.utils.audio import AudioProcessor
from TTS.utils.data import *
from TTS.utils.generic_utils import load_config, setup_model
from TTS.utils.synthesis import synthesis
from TTS.utils.text import text_to_sequence
from TTS.utils.text.symbols import phonemes, symbols
# Make the vendored TTS / WaveRNN checkouts importable.
# NOTE(review): these appends come after the `from TTS...` imports above, so
# they only matter if TTS/WaveRNN are already importable some other way — confirm.
sys.path.append("TTS")
sys.path.append("WaveRNN")
# Pretrained model/config locations (relative to the working directory).
tts_pretrained_model_config = "tts_models/config.json"
wavernn_pretrained_model_config = "wavernn_models/config.json"
wavernn_pretrained_model = "wavernn_models/checkpoint_433000.pth.tar"
tts_pretrained_model = "tts_models/checkpoint_261000.pth.tar"
def tts(model, text, CONFIG, use_cuda, ap, use_gl, speaker_id=None):
    """Synthesize *text* with the Tacotron *model*.

    Returns ``(alignment, mel_postnet_spec, stop_tokens, waveform)``.  When
    *use_gl* is False, the module-level WaveRNN vocoder (``wavernn``) renders
    the waveform from the mel spectrogram; otherwise the waveform produced by
    ``synthesis`` is returned as-is (presumably Griffin-Lim — hence the flag
    name; confirm against the TTS library).  ``speaker_id`` is currently
    unused inside this function.
    """
    t_1 = time.time()
    waveform, alignment, mel_spec, mel_postnet_spec, stop_tokens = synthesis(
        model,
        text,
        CONFIG,
        use_cuda,
        ap,
        truncated=True,
        enable_eos_bos_chars=CONFIG.enable_eos_bos_chars,
    )
    # Tacotron outputs linear spectrograms; convert to mel for the vocoder.
    if CONFIG.model == "Tacotron" and not use_gl:
        mel_postnet_spec = ap.out_linear_to_mel(mel_postnet_spec.T).T
    if not use_gl:
        # Render the waveform with the module-level WaveRNN vocoder.
        waveform = wavernn.generate(
            torch.FloatTensor(mel_postnet_spec.T).unsqueeze(0).cuda(),
            batched=batched_wavernn,
            target=11000,
            overlap=550,
        )
    print(" > Run-time: {}".format(time.time() - t_1))
    return alignment, mel_postnet_spec, stop_tokens, waveform
use_cuda = True
batched_wavernn = True
# initialize TTS
CONFIG = load_config(tts_pretrained_model_config)
print(CONFIG)
# load the model
num_chars = len(phonemes) if CONFIG.use_phonemes else len(symbols)
model = setup_model(num_chars, CONFIG)
# load the audio processor
ap = AudioProcessor(**CONFIG.audio)
# load model state
if use_cuda:
    cp = torch.load(tts_pretrained_model)
else:
    # Map CUDA tensors onto CPU when loading without a GPU.
    cp = torch.load(tts_pretrained_model, map_location=lambda storage, loc: storage)
# load the model
model.load_state_dict(cp["model"])
if use_cuda:
    model.cuda()
model.eval()
print(cp["step"])
# Cap autoregressive decoding length to avoid runaway generation.
model.decoder.max_decoder_steps = 2000
# initialize WaveRNN
VOCODER_CONFIG = load_config(wavernn_pretrained_model_config)
# Import WaveRNN's Model class from its own checkout without polluting sys.path.
with localimport("/content/WaveRNN") as _importer:
    from models.wavernn import Model
# NOTE(review): `bits` is not referenced below in this block — confirm it is needed.
bits = 10
wavernn = Model(
    rnn_dims=512,
    fc_dims=512,
    mode="mold",
    pad=2,
    upsample_factors=VOCODER_CONFIG.upsample_factors,  # set this depending on dataset
    feat_dims=VOCODER_CONFIG.audio["num_mels"],
    compute_dims=128,
    res_out_dims=128,
    res_blocks=10,
    hop_length=ap.hop_length,
    sample_rate=ap.sample_rate,
).cuda()
check = torch.load(wavernn_pretrained_model)
wavernn.load_state_dict(check["model"])
if use_cuda:
    wavernn.cuda()
wavernn.eval()
print(check["step"])
def run_tts(transcription, sample_id, name):
    """Synthesize *transcription* and save the waveform to *name*.

    ``sample_id`` is accepted for interface compatibility but is unused.

    Fix: the original passed ``figures=False`` to the local ``tts`` helper,
    which has no such parameter (the upstream notebook's version did) — that
    raised a TypeError on every call.
    """
    _, _, _, wav = tts(
        model,
        transcription,
        CONFIG,
        use_cuda,
        ap,
        speaker_id=0,
        use_gl=False,
    )
    ap.save_wav(wav, name)
# Script driver: argv[1] = file with one transcription per line,
# argv[2] = output path prefix; writes "<prefix><i>.wav" for each line.
with open(sys.argv[1], "r") as f:
    transcriptions = [line.strip() for line in f]
sample_ids = np.arange(len(transcriptions))
names = [sys.argv[2] + str(sample_id) + ".wav" for sample_id in sample_ids]
for index in range(len(transcriptions)):
    run_tts(transcriptions[index], sample_ids[index], names[index])
| 3,613 | 26.378788 | 103 | py |
wav2letter | wav2letter-main/recipes/sota/2019/rescoring/forward_lm.py | from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import numpy
import torch
from fairseq.data import Dictionary
from fairseq.models.fconv_lm import FConvLanguageModel
from fairseq.models.transformer_lm import TransformerLanguageModel
def load_lm(lm_path, model_type, dict_path):
    """Load a fairseq LM checkpoint and return its decoder on GPU, in eval mode.

    Args:
        lm_path: path to the checkpoint file.
        model_type: "convlm" or "transformer".
        dict_path: path to the fairseq dict file; its directory is passed to
            ``from_pretrained``.

    Raises:
        Exception: if *model_type* is not one of the supported values.
    """
    path, checkpoint = os.path.split(lm_path)
    if model_type == "convlm":
        model_handle = FConvLanguageModel.from_pretrained(
            path, checkpoint, os.path.split(dict_path)[0]
        )
    elif model_type == "transformer":
        model_handle = TransformerLanguageModel.from_pretrained(
            path, checkpoint, os.path.split(dict_path)[0]
        )
    else:
        raise Exception(
            "Unsupported language model type: use 'convlm' or 'transformer' models"
        )
    # Only the decoder is needed for scoring.
    model = model_handle.models[0].decoder.cuda()
    model.eval()
    print(model)
    return model
def predict_batch(sentences, model, fairseq_dict, max_len):
    """Score a batch of tokenized sentences with the language model.

    Each sentence is prefixed with EOS (acting as BOS) and right-padded with
    EOS up to *max_len*.  A sentence's score is the sum of its tokens'
    log-probabilities plus the log-probability of the closing EOS.

    Args:
        sentences: list of token lists; each must have length <= max_len.
        model: fairseq decoder as returned by ``load_lm``.
        fairseq_dict: fairseq Dictionary used to index tokens.
        max_len: padded length (excluding the leading EOS).

    Returns:
        Tuple ``(scores, total_loss, nwords)`` — per-sentence log-prob sums
        (despite the ``ppls`` name, not perplexities), their sum, and the
        token count including one EOS per sentence.
    """
    encoded_input = []
    padded_input = []
    ppls = []
    total_loss = 0.0
    nwords = 0
    for sentence in sentences:
        encoded_input.append([fairseq_dict.index(token) for token in sentence])
        assert (
            len(encoded_input[-1]) <= max_len
        ), "Error in the input length, it should be less than max_len {}".format(
            max_len
        )
        if len(encoded_input[-1]) < max_len:
            # Leading EOS as BOS, then right-pad with EOS to max_len.
            padded_input.append(
                [fairseq_dict.eos()]
                + encoded_input[-1]
                + [fairseq_dict.eos()] * (max_len - len(encoded_input[-1]))
            )
        else:
            padded_input.append([fairseq_dict.eos()] + encoded_input[-1])
    x = torch.LongTensor(padded_input).cuda()
    with torch.no_grad():
        y = model.forward(x)[0]
        # Adaptive-softmax models need their own helper to produce
        # normalized log-probabilities.
        if model.adaptive_softmax is not None:
            logprobs = (
                model.adaptive_softmax.get_log_prob(y, None).detach().cpu().numpy()
            )
        else:
            logprobs = torch.nn.functional.log_softmax(y, 2).detach().cpu().numpy()
    for index, input_i in enumerate(encoded_input):
        # Sum log-probs of the true tokens, then add the closing EOS.
        loss = numpy.sum(logprobs[index, numpy.arange(len(input_i)), input_i])
        loss += logprobs[index, len(input_i), fairseq_dict.eos()]
        ppls.append(loss)
        total_loss += loss
        nwords += len(input_i) + 1
    return ppls, total_loss, nwords
if __name__ == "__main__":
    # CLI: score every transcription in a decoder-output file and write one
    # LM score per line; sentences are batched under a max-token budget.
    parser = argparse.ArgumentParser(
        description="Running forward pass for language model"
    )
    parser.add_argument("--model", required=True, type=str, help="path to the model")
    parser.add_argument(
        "--dict", required=True, type=str, help="path to the dict of the model"
    )
    parser.add_argument(
        "--max-tokens",
        required=True,
        type=int,
        default=1024,
        help="max tokens in the batch",
    )
    parser.add_argument(
        "--text", required=True, type=str, help="path to text to be evaluated"
    )
    parser.add_argument(
        "--out", type=str, default="out.txt", help="path to text to be saved"
    )
    # NOTE(review): argparse `type=bool` treats any non-empty string as True
    # (e.g. `--skip False` is truthy) — confirm intended usage.
    parser.add_argument(
        "--skip",
        type=bool,
        default=False,
        help="skip <sampleID> <decoder score> <AM score> tokens",
    )
    parser.add_argument(
        "--model-type",
        required=True,
        type=str,
        help="Language model type, supported values 'convlm' and 'transformer'",
    )
    args = parser.parse_args()
    fairseq_dict = Dictionary.load(args.dict)
    model = load_lm(args.model, args.model_type, args.dict)
    total_loss = 0.0
    nwords = 0.0
    batch = []
    original_lines = []
    max_len = 0
    with open(args.text, "r") as ftext, open(args.out, "w") as fout:
        for line in ftext:
            # id | decoder score | am score | lm score | wer | transcription
            line_parsed = line.rstrip().split("|")
            sentence = line_parsed[-1].strip().split(" ")
            # Flush the batch when adding this sentence would exceed the
            # padded-token budget (batch size x max padded length).
            if (len(batch) + 1) * numpy.maximum(
                max_len, len(sentence)
            ) > args.max_tokens:
                if len(batch) == 0:
                    # Single over-budget sentence: keep it alone in the batch.
                    if args.skip:
                        original_lines.append(line_parsed[0].strip().split(" ")[0])
                    batch.append(sentence)
                    max_len = len(sentence)
                    continue
                ppls, loss_batch, nwords_batch = predict_batch(
                    batch, model, fairseq_dict, max_len
                )
                total_loss += loss_batch
                nwords += nwords_batch
                for index in range(len(batch)):
                    # With --skip, prefix each score with the sample id.
                    if args.skip:
                        fout.write(original_lines[index] + " {}\n".format(ppls[index]))
                    else:
                        fout.write("{}\n".format(ppls[index]))
                batch = [sentence]
                if args.skip:
                    original_lines = [line_parsed[0].strip().split(" ")[0]]
                max_len = len(sentence)
            else:
                batch.append(sentence)
                if args.skip:
                    original_lines.append(line_parsed[0].strip().split(" ")[0])
                max_len = numpy.maximum(max_len, len(sentence))
        # Flush the final partial batch.
        if len(batch) > 0:
            ppls, loss_batch, nwords_batch = predict_batch(
                batch, model, fairseq_dict, max_len
            )
            total_loss += loss_batch
            nwords += nwords_batch
            for index in range(len(batch)):
                if args.skip:
                    fout.write(original_lines[index] + " {}\n".format(ppls[index]))
                else:
                    fout.write("{}\n".format(ppls[index]))
    print("Total PPL", numpy.exp(-total_loss / nwords))
| 5,773 | 34.207317 | 87 | py |
ERD | ERD-main/setup.py | #!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
    """Return the text of README.md, used as the package long description."""
    with open('README.md', encoding='utf-8') as f:
        return f.read()
# Path of the module that defines the package's __version__.
version_file = 'mmdet/version.py'
def get_version():
    """Execute the version file and return its ``__version__`` value.

    NOTE: relies on CPython's behavior where ``exec`` without explicit
    namespaces writes into the frame locals visible via ``locals()``.
    """
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=None):
    """Build a CUDAExtension/CppExtension for ``module.name``.

    CUDA support is enabled when a GPU is available or ``FORCE_CUDA=1``.

    Fixes: the original used a mutable default argument (``sources_cuda=[]``)
    and extended the caller's ``sources`` list in place via ``+=``.

    Args:
        name (str): extension name, appended to ``module``.
        module (str): dotted package path; also the source file directory.
        sources (list[str]): C++ sources relative to the module directory.
        sources_cuda (list[str] | None): extra sources compiled only when
            CUDA is enabled.

    Returns:
        setuptools.Extension: the configured extension.
    """
    if sources_cuda is None:
        sources_cuda = []
    define_macros = []
    extra_compile_args = {'cxx': []}
    # Work on a copy so the caller's source list is never mutated.
    sources = list(sources)

    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        define_macros += [('WITH_CUDA', None)]
        extension = CUDAExtension
        extra_compile_args['nvcc'] = [
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ]
        sources += sources_cuda
    else:
        print(f'Compiling {name} without CUDA')
        extension = CppExtension

    return extension(
        name=f'{module}.{name}',
        sources=[os.path.join(*module.split('.'), p) for p in sources],
        define_macros=define_macros,
        extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.
    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs
    Returns:
        List[str]: list of requirements items
    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import re
    import sys
    from os.path import exists
    require_fpath = fname
    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: the package name follows "#egg=".
                info['package'] = line.split('#egg=')[1]
            elif '@git+' in line:
                # Direct git URL: keep the whole line as the package spec.
                info['package'] = line
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info
    def parse_require_file(fpath):
        """Yield parsed entries for every non-comment line of *fpath*."""
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info
    def gen_packages_items():
        """Reassemble each parsed entry into a requirement string."""
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item
    packages = list(gen_packages_items())
    return packages
def add_mim_extension():
    """Add extra files that are required to support MIM into the package.
    These files will be added by creating a symlink to the originals if the
    package is installed in `editable` mode (e.g. pip install -e .), or by
    copying from the originals otherwise.
    """
    # parse installment mode
    if 'develop' in sys.argv:
        # installed by `pip install -e .`
        if platform.system() == 'Windows':
            # set `copy` mode here since symlink fails on Windows.
            mode = 'copy'
        else:
            mode = 'symlink'
    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
        # installed by `pip install .`
        # or create source distribution by `python setup.py sdist`
        mode = 'copy'
    else:
        # Any other setup.py command: nothing to do.
        return
    filenames = ['tools', 'configs', 'demo', 'model-index.yml']
    repo_path = osp.dirname(__file__)
    mim_path = osp.join(repo_path, 'mmdet', '.mim')
    os.makedirs(mim_path, exist_ok=True)
    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)
            # Remove any stale file/link/directory at the destination first.
            if osp.isfile(tar_path) or osp.islink(tar_path):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)
            if mode == 'symlink':
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                os.symlink(src_relpath, tar_path)
            elif mode == 'copy':
                if osp.isfile(src_path):
                    shutil.copyfile(src_path, tar_path)
                elif osp.isdir(src_path):
                    shutil.copytree(src_path, tar_path)
                else:
                    warnings.warn(f'Cannot copy file {src_path}.')
            else:
                raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
    # Link/copy MIM support files into the package, then run setuptools.
    add_mim_extension()
    setup(
        name='mmdet',
        version=get_version(),
        description='OpenMMLab Detection Toolbox and Benchmark',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='MMDetection Contributors',
        author_email='openmmlab@gmail.com',
        keywords='computer vision, object detection',
        url='https://github.com/open-mmlab/mmdetection',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        include_package_data=True,
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
        ],
        license='Apache License 2.0',
        install_requires=parse_requirements('requirements/runtime.txt'),
        extras_require={
            'all': parse_requirements('requirements.txt'),
            'tests': parse_requirements('requirements/tests.txt'),
            'build': parse_requirements('requirements/build.txt'),
            'optional': parse_requirements('requirements/optional.txt'),
            'mim': parse_requirements('requirements/mminstall.txt'),
        },
        ext_modules=[],
        cmdclass={'build_ext': BuildExtension},
        zip_safe=False)
| 7,887 | 34.692308 | 125 | py |
ERD | ERD-main/tools/test.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import warnings
from copy import deepcopy
from mmengine import ConfigDict
from mmengine.config import Config, DictAction
from mmengine.runner import Runner
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmdet.evaluation import DumpDetResults
from mmdet.registry import RUNNERS
from mmdet.utils import setup_cache_size_limit_of_dynamo
# TODO: support fuse_conv_bn and format_only
def parse_args():
    """Parse CLI arguments for testing/evaluating a detector."""
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--work-dir',
        help='the directory to save the file containing evaluation metrics')
    parser.add_argument(
        '--out',
        type=str,
        help='dump predictions to a pickle file for offline evaluation')
    parser.add_argument(
        '--show', action='store_true', help='show prediction results')
    parser.add_argument(
        '--show-dir',
        help='directory where painted images will be saved. '
        'If specified, it will be automatically saved '
        'to the work_dir/timestamp/show_dir')
    parser.add_argument(
        '--wait-time', type=float, default=2, help='the interval of show (s)')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--tta', action='store_true')
    # When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
    # will pass the `--local-rank` parameter to `tools/train.py` instead
    # of `--local_rank`.
    parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
    args = parser.parse_args()
    # Expose the launcher-provided rank to downstream distributed utilities.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Entry point: build a Runner from the config and run testing."""
    args = parse_args()
    # Reduce the number of repeated compilations and improve
    # testing speed.
    setup_cache_size_limit_of_dynamo()
    # load config
    cfg = Config.fromfile(args.config)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    cfg.load_from = args.checkpoint
    if args.show or args.show_dir:
        cfg = trigger_visualization_hook(cfg, args)
    if args.tta:
        # Fill in default TTA model/pipeline settings when the config does
        # not provide them (flip-based test-time augmentation).
        if 'tta_model' not in cfg:
            warnings.warn('Cannot find ``tta_model`` in config, '
                          'we will set it as default.')
            cfg.tta_model = dict(
                type='DetTTAModel',
                tta_cfg=dict(
                    nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
        if 'tta_pipeline' not in cfg:
            warnings.warn('Cannot find ``tta_pipeline`` in config, '
                          'we will set it as default.')
            test_data_cfg = cfg.test_dataloader.dataset
            # Unwrap nested dataset wrappers to reach the innermost pipeline.
            while 'dataset' in test_data_cfg:
                test_data_cfg = test_data_cfg['dataset']
            cfg.tta_pipeline = deepcopy(test_data_cfg.pipeline)
            flip_tta = dict(
                type='TestTimeAug',
                transforms=[
                    [
                        dict(type='RandomFlip', prob=1.),
                        dict(type='RandomFlip', prob=0.)
                    ],
                    [
                        dict(
                            type='PackDetInputs',
                            meta_keys=('img_id', 'img_path', 'ori_shape',
                                       'img_shape', 'scale_factor', 'flip',
                                       'flip_direction'))
                    ],
                ])
            cfg.tta_pipeline[-1] = flip_tta
        cfg.model = ConfigDict(**cfg.tta_model, module=cfg.model)
        cfg.test_dataloader.dataset.pipeline = cfg.tta_pipeline
    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)
    # add `DumpResults` dummy metric
    if args.out is not None:
        assert args.out.endswith(('.pkl', '.pickle')), \
            'The dump file must be a pkl file.'
        runner.test_evaluator.metrics.append(
            DumpDetResults(out_file_path=args.out))
    # start testing
    runner.test()
if __name__ == '__main__':
    main()
| 5,594 | 36.3 | 79 | py |
ERD | ERD-main/tools/train.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import setup_cache_size_limit_of_dynamo
def parse_args():
    """Parse CLI arguments for training a detector."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--amp',
        action='store_true',
        default=False,
        help='enable automatic-mixed-precision training')
    parser.add_argument(
        '--auto-scale-lr',
        action='store_true',
        help='enable automatically scaling LR.')
    parser.add_argument(
        '--resume',
        nargs='?',
        type=str,
        const='auto',
        help='If specify checkpoint path, resume from it, while if not '
        'specify, try to auto resume from the latest checkpoint '
        'in the work directory.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    # When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
    # will pass the `--local-rank` parameter to `tools/train.py` instead
    # of `--local_rank`.
    parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
    args = parser.parse_args()
    # Expose the launcher-provided rank to downstream distributed utilities.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Entry point: build a Runner from the config and start training."""
    args = parse_args()
    # Reduce the number of repeated compilations and improve
    # training speed.
    setup_cache_size_limit_of_dynamo()
    # load config
    cfg = Config.fromfile(args.config)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    # enable automatic-mixed-precision training
    if args.amp is True:
        optim_wrapper = cfg.optim_wrapper.type
        if optim_wrapper == 'AmpOptimWrapper':
            print_log(
                'AMP training is already enabled in your config.',
                logger='current',
                level=logging.WARNING)
        else:
            # AMP is applied by swapping the optimizer wrapper type.
            assert optim_wrapper == 'OptimWrapper', (
                '`--amp` is only supported when the optimizer wrapper type is '
                f'`OptimWrapper` but got {optim_wrapper}.')
            cfg.optim_wrapper.type = 'AmpOptimWrapper'
            cfg.optim_wrapper.loss_scale = 'dynamic'
    # enable automatically scaling LR
    if args.auto_scale_lr:
        if 'auto_scale_lr' in cfg and \
                'enable' in cfg.auto_scale_lr and \
                'base_batch_size' in cfg.auto_scale_lr:
            cfg.auto_scale_lr.enable = True
        else:
            raise RuntimeError('Can not find "auto_scale_lr" or '
                               '"auto_scale_lr.enable" or '
                               '"auto_scale_lr.base_batch_size" in your'
                               ' configuration file.')
    # resume is determined in this priority: resume from > auto_resume
    if args.resume == 'auto':
        cfg.resume = True
        cfg.load_from = None
    elif args.resume is not None:
        cfg.resume = True
        cfg.load_from = args.resume
    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)
    # start training
    runner.train()
if __name__ == '__main__':
    main()
| 4,770 | 34.604478 | 79 | py |
ERD | ERD-main/tools/deployment/test_torchserver.py | import os
from argparse import ArgumentParser
import mmcv
import requests
import torch
from mmengine.structures import InstanceData
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
def parse_args():
    """Parse CLI arguments for comparing local inference with TorchServe."""
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('model_name', help='The model name in the server')
    parser.add_argument(
        '--inference-addr',
        default='127.0.0.1:8080',
        help='Address and port of the inference server')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.5, help='bbox score threshold')
    parser.add_argument(
        '--work-dir',
        type=str,
        default=None,
        help='output directory to save drawn results.')
    args = parser.parse_args()
    return args
def align_ts_output(inputs, metainfo, device):
    """Convert TorchServe JSON predictions into a DetDataSample.

    Fix: the original used ``enumerate`` but never used the index; the
    intermediate result variable was also redundant.

    Args:
        inputs (list[dict]): one dict per detection, with 'bbox',
            'class_label' and 'score' keys.
        metainfo (dict): image meta information attached to the instances.
        device (str): device on which the result tensors are created.

    Returns:
        DetDataSample: sample whose ``pred_instances`` hold the detections.
    """
    bboxes = []
    labels = []
    scores = []
    for pred in inputs:
        bboxes.append(pred['bbox'])
        labels.append(pred['class_label'])
        scores.append(pred['score'])
    pred_instances = InstanceData(metainfo=metainfo)
    pred_instances.bboxes = torch.tensor(
        bboxes, dtype=torch.float32, device=device)
    pred_instances.labels = torch.tensor(
        labels, dtype=torch.int64, device=device)
    pred_instances.scores = torch.tensor(
        scores, dtype=torch.float32, device=device)
    return DetDataSample(pred_instances=pred_instances)
def main(args):
    """Compare TorchServe predictions with local PyTorch inference.

    Runs the same image through the local model and the deployed
    TorchServe endpoint, visualizes both results, and asserts that the
    two sets of predictions match.
    """
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    # test a single image
    pytorch_results = inference_detector(model, args.img)
    # drop low-confidence detections so both sides compare the same boxes
    keep = pytorch_results.pred_instances.scores >= args.score_thr
    pytorch_results.pred_instances = pytorch_results.pred_instances[keep]
    # init visualizer
    visualizer = VISUALIZERS.build(model.cfg.visualizer)
    # the dataset_meta is loaded from the checkpoint and
    # then pass to the model in init_detector
    visualizer.dataset_meta = model.dataset_meta
    # show the results
    img = mmcv.imread(args.img)
    img = mmcv.imconvert(img, 'bgr', 'rgb')
    pt_out_file = None
    ts_out_file = None
    if args.work_dir is not None:
        os.makedirs(args.work_dir, exist_ok=True)
        pt_out_file = os.path.join(args.work_dir, 'pytorch_result.png')
        ts_out_file = os.path.join(args.work_dir, 'torchserve_result.png')
    visualizer.add_datasample(
        'pytorch_result',
        img.copy(),
        data_sample=pytorch_results,
        draw_gt=False,
        out_file=pt_out_file,
        show=True,
        wait_time=0)
    # query the TorchServe endpoint with the raw image bytes
    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    with open(args.img, 'rb') as image:
        response = requests.post(url, image)
    # reuse the local metainfo so both DetDataSamples are comparable
    metainfo = pytorch_results.pred_instances.metainfo
    ts_results = align_ts_output(response.json(), metainfo, args.device)
    visualizer.add_datasample(
        'torchserve_result',
        img,
        data_sample=ts_results,
        draw_gt=False,
        out_file=ts_out_file,
        show=True,
        wait_time=0)
    # the deployed model must reproduce the local predictions exactly
    assert torch.allclose(pytorch_results.pred_instances.bboxes,
                          ts_results.pred_instances.bboxes)
    assert torch.allclose(pytorch_results.pred_instances.labels,
                          ts_results.pred_instances.labels)
    assert torch.allclose(pytorch_results.pred_instances.scores,
                          ts_results.pred_instances.scores)
if __name__ == '__main__':
args = parse_args()
main(args)
| 3,906 | 33.27193 | 77 | py |
ERD | ERD-main/tools/deployment/mmdet2torchserve.py | # Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
    config_file: str,
    checkpoint_file: str,
    output_folder: str,
    model_name: str,
    model_version: str = '1.0',
    force: bool = False,
):
    """Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.

    Args:
        config_file:
            In MMDetection config format.
            The contents vary for each task repository.
        checkpoint_file:
            In MMDetection checkpoint format.
            The contents vary for each task repository.
        output_folder:
            Folder where `{model_name}.mar` will be created.
            The file created will be in TorchServe archive format.
        model_name:
            If not None, used for naming the `{model_name}.mar` file
            that will be created under `output_folder`.
            If None, `{Path(checkpoint_file).stem}` will be used.
        model_version:
            Model's version.
        force:
            If True, if there is an existing `{model_name}.mar`
            file under `output_folder` it will be overwritten.
    """
    mkdir_or_exist(output_folder)
    config = Config.fromfile(config_file)
    with TemporaryDirectory() as tmpdir:
        # TorchServe ships the config inside the archive, so dump a
        # self-contained copy into the temporary directory first.
        config.dump(f'{tmpdir}/config.py')
        # Mimic the CLI namespace expected by torch-model-archiver.
        args = Namespace(
            **{
                'model_file': f'{tmpdir}/config.py',
                'serialized_file': checkpoint_file,
                'handler': f'{Path(__file__).parent}/mmdet_handler.py',
                'model_name': model_name or Path(checkpoint_file).stem,
                'version': model_version,
                'export_path': output_folder,
                'force': force,
                'requirements_file': None,
                'extra_files': None,
                'runtime': 'python',
                'archive_format': 'default'
            })
        manifest = ModelExportUtils.generate_manifest_json(args)
        package_model(args, manifest)
def parse_args():
    """Parse CLI options for the MMDetection -> TorchServe converter."""
    arg_parser = ArgumentParser(
        description='Convert MMDetection models to TorchServe `.mar` format.')
    arg_parser.add_argument('config', type=str, help='config file path')
    arg_parser.add_argument('checkpoint', type=str, help='checkpoint file path')
    arg_parser.add_argument(
        '--output-folder',
        type=str,
        required=True,
        help='Folder where `{model_name}.mar` will be created.')
    arg_parser.add_argument(
        '--model-name',
        type=str,
        default=None,
        help='If not None, used for naming the `{model_name}.mar`'
        'file that will be created under `output_folder`.'
        'If None, `{Path(checkpoint_file).stem}` will be used.')
    arg_parser.add_argument(
        '--model-version',
        type=str,
        default='1.0',
        help='Number used for versioning.')
    arg_parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='overwrite the existing `{model_name}.mar`')
    return arg_parser.parse_args()
if __name__ == '__main__':
args = parse_args()
if package_model is None:
raise ImportError('`torch-model-archiver` is required.'
'Try: pip install torch-model-archiver')
mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
| 3,748 | 32.473214 | 78 | py |
ERD | ERD-main/tools/deployment/mmdet_handler.py | # Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import numpy as np
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
    """TorchServe handler wrapping an MMDetection detector."""

    # minimum score for a detection to be returned to the client
    threshold = 0.5

    def initialize(self, context):
        """Load the config and checkpoint shipped inside the `.mar` archive."""
        properties = context.system_properties
        self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(self.map_location + ':' +
                                   str(properties.get('gpu_id')) if torch.cuda.
                                   is_available() else self.map_location)
        self.manifest = context.manifest
        model_dir = properties.get('model_dir')
        serialized_file = self.manifest['model']['serializedFile']
        checkpoint = os.path.join(model_dir, serialized_file)
        # the converter always dumps the config as `config.py` in the archive
        self.config_file = os.path.join(model_dir, 'config.py')
        self.model = init_detector(self.config_file, checkpoint, self.device)
        self.initialized = True

    def preprocess(self, data):
        """Decode request payloads (raw bytes or base64 strings) into images."""
        images = []
        for row in data:
            image = row.get('data') or row.get('body')
            if isinstance(image, str):
                # payload may arrive base64-encoded (e.g. in JSON requests)
                image = base64.b64decode(image)
            image = mmcv.imfrombytes(image)
            images.append(image)
        return images

    def inference(self, data, *args, **kwargs):
        """Run the detector on the list of decoded images."""
        results = inference_detector(self.model, data)
        return results

    def postprocess(self, data):
        """Serialize detections above ``threshold`` as JSON-friendly dicts."""
        # Format output following the example ObjectDetectionHandler format
        output = []
        for data_sample in data:
            pred_instances = data_sample.pred_instances
            bboxes = pred_instances.bboxes.cpu().numpy().astype(
                np.float32).tolist()
            labels = pred_instances.labels.cpu().numpy().astype(
                np.int32).tolist()
            scores = pred_instances.scores.cpu().numpy().astype(
                np.float32).tolist()
            preds = []
            for idx in range(len(labels)):
                cls_score, bbox, cls_label = scores[idx], bboxes[idx], labels[
                    idx]
                if cls_score >= self.threshold:
                    class_name = self.model.dataset_meta['classes'][cls_label]
                    result = dict(
                        class_label=cls_label,
                        class_name=class_name,
                        bbox=bbox,
                        score=cls_score)
                    preds.append(result)
            output.append(preds)
        return output
| 2,620 | 34.90411 | 79 | py |
ERD | ERD-main/tools/misc/download_dataset.py | import argparse
import tarfile
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tarfile import TarFile
from zipfile import ZipFile
import torch
from mmengine.utils.path import mkdir_or_exist
def parse_args():
    """Parse CLI options for the dataset download helper."""
    arg_parser = argparse.ArgumentParser(
        description='Download datasets for training')
    arg_parser.add_argument(
        '--dataset-name', type=str, help='dataset name', default='coco2017')
    arg_parser.add_argument(
        '--save-dir',
        type=str,
        help='the dir to save dataset',
        default='data/coco')
    arg_parser.add_argument(
        '--unzip',
        action='store_true',
        help='whether unzip dataset or not, zipped files will be saved')
    arg_parser.add_argument(
        '--delete',
        action='store_true',
        help='delete the download zipped files')
    arg_parser.add_argument(
        '--threads', type=int, help='number of threading', default=4)
    return arg_parser.parse_args()
def download(url, dir, unzip=True, delete=False, threads=1):
    """Download one or more archives and optionally extract them.

    Args:
        url (str | Path | Iterable): URL(s) or local file path(s). A local
            path is moved into ``dir`` instead of being downloaded.
        dir (str | Path): Destination directory.
        unzip (bool): Extract ``.zip``/``.tar`` archives after download.
        delete (bool): Remove the archive once extracted.
        threads (int): Number of download worker threads.
    """

    def download_one(url, dir):
        f = dir / Path(url).name
        if Path(url).is_file():
            Path(url).rename(f)
        elif not f.exists():
            print(f'Downloading {url} to {f}')
            torch.hub.download_url_to_file(url, f, progress=True)
        if unzip and f.suffix in ('.zip', '.tar'):
            print(f'Unzipping {f.name}')
            # Use context managers so the archive handles are closed even if
            # extraction fails (the original leaked the open file handles).
            if f.suffix == '.zip':
                with ZipFile(f) as zip_file:
                    zip_file.extractall(path=dir)
            elif f.suffix == '.tar':
                with TarFile(f) as tar_file:
                    tar_file.extractall(path=dir)
            if delete:
                f.unlink()
                print(f'Delete {f}')

    dir = Path(dir)
    if threads > 1:
        pool = ThreadPool(threads)
        pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))
        pool.close()
        pool.join()
    else:
        for u in [url] if isinstance(url, (str, Path)) else url:
            download_one(u, dir)
def download_objects365v2(url, dir, unzip=True, delete=False, threads=1):
    """Download the Objects365-V2 dataset (annotations and image patches).

    Args:
        url (list[str]): The two annotation URLs plus the train/val image
            roots; image roots are expanded into per-patch archive URLs.
        dir (str | Path): Destination directory; ``train``/``val``
            sub-directories are created as needed.
        unzip (bool): Extract ``.tar.gz`` archives after download.
        delete (bool): Remove each archive once extracted.
        threads (int): Number of download worker threads.
    """

    def download_single(url, dir):
        if 'train' in url:
            saving_dir = dir / Path('train_zip')
            mkdir_or_exist(saving_dir)
            f = saving_dir / Path(url).name
            unzip_dir = dir / Path('train')
            mkdir_or_exist(unzip_dir)
        elif 'val' in url:
            saving_dir = dir / Path('val')
            mkdir_or_exist(saving_dir)
            f = saving_dir / Path(url).name
            unzip_dir = dir / Path('val')
            mkdir_or_exist(unzip_dir)
        else:
            raise NotImplementedError
        if Path(url).is_file():
            Path(url).rename(f)
        elif not f.exists():
            print(f'Downloading {url} to {f}')
            torch.hub.download_url_to_file(url, f, progress=True)
        if unzip and str(f).endswith('.tar.gz'):
            print(f'Unzipping {f.name}')
            # Close the archive deterministically (the original left the
            # tarfile handle open after extraction).
            with tarfile.open(f) as tar:
                tar.extractall(path=unzip_dir)
            if delete:
                f.unlink()
                print(f'Delete {f}')

    # expand the image root URLs into per-patch archive URLs
    full_url = []
    for _url in url:
        if 'zhiyuan_objv2_train.tar.gz' in _url or \
                'zhiyuan_objv2_val.json' in _url:
            full_url.append(_url)
        elif 'train' in _url:
            for i in range(51):
                full_url.append(f'{_url}patch{i}.tar.gz')
        elif 'val/images/v1' in _url:
            for i in range(16):
                full_url.append(f'{_url}patch{i}.tar.gz')
        elif 'val/images/v2' in _url:
            for i in range(16, 44):
                full_url.append(f'{_url}patch{i}.tar.gz')
        else:
            raise NotImplementedError

    dir = Path(dir)
    if threads > 1:
        pool = ThreadPool(threads)
        pool.imap(lambda x: download_single(*x), zip(full_url, repeat(dir)))
        pool.close()
        pool.join()
    else:
        for u in full_url:
            download_single(u, dir)
def main():
    """Entry point: look up the download URLs and dispatch the download."""
    args = parse_args()
    path = Path(args.save_dir)
    if not path.exists():
        path.mkdir(parents=True, exist_ok=True)
    # mapping from dataset name to the list of URLs (or URL roots) to fetch
    data2url = dict(
        # TODO: Support for downloading Panoptic Segmentation of COCO
        coco2017=[
            'http://images.cocodataset.org/zips/train2017.zip',
            'http://images.cocodataset.org/zips/val2017.zip',
            'http://images.cocodataset.org/zips/test2017.zip',
            'http://images.cocodataset.org/zips/unlabeled2017.zip',
            'http://images.cocodataset.org/annotations/annotations_trainval2017.zip',  # noqa
            'http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip',  # noqa
            'http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip',  # noqa
            'http://images.cocodataset.org/annotations/image_info_test2017.zip',  # noqa
            'http://images.cocodataset.org/annotations/image_info_unlabeled2017.zip',  # noqa
        ],
        lvis=[
            'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip',  # noqa
            'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip',  # noqa
        ],
        voc2007=[
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',  # noqa
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',  # noqa
            'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar',  # noqa
        ],
        # Note: There is no download link for Objects365-V1 right now. If you
        # would like to download Objects365-V1, please visit
        # http://www.objects365.org/ to concat the author.
        objects365v2=[
            # training annotations
            'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/zhiyuan_objv2_train.tar.gz',  # noqa
            # validation annotations
            'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/val/zhiyuan_objv2_val.json',  # noqa
            # training url root
            'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/',  # noqa
            # validation url root_1
            'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/val/images/v1/',  # noqa
            # validation url root_2
            'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/val/images/v2/'  # noqa
        ])
    url = data2url.get(args.dataset_name, None)
    if url is None:
        print('Only support COCO, VOC, LVIS, and Objects365v2 now!')
        return
    # Objects365v2 needs URL expansion and per-split directories
    if args.dataset_name == 'objects365v2':
        download_objects365v2(
            url,
            dir=path,
            unzip=args.unzip,
            delete=args.delete,
            threads=args.threads)
    else:
        download(
            url,
            dir=path,
            unzip=args.unzip,
            delete=args.delete,
            threads=args.threads)
if __name__ == '__main__':
main()
| 7,177 | 35.810256 | 144 | py |
ERD | ERD-main/tools/model_converters/selfsup2mmdet.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def moco_convert(src, dst):
    """Convert keys in pycls pretrained moco models to mmdet style.

    Only the query-encoder (``module.encoder_q.``) weights are kept; the
    prefix is stripped from every retained key.
    """
    # load caffe model
    moco_model = torch.load(src)
    blobs = moco_model['state_dict']
    # convert to pytorch style
    prefix = 'module.encoder_q.'
    state_dict = OrderedDict()
    for old_key, value in blobs.items():
        if old_key.startswith(prefix):
            new_key = old_key.replace(prefix, '')
            state_dict[new_key] = value
            print(old_key, '->', new_key)
    # save checkpoint
    torch.save(dict(state_dict=state_dict), dst)
def main():
    """Entry point: convert a self-supervised checkpoint to mmdet style.

    MoCo checkpoints need key renaming; SwAV checkpoints are already
    compatible and only a notice is printed.
    """
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    # Fix: the help text was a copy-paste of 'save path'; it actually
    # selects the self-supervised method of the source checkpoint.
    parser.add_argument(
        '--selfsup',
        type=str,
        choices=['moco', 'swav'],
        help='self-supervised method of the source checkpoint')
    args = parser.parse_args()
    if args.selfsup == 'moco':
        moco_convert(args.src, args.dst)
    elif args.selfsup == 'swav':
        print('SWAV does not need to convert the keys')
if __name__ == '__main__':
main()
| 1,243 | 27.930233 | 74 | py |
ERD | ERD-main/tools/model_converters/publish_model.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
from mmengine.logging import print_log
def parse_args():
    """Parse CLI arguments for checkpoint publishing."""
    arg_parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    arg_parser.add_argument('in_file', help='input checkpoint filename')
    arg_parser.add_argument('out_file', help='output checkpoint filename')
    arg_parser.add_argument(
        '--save-keys',
        nargs='+',
        type=str,
        default=['meta', 'state_dict'],
        help='keys to save in the published checkpoint')
    return arg_parser.parse_args()
def process_checkpoint(in_file, out_file, save_keys=['meta', 'state_dict']):
    """Strip a training checkpoint down to publishable content.

    Keeps only ``save_keys`` (by default ``meta`` and ``state_dict``),
    saves the result, then renames it to ``<out_file stem>-<sha256[:8]>.pth``.

    Args:
        in_file (str): Path of the training checkpoint.
        out_file (str): Path for the published checkpoint; an 8-char sha
            suffix is appended to its stem.
        save_keys (list[str]): Top-level checkpoint keys to keep.
    """
    import hashlib
    import os

    checkpoint = torch.load(in_file, map_location='cpu')
    # only keep `meta` and `state_dict` for smaller file size
    ckpt_keys = list(checkpoint.keys())
    for k in ckpt_keys:
        if k not in save_keys:
            print_log(
                f'Key `{k}` will be removed because it is not in '
                f'save_keys. If you want to keep it, '
                f'please set --save-keys.',
                logger='current')
            checkpoint.pop(k, None)
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    if torch.__version__ >= '1.6':
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    # Compute the digest with hashlib instead of shelling out to
    # `sha256sum`, which does not exist on macOS/Windows.
    digest = hashlib.sha256()
    with open(out_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            digest.update(chunk)
    sha = digest.hexdigest()
    if out_file.endswith('.pth'):
        out_file_name = out_file[:-4]
    else:
        out_file_name = out_file
    final_file = out_file_name + f'-{sha[:8]}.pth'
    # os.replace is synchronous and portable, unlike `mv` via an
    # un-waited subprocess.Popen.
    os.replace(out_file, final_file)
    print_log(
        f'The published model is saved at {final_file}.', logger='current')
def main():
    """Entry point: strip a checkpoint and publish it with a sha suffix."""
    args = parse_args()
    process_checkpoint(args.in_file, args.out_file, args.save_keys)
if __name__ == '__main__':
main()
| 1,966 | 30.725806 | 78 | py |
ERD | ERD-main/tools/model_converters/regnet2mmdet.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def convert_stem(model_key, model_weight, state_dict, converted_names):
    """Map a pycls stem parameter onto torchvision-style stem names."""
    torch_key = model_key.replace('stem.conv', 'conv1').replace('stem.bn', 'bn1')
    state_dict[torch_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {torch_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
    """Map the pycls classifier head parameters onto ``fc``."""
    torch_key = model_key.replace('head.fc', 'fc')
    state_dict[torch_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {torch_key}')
def convert_reslayer(model_key, model_weight, state_dict, converted_names):
    """Translate one pycls residual-stage key into mmdet ResNet naming.

    pycls names stages ``s1..s4`` and blocks ``b1..bN``; mmdet uses
    ``layer{i}`` with zero-based block indices. The first block of each
    stage owns the projection shortcut, which maps to ``downsample``.
    """
    parts = model_key.split('.')
    stage, block, module = parts[:3]
    block_idx = int(block[1:]) - 1
    prefix = f'layer{int(stage[1:])}.{block_idx}'
    if block_idx == 0 and module == 'bn':
        new_key = f'{prefix}.downsample.1.{parts[-1]}'
    elif block_idx == 0 and module == 'proj':
        new_key = f'{prefix}.downsample.0.{parts[-1]}'
    elif module == 'f':
        # branch letter -> torch submodule; *_bn letters map to bn layers
        branch_to_torch = {
            'a_bn': 'bn1',
            'b_bn': 'bn2',
            'c_bn': 'bn3',
            'a': 'conv1',
            'b': 'conv2',
            'c': 'conv3',
        }
        new_key = f'{prefix}.{branch_to_torch[parts[3]]}.{parts[-1]}'
    else:
        raise ValueError(f'Unsupported conversion of key {model_key}')
    print(f'Convert {model_key} to {new_key}')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
def convert(src, dst):
    """Convert keys in pycls pretrained RegNet models to mmdet style.

    Args:
        src (str): Path of the pycls checkpoint (``model_state`` payload).
        dst (str): Path to save the converted checkpoint to.
    """
    # load caffe model
    regnet_model = torch.load(src)
    blobs = regnet_model['model_state']
    # convert to pytorch style
    state_dict = OrderedDict()
    converted_names = set()
    for key, weight in blobs.items():
        # dispatch on the pycls key prefix: stem / head / stage (s1..s4)
        if 'stem' in key:
            convert_stem(key, weight, state_dict, converted_names)
        elif 'head' in key:
            convert_head(key, weight, state_dict, converted_names)
        elif key.startswith('s'):
            convert_reslayer(key, weight, state_dict, converted_names)
    # check if all layers are converted; unmatched keys are only reported
    for key in blobs:
        if key not in converted_names:
            print(f'not converted: {key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)
def main():
    """Entry point for the pycls RegNet -> mmdet key conversion."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    args = parser.parse_args()
    convert(args.src, args.dst)
if __name__ == '__main__':
main()
| 3,063 | 32.67033 | 77 | py |
ERD | ERD-main/tools/model_converters/upgrade_model_version.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import re
import tempfile
from collections import OrderedDict
import torch
from mmengine import Config
def is_head(key):
    """Return True if ``key`` belongs to one of the detector head modules."""
    head_prefixes = ('bbox_head', 'mask_head', 'semantic_head', 'grid_head',
                     'mask_iou_head')
    # str.startswith accepts a tuple, replacing the original any() loop
    return key.startswith(head_prefixes)
def parse_config(config_strings):
    """Probe the config stored in a checkpoint's meta for its model type.

    Returns:
        tuple: ``(is_two_stage, is_ssd, is_retina, reg_cls_agnostic)``
        flags that select the upgrade path for the checkpoint.
    """
    temp_file = tempfile.NamedTemporaryFile()
    # NOTE(review): the config is written to ``<tmpname>.py`` (a sibling of
    # the NamedTemporaryFile, needed because Config.fromfile requires a
    # ``.py`` path) and is never deleted — leaves a stray temp file behind.
    config_path = f'{temp_file.name}.py'
    with open(config_path, 'w') as f:
        f.write(config_strings)
    config = Config.fromfile(config_path)
    is_two_stage = True
    is_ssd = False
    is_retina = False
    reg_cls_agnostic = False
    if 'rpn_head' not in config.model:
        is_two_stage = False
        # check whether it is SSD
        if config.model.bbox_head.type == 'SSDHead':
            is_ssd = True
        elif config.model.bbox_head.type == 'RetinaHead':
            is_retina = True
    elif isinstance(config.model['bbox_head'], list):
        reg_cls_agnostic = True
    elif 'reg_class_agnostic' in config.model.bbox_head:
        reg_cls_agnostic = config.model.bbox_head \
            .reg_class_agnostic
    temp_file.close()
    return is_two_stage, is_ssd, is_retina, reg_cls_agnostic
def reorder_cls_channel(val, num_classes=81):
    """Move the background logit from channel 0 to the last channel.

    Older MMDetection checkpoints stored the background class first; newer
    versions expect it last. 1-D biases are rotated directly; weights are
    rotated per anchor group when the output channel count is a multiple
    of ``num_classes``.
    """
    if val.dim() == 1:
        # bias: rotate the first entry to the end
        return torch.cat((val[1:], val[:1]), dim=0)
    out_channels, in_channels = val.shape[:2]
    if out_channels != num_classes and out_channels % num_classes == 0:
        # conv_cls for softmax output: rotate inside each anchor group
        grouped = val.reshape(-1, num_classes, in_channels, *val.shape[2:])
        grouped = torch.cat((grouped[:, 1:], grouped[:, :1]), dim=1)
        return grouped.reshape(val.size())
    if out_channels == num_classes:
        # fc_cls
        return torch.cat((val[1:], val[:1]), dim=0)
    # agnostic | retina_cls | rpn_cls: leave untouched
    return val
def truncate_cls_channel(val, num_classes=81):
    """Strip one class channel from mask-head classification parameters."""
    if val.dim() == 1:
        # bias: keep the first num_classes - 1 entries
        if val.size(0) % num_classes == 0:
            return val[:num_classes - 1]
        return val
    out_channels, in_channels = val.shape[:2]
    if out_channels % num_classes != 0:
        # agnostic head: nothing to strip
        return val
    # conv_logits: drop the leading class slice
    kept = val.reshape(num_classes, in_channels, *val.shape[2:])[1:]
    return kept.reshape(-1, *val.shape[1:])
def truncate_reg_channel(val, num_classes=81):
    """Strip one class worth of box-regression channels (class-aware heads)."""
    if val.dim() == 1:
        # fc_reg | rpn_reg bias
        if val.size(0) % num_classes == 0:
            per_class = val.reshape(num_classes, -1)
            return per_class[:num_classes - 1].reshape(-1)
        # agnostic
        return val
    out_channels, in_channels = val.shape[:2]
    if out_channels % num_classes != 0:
        # agnostic
        return val
    # fc_reg | rpn_reg weight.
    # NOTE(review): the weight branch drops the FIRST class slice while the
    # bias branch keeps the first num_classes - 1 (drops the last) — kept
    # exactly as in the original conversion; confirm before changing.
    per_class = val.reshape(num_classes, -1, in_channels, *val.shape[2:])[1:]
    return per_class.reshape(-1, *val.shape[1:])
def convert(in_file, out_file, num_classes):
    """Convert keys in checkpoints.

    There can be some breaking changes during the development of mmdetection,
    and this tool is used for upgrading checkpoints trained with old versions
    to the latest one.

    Args:
        in_file (str): Path of the legacy checkpoint.
        out_file (str): Path to save the upgraded checkpoint to.
        num_classes (int): Number of classes (including background) the
            original model was trained with.
    """
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    meta_info = checkpoint['meta']
    # the training config is embedded in the checkpoint meta
    is_two_stage, is_ssd, is_retina, reg_cls_agnostic = parse_config(
        '#' + meta_info['config'])
    if meta_info['mmdet_version'] <= '0.5.3' and is_retina:
        upgrade_retina = True
    else:
        upgrade_retina = False
    # MMDetection v2.5.0 unifies the class order in RPN
    # if the model is trained in version<v2.5.0
    # The RPN model should be upgraded to be used in version>=2.5.0
    if meta_info['mmdet_version'] < '2.5.0':
        upgrade_rpn = True
    else:
        upgrade_rpn = False
    for key, val in in_state_dict.items():
        new_key = key
        new_val = val
        # two-stage heads moved under the roi_head namespace
        if is_two_stage and is_head(key):
            new_key = 'roi_head.{}'.format(key)
        # classification
        if upgrade_rpn:
            m = re.search(
                r'(conv_cls|retina_cls|rpn_cls|fc_cls|fcos_cls|'
                r'fovea_cls).(weight|bias)', new_key)
        else:
            m = re.search(
                r'(conv_cls|retina_cls|fc_cls|fcos_cls|'
                r'fovea_cls).(weight|bias)', new_key)
        if m is not None:
            print(f'reorder cls channels of {new_key}')
            new_val = reorder_cls_channel(val, num_classes)
        # regression
        if upgrade_rpn:
            m = re.search(r'(fc_reg).(weight|bias)', new_key)
        else:
            m = re.search(r'(fc_reg|rpn_reg).(weight|bias)', new_key)
        if m is not None and not reg_cls_agnostic:
            print(f'truncate regression channels of {new_key}')
            new_val = truncate_reg_channel(val, num_classes)
        # mask head
        m = re.search(r'(conv_logits).(weight|bias)', new_key)
        if m is not None:
            print(f'truncate mask prediction channels of {new_key}')
            new_val = truncate_cls_channel(val, num_classes)
        m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
        # Legacy issues in RetinaNet since V1.x
        # Use ConvModule instead of nn.Conv2d in RetinaNet
        # cls_convs.0.weight -> cls_convs.0.conv.weight
        if m is not None and upgrade_retina:
            param = m.groups()[1]
            new_key = key.replace(param, f'conv.{param}')
            out_state_dict[new_key] = val
            print(f'rename the name of {key} to {new_key}')
            continue
        m = re.search(r'(cls_convs).\d.(weight|bias)', key)
        if m is not None and is_ssd:
            print(f'reorder cls channels of {new_key}')
            new_val = reorder_cls_channel(val, num_classes)
        out_state_dict[new_key] = new_val
    checkpoint['state_dict'] = out_state_dict
    torch.save(checkpoint, out_file)
def main():
    """Entry point: upgrade a legacy checkpoint to the current key layout."""
    parser = argparse.ArgumentParser(description='Upgrade model version')
    parser.add_argument('in_file', help='input checkpoint file')
    parser.add_argument('out_file', help='output checkpoint file')
    parser.add_argument(
        '--num-classes',
        type=int,
        default=81,
        help='number of classes of the original model')
    args = parser.parse_args()
    convert(args.in_file, args.out_file, args.num_classes)
if __name__ == '__main__':
main()
| 6,852 | 31.478673 | 79 | py |
ERD | ERD-main/tools/model_converters/detectron2_to_mmdet.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
from mmengine.fileio import load
from mmengine.runner import save_checkpoint
def convert(src: str, dst: str, prefix: str = 'd2_model') -> None:
    """Convert Detectron2 checkpoint to MMDetection style.

    Args:
        src (str): The Detectron2 checkpoint path, should endswith `pkl`.
        dst (str): The MMDetection checkpoint path.
        prefix (str): The prefix of MMDetection model, defaults to 'd2_model'.
    """
    # load arch_settings
    assert src.endswith('pkl'), \
        'the source Detectron2 checkpoint should endswith `pkl`.'
    # Detectron2 pickles contain latin1-encoded numpy payloads
    d2_model = load(src, encoding='latin1').get('model')
    assert d2_model is not None
    # convert to mmdet style: prepend the prefix and ensure torch tensors
    dst_state_dict = OrderedDict()
    for name, value in d2_model.items():
        if not isinstance(value, torch.Tensor):
            value = torch.from_numpy(value)
        dst_state_dict[f'{prefix}.{name}'] = value
    mmdet_model = dict(state_dict=dst_state_dict, meta=dict())
    save_checkpoint(mmdet_model, dst)
    print(f'Convert Detectron2 model {src} to MMDetection model {dst}')
def main():
    """Entry point for the Detectron2 -> MMDetection checkpoint conversion."""
    parser = argparse.ArgumentParser(
        description='Convert Detectron2 checkpoint to MMDetection style')
    parser.add_argument('src', help='Detectron2 model path')
    parser.add_argument('dst', help='MMDetectron model save path')
    parser.add_argument(
        '--prefix', default='d2_model', type=str, help='prefix of the model')
    args = parser.parse_args()
    convert(args.src, args.dst, args.prefix)
if __name__ == '__main__':
main()
| 1,653 | 32.755102 | 78 | py |
ERD | ERD-main/tools/model_converters/upgrade_ssd_version.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from collections import OrderedDict
import torch
from mmengine import Config
def parse_config(config_strings):
    """Assert that the config stored in a checkpoint describes an SSD model.

    Raises:
        AssertionError: If ``model.bbox_head.type`` is not ``'SSDHead'``.
    """
    temp_file = tempfile.NamedTemporaryFile()
    # NOTE(review): the config is written to ``<tmpname>.py`` next to the
    # NamedTemporaryFile (Config.fromfile needs a ``.py`` path) and is
    # never removed — leaves a stray temp file behind.
    config_path = f'{temp_file.name}.py'
    with open(config_path, 'w') as f:
        f.write(config_strings)
    config = Config.fromfile(config_path)
    # check whether it is SSD
    if config.model.bbox_head.type != 'SSDHead':
        raise AssertionError('This is not a SSD model.')
def convert(in_file, out_file):
    """Upgrade a legacy SSD checkpoint to the refactored module layout."""
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    meta_info = checkpoint['meta']
    # raises if the embedded config is not an SSD model
    parse_config('#' + meta_info['config'])
    for key, value in in_state_dict.items():
        if 'extra' in key:
            # extra layers were flattened; map index i to (i//2, i%2) conv
            layer_idx = int(key.split('.')[2])
            new_key = 'neck.extra_layers.{}.{}.conv.'.format(
                layer_idx // 2, layer_idx % 2) + key.split('.')[-1]
        elif 'l2_norm' in key:
            new_key = 'neck.l2_norm.weight'
        elif 'bbox_head' in key:
            # NOTE(review): assumes keys shaped like
            # 'bbox_head.cls_convs.<i>...' / 'bbox_head.reg_convs.<i>...',
            # where the first 21 characters end at the layer index; a '.0'
            # submodule index is inserted after it — confirm against the
            # legacy SSD head naming.
            new_key = key[:21] + '.0' + key[21:]
        else:
            new_key = key
        out_state_dict[new_key] = value
    checkpoint['state_dict'] = out_state_dict
    if torch.__version__ >= '1.6':
        # keep the old serialization so torch < 1.6 can still load the file
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
def main():
    """Entry point for the legacy SSD checkpoint upgrade."""
    parser = argparse.ArgumentParser(description='Upgrade SSD version')
    parser.add_argument('in_file', help='input checkpoint file')
    parser.add_argument('out_file', help='output checkpoint file')
    args = parser.parse_args()
    convert(args.in_file, args.out_file)
if __name__ == '__main__':
main()
| 1,793 | 29.40678 | 78 | py |
ERD | ERD-main/tools/model_converters/detectron2pytorch.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
from mmengine.fileio import load
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
    """Translate a detectron affine-channel layer into BN parameters.

    Detectron replaces batch norm with a frozen affine layer, so only the
    scale (``_s``) and bias (``_b``) blobs exist; running statistics are
    synthesized as identity (zero mean, unit variance).
    """
    bias = torch.from_numpy(blobs[caffe_name + '_b'])
    scale = torch.from_numpy(blobs[caffe_name + '_s'])
    state_dict[torch_name + '.bias'] = bias
    state_dict[torch_name + '.weight'] = scale
    bn_size = scale.size()
    state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size)
    state_dict[torch_name + '.running_var'] = torch.ones(bn_size)
    converted_names.add(caffe_name + '_b')
    converted_names.add(caffe_name + '_s')
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name,
                    converted_names):
    """Copy a conv/fc weight (and optional bias) blob into pytorch naming."""
    state_dict[torch_name + '.weight'] = torch.from_numpy(
        blobs[caffe_name + '_w'])
    converted_names.add(caffe_name + '_w')
    bias_key = caffe_name + '_b'
    if bias_key in blobs:
        state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[bias_key])
        converted_names.add(bias_key)
def convert(src, dst, depth):
    """Convert keys in detectron pretrained ResNet models to pytorch style.

    Args:
        src (str): Path of the detectron pickle checkpoint.
        dst (str): Path to save the converted checkpoint to.
        depth (int): ResNet depth; only 50 and 101 are supported.
    """
    # load arch_settings
    if depth not in arch_settings:
        raise ValueError('Only support ResNet-50 and ResNet-101 currently')
    block_nums = arch_settings[depth]
    # load caffe model (latin1 for python2-era numpy pickles)
    caffe_model = load(src, encoding='latin1')
    blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model
    # convert to pytorch style
    state_dict = OrderedDict()
    converted_names = set()
    convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
    convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
    for i in range(1, len(block_nums) + 1):
        for j in range(block_nums[i - 1]):
            # the first block of each stage carries the downsample branch
            if j == 0:
                convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1',
                                f'layer{i}.{j}.downsample.0', converted_names)
                convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn',
                           f'layer{i}.{j}.downsample.1', converted_names)
            # branch2 letters a/b/c map to conv1/conv2/conv3 of the bottleneck
            for k, letter in enumerate(['a', 'b', 'c']):
                convert_conv_fc(blobs, state_dict,
                                f'res{i + 1}_{j}_branch2{letter}',
                                f'layer{i}.{j}.conv{k+1}', converted_names)
                convert_bn(blobs, state_dict,
                           f'res{i + 1}_{j}_branch2{letter}_bn',
                           f'layer{i}.{j}.bn{k + 1}', converted_names)
    # check if all layers are converted; unmatched blobs are only reported
    for key in blobs:
        if key not in converted_names:
            print(f'Not Convert: {key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)
def main():
    """Entry point for the detectron ResNet -> pytorch key conversion."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    parser.add_argument('depth', type=int, help='ResNet model depth')
    args = parser.parse_args()
    convert(args.src, args.dst, args.depth)
if __name__ == '__main__':
main()
| 3,594 | 41.797619 | 78 | py |
ERD | ERD-main/tools/analysis_tools/benchmark.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from mmengine import MMLogger
from mmengine.config import Config, DictAction
from mmengine.dist import init_dist
from mmengine.registry import init_default_scope
from mmengine.utils import mkdir_or_exist
from mmdet.utils.benchmark import (DataLoaderBenchmark, DatasetBenchmark,
InferenceBenchmark)
def parse_args():
    """Parse CLI options for the benchmark entry point.

    Also propagates ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable so distributed launchers that rely on it keep working.
    """
    parser = argparse.ArgumentParser(description='MMDet benchmark')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--checkpoint', help='checkpoint file')
    parser.add_argument(
        '--task',
        choices=['inference', 'dataloader', 'dataset'],
        default='dataloader',
        help='Which task do you want to go to benchmark')
    parser.add_argument(
        '--repeat-num',
        type=int,
        default=1,
        help='number of repeat times of measurement for averaging the results')
    parser.add_argument(
        '--max-iter', type=int, default=2000, help='num of max iter')
    parser.add_argument(
        '--log-interval', type=int, default=50, help='interval of logging')
    parser.add_argument(
        '--num-warmup', type=int, default=5, help='Number of warmup')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--dataset-type',
        choices=['train', 'val', 'test'],
        default='test',
        help='Benchmark dataset type. only supports train, val and test')
    parser.add_argument(
        '--work-dir',
        help='the directory to save the file containing '
        'benchmark metrics')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def inference_benchmark(args, cfg, distributed, logger):
    """Build an ``InferenceBenchmark`` from the parsed CLI options."""
    return InferenceBenchmark(
        cfg,
        args.checkpoint,
        distributed,
        args.fuse_conv_bn,
        args.max_iter,
        args.log_interval,
        args.num_warmup,
        logger=logger)
def dataloader_benchmark(args, cfg, distributed, logger):
    """Build a ``DataLoaderBenchmark`` from the parsed CLI options."""
    return DataLoaderBenchmark(
        cfg,
        distributed,
        args.dataset_type,
        args.max_iter,
        args.log_interval,
        args.num_warmup,
        logger=logger)
def dataset_benchmark(args, cfg, distributed, logger):
    """Build a ``DatasetBenchmark`` from the parsed CLI options."""
    return DatasetBenchmark(
        cfg,
        args.dataset_type,
        args.max_iter,
        args.log_interval,
        args.num_warmup,
        logger=logger)
def main():
    """Entry point: build the requested benchmark from CLI args and run it.

    Loads the config, optionally initializes distributed mode, sets up
    logging and dispatches to one of the three benchmark builders.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    init_default_scope(cfg.get('default_scope', 'mmdet'))
    distributed = False
    if args.launcher != 'none':
        init_dist(args.launcher, **cfg.get('env_cfg', {}).get('dist_cfg', {}))
        distributed = True
    log_file = None
    if args.work_dir:
        log_file = os.path.join(args.work_dir, 'benchmark.log')
        mkdir_or_exist(args.work_dir)
    logger = MMLogger.get_instance(
        'mmdet', log_file=log_file, log_level='INFO')
    # Dispatch through an explicit mapping instead of ``eval`` on a
    # user-supplied string: clearer, and safe even if the ``choices``
    # restriction on ``--task`` is ever relaxed.
    task_builders = {
        'inference': inference_benchmark,
        'dataloader': dataloader_benchmark,
        'dataset': dataset_benchmark,
    }
    benchmark = task_builders[args.task](args, cfg, distributed, logger)
    benchmark.run(args.repeat_num)


if __name__ == '__main__':
    main()
| 4,242 | 30.664179 | 79 | py |
ERD | ERD-main/tools/analysis_tools/optimize_anchors.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Optimize anchor settings on a specific dataset.
This script provides two methods to optimize YOLO anchors: k-means
anchor clustering and differential evolution. Use ``--algorithm k-means``
or ``--algorithm differential_evolution`` to switch between the two methods.
Example:
Use k-means anchor cluster::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--output-dir ${OUTPUT_DIR}
Use differential evolution to optimize anchors::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm differential_evolution \
--input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--output-dir ${OUTPUT_DIR}
"""
import argparse
import os.path as osp
import numpy as np
import torch
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.registry import init_default_scope
from mmengine.utils import ProgressBar
from scipy.optimize import differential_evolution
from mmdet.registry import DATASETS
from mmdet.structures.bbox import (bbox_cxcywh_to_xyxy, bbox_overlaps,
bbox_xyxy_to_cxcywh)
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
    """Parse the command-line options for anchor optimization."""
    ap = argparse.ArgumentParser(description='Optimize anchor parameters.')
    ap.add_argument('config', help='Train config file path.')
    ap.add_argument(
        '--device', default='cuda:0', help='Device used for calculating.')
    ap.add_argument(
        '--input-shape',
        type=int,
        nargs='+',
        default=[608, 608],
        help='input image size')
    ap.add_argument(
        '--algorithm',
        default='differential_evolution',
        help='Algorithm used for anchor optimizing.'
        'Support k-means and differential_evolution for YOLO.')
    ap.add_argument(
        '--iters',
        default=1000,
        type=int,
        help='Maximum iterations for optimizer.')
    ap.add_argument(
        '--output-dir',
        default=None,
        type=str,
        help='Path to save anchor optimize result.')
    return ap.parse_args()
class BaseAnchorOptimizer:
    """Base class for anchor optimizers.

    Collects ground-truth box sizes from the dataset at construction time,
    rescales them to the network input shape and leaves the actual
    optimization strategy to subclasses (``optimize``).

    Args:
        dataset (obj:`Dataset`): Dataset object.
        input_shape (list[int]): Input image shape of the model.
            Format in [width, height].
        logger (obj:`logging.Logger`): The logger for logging.
        device (str, optional): Device used for calculating.
            Default: 'cuda:0'
        out_dir (str, optional): Path to save anchor optimize result.
            Default: None
    """
    def __init__(self,
                 dataset,
                 input_shape,
                 logger,
                 device='cuda:0',
                 out_dir=None):
        self.dataset = dataset
        self.input_shape = input_shape
        self.logger = logger
        self.device = device
        self.out_dir = out_dir
        bbox_whs, img_shapes = self.get_whs_and_shapes()
        # Per-image scale: the longer image side divided by each input-shape
        # axis; (N, 1) / (1, 2) broadcasts to an (N, 2) ratio array.
        ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape])
        # resize to input shape
        self.bbox_whs = bbox_whs / ratios
    def get_whs_and_shapes(self):
        """Get widths and heights of bboxes and shapes of images.

        Returns:
            tuple[np.ndarray]: Array of bbox shapes and array of image
                shapes with shape (num_bboxes, 2) in [width, height] format.
        """
        self.logger.info('Collecting bboxes from annotation...')
        bbox_whs = []
        img_shapes = []
        prog_bar = ProgressBar(len(self.dataset))
        for idx in range(len(self.dataset)):
            data_info = self.dataset.get_data_info(idx)
            img_shape = np.array([data_info['width'], data_info['height']])
            gt_instances = data_info['instances']
            for instance in gt_instances:
                bbox = np.array(instance['bbox'])
                # bbox is [x1, y1, x2, y2]; keep only its width/height
                wh = bbox[2:4] - bbox[0:2]
                # one image-shape entry per box so both arrays line up
                img_shapes.append(img_shape)
                bbox_whs.append(wh)
            prog_bar.update()
        print('\n')
        bbox_whs = np.array(bbox_whs)
        img_shapes = np.array(img_shapes)
        self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.')
        return bbox_whs, img_shapes
    def get_zero_center_bbox_tensor(self):
        """Get a tensor of bboxes centered at (0, 0).

        Only width/height matter for the IoU-based clustering, so every box
        is moved to the origin.

        Returns:
            Tensor: Tensor of bboxes with shape (num_bboxes, 4)
                in [xmin, ymin, xmax, ymax] format.
        """
        whs = torch.from_numpy(self.bbox_whs).to(
            self.device, dtype=torch.float32)
        bboxes = bbox_cxcywh_to_xyxy(
            torch.cat([torch.zeros_like(whs), whs], dim=1))
        return bboxes
    def optimize(self):
        """Run the anchor optimization; must be implemented by subclasses."""
        raise NotImplementedError
    def save_result(self, anchors, path=None):
        """Log the optimized anchors and optionally dump them as JSON."""
        anchor_results = []
        for w, h in anchors:
            # anchors are reported as integer [width, height] pairs
            anchor_results.append([round(w), round(h)])
        self.logger.info(f'Anchor optimize result:{anchor_results}')
        if path:
            json_path = osp.join(path, 'anchor_optimize_result.json')
            dump(anchor_results, json_path)
            self.logger.info(f'Result saved in {json_path}')
class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer):
    r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet.
    <https://github.com/AlexeyAB/darknet/blob/master/src/detector.c>`_.

    Args:
        num_anchors (int) : Number of anchors.
        iters (int): Maximum iterations for k-means.
    """
    def __init__(self, num_anchors, iters, **kwargs):
        super(YOLOKMeansAnchorOptimizer, self).__init__(**kwargs)
        self.num_anchors = num_anchors
        self.iters = iters
    def optimize(self):
        """Cluster anchors with k-means and save/log the result."""
        anchors = self.kmeans_anchors()
        self.save_result(anchors, self.out_dir)
    def kmeans_anchors(self):
        """Run IoU-based k-means over the zero-centered gt boxes.

        Returns:
            list: Anchor [width, height] pairs sorted by ascending area.
        """
        self.logger.info(
            f'Start cluster {self.num_anchors} YOLO anchors with K-means...')
        bboxes = self.get_zero_center_bbox_tensor()
        # initialize cluster centers with randomly sampled gt boxes
        cluster_center_idx = torch.randint(
            0, bboxes.shape[0], (self.num_anchors, )).to(self.device)
        assignments = torch.zeros((bboxes.shape[0], )).to(self.device)
        cluster_centers = bboxes[cluster_center_idx]
        if self.num_anchors == 1:
            # a single anchor needs no EM loop: it is just the mean box
            cluster_centers = self.kmeans_maximization(bboxes, assignments,
                                                       cluster_centers)
            anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
            anchors = sorted(anchors, key=lambda x: x[0] * x[1])
            return anchors
        prog_bar = ProgressBar(self.iters)
        for i in range(self.iters):
            # E-step: reassign boxes to their highest-IoU center
            converged, assignments = self.kmeans_expectation(
                bboxes, assignments, cluster_centers)
            if converged:
                self.logger.info(f'K-means process has converged at iter {i}.')
                break
            # M-step: recompute centers from the new assignments
            cluster_centers = self.kmeans_maximization(bboxes, assignments,
                                                       cluster_centers)
            prog_bar.update()
        print('\n')
        avg_iou = bbox_overlaps(bboxes,
                                cluster_centers).max(1)[0].mean().item()
        anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
        anchors = sorted(anchors, key=lambda x: x[0] * x[1])
        self.logger.info(f'Anchor cluster finish. Average IOU: {avg_iou}')
        return anchors
    def kmeans_maximization(self, bboxes, assignments, centers):
        """Maximization part of EM algorithm(Expectation-Maximization)"""
        new_centers = torch.zeros_like(centers)
        for i in range(centers.shape[0]):
            mask = (assignments == i)
            if mask.sum():
                # move each center to the mean of the boxes assigned to it;
                # empty clusters keep a zero center
                new_centers[i, :] = bboxes[mask].mean(0)
        return new_centers
    def kmeans_expectation(self, bboxes, assignments, centers):
        """Expectation part of EM algorithm(Expectation-Maximization)"""
        ious = bbox_overlaps(bboxes, centers)
        closest = ious.argmax(1)
        # converged when no box changed its assigned cluster
        converged = (closest == assignments).all()
        return converged, closest
class YOLODEAnchorOptimizer(BaseAnchorOptimizer):
    """YOLO anchor optimizer using differential evolution algorithm.

    Args:
        num_anchors (int) : Number of anchors.
        iters (int): Maximum iterations for k-means.
        strategy (str): The differential evolution strategy to use.
            Should be one of:
            - 'best1bin'
            - 'best1exp'
            - 'rand1exp'
            - 'randtobest1exp'
            - 'currenttobest1exp'
            - 'best2exp'
            - 'rand2exp'
            - 'randtobest1bin'
            - 'currenttobest1bin'
            - 'best2bin'
            - 'rand2bin'
            - 'rand1bin'
            Default: 'best1bin'.
        population_size (int): Total population size of evolution algorithm.
            Default: 15.
        convergence_thr (float): Tolerance for convergence, the
            optimizing stops when ``np.std(pop) <= abs(convergence_thr)
            + convergence_thr * np.abs(np.mean(population_energies))``,
            respectively. Default: 0.0001.
        mutation (tuple[float]): Range of dithering randomly changes the
            mutation constant. Default: (0.5, 1).
        recombination (float): Recombination constant of crossover probability.
            Default: 0.7.
    """
    def __init__(self,
                 num_anchors,
                 iters,
                 strategy='best1bin',
                 population_size=15,
                 convergence_thr=0.0001,
                 mutation=(0.5, 1),
                 recombination=0.7,
                 **kwargs):
        super(YOLODEAnchorOptimizer, self).__init__(**kwargs)
        self.num_anchors = num_anchors
        self.iters = iters
        self.strategy = strategy
        self.population_size = population_size
        self.convergence_thr = convergence_thr
        self.mutation = mutation
        self.recombination = recombination
    def optimize(self):
        """Evolve anchors with differential evolution and save the result."""
        anchors = self.differential_evolution()
        self.save_result(anchors, self.out_dir)
    def differential_evolution(self):
        """Minimize ``1 - mean(best IoU)`` over anchor widths and heights.

        Returns:
            list[tuple]: Anchor (width, height) pairs sorted by area.
        """
        bboxes = self.get_zero_center_bbox_tensor()
        bounds = []
        # each anchor contributes one (width, height) pair, each coordinate
        # bounded by the corresponding network input dimension
        for i in range(self.num_anchors):
            bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])])
        result = differential_evolution(
            func=self.avg_iou_cost,
            bounds=bounds,
            args=(bboxes, ),
            strategy=self.strategy,
            maxiter=self.iters,
            popsize=self.population_size,
            tol=self.convergence_thr,
            mutation=self.mutation,
            recombination=self.recombination,
            updating='immediate',
            disp=True)
        self.logger.info(
            f'Anchor evolution finish. Average IOU: {1 - result.fun}')
        # result.x is a flat [w0, h0, w1, h1, ...] vector
        anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])]
        anchors = sorted(anchors, key=lambda x: x[0] * x[1])
        return anchors
    @staticmethod
    def avg_iou_cost(anchor_params, bboxes):
        """Cost: ``1 - mean IoU`` between gt boxes and their best anchor."""
        assert len(anchor_params) % 2 == 0
        anchor_whs = torch.tensor(
            [[w, h]
             for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to(
                 bboxes.device, dtype=bboxes.dtype)
        # zero-centered anchors, same convention as the gt boxes
        anchor_boxes = bbox_cxcywh_to_xyxy(
            torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1))
        ious = bbox_overlaps(bboxes, anchor_boxes)
        # for every gt box keep only the IoU of its best-matching anchor
        max_ious, _ = ious.max(1)
        cost = 1 - max_ious.mean().item()
        return cost
def main():
    """CLI entry point: load the config, build the train dataset and run
    the selected anchor optimizer (k-means or differential evolution)."""
    logger = MMLogger.get_current_instance()
    args = parse_args()
    cfg = args.config
    cfg = Config.fromfile(cfg)
    init_default_scope(cfg.get('default_scope', 'mmdet'))
    # replace the ${key} with the value of cfg.key
    cfg = replace_cfg_vals(cfg)
    # update data root according to MMDET_DATASETS
    update_data_root(cfg)
    input_shape = args.input_shape
    assert len(input_shape) == 2
    # only YOLO-style anchor generators are supported
    anchor_type = cfg.model.bbox_head.anchor_generator.type
    assert anchor_type == 'YOLOAnchorGenerator', \
        f'Only support optimize YOLOAnchor, but get {anchor_type}.'
    base_sizes = cfg.model.bbox_head.anchor_generator.base_sizes
    # total anchors across all feature levels
    num_anchors = sum([len(sizes) for sizes in base_sizes])
    train_data_cfg = cfg.train_dataloader
    # unwrap dataset wrappers (e.g. RepeatDataset) to the innermost config
    while 'dataset' in train_data_cfg:
        train_data_cfg = train_data_cfg['dataset']
    dataset = DATASETS.build(train_data_cfg)
    if args.algorithm == 'k-means':
        optimizer = YOLOKMeansAnchorOptimizer(
            dataset=dataset,
            input_shape=input_shape,
            device=args.device,
            num_anchors=num_anchors,
            iters=args.iters,
            logger=logger,
            out_dir=args.output_dir)
    elif args.algorithm == 'differential_evolution':
        optimizer = YOLODEAnchorOptimizer(
            dataset=dataset,
            input_shape=input_shape,
            device=args.device,
            num_anchors=num_anchors,
            iters=args.iters,
            logger=logger,
            out_dir=args.output_dir)
    else:
        raise NotImplementedError(
            f'Only support k-means and differential_evolution, '
            f'but get {args.algorithm}')
    optimizer.optimize()


if __name__ == '__main__':
    main()
| 13,631 | 34.592689 | 79 | py |
ERD | ERD-main/tools/analysis_tools/get_flops.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from functools import partial
from pathlib import Path
import numpy as np
import torch
from mmengine.config import Config, DictAction
from mmengine.logging import MMLogger
from mmengine.model import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmengine.runner import Runner
from mmdet.registry import MODELS
try:
from mmengine.analysis import get_model_complexity_info
from mmengine.analysis.print_helper import _format_size
except ImportError:
raise ImportError('Please upgrade mmengine >= 0.6.0')
def parse_args():
    """Define and parse the CLI options of the FLOPs counter."""
    ap = argparse.ArgumentParser(description='Get a detector flops')
    ap.add_argument('config', help='train config file path')
    ap.add_argument(
        '--num-images',
        type=int,
        default=100,
        help='num images of calculate model flops')
    ap.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    return ap.parse_args()
def inference(args, logger):
    """Average FLOPs over up to ``args.num_images`` validation samples.

    Args:
        args (argparse.Namespace): Parsed CLI options.
        logger (MMLogger): Logger for warnings and errors.

    Returns:
        dict: Keys ``ori_shape``, ``pad_shape``, ``compute_type``,
            ``flops`` and ``params`` (the last two human-formatted).
    """
    if str(torch.__version__) < '1.12':
        logger.warning(
            'Some config files, such as configs/yolact and configs/detectors,'
            'may have compatibility issues with torch.jit when torch<1.12. '
            'If you want to calculate flops for these models, '
            'please make sure your pytorch version is >=1.12.')
    config_name = Path(args.config)
    if not config_name.exists():
        logger.error(f'{config_name} not found.')
    cfg = Config.fromfile(args.config)
    # single-sample batches so per-image FLOPs are measured
    cfg.val_dataloader.batch_size = 1
    cfg.work_dir = tempfile.TemporaryDirectory().name
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    init_default_scope(cfg.get('default_scope', 'mmdet'))
    # TODO: The following usage is temporary and not safe
    # use hard code to convert mmSyncBN to SyncBN. This is a known
    # bug in mmengine, mmSyncBN requires a distributed environment,
    # this question involves models like configs/strong_baselines
    if hasattr(cfg, 'head_norm_cfg'):
        cfg['head_norm_cfg'] = dict(type='SyncBN', requires_grad=True)
        cfg['model']['roi_head']['bbox_head']['norm_cfg'] = dict(
            type='SyncBN', requires_grad=True)
        cfg['model']['roi_head']['mask_head']['norm_cfg'] = dict(
            type='SyncBN', requires_grad=True)
    result = {}
    avg_flops = []
    data_loader = Runner.build_dataloader(cfg.val_dataloader)
    model = MODELS.build(cfg.model)
    if torch.cuda.is_available():
        model = model.cuda()
    model = revert_sync_batchnorm(model)
    model.eval()
    # keep the unpatched forward so it can be re-bound per sample below
    _forward = model.forward
    for idx, data_batch in enumerate(data_loader):
        if idx == args.num_images:
            break
        data = model.data_preprocessor(data_batch)
        # shapes are overwritten every iteration; the values reported are
        # those of the last measured sample
        result['ori_shape'] = data['data_samples'][0].ori_shape
        result['pad_shape'] = data['data_samples'][0].pad_shape
        if hasattr(data['data_samples'][0], 'batch_input_shape'):
            result['pad_shape'] = data['data_samples'][0].batch_input_shape
        # bind data_samples so the complexity analyzer can call forward
        # with only the image tensor
        model.forward = partial(_forward, data_samples=data['data_samples'])
        outputs = get_model_complexity_info(
            model,
            None,
            inputs=data['inputs'],
            show_table=False,
            show_arch=False)
        avg_flops.append(outputs['flops'])
        params = outputs['params']
        result['compute_type'] = 'dataloader: load a picture from the dataset'
    del data_loader
    # NOTE(review): assumes the loop ran at least once; with an empty
    # dataloader ``params`` would be unbound here.
    mean_flops = _format_size(int(np.average(avg_flops)))
    params = _format_size(params)
    result['flops'] = mean_flops
    result['params'] = params
    return result
def main():
    """Compute the model's average FLOPs/params and print a summary."""
    args = parse_args()
    logger = MMLogger.get_instance(name='MMLogger')
    result = inference(args, logger)

    divider = '=' * 30
    ori_shape = result['ori_shape']
    pad_shape = result['pad_shape']

    if pad_shape != ori_shape:
        print(f'{divider}\nUse size divisor set input shape '
              f'from {ori_shape} to {pad_shape}')
    print(f'{divider}\nCompute type: {result["compute_type"]}\n'
          f'Input shape: {pad_shape}\nFlops: {result["flops"]}\n'
          f'Params: {result["params"]}\n{divider}')
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify '
          'that the flops computation is correct.')


if __name__ == '__main__':
    main()
| 5,026 | 34.907143 | 78 | py |
ERD | ERD-main/tools/analysis_tools/test_robustness.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.dist import get_dist_info
from mmengine.evaluator import DumpResults
from mmengine.fileio import dump
from mmengine.runner import Runner
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmdet.registry import RUNNERS
from tools.analysis_tools.robustness_eval import get_results
def parse_args():
    """Define and parse the CLI options of the robustness benchmark."""
    p = argparse.ArgumentParser(description='MMDet test detector')
    p.add_argument('config', help='test config file path')
    p.add_argument('checkpoint', help='checkpoint file')
    p.add_argument(
        '--out',
        type=str,
        help='dump predictions to a pickle file for offline evaluation')
    p.add_argument(
        '--corruptions',
        type=str,
        nargs='+',
        default='benchmark',
        choices=[
            'all', 'benchmark', 'noise', 'blur', 'weather', 'digital',
            'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise',
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow',
            'frost', 'fog', 'brightness', 'contrast', 'elastic_transform',
            'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur',
            'spatter', 'saturate'
        ],
        help='corruptions')
    p.add_argument(
        '--work-dir',
        help='the directory to save the file containing evaluation metrics')
    p.add_argument(
        '--severities',
        type=int,
        nargs='+',
        default=[0, 1, 2, 3, 4, 5],
        help='corruption severity levels')
    # NOTE(review): ``type=bool`` makes any non-empty string truthy
    # (e.g. ``--summaries False`` yields True) -- argparse footgun.
    p.add_argument(
        '--summaries',
        type=bool,
        default=False,
        help='Print summaries for every corruption and severity')
    p.add_argument('--show', action='store_true', help='show results')
    p.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    p.add_argument(
        '--wait-time', type=float, default=2, help='the interval of show (s)')
    p.add_argument('--seed', type=int, default=None, help='random seed')
    p.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    p.add_argument('--local_rank', type=int, default=0)
    p.add_argument(
        '--final-prints',
        type=str,
        nargs='+',
        choices=['P', 'mPC', 'rPC'],
        default='mPC',
        help='corruption benchmark metric to print at the end')
    p.add_argument(
        '--final-prints-aggregate',
        type=str,
        choices=['all', 'benchmark'],
        default='benchmark',
        help='aggregate all results or only those for benchmark corruptions')
    p.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = p.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Evaluate a detector under image corruptions (robustness benchmark).

    For every selected corruption/severity pair a ``Corrupt`` transform is
    injected into the test pipeline, the runner's test loop is pointed at
    the rebuilt dataloader and the metrics are aggregated.  When ``--out``
    is given, the aggregated results are dumped and summarised at the end.
    """
    args = parse_args()

    assert args.out or args.show or args.show_dir, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out", "--show" or "show-dir"')

    # load config
    cfg = Config.fromfile(args.config)
    cfg.launcher = args.launcher
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])

    # the checkpoint supplies the weights, so skip pretrained init
    cfg.model.backbone.init_cfg.type = None
    cfg.test_dataloader.dataset.test_mode = True

    cfg.load_from = args.checkpoint
    if args.show or args.show_dir:
        cfg = trigger_visualization_hook(cfg, args)

    # build the runner from config
    if 'runner_type' not in cfg:
        # build the default runner
        runner = Runner.from_cfg(cfg)
    else:
        # build customized runner from the registry
        # if 'runner_type' is set in the cfg
        runner = RUNNERS.build(cfg)

    # add `DumpResults` dummy metric
    if args.out is not None:
        assert args.out.endswith(('.pkl', '.pickle')), \
            'The dump file must be a pkl file.'
        runner.test_evaluator.metrics.append(
            DumpResults(out_file_path=args.out))

    # expand the corruption group keywords into concrete corruption names
    if 'all' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
            'saturate'
        ]
    elif 'benchmark' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression'
        ]
    elif 'noise' in args.corruptions:
        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    elif 'blur' in args.corruptions:
        corruptions = [
            'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
        ]
    elif 'weather' in args.corruptions:
        corruptions = ['snow', 'frost', 'fog', 'brightness']
    elif 'digital' in args.corruptions:
        corruptions = [
            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
        ]
    elif 'holdout' in args.corruptions:
        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    elif 'None' in args.corruptions:
        corruptions = ['None']
        args.severities = [0]
    else:
        corruptions = args.corruptions

    aggregated_results = {}
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        # the unused ``enumerate`` index over severities was dropped
        for corruption_severity in args.severities:
            # evaluate severity 0 (= no corruption) only once
            if corr_i > 0 and corruption_severity == 0:
                aggregated_results[corruption][0] = \
                    aggregated_results[corruptions[0]][0]
                continue

            test_loader_cfg = copy.deepcopy(cfg.test_dataloader)
            # assign corruption and severity
            if corruption_severity > 0:
                corruption_trans = dict(
                    type='Corrupt',
                    corruption=corruption,
                    severity=corruption_severity)
                # TODO: hard coded "1", we assume that the first step is
                # loading images, which needs to be fixed in the future
                test_loader_cfg.dataset.pipeline.insert(1, corruption_trans)

            test_loader = runner.build_dataloader(test_loader_cfg)
            runner.test_loop.dataloader = test_loader

            # set random seeds
            if args.seed is not None:
                runner.set_randomness(args.seed)

            # print info
            print(f'\nTesting {corruption} at severity {corruption_severity}')

            eval_results = runner.test()
            if args.out:
                eval_results_filename = (
                    osp.splitext(args.out)[0] + '_results' +
                    osp.splitext(args.out)[1])
                aggregated_results[corruption][
                    corruption_severity] = eval_results
                dump(aggregated_results, eval_results_filename)

    rank, _ = get_dist_info()
    # BUGFIX: the final summary reads the dumped results file, which only
    # exists when ``--out`` was given.  Previously this branch also ran for
    # show-only invocations and crashed on ``osp.splitext(None)``.
    if rank == 0 and args.out:
        eval_results_filename = (
            osp.splitext(args.out)[0] + '_results' + osp.splitext(args.out)[1])
        # print final results
        print('\nAggregated results:')
        prints = args.final_prints
        aggregate = args.final_prints_aggregate

        if cfg.dataset_type == 'VOCDataset':
            get_results(
                eval_results_filename,
                dataset='voc',
                prints=prints,
                aggregate=aggregate)
        else:
            get_results(
                eval_results_filename,
                dataset='coco',
                prints=prints,
                aggregate=aggregate)


if __name__ == '__main__':
    main()
| 9,120 | 37.004167 | 79 | py |
ERD | ERD-main/projects/Detic/demo.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import urllib
from argparse import ArgumentParser
import mmcv
import torch
from mmengine.logging import print_log
from mmengine.utils import ProgressBar, scandir
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',
                  '.tiff', '.webp')


def get_file_list(source_root: str) -> [list, dict]:
    """Collect image source paths from a file, a directory or a URL.

    Args:
        source_root (str): image or video source path

    Return:
        source_file_path_list (list): A list for all source file.
        source_type (dict): Source type: file or url or dir.
    """
    is_dir = os.path.isdir(source_root)
    is_url = source_root.startswith(('http:/', 'https:/'))
    is_file = os.path.splitext(source_root)[-1].lower() in IMG_EXTENSIONS

    if is_dir:
        # walk the directory tree and keep every recognised image file
        source_file_path_list = [
            os.path.join(source_root, file)
            for file in scandir(source_root, IMG_EXTENSIONS, recursive=True)
        ]
    elif is_url:
        # download the remote file into the current working directory
        filename = os.path.basename(
            urllib.parse.unquote(source_root).split('?')[0])
        file_save_path = os.path.join(os.getcwd(), filename)
        print(f'Downloading source file to {file_save_path}')
        torch.hub.download_url_to_file(source_root, file_save_path)
        source_file_path_list = [file_save_path]
    elif is_file:
        # a single local image path
        source_file_path_list = [source_root]
    else:
        source_file_path_list = []
        print('Cannot find image file.')

    source_type = dict(is_dir=is_dir, is_url=is_url, is_file=is_file)
    return source_file_path_list, source_type
def parse_args():
    """Parse the demo's command-line options."""
    ap = ArgumentParser()
    ap.add_argument(
        'img', help='Image path, include image file, dir and URL.')
    ap.add_argument('config', help='Config file')
    ap.add_argument('checkpoint', help='Checkpoint file')
    ap.add_argument(
        '--out-dir', default='./output', help='Path to output file')
    ap.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    ap.add_argument(
        '--show', action='store_true', help='Show the detection results')
    ap.add_argument(
        '--score-thr', type=float, default=0.3, help='Bbox score threshold')
    ap.add_argument(
        '--dataset', type=str, help='dataset name to load the text embedding')
    ap.add_argument(
        '--class-name', nargs='+', type=str, help='custom class names')
    return ap.parse_args()
def main():
    """Demo entry point: run the Detic detector on the given image
    source(s) and visualize or save the predictions."""
    args = parse_args()
    # register all modules in mmdet into the registries
    register_all_modules()
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    if not os.path.exists(args.out_dir) and not args.show:
        # NOTE(review): ``os.mkdir`` requires the parent directory to
        # exist; ``os.makedirs`` would be more forgiving for nested paths.
        os.mkdir(args.out_dir)
    # init visualizer
    visualizer = VISUALIZERS.build(model.cfg.visualizer)
    visualizer.dataset_meta = model.dataset_meta
    # get file list
    files, source_type = get_file_list(args.img)
    # function-scope import keeps detic.utils (and its CLIP text encoder)
    # off the module import path
    from detic.utils import (get_class_names, get_text_embeddings,
                             reset_cls_layer_weight)
    # class name embeddings
    if args.class_name:
        dataset_classes = args.class_name
    elif args.dataset:
        dataset_classes = get_class_names(args.dataset)
    # get_text_embeddings asserts exactly one of dataset / class_name is set
    embedding = get_text_embeddings(
        dataset=args.dataset, custom_vocabulary=args.class_name)
    visualizer.dataset_meta['classes'] = dataset_classes
    reset_cls_layer_weight(model, embedding)
    # start detector inference
    progress_bar = ProgressBar(len(files))
    for file in files:
        result = inference_detector(model, file)
        img = mmcv.imread(file)
        img = mmcv.imconvert(img, 'bgr', 'rgb')
        if source_type['is_dir']:
            # flatten the relative path into a single output file name
            filename = os.path.relpath(file, args.img).replace('/', '_')
        else:
            filename = os.path.basename(file)
        out_file = None if args.show else os.path.join(args.out_dir, filename)
        progress_bar.update()
        visualizer.add_datasample(
            filename,
            img,
            data_sample=result,
            draw_gt=False,
            show=args.show,
            wait_time=0,
            out_file=out_file,
            pred_score_thr=args.score_thr)
    if not args.show:
        print_log(
            f'\nResults have been saved at {os.path.abspath(args.out_dir)}')


if __name__ == '__main__':
    main()
| 4,710 | 31.944056 | 78 | py |
ERD | ERD-main/projects/Detic/detic/detic_bbox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.layers import multiclass_nms
from mmdet.models.roi_heads.bbox_heads import Shared2FCBBoxHead
from mmdet.models.utils import empty_instances
from mmdet.registry import MODELS
from mmdet.structures.bbox import get_box_tensor, scale_boxes
@MODELS.register_module(force=True)  # avoid bug
class DeticBBoxHead(Shared2FCBBoxHead):
    """``Shared2FCBBoxHead`` variant used by Detic.

    The classification branch is rebuilt from ``cls_predictor_cfg`` so a
    custom classifier module can be plugged in, and prediction consumes
    the classifier output directly (``num_classes`` channels, no extra
    background column and no softmax applied here).
    """
    def __init__(self,
                 *args,
                 init_cfg: Optional[Union[dict, ConfigDict]] = None,
                 **kwargs) -> None:
        super().__init__(*args, init_cfg=init_cfg, **kwargs)
        # reconstruct fc_cls and fc_reg since input channels are changed
        assert self.with_cls
        # exactly ``num_classes`` output channels: no background slot
        cls_channels = self.num_classes
        cls_predictor_cfg_ = self.cls_predictor_cfg.copy()
        cls_predictor_cfg_.update(
            in_features=self.cls_last_dim, out_features=cls_channels)
        # build the classifier from config so a non-linear/zero-shot
        # predictor can be swapped in
        self.fc_cls = MODELS.build(cls_predictor_cfg_)
    def _predict_by_feat_single(
            self,
            roi: Tensor,
            cls_score: Tensor,
            bbox_pred: Tensor,
            img_meta: dict,
            rescale: bool = False,
            rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:
        """Transform a single image's features extracted from the head into
        bbox results.

        Args:
            roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
                last dimension 5 arrange as (batch_index, x1, y1, x2, y2).
            cls_score (Tensor): Box scores, has shape
                (num_boxes, num_classes + 1).
            bbox_pred (Tensor): Box energies / deltas.
                has shape (num_boxes, num_classes * 4).
            img_meta (dict): image information.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.
            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
                Defaults to None

        Returns:
            :obj:`InstanceData`: Detection results of each image\
            Each item usually contains following keys.
                - scores (Tensor): Classification scores, has a shape
                  (num_instance, )
                - labels (Tensor): Labels of bboxes, has a shape
                  (num_instances, ).
                - bboxes (Tensor): Has a shape (num_instances, 4),
                  the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        results = InstanceData()
        if roi.shape[0] == 0:
            # no proposals: return an empty (typed) result container
            return empty_instances([img_meta],
                                   roi.device,
                                   task_type='bbox',
                                   instance_results=[results],
                                   box_type=self.predict_box_type,
                                   use_box_type=False,
                                   num_classes=self.num_classes,
                                   score_per_cls=rcnn_test_cfg is None)[0]
        # classifier output is used as-is -- no softmax here (presumably
        # already normalized by the predictor; verify against fc_cls)
        scores = cls_score
        img_shape = img_meta['img_shape']
        num_rois = roi.size(0)
        # decode one box per class (or a single one when class-agnostic)
        num_classes = 1 if self.reg_class_agnostic else self.num_classes
        roi = roi.repeat_interleave(num_classes, dim=0)
        bbox_pred = bbox_pred.view(-1, self.bbox_coder.encode_size)
        bboxes = self.bbox_coder.decode(
            roi[..., 1:], bbox_pred, max_shape=img_shape)
        if rescale and bboxes.size(0) > 0:
            # map boxes back to the original image resolution
            assert img_meta.get('scale_factor') is not None
            scale_factor = [1 / s for s in img_meta['scale_factor']]
            bboxes = scale_boxes(bboxes, scale_factor)
        # Get the inside tensor when `bboxes` is a box type
        bboxes = get_box_tensor(bboxes)
        box_dim = bboxes.size(-1)
        bboxes = bboxes.view(num_rois, -1)
        if rcnn_test_cfg is None:
            # This means that it is aug test.
            # It needs to return the raw results without nms.
            results.bboxes = bboxes
            results.scores = scores
        else:
            det_bboxes, det_labels = multiclass_nms(
                bboxes,
                scores,
                rcnn_test_cfg.score_thr,
                rcnn_test_cfg.nms,
                rcnn_test_cfg.max_per_img,
                box_dim=box_dim)
            results.bboxes = det_bboxes[:, :-1]
            results.scores = det_bboxes[:, -1]
            results.labels = det_labels
        return results
| 4,599 | 39.707965 | 76 | py |
ERD | ERD-main/projects/Detic/detic/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn.functional as F
from mmengine.logging import print_log
from .text_encoder import CLIPTextEncoder
# download from
# https://github.com/facebookresearch/Detic/tree/main/datasets/metadata
DATASET_EMBEDDINGS = {
'lvis': 'datasets/metadata/lvis_v1_clip_a+cname.npy',
'objects365': 'datasets/metadata/o365_clip_a+cnamefix.npy',
'openimages': 'datasets/metadata/oid_clip_a+cname.npy',
'coco': 'datasets/metadata/coco_clip_a+cname.npy',
}
def get_text_embeddings(dataset=None,
                        custom_vocabulary=None,
                        prompt_prefix='a '):
    """Return CLIP text embeddings for a dataset or a custom vocabulary.

    Exactly one of ``dataset`` and ``custom_vocabulary`` must be given.
    For a known dataset with precomputed embeddings, the *path* of the
    ``.npy`` file is returned; otherwise the embeddings are computed on
    the fly with :class:`CLIPTextEncoder` and returned as a D x C tensor.

    Args:
        dataset (str, optional): Dataset name (e.g. ``'lvis'``).
        custom_vocabulary (list[str], optional): Explicit class names.
        prompt_prefix (str): Prefix prepended to every class name before
            encoding. Defaults to ``'a '``.
    """
    assert (dataset is None) ^ (custom_vocabulary is None), \
        'Either `dataset` or `custom_vocabulary` should be specified.'
    if dataset:
        if dataset in DATASET_EMBEDDINGS:
            # Precomputed embeddings exist; hand back their path.
            return DATASET_EMBEDDINGS[dataset]
        custom_vocabulary = get_class_names(dataset)
    encoder = CLIPTextEncoder()
    encoder.eval()
    prompts = [prompt_prefix + name for name in custom_vocabulary]
    print_log(
        f'Computing text embeddings for {len(custom_vocabulary)} classes.')
    embeddings = encoder(prompts).detach().permute(1, 0).contiguous().cpu()
    return embeddings
def get_class_names(dataset):
    """Resolve a dataset name to its registered class names.

    Args:
        dataset (str): One of ``'coco'``, ``'cityscapes'``, ``'voc'``,
            ``'openimages'`` or ``'lvis'``.

    Returns:
        The ``METAINFO['classes']`` sequence of the matching mmdet dataset.

    Raises:
        TypeError: If ``dataset`` is not a string, or is a string that
            does not name a supported dataset.
    """
    # Imports are kept local so that only the requested dataset module
    # is loaded.
    if dataset == 'coco':
        from mmdet.datasets import CocoDataset
        class_names = CocoDataset.METAINFO['classes']
    elif dataset == 'cityscapes':
        from mmdet.datasets import CityscapesDataset
        class_names = CityscapesDataset.METAINFO['classes']
    elif dataset == 'voc':
        from mmdet.datasets import VOCDataset
        class_names = VOCDataset.METAINFO['classes']
    elif dataset == 'openimages':
        from mmdet.datasets import OpenImagesDataset
        class_names = OpenImagesDataset.METAINFO['classes']
    elif dataset == 'lvis':
        from mmdet.datasets import LVISV1Dataset
        class_names = LVISV1Dataset.METAINFO['classes']
    elif isinstance(dataset, str):
        # Previously an unknown *name* was reported as an invalid *type*
        # ("<class 'str'>"), hiding the real problem from the user.
        # TypeError is kept for backward compatibility with callers.
        raise TypeError(f'Unsupported dataset name: {dataset}')
    else:
        raise TypeError(f'Invalid type for dataset name: {type(dataset)}')
    return class_names
def reset_cls_layer_weight(model, weight):
    """Overwrite the zero-shot classifier weights of every bbox head.

    Args:
        model: Detector whose ``model.roi_head.bbox_head`` modules each
            expose a ``fc_cls.zs_weight`` tensor to replace.
        weight (str | Tensor): Path of a ``.npy`` file holding a C x D
            embedding matrix, or an already-built ``D x C`` tensor.
    """
    if isinstance(weight, str):
        print_log(f'Resetting cls_layer_weight from file: {weight}')
        zs_weight = torch.tensor(
            np.load(weight),
            dtype=torch.float32).permute(1, 0).contiguous()  # D x C
    else:
        zs_weight = weight
    # Append an all-zero column for the background class.
    zs_weight = torch.cat(
        [zs_weight, zs_weight.new_zeros(
            (zs_weight.shape[0], 1))], dim=1)  # D x (C + 1)
    zs_weight = F.normalize(zs_weight, p=2, dim=0)
    # Follow the model's device instead of hard-coding 'cuda' so that
    # CPU-only inference/debugging also works; if the model has no
    # parameters, leave the tensor where it is.
    device = next(model.parameters(), zs_weight).device
    zs_weight = zs_weight.to(device)
    num_classes = zs_weight.shape[-1]
    for bbox_head in model.roi_head.bbox_head:
        bbox_head.num_classes = num_classes
        # Drop the old attribute first so nn.Module bookkeeping cannot
        # reject the reassignment.
        del bbox_head.fc_cls.zs_weight
        bbox_head.fc_cls.zs_weight = zs_weight
| 2,864 | 35.265823 | 78 | py |
ERD | ERD-main/projects/Detic/detic/detic_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Sequence, Tuple
import torch
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.roi_heads import CascadeRoIHead
from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.models.test_time_augs import merge_aug_masks
from mmdet.models.utils.misc import empty_instances
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi, get_box_tensor
from mmdet.utils import ConfigType, InstanceList, MultiConfig
@MODELS.register_module(force=True) # avoid bug
class DeticRoIHead(CascadeRoIHead):
    """Cascade RoI head used by Detic.

    Compared with :class:`CascadeRoIHead`: classification scores are
    sigmoid-activated and averaged over the cascade stages at test time,
    proposal scores from the CenterNet2 RPN are folded into the final
    class scores, and a single mask head is shared by all stages.
    """
    def init_mask_head(self, mask_roi_extractor: MultiConfig,
                       mask_head: MultiConfig) -> None:
        """Initialize mask head and mask roi extractor.
        Args:
            mask_head (dict): Config of mask in mask head.
            mask_roi_extractor (:obj:`ConfigDict`, dict or list):
                Config of mask roi extractor.
        """
        self.mask_head = MODELS.build(mask_head)
        if mask_roi_extractor is not None:
            self.share_roi_extractor = False
            self.mask_roi_extractor = MODELS.build(mask_roi_extractor)
        else:
            # Reuse the bbox RoI extractor when no dedicated one is given.
            self.share_roi_extractor = True
            self.mask_roi_extractor = self.bbox_roi_extractor
    def _refine_roi(self, x: Tuple[Tensor], rois: Tensor,
                    batch_img_metas: List[dict],
                    num_proposals_per_img: Sequence[int], **kwargs) -> tuple:
        """Multi-stage refinement of RoI.
        Args:
            x (tuple[Tensor]): List of multi-level img features.
            rois (Tensor): shape (n, 5), [batch_ind, x1, y1, x2, y2]
            batch_img_metas (list[dict]): List of image information.
            num_proposals_per_img (sequence[int]): number of proposals
                in each image.
        Returns:
            tuple:
               - rois (Tensor): Refined RoI.
               - cls_scores (list[Tensor]): Average predicted
                   cls score per image.
               - bbox_preds (list[Tensor]): Bbox branch predictions
                   for the last stage of per image.
        """
        # "ms" in variable names means multi-stage
        ms_scores = []
        for stage in range(self.num_stages):
            bbox_results = self._bbox_forward(
                stage=stage, x=x, rois=rois, **kwargs)
            # split batch bbox prediction back to each image
            # (Detic scores are sigmoid, not softmax, activated).
            cls_scores = bbox_results['cls_score'].sigmoid()
            bbox_preds = bbox_results['bbox_pred']
            rois = rois.split(num_proposals_per_img, 0)
            cls_scores = cls_scores.split(num_proposals_per_img, 0)
            ms_scores.append(cls_scores)
            bbox_preds = bbox_preds.split(num_proposals_per_img, 0)
            if stage < self.num_stages - 1:
                bbox_head = self.bbox_head[stage]
                refine_rois_list = []
                for i in range(len(batch_img_metas)):
                    if rois[i].shape[0] > 0:
                        # Refine with each proposal's current best
                        # foreground class (last column is background).
                        bbox_label = cls_scores[i][:, :-1].argmax(dim=1)
                        # Refactor `bbox_head.regress_by_class` to only accept
                        # box tensor without img_idx concatenated.
                        refined_bboxes = bbox_head.regress_by_class(
                            rois[i][:, 1:], bbox_label, bbox_preds[i],
                            batch_img_metas[i])
                        refined_bboxes = get_box_tensor(refined_bboxes)
                        refined_rois = torch.cat(
                            [rois[i][:, [0]], refined_bboxes], dim=1)
                        refine_rois_list.append(refined_rois)
                rois = torch.cat(refine_rois_list)
        # ms_scores aligned
        # average scores of each image by stages
        cls_scores = [
            sum([score[i] for score in ms_scores]) / float(len(ms_scores))
            for i in range(len(batch_img_metas))
        ]  # aligned
        return rois, cls_scores, bbox_preds
    def _bbox_forward(self, stage: int, x: Tuple[Tensor],
                      rois: Tensor) -> dict:
        """Box head forward function used in both training and testing.
        Args:
            stage (int): The current stage in Cascade RoI Head.
            x (tuple[Tensor]): List of multi-level img features.
            rois (Tensor): RoIs with the shape (n, 5) where the first
                column indicates batch id of each RoI.
        Returns:
            dict[str, Tensor]: Usually returns a dictionary with keys:
                - `cls_score` (Tensor): Classification scores.
                - `bbox_pred` (Tensor): Box energies / deltas.
                - `bbox_feats` (Tensor): Extract bbox RoI features.
        """
        bbox_roi_extractor = self.bbox_roi_extractor[stage]
        bbox_head = self.bbox_head[stage]
        bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
                                        rois)
        # do not support caffe_c4 model anymore
        cls_score, bbox_pred = bbox_head(bbox_feats)
        bbox_results = dict(
            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
        return bbox_results
    def predict_bbox(self,
                     x: Tuple[Tensor],
                     batch_img_metas: List[dict],
                     rpn_results_list: InstanceList,
                     rcnn_test_cfg: ConfigType,
                     rescale: bool = False,
                     **kwargs) -> InstanceList:
        """Perform forward propagation of the bbox head and predict detection
        results on the features of the upstream network.
        Args:
            x (tuple[Tensor]): Feature maps of all scale level.
            batch_img_metas (list[dict]): List of image information.
            rpn_results_list (list[:obj:`InstanceData`]): List of region
                proposals.
            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.
        Returns:
            list[:obj:`InstanceData`]: Detection results of each image
            after the post process.
            Each item usually contains following keys.
                - scores (Tensor): Classification scores, has a shape
                  (num_instance, )
                - labels (Tensor): Labels of bboxes, has a shape
                  (num_instances, ).
                - bboxes (Tensor): Has a shape (num_instances, 4),
                  the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        proposals = [res.bboxes for res in rpn_results_list]
        proposal_scores = [res.scores for res in rpn_results_list]
        num_proposals_per_img = tuple(len(p) for p in proposals)
        rois = bbox2roi(proposals)
        if rois.shape[0] == 0:
            return empty_instances(
                batch_img_metas,
                rois.device,
                task_type='bbox',
                box_type=self.bbox_head[-1].predict_box_type,
                num_classes=self.bbox_head[-1].num_classes,
                score_per_cls=rcnn_test_cfg is None)
        # rois aligned
        rois, cls_scores, bbox_preds = self._refine_roi(
            x=x,
            rois=rois,
            batch_img_metas=batch_img_metas,
            num_proposals_per_img=num_proposals_per_img,
            **kwargs)
        # score reweighting in centernet2: geometric mean of the RoI
        # class score and the RPN proposal score.
        cls_scores = [(s * ps[:, None])**0.5
                      for s, ps in zip(cls_scores, proposal_scores)]
        # Zero out every entry except those equal to the proposal's max
        # score over foreground classes (last column is background).
        cls_scores = [
            s * (s == s[:, :-1].max(dim=1)[0][:, None]).float()
            for s in cls_scores
        ]
        # fast_rcnn_inference
        results_list = self.bbox_head[-1].predict_by_feat(
            rois=rois,
            cls_scores=cls_scores,
            bbox_preds=bbox_preds,
            batch_img_metas=batch_img_metas,
            rescale=rescale,
            rcnn_test_cfg=rcnn_test_cfg)
        return results_list
    def _mask_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict:
        """Mask head forward function used in both training and testing.
        Args:
            x (tuple[Tensor]): Tuple of multi-level img features.
            rois (Tensor): RoIs with the shape (n, 5) where the first
                column indicates batch id of each RoI.
        Returns:
            dict: Usually returns a dictionary with keys:
                - `mask_preds` (Tensor): Mask prediction.
        """
        mask_feats = self.mask_roi_extractor(
            x[:self.mask_roi_extractor.num_inputs], rois)
        # do not support caffe_c4 model anymore
        mask_preds = self.mask_head(mask_feats)
        mask_results = dict(mask_preds=mask_preds)
        return mask_results
    def mask_loss(self, x, sampling_results: List[SamplingResult],
                  batch_gt_instances: InstanceList) -> dict:
        """Run forward function and calculate loss for mask head in training.
        Args:
            x (tuple[Tensor]): Tuple of multi-level img features.
            sampling_results (list["obj:`SamplingResult`]): Sampling results.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes``, ``labels``, and
                ``masks`` attributes.
        Returns:
            dict: Usually returns a dictionary with keys:
                - `mask_preds` (Tensor): Mask prediction.
                - `loss_mask` (dict): A dictionary of mask loss components.
        """
        # Only positive RoIs contribute to the mask loss.
        pos_rois = bbox2roi([res.pos_priors for res in sampling_results])
        mask_results = self._mask_forward(x, pos_rois)
        mask_loss_and_target = self.mask_head.loss_and_target(
            mask_preds=mask_results['mask_preds'],
            sampling_results=sampling_results,
            batch_gt_instances=batch_gt_instances,
            rcnn_train_cfg=self.train_cfg[-1])
        mask_results.update(mask_loss_and_target)
        return mask_results
    def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
             batch_data_samples: SampleList) -> dict:
        """Perform forward propagation and loss calculation of the detection
        roi on the features of the upstream network.
        Note: training is not implemented for this head; calling this
        method always raises.
        Args:
            x (tuple[Tensor]): List of multi-level img features.
            rpn_results_list (list[:obj:`InstanceData`]): List of region
                proposals.
            batch_data_samples (list[:obj:`DetDataSample`]): The batch
                data samples. It usually includes information such
                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
        Returns:
            dict[str, Tensor]: A dictionary of loss components
        """
        raise NotImplementedError
    def predict_mask(self,
                     x: Tuple[Tensor],
                     batch_img_metas: List[dict],
                     results_list: List[InstanceData],
                     rescale: bool = False) -> List[InstanceData]:
        """Perform forward propagation of the mask head and predict detection
        results on the features of the upstream network.
        Args:
            x (tuple[Tensor]): Feature maps of all scale level.
            batch_img_metas (list[dict]): List of image information.
            results_list (list[:obj:`InstanceData`]): Detection results of
                each image.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.
        Returns:
            list[:obj:`InstanceData`]: Detection results of each image
            after the post process.
            Each item usually contains following keys.
                - scores (Tensor): Classification scores, has a shape
                  (num_instance, )
                - labels (Tensor): Labels of bboxes, has a shape
                  (num_instances, ).
                - bboxes (Tensor): Has a shape (num_instances, 4),
                  the last dimension 4 arrange as (x1, y1, x2, y2).
                - masks (Tensor): Has a shape (num_instances, H, W).
        """
        bboxes = [res.bboxes for res in results_list]
        mask_rois = bbox2roi(bboxes)
        if mask_rois.shape[0] == 0:
            results_list = empty_instances(
                batch_img_metas,
                mask_rois.device,
                task_type='mask',
                instance_results=results_list,
                mask_thr_binary=self.test_cfg.mask_thr_binary)
            return results_list
        num_mask_rois_per_img = [len(res) for res in results_list]
        # aug_masks holds one entry per test-time augmentation; here a
        # single forward pass is merged through the same code path.
        aug_masks = []
        mask_results = self._mask_forward(x, mask_rois)
        mask_preds = mask_results['mask_preds']
        # split batch mask prediction back to each image
        mask_preds = mask_preds.split(num_mask_rois_per_img, 0)
        aug_masks.append([m.sigmoid().detach() for m in mask_preds])
        merged_masks = []
        for i in range(len(batch_img_metas)):
            aug_mask = [mask[i] for mask in aug_masks]
            merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])
            merged_masks.append(merged_mask)
        results_list = self.mask_head.predict_by_feat(
            mask_preds=merged_masks,
            results_list=results_list,
            batch_img_metas=batch_img_metas,
            rcnn_test_cfg=self.test_cfg,
            rescale=rescale,
            activate_map=True)
        return results_list
| 13,673 | 40.816514 | 78 | py |
ERD | ERD-main/projects/Detic/detic/zero_shot_classifier.py | # Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from mmdet.registry import MODELS
@MODELS.register_module(force=True) # avoid bug
class ZeroShotClassifier(nn.Module):
def __init__(
self,
in_features: int,
out_features: int, # num_classes
zs_weight_path: str,
zs_weight_dim: int = 512,
use_bias: float = 0.0,
norm_weight: bool = True,
norm_temperature: float = 50.0,
):
super().__init__()
num_classes = out_features
self.norm_weight = norm_weight
self.norm_temperature = norm_temperature
self.use_bias = use_bias < 0
if self.use_bias:
self.cls_bias = nn.Parameter(torch.ones(1) * use_bias)
self.linear = nn.Linear(in_features, zs_weight_dim)
if zs_weight_path == 'rand':
zs_weight = torch.randn((zs_weight_dim, num_classes))
nn.init.normal_(zs_weight, std=0.01)
else:
zs_weight = torch.tensor(
np.load(zs_weight_path),
dtype=torch.float32).permute(1, 0).contiguous() # D x C
zs_weight = torch.cat(
[zs_weight, zs_weight.new_zeros(
(zs_weight_dim, 1))], dim=1) # D x (C + 1)
if self.norm_weight:
zs_weight = F.normalize(zs_weight, p=2, dim=0)
if zs_weight_path == 'rand':
self.zs_weight = nn.Parameter(zs_weight)
else:
self.register_buffer('zs_weight', zs_weight)
assert self.zs_weight.shape[1] == num_classes + 1, self.zs_weight.shape
def forward(self, x, classifier=None):
'''
Inputs:
x: B x D'
classifier_info: (C', C' x D)
'''
x = self.linear(x)
if classifier is not None:
zs_weight = classifier.permute(1, 0).contiguous() # D x C'
zs_weight = F.normalize(zs_weight, p=2, dim=0) \
if self.norm_weight else zs_weight
else:
zs_weight = self.zs_weight
if self.norm_weight:
x = self.norm_temperature * F.normalize(x, p=2, dim=1)
x = torch.mm(x, zs_weight)
if self.use_bias:
x = x + self.cls_bias
return x
| 2,324 | 30.418919 | 79 | py |
ERD | ERD-main/projects/Detic/detic/centernet_rpn_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import List, Sequence, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import Scale
from mmengine import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.dense_heads import CenterNetUpdateHead
from mmdet.models.utils import multi_apply
from mmdet.registry import MODELS
# A value large enough to act as "infinity" in comparisons.
INF = 1000000000
# Alias for a sequence of (low, high) integer range pairs.
RangeType = Sequence[Tuple[int, int]]
@MODELS.register_module(force=True) # avoid bug
class CenterNetRPNHead(CenterNetUpdateHead):
    """CenterNet-style head used as the RPN of Detic / CenterNet2.
    A single-class (``num_classes=1``) variant of
    :class:`CenterNetUpdateHead` without a classification conv tower.
    Paper link `<https://arxiv.org/abs/2103.07461>`_.
    """
    def _init_layers(self) -> None:
        """Initialize layers of the head."""
        # Unlike the parent class, only the regression conv tower is
        # built; the classifier is a single conv in `_init_predictor`.
        self._init_reg_convs()
        self._init_predictor()
    def _init_predictor(self) -> None:
        """Initialize predictor layers of the head."""
        self.conv_cls = nn.Conv2d(
            self.feat_channels, self.num_classes, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
    def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:
        """Forward features from the upstream network.
        Args:
            x (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.
        Returns:
            tuple: A tuple of each level outputs.
            - cls_scores (list[Tensor]): Box scores for each scale level, \
            each is a 4D-tensor, the channel number is num_classes.
            - bbox_preds (list[Tensor]): Box energies / deltas for each \
            scale level, each is a 4D-tensor, the channel number is 4.
        """
        res = multi_apply(self.forward_single, x, self.scales, self.strides)
        return res
    def forward_single(self, x: Tensor, scale: Scale,
                       stride: int) -> Tuple[Tensor, Tensor]:
        """Forward features of a single scale level.
        Args:
            x (Tensor): FPN feature maps of the specified stride.
            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
                the bbox prediction.
            stride (int): The corresponding stride for feature maps.
        Returns:
            tuple: scores for each class, bbox predictions of
            input feature maps.
        """
        for m in self.reg_convs:
            x = m(x)
        cls_score = self.conv_cls(x)
        bbox_pred = self.conv_reg(x)
        # scale the bbox_pred of different level
        # float to avoid overflow when enabling FP16
        bbox_pred = scale(bbox_pred).float()
        # bbox_pred needed for gradient computation has been modified
        # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace
        # F.relu(bbox_pred) with bbox_pred.clamp(min=0)
        bbox_pred = bbox_pred.clamp(min=0)
        if not self.training:
            # At test time predictions are expressed in image pixels.
            bbox_pred *= stride
        return cls_score, bbox_pred  # score aligned, box larger
    def _predict_by_feat_single(self,
                                cls_score_list: List[Tensor],
                                bbox_pred_list: List[Tensor],
                                score_factor_list: List[Tensor],
                                mlvl_priors: List[Tensor],
                                img_meta: dict,
                                cfg: ConfigDict,
                                rescale: bool = False,
                                with_nms: bool = True) -> InstanceData:
        """Transform a single image's features extracted from the head into
        bbox results.
        Args:
            cls_score_list (list[Tensor]): Box scores from all scale
                levels of a single image, each item has shape
                (num_priors * num_classes, H, W).
            bbox_pred_list (list[Tensor]): Box energies / deltas from
                all scale levels of a single image, each item has shape
                (num_priors * 4, H, W).
            score_factor_list (list[Tensor]): Score factor from all scale
                levels of a single image, each item has shape
                (num_priors * 1, H, W).
            mlvl_priors (list[Tensor]): Each element in the list is
                the priors of a single level in feature pyramid. In all
                anchor-based methods, it has shape (num_priors, 4). In
                all anchor-free methods, it has shape (num_priors, 2)
                when `with_stride=True`, otherwise it still has shape
                (num_priors, 4).
            img_meta (dict): Image meta info.
            cfg (mmengine.Config): Test / postprocessing configuration,
                if None, test_cfg would be used.
            rescale (bool): If True, return boxes in original image space.
                Defaults to False.
            with_nms (bool): If True, do nms before return boxes.
                Defaults to True.
        Returns:
            :obj:`InstanceData`: Detection results of each image
            after the post process.
            Each item usually contains following keys.
                - scores (Tensor): Classification scores, has a shape
                  (num_instance, )
                - labels (Tensor): Labels of bboxes, has a shape
                  (num_instances, ).
                - bboxes (Tensor): Has a shape (num_instances, 4),
                  the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        cfg = self.test_cfg if cfg is None else cfg
        cfg = copy.deepcopy(cfg)
        nms_pre = cfg.get('nms_pre', -1)
        mlvl_bbox_preds = []
        mlvl_valid_priors = []
        mlvl_scores = []
        mlvl_labels = []
        for level_idx, (cls_score, bbox_pred, score_factor, priors) in \
                enumerate(zip(cls_score_list, bbox_pred_list,
                              score_factor_list, mlvl_priors)):
            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
            dim = self.bbox_coder.encode_size
            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim)
            cls_score = cls_score.permute(1, 2,
                                          0).reshape(-1, self.cls_out_channels)
            heatmap = cls_score.sigmoid()
            score_thr = cfg.get('score_thr', 0)
            # Keep only locations above the score threshold, then cap
            # the number of candidates at `nms_pre`.
            candidate_inds = heatmap > score_thr  # 0.05
            pre_nms_top_n = candidate_inds.sum()  # N
            pre_nms_top_n = pre_nms_top_n.clamp(max=nms_pre)  # N
            heatmap = heatmap[candidate_inds]  # n
            candidate_nonzeros = candidate_inds.nonzero()  # n
            box_loc = candidate_nonzeros[:, 0]  # n
            labels = candidate_nonzeros[:, 1]  # n
            bbox_pred = bbox_pred[box_loc]  # n x 4
            per_grids = priors[box_loc]  # n x 2
            if candidate_inds.sum().item() > pre_nms_top_n.item():
                heatmap, top_k_indices = \
                    heatmap.topk(pre_nms_top_n, sorted=False)
                labels = labels[top_k_indices]
                bbox_pred = bbox_pred[top_k_indices]
                per_grids = per_grids[top_k_indices]
            bboxes = self.bbox_coder.decode(per_grids, bbox_pred)
            # avoid invalid boxes in RoI heads
            bboxes[:, 2] = torch.max(bboxes[:, 2], bboxes[:, 0] + 0.01)
            bboxes[:, 3] = torch.max(bboxes[:, 3], bboxes[:, 1] + 0.01)
            mlvl_bbox_preds.append(bboxes)
            mlvl_valid_priors.append(priors)
            # sqrt flattens the score distribution (CenterNet2 style).
            mlvl_scores.append(torch.sqrt(heatmap))
            mlvl_labels.append(labels)
        results = InstanceData()
        results.bboxes = torch.cat(mlvl_bbox_preds)
        results.scores = torch.cat(mlvl_scores)
        results.labels = torch.cat(mlvl_labels)
        return self._bbox_post_process(
            results=results,
            cfg=cfg,
            rescale=rescale,
            with_nms=with_nms,
            img_meta=img_meta)
| 7,938 | 39.299492 | 79 | py |
ERD | ERD-main/projects/Detic/detic/text_encoder.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union
import torch
import torch.nn as nn
class CLIPTextEncoder(nn.Module):
    """Wrapper around the CLIP text tower used to embed class-name prompts."""

    def __init__(self, model_name='ViT-B/32'):
        super().__init__()
        # Import lazily so the `clip` package is only required when this
        # encoder is actually instantiated.
        import clip
        from clip.simple_tokenizer import SimpleTokenizer
        self.tokenizer = SimpleTokenizer()
        pretrained_model, _ = clip.load(model_name, device='cpu')
        self.clip = pretrained_model

    @property
    def device(self):
        return self.clip.device

    @property
    def dtype(self):
        return self.clip.dtype

    def tokenize(self,
                 texts: Union[str, List[str]],
                 context_length: int = 77) -> torch.LongTensor:
        """Convert raw strings into zero-padded CLIP token-id tensors."""
        if isinstance(texts, str):
            texts = [texts]
        start = self.tokenizer.encoder['<|startoftext|>']
        end = self.tokenizer.encoder['<|endoftext|>']
        result = torch.zeros(len(texts), context_length, dtype=torch.long)
        for row, text in enumerate(texts):
            tokens = [start] + self.tokenizer.encode(text) + [end]
            if len(tokens) > context_length:
                # Randomly crop over-long sequences to the context length.
                offset = torch.randint(len(tokens) - context_length + 1,
                                       (1, ))[0].item()
                tokens = tokens[offset:offset + context_length]
            result[row, :len(tokens)] = torch.tensor(tokens)
        return result

    def forward(self, text):
        token_ids = self.tokenize(text)
        return self.clip.encode_text(token_ids)
| 1,605 | 30.490196 | 79 | py |
ERD | ERD-main/projects/Detic/configs/detic_centernet2_swin-b_fpn_4x_lvis-coco-in21k.py | _base_ = 'mmdet::common/lsj-200e_coco-detection.py'
custom_imports = dict(
    imports=['projects.Detic.detic'], allow_failed_imports=False)
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
# Zero-shot (CLIP-embedding) classification branch shared by all three
# cascade stages; 'rand' initializes the class weights randomly.
cls_layer = dict(
    type='ZeroShotClassifier',
    zs_weight_path='rand',
    zs_weight_dim=512,
    use_bias=0.0,
    norm_weight=True,
    norm_temperature=50.0)
reg_layer = [
    dict(type='Linear', in_features=1024, out_features=1024),
    dict(type='ReLU', inplace=True),
    dict(type='Linear', in_features=1024, out_features=4)
]
# Size of the joint label space used by the classifier heads.
num_classes = 22047
# Cascade R-CNN with a Swin-B backbone, a CenterNet-style RPN and
# Detic RoI heads.
model = dict(
    type='CascadeRCNN',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32,
        batch_augments=batch_augments),
    backbone=dict(
        type='SwinTransformer',
        embed_dims=128,
        depths=[2, 2, 18, 2],
        num_heads=[4, 8, 16, 32],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.3,
        patch_norm=True,
        out_indices=(1, 2, 3),
        with_cp=False),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024],
        out_channels=256,
        start_level=0,
        add_extra_convs='on_output',
        num_outs=5,
        init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'),
        relu_before_extra_convs=True),
    rpn_head=dict(
        type='CenterNetRPNHead',
        num_classes=1,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        conv_bias=True,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
        loss_cls=dict(
            type='GaussianFocalLoss',
            pos_weight=0.25,
            neg_weight=0.75,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
    ),
    roi_head=dict(
        type='DeticRoIHead',
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(
                type='RoIAlign',
                output_size=7,
                sampling_ratio=0,
                use_torchvision=True),
            out_channels=256,
            featmap_strides=[8, 16, 32],
            # approximately equal to
            # canonical_box_size=224, canonical_level=4 in D2
            finest_scale=112),
        # Three cascade stages differing only in their target stds
        # (progressively tighter regression targets).
        bbox_head=[
            dict(
                type='DeticBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=num_classes,
                cls_predictor_cfg=cls_layer,
                reg_predictor_cfg=reg_layer,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss', use_sigmoid=True,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='DeticBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=num_classes,
                cls_predictor_cfg=cls_layer,
                reg_predictor_cfg=reg_layer,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss', use_sigmoid=True,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='DeticBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=num_classes,
                cls_predictor_cfg=cls_layer,
                reg_predictor_cfg=reg_layer,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss', use_sigmoid=True,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ],
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[8, 16, 32],
            # approximately equal to
            # canonical_box_size=224, canonical_level=4 in D2
            finest_scale=112),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            class_agnostic=True,
            num_classes=num_classes,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        # Per-stage R-CNN training configs with increasing IoU thresholds
        # (0.6 / 0.7 / 0.8), as usual for cascade heads.
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.6,
                    neg_iou_thr=0.6,
                    min_pos_iou=0.6,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.7,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.8,
                    neg_iou_thr=0.8,
                    min_pos_iou=0.8,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(
            score_thr=0.0001,
            nms_pre=1000,
            max_per_img=256,
            nms=dict(type='nms', iou_threshold=0.9),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.02,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=300,
            mask_thr_binary=0.5)))
# Image decoding/resizing backend for the test pipeline below.
backend = 'pillow'
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        backend_args=_base_.backend_args,
        imdecode_backend=backend),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend=backend),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(batch_size=8, num_workers=4)
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
    type='AmpOptimWrapper',
    optimizer=dict(
        type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),
    paramwise_cfg=dict(norm_decay_mult=0.))
# Linear warmup for the first 4000 iters, then step decay at epochs 22/24.
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=0.00025,
        by_epoch=False,
        begin=0,
        end=4000),
    dict(
        type='MultiStepLR',
        begin=0,
        end=25,
        by_epoch=True,
        milestones=[22, 24],
        gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
| 9,887 | 32.070234 | 79 | py |
ERD | ERD-main/projects/DiffusionDet/diffusiondet/loss.py | # Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified from https://github.com/ShoufaChen/DiffusionDet/blob/main/diffusiondet/loss.py # noqa
# This work is licensed under the CC-BY-NC 4.0 License.
# Users should be careful about adopting these features in any commercial matters. # noqa
# For more details, please refer to https://github.com/ShoufaChen/DiffusionDet/blob/main/LICENSE # noqa
from typing import List, Tuple, Union
import torch
import torch.nn as nn
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures.bbox import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh
from mmdet.utils import ConfigType
@TASK_UTILS.register_module()
class DiffusionDetCriterion(nn.Module):
def __init__(
self,
num_classes,
assigner: Union[ConfigDict, nn.Module],
deep_supervision=True,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
alpha=0.25,
gamma=2.0,
reduction='sum',
loss_weight=2.0),
loss_bbox=dict(type='L1Loss', reduction='sum', loss_weight=5.0),
loss_giou=dict(type='GIoULoss', reduction='sum', loss_weight=2.0),
):
super().__init__()
self.num_classes = num_classes
if isinstance(assigner, nn.Module):
self.assigner = assigner
else:
self.assigner = TASK_UTILS.build(assigner)
self.deep_supervision = deep_supervision
self.loss_cls = MODELS.build(loss_cls)
self.loss_bbox = MODELS.build(loss_bbox)
self.loss_giou = MODELS.build(loss_giou)
def forward(self, outputs, batch_gt_instances, batch_img_metas):
batch_indices = self.assigner(outputs, batch_gt_instances,
batch_img_metas)
# Compute all the requested losses
loss_cls = self.loss_classification(outputs, batch_gt_instances,
batch_indices)
loss_bbox, loss_giou = self.loss_boxes(outputs, batch_gt_instances,
batch_indices)
losses = dict(
loss_cls=loss_cls, loss_bbox=loss_bbox, loss_giou=loss_giou)
if self.deep_supervision:
assert 'aux_outputs' in outputs
for i, aux_outputs in enumerate(outputs['aux_outputs']):
batch_indices = self.assigner(aux_outputs, batch_gt_instances,
batch_img_metas)
loss_cls = self.loss_classification(aux_outputs,
batch_gt_instances,
batch_indices)
loss_bbox, loss_giou = self.loss_boxes(aux_outputs,
batch_gt_instances,
batch_indices)
tmp_losses = dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_giou=loss_giou)
for name, value in tmp_losses.items():
losses[f's.{i}.{name}'] = value
return losses
def loss_classification(self, outputs, batch_gt_instances, indices):
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
target_classes_list = [
gt.labels[J] for gt, (_, J) in zip(batch_gt_instances, indices)
]
target_classes = torch.full(
src_logits.shape[:2],
self.num_classes,
dtype=torch.int64,
device=src_logits.device)
for idx in range(len(batch_gt_instances)):
target_classes[idx, indices[idx][0]] = target_classes_list[idx]
src_logits = src_logits.flatten(0, 1)
target_classes = target_classes.flatten(0, 1)
# comp focal loss.
num_instances = max(torch.cat(target_classes_list).shape[0], 1)
loss_cls = self.loss_cls(
src_logits,
target_classes,
) / num_instances
return loss_cls
    def loss_boxes(self, outputs, batch_gt_instances, indices):
        """L1 + GIoU losses over the matched (prediction, gt) box pairs.

        The L1 loss is computed on image-size-normalized xyxy boxes while the
        GIoU loss uses absolute-coordinate boxes; both are divided by the
        total number of matched pairs in the batch.
        """
        assert 'pred_boxes' in outputs
        pred_boxes = outputs['pred_boxes']
        # Matched gt boxes: normalized cxcywh (attached during target
        # preparation) and absolute xyxy.
        target_bboxes_norm_list = [
            gt.norm_bboxes_cxcywh[J]
            for gt, (_, J) in zip(batch_gt_instances, indices)
        ]
        target_bboxes_list = [
            gt.bboxes[J] for gt, (_, J) in zip(batch_gt_instances, indices)
        ]
        pred_bboxes_list = []
        pred_bboxes_norm_list = []
        for idx in range(len(batch_gt_instances)):
            # indices[idx][0] selects the matched queries of image `idx`.
            pred_bboxes_list.append(pred_boxes[idx, indices[idx][0]])
            image_size = batch_gt_instances[idx].image_size
            pred_bboxes_norm_list.append(pred_boxes[idx, indices[idx][0]] /
                                         image_size)
        pred_boxes_cat = torch.cat(pred_bboxes_list)
        pred_boxes_norm_cat = torch.cat(pred_bboxes_norm_list)
        target_bboxes_cat = torch.cat(target_bboxes_list)
        target_bboxes_norm_cat = torch.cat(target_bboxes_norm_list)
        if len(pred_boxes_cat) > 0:
            num_instances = pred_boxes_cat.shape[0]
            # Targets are stored as cxcywh; convert to xyxy so both operands
            # of the L1 loss share the prediction's format.
            loss_bbox = self.loss_bbox(
                pred_boxes_norm_cat,
                bbox_cxcywh_to_xyxy(target_bboxes_norm_cat)) / num_instances
            loss_giou = self.loss_giou(pred_boxes_cat,
                                       target_bboxes_cat) / num_instances
        else:
            # No matches in the whole batch: emit zero losses that stay
            # connected to the prediction graph.
            loss_bbox = pred_boxes.sum() * 0
            loss_giou = pred_boxes.sum() * 0
        return loss_bbox, loss_giou
@TASK_UTILS.register_module()
class DiffusionDetMatcher(nn.Module):
    """This class computes an assignment between the targets and the
    predictions of the network.

    For efficiency reasons, the targets don't include the no_object.
    Because of this, in general, there are more predictions than targets. In
    this case, we do a 1-to-k (dynamic) matching of the best predictions,
    while the others are un-matched (and thus treated as non-objects).
    """

    def __init__(self,
                 match_costs: Union[List[Union[dict, ConfigDict]], dict,
                                    ConfigDict],
                 center_radius: float = 2.5,
                 candidate_topk: int = 5,
                 iou_calculator: ConfigType = dict(type='BboxOverlaps2D'),
                 **kwargs):
        super().__init__()

        self.center_radius = center_radius
        self.candidate_topk = candidate_topk

        if isinstance(match_costs, dict):
            match_costs = [match_costs]
        elif isinstance(match_costs, list):
            assert len(match_costs) > 0, \
                'match_costs must not be a empty list.'

        self.use_focal_loss = False
        self.use_fed_loss = False
        for _match_cost in match_costs:
            if _match_cost.get('type') == 'FocalLossCost':
                self.use_focal_loss = True
            if _match_cost.get('type') == 'FedLoss':
                self.use_fed_loss = True
                # Federated-loss matching is not implemented.
                raise NotImplementedError

        self.match_costs = [
            TASK_UTILS.build(match_cost) for match_cost in match_costs
        ]
        self.iou_calculator = TASK_UTILS.build(iou_calculator)

    def forward(self, outputs, batch_gt_instances, batch_img_metas):
        """Assign predictions to gts independently for each image.

        Returns:
            list[tuple(Tensor, Tensor)]: per image, a bool mask over queries
            marking the matched (foreground) ones and, for each matched
            query, the index of its assigned gt.
        """
        assert 'pred_logits' in outputs and 'pred_boxes' in outputs
        pred_logits = outputs['pred_logits']
        pred_bboxes = outputs['pred_boxes']
        batch_size = len(batch_gt_instances)

        assert batch_size == pred_logits.shape[0] == pred_bboxes.shape[0]
        batch_indices = []
        for i in range(batch_size):
            pred_instances = InstanceData()
            pred_instances.bboxes = pred_bboxes[i, ...]
            pred_instances.scores = pred_logits[i, ...]
            gt_instances = batch_gt_instances[i]
            img_meta = batch_img_metas[i]
            indices = self.single_assigner(pred_instances, gt_instances,
                                           img_meta)
            batch_indices.append(indices)
        return batch_indices

    def single_assigner(self, pred_instances, gt_instances, img_meta):
        """SimOTA-style dynamic-k assignment for one image."""
        with torch.no_grad():
            gt_bboxes = gt_instances.bboxes
            pred_bboxes = pred_instances.bboxes
            num_gt = gt_bboxes.size(0)

            if num_gt == 0:  # empty object in key frame
                valid_mask = pred_bboxes.new_zeros((pred_bboxes.shape[0], ),
                                                   dtype=torch.bool)
                matched_gt_inds = pred_bboxes.new_zeros((gt_bboxes.shape[0], ),
                                                        dtype=torch.long)
                return valid_mask, matched_gt_inds

            valid_mask, is_in_boxes_and_center = \
                self.get_in_gt_and_in_center_info(
                    bbox_xyxy_to_cxcywh(pred_bboxes),
                    bbox_xyxy_to_cxcywh(gt_bboxes)
                )

            cost_list = []
            for match_cost in self.match_costs:
                cost = match_cost(
                    pred_instances=pred_instances,
                    gt_instances=gt_instances,
                    img_meta=img_meta)
                cost_list.append(cost)

            pairwise_ious = self.iou_calculator(pred_bboxes, gt_bboxes)

            # Strongly penalize pairs whose prediction center is not inside
            # both the gt box and its center region ...
            cost_list.append((~is_in_boxes_and_center) * 100.0)
            cost_matrix = torch.stack(cost_list).sum(0)
            # ... and pairs whose prediction is not a valid candidate at all.
            cost_matrix[~valid_mask] = cost_matrix[~valid_mask] + 10000.0

            fg_mask_inboxes, matched_gt_inds = \
                self.dynamic_k_matching(
                    cost_matrix, pairwise_ious, num_gt)
        return fg_mask_inboxes, matched_gt_inds

    def get_in_gt_and_in_center_info(
            self, pred_bboxes: Tensor,
            gt_bboxes: Tensor) -> Tuple[Tensor, Tensor]:
        """Get the information of which prior is in gt bboxes and gt center
        priors."""
        xy_target_gts = bbox_cxcywh_to_xyxy(gt_bboxes)  # (x1, y1, x2, y2)

        pred_bboxes_center_x = pred_bboxes[:, 0].unsqueeze(1)
        pred_bboxes_center_y = pred_bboxes[:, 1].unsqueeze(1)

        # whether the center of each anchor is inside a gt box
        b_l = pred_bboxes_center_x > xy_target_gts[:, 0].unsqueeze(0)
        b_r = pred_bboxes_center_x < xy_target_gts[:, 2].unsqueeze(0)
        b_t = pred_bboxes_center_y > xy_target_gts[:, 1].unsqueeze(0)
        b_b = pred_bboxes_center_y < xy_target_gts[:, 3].unsqueeze(0)
        # (b_l.long()+b_r.long()+b_t.long()+b_b.long())==4 [300,num_gt] ,
        is_in_boxes = ((b_l.long() + b_r.long() + b_t.long() +
                        b_b.long()) == 4)
        is_in_boxes_all = is_in_boxes.sum(1) > 0  # [num_query]
        # in fixed center
        # Fix: use the configured radius; this was previously hard-coded to
        # 2.5 and silently ignored the `center_radius` constructor argument.
        center_radius = self.center_radius

        # Modified to self-adapted sampling --- the center size depends
        # on the size of the gt boxes
        # https://github.com/dulucas/UVO_Challenge/blob/main/Track1/detection/mmdet/core/bbox/assigners/rpn_sim_ota_assigner.py#L212 # noqa
        b_l = pred_bboxes_center_x > (
            gt_bboxes[:, 0] -
            (center_radius *
             (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)
        b_r = pred_bboxes_center_x < (
            gt_bboxes[:, 0] +
            (center_radius *
             (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)
        b_t = pred_bboxes_center_y > (
            gt_bboxes[:, 1] -
            (center_radius *
             (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)
        b_b = pred_bboxes_center_y < (
            gt_bboxes[:, 1] +
            (center_radius *
             (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)

        is_in_centers = ((b_l.long() + b_r.long() + b_t.long() +
                          b_b.long()) == 4)
        is_in_centers_all = is_in_centers.sum(1) > 0

        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
        is_in_boxes_and_center = (is_in_boxes & is_in_centers)

        return is_in_boxes_anchor, is_in_boxes_and_center

    def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor,
                           num_gt: int) -> Tuple[Tensor, Tensor]:
        """Use IoU and matching cost to calculate the dynamic top-k positive
        targets."""
        matching_matrix = torch.zeros_like(cost)
        # select candidate topk ious for dynamic-k calculation
        candidate_topk = min(self.candidate_topk, pairwise_ious.size(0))
        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
        # calculate dynamic k for each gt
        dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)

        for gt_idx in range(num_gt):
            _, pos_idx = torch.topk(
                cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False)
            matching_matrix[:, gt_idx][pos_idx] = 1

        del topk_ious, dynamic_ks, pos_idx

        # Resolve queries matched to more than one gt: keep only the
        # lowest-cost gt for each such query.
        prior_match_gt_mask = matching_matrix.sum(1) > 1
        if prior_match_gt_mask.sum() > 0:
            _, cost_argmin = torch.min(cost[prior_match_gt_mask, :], dim=1)
            matching_matrix[prior_match_gt_mask, :] *= 0
            matching_matrix[prior_match_gt_mask, cost_argmin] = 1

        # Every gt must be matched at least once: greedily hand the cheapest
        # still-available query to each unmatched gt.
        while (matching_matrix.sum(0) == 0).any():
            matched_query_id = matching_matrix.sum(1) > 0
            cost[matched_query_id] += 100000.0
            unmatch_id = torch.nonzero(
                matching_matrix.sum(0) == 0, as_tuple=False).squeeze(1)
            for gt_idx in unmatch_id:
                pos_idx = torch.argmin(cost[:, gt_idx])
                matching_matrix[:, gt_idx][pos_idx] = 1.0
            if (matching_matrix.sum(1) > 1).sum() > 0:
                # Fix: recompute the multi-match mask here. The original code
                # reused the stale `prior_match_gt_mask` from before the
                # replenishing loop, so the wrong rows could be cleared.
                multi_match_mask = matching_matrix.sum(1) > 1
                _, cost_argmin = torch.min(cost[multi_match_mask], dim=1)
                matching_matrix[multi_match_mask] *= 0
                matching_matrix[multi_match_mask, cost_argmin] = 1

        assert not (matching_matrix.sum(0) == 0).any()
        # get foreground mask inside box and center prior
        fg_mask_inboxes = matching_matrix.sum(1) > 0
        matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1)
        return fg_mask_inboxes, matched_gt_inds
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified from https://github.com/ShoufaChen/DiffusionDet/blob/main/diffusiondet/detector.py # noqa
# Modified from https://github.com/ShoufaChen/DiffusionDet/blob/main/diffusiondet/head.py # noqa
# This work is licensed under the CC-BY-NC 4.0 License.
# Users should be careful about adopting these features in any commercial matters. # noqa
# For more details, please refer to https://github.com/ShoufaChen/DiffusionDet/blob/main/LICENSE # noqa
import copy
import math
import random
import warnings
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_activation_layer
from mmcv.ops import batched_nms
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures import SampleList
from mmdet.structures.bbox import (bbox2roi, bbox_cxcywh_to_xyxy,
bbox_xyxy_to_cxcywh, get_box_wh,
scale_boxes)
from mmdet.utils import InstanceList
_DEFAULT_SCALE_CLAMP = math.log(100000.0 / 16)
def cosine_beta_schedule(timesteps, s=0.008):
    """Cosine schedule as proposed in
    https://openreview.net/forum?id=-NEXDKk8gZ."""
    grid = torch.linspace(
        0, timesteps, timesteps + 1, dtype=torch.float64)
    # Squared-cosine signal level at each grid point, normalized so that
    # the cumulative alpha at t=0 equals 1.
    signal = torch.cos(((grid / timesteps) + s) / (1 + s) * math.pi * 0.5)**2
    alpha_bar = signal / signal[0]
    betas = 1 - (alpha_bar[1:] / alpha_bar[:-1])
    return torch.clip(betas, 0, 0.999)
def extract(a, t, x_shape):
    """extract the appropriate t index for a batch of indices."""
    picked = a.gather(-1, t)
    # Reshape to (batch, 1, 1, ...) so the values broadcast against x_shape.
    trailing_ones = (1, ) * (len(x_shape) - 1)
    return picked.reshape(t.shape[0], *trailing_ones)
class SinusoidalPositionEmbeddings(nn.Module):
    """Map scalar timesteps to ``dim``-dimensional sinusoidal embeddings.

    The first half of the output holds sines, the second half cosines, over
    geometrically spaced frequencies (transformer-style).
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, time):
        half_dim = self.dim // 2
        log_scale = math.log(10000) / (half_dim - 1)
        freqs = torch.exp(
            torch.arange(half_dim, device=time.device) * -log_scale)
        angles = time[:, None] * freqs[None, :]
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
@MODELS.register_module()
class DynamicDiffusionDetHead(nn.Module):
def __init__(self,
num_classes=80,
feat_channels=256,
num_proposals=500,
num_heads=6,
prior_prob=0.01,
snr_scale=2.0,
timesteps=1000,
sampling_timesteps=1,
self_condition=False,
box_renewal=True,
use_ensemble=True,
deep_supervision=True,
ddim_sampling_eta=1.0,
criterion=dict(
type='DiffusionDetCriterion',
num_classes=80,
assigner=dict(
type='DiffusionDetMatcher',
match_costs=[
dict(
type='FocalLossCost',
alpha=2.0,
gamma=0.25,
weight=2.0),
dict(
type='BBoxL1Cost',
weight=5.0,
box_format='xyxy'),
dict(type='IoUCost', iou_mode='giou', weight=2.0)
],
center_radius=2.5,
candidate_topk=5),
),
single_head=dict(
type='DiffusionDetHead',
num_cls_convs=1,
num_reg_convs=3,
dim_feedforward=2048,
num_heads=8,
dropout=0.0,
act_cfg=dict(type='ReLU'),
dynamic_conv=dict(dynamic_dim=64, dynamic_num=2)),
roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
test_cfg=None,
**kwargs) -> None:
super().__init__()
self.roi_extractor = MODELS.build(roi_extractor)
self.num_classes = num_classes
self.num_classes = num_classes
self.feat_channels = feat_channels
self.num_proposals = num_proposals
self.num_heads = num_heads
# Build Diffusion
assert isinstance(timesteps, int), 'The type of `timesteps` should ' \
f'be int but got {type(timesteps)}'
assert sampling_timesteps <= timesteps
self.timesteps = timesteps
self.sampling_timesteps = sampling_timesteps
self.snr_scale = snr_scale
self.ddim_sampling = self.sampling_timesteps < self.timesteps
self.ddim_sampling_eta = ddim_sampling_eta
self.self_condition = self_condition
self.box_renewal = box_renewal
self.use_ensemble = use_ensemble
self._build_diffusion()
# Build assigner
assert criterion.get('assigner', None) is not None
assigner = TASK_UTILS.build(criterion.get('assigner'))
# Init parameters.
self.use_focal_loss = assigner.use_focal_loss
self.use_fed_loss = assigner.use_fed_loss
# build criterion
criterion.update(deep_supervision=deep_supervision)
self.criterion = TASK_UTILS.build(criterion)
# Build Dynamic Head.
single_head_ = single_head.copy()
single_head_num_classes = single_head_.get('num_classes', None)
if single_head_num_classes is None:
single_head_.update(num_classes=num_classes)
else:
if single_head_num_classes != num_classes:
warnings.warn(
'The `num_classes` of `DynamicDiffusionDetHead` and '
'`SingleDiffusionDetHead` should be same, changing '
f'`single_head.num_classes` to {num_classes}')
single_head_.update(num_classes=num_classes)
single_head_feat_channels = single_head_.get('feat_channels', None)
if single_head_feat_channels is None:
single_head_.update(feat_channels=feat_channels)
else:
if single_head_feat_channels != feat_channels:
warnings.warn(
'The `feat_channels` of `DynamicDiffusionDetHead` and '
'`SingleDiffusionDetHead` should be same, changing '
f'`single_head.feat_channels` to {feat_channels}')
single_head_.update(feat_channels=feat_channels)
default_pooler_resolution = roi_extractor['roi_layer'].get(
'output_size')
assert default_pooler_resolution is not None
single_head_pooler_resolution = single_head_.get('pooler_resolution')
if single_head_pooler_resolution is None:
single_head_.update(pooler_resolution=default_pooler_resolution)
else:
if single_head_pooler_resolution != default_pooler_resolution:
warnings.warn(
'The `pooler_resolution` of `DynamicDiffusionDetHead` '
'and `SingleDiffusionDetHead` should be same, changing '
f'`single_head.pooler_resolution` to {num_classes}')
single_head_.update(
pooler_resolution=default_pooler_resolution)
single_head_.update(
use_focal_loss=self.use_focal_loss, use_fed_loss=self.use_fed_loss)
single_head_module = MODELS.build(single_head_)
self.num_heads = num_heads
self.head_series = nn.ModuleList(
[copy.deepcopy(single_head_module) for _ in range(num_heads)])
self.deep_supervision = deep_supervision
# Gaussian random feature embedding layer for time
time_dim = feat_channels * 4
self.time_mlp = nn.Sequential(
SinusoidalPositionEmbeddings(feat_channels),
nn.Linear(feat_channels, time_dim), nn.GELU(),
nn.Linear(time_dim, time_dim))
self.prior_prob = prior_prob
self.test_cfg = test_cfg
self.use_nms = self.test_cfg.get('use_nms', True)
self._init_weights()
    def _init_weights(self):
        """Xavier-initialize all matrix parameters and set classification
        output parameters for focal/fed loss prior probability."""
        # init all parameters.
        # bias_value = -log((1 - p) / p): the focal-loss prior-probability
        # initialization (Lin et al., RetinaNet-style).
        bias_value = -math.log((1 - self.prior_prob) / self.prior_prob)
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

            # initialize the bias for focal loss and fed loss.
            if self.use_focal_loss or self.use_fed_loss:
                # NOTE(review): this matches ANY parameter whose last dim is
                # num_classes (or num_classes + 1), including weight
                # matrices, not only the classifier bias — confirm intent.
                if p.shape[-1] == self.num_classes or \
                        p.shape[-1] == self.num_classes + 1:
                    nn.init.constant_(p, bias_value)
    def _build_diffusion(self):
        """Precompute all cosine-schedule diffusion constants and register
        them as (non-trainable) buffers so they follow the module's device
        and dtype moves."""
        betas = cosine_beta_schedule(self.timesteps)
        alphas = 1. - betas
        alphas_cumprod = torch.cumprod(alphas, dim=0)
        # alpha_bar shifted right by one step, with alpha_bar_{-1} := 1.
        alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.)

        self.register_buffer('betas', betas)
        self.register_buffer('alphas_cumprod', alphas_cumprod)
        self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
        self.register_buffer('sqrt_one_minus_alphas_cumprod',
                             torch.sqrt(1. - alphas_cumprod))
        self.register_buffer('log_one_minus_alphas_cumprod',
                             torch.log(1. - alphas_cumprod))
        self.register_buffer('sqrt_recip_alphas_cumprod',
                             torch.sqrt(1. / alphas_cumprod))
        self.register_buffer('sqrt_recipm1_alphas_cumprod',
                             torch.sqrt(1. / alphas_cumprod - 1))

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        # equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        posterior_variance = betas * (1. - alphas_cumprod_prev) / (
            1. - alphas_cumprod)
        self.register_buffer('posterior_variance', posterior_variance)

        # log calculation clipped because the posterior variance is 0 at
        # the beginning of the diffusion chain
        self.register_buffer('posterior_log_variance_clipped',
                             torch.log(posterior_variance.clamp(min=1e-20)))
        self.register_buffer(
            'posterior_mean_coef1',
            betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
        self.register_buffer('posterior_mean_coef2',
                             (1. - alphas_cumprod_prev) * torch.sqrt(alphas) /
                             (1. - alphas_cumprod))
def forward(self, features, init_bboxes, init_t, init_features=None):
time = self.time_mlp(init_t, )
inter_class_logits = []
inter_pred_bboxes = []
bs = len(features[0])
bboxes = init_bboxes
if init_features is not None:
init_features = init_features[None].repeat(1, bs, 1)
proposal_features = init_features.clone()
else:
proposal_features = None
for head_idx, single_head in enumerate(self.head_series):
class_logits, pred_bboxes, proposal_features = single_head(
features, bboxes, proposal_features, self.roi_extractor, time)
if self.deep_supervision:
inter_class_logits.append(class_logits)
inter_pred_bboxes.append(pred_bboxes)
bboxes = pred_bboxes.detach()
if self.deep_supervision:
return torch.stack(inter_class_logits), torch.stack(
inter_pred_bboxes)
else:
return class_logits[None, ...], pred_bboxes[None, ...]
def loss(self, x: Tuple[Tensor], batch_data_samples: SampleList) -> dict:
"""Perform forward propagation and loss calculation of the detection
head on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
prepare_outputs = self.prepare_training_targets(batch_data_samples)
(batch_gt_instances, batch_pred_instances, batch_gt_instances_ignore,
batch_img_metas) = prepare_outputs
batch_diff_bboxes = torch.stack([
pred_instances.diff_bboxes_abs
for pred_instances in batch_pred_instances
])
batch_time = torch.stack(
[pred_instances.time for pred_instances in batch_pred_instances])
pred_logits, pred_bboxes = self(x, batch_diff_bboxes, batch_time)
output = {
'pred_logits': pred_logits[-1],
'pred_boxes': pred_bboxes[-1]
}
if self.deep_supervision:
output['aux_outputs'] = [{
'pred_logits': a,
'pred_boxes': b
} for a, b in zip(pred_logits[:-1], pred_bboxes[:-1])]
losses = self.criterion(output, batch_gt_instances, batch_img_metas)
return losses
def prepare_training_targets(self, batch_data_samples):
# hard-setting seed to keep results same (if necessary)
# random.seed(0)
# torch.manual_seed(0)
# torch.cuda.manual_seed_all(0)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
batch_gt_instances = []
batch_pred_instances = []
batch_gt_instances_ignore = []
batch_img_metas = []
for data_sample in batch_data_samples:
img_meta = data_sample.metainfo
gt_instances = data_sample.gt_instances
gt_bboxes = gt_instances.bboxes
h, w = img_meta['img_shape']
image_size = gt_bboxes.new_tensor([w, h, w, h])
norm_gt_bboxes = gt_bboxes / image_size
norm_gt_bboxes_cxcywh = bbox_xyxy_to_cxcywh(norm_gt_bboxes)
pred_instances = self.prepare_diffusion(norm_gt_bboxes_cxcywh,
image_size)
gt_instances.set_metainfo(dict(image_size=image_size))
gt_instances.norm_bboxes_cxcywh = norm_gt_bboxes_cxcywh
batch_gt_instances.append(gt_instances)
batch_pred_instances.append(pred_instances)
batch_img_metas.append(data_sample.metainfo)
if 'ignored_instances' in data_sample:
batch_gt_instances_ignore.append(data_sample.ignored_instances)
else:
batch_gt_instances_ignore.append(None)
return (batch_gt_instances, batch_pred_instances,
batch_gt_instances_ignore, batch_img_metas)
    def prepare_diffusion(self, gt_boxes, image_size):
        """Corrupt gt boxes into `num_proposals` noisy proposals at a random
        diffusion timestep.

        Args:
            gt_boxes (Tensor): (num_gt, 4) normalized cxcywh gt boxes.
            image_size (Tensor): (4, ) [w, h, w, h] scale tensor.

        Returns:
            InstanceData: carries the noisy boxes (normalized and absolute
            xyxy), the Gaussian noise used, and the sampled timestep in its
            metainfo.
        """
        device = gt_boxes.device
        time = torch.randint(
            0, self.timesteps, (1, ), dtype=torch.long, device=device)
        noise = torch.randn(self.num_proposals, 4, device=device)

        num_gt = gt_boxes.shape[0]
        if num_gt < self.num_proposals:
            # Pad with random boxes centered around (0.5, 0.5);
            # 3 * sigma = 1/2 --> sigma: 1/6
            box_placeholder = torch.randn(
                self.num_proposals - num_gt, 4, device=device) / 6. + 0.5
            # Keep widths/heights strictly positive.
            box_placeholder[:, 2:] = torch.clip(
                box_placeholder[:, 2:], min=1e-4)
            x_start = torch.cat((gt_boxes, box_placeholder), dim=0)
        else:
            # More gts than proposals: keep a random subset of exactly
            # `num_proposals` gt boxes.
            select_mask = [True] * self.num_proposals + \
                          [False] * (num_gt - self.num_proposals)
            random.shuffle(select_mask)
            x_start = gt_boxes[select_mask]

        # Map boxes from [0, 1] into the signal range [-snr_scale, snr_scale].
        x_start = (x_start * 2. - 1.) * self.snr_scale

        # noise sample
        x = self.q_sample(x_start=x_start, time=time, noise=noise)

        # Clamp and map back to [0, 1].
        x = torch.clamp(x, min=-1 * self.snr_scale, max=self.snr_scale)
        x = ((x / self.snr_scale) + 1) / 2.

        diff_bboxes = bbox_cxcywh_to_xyxy(x)
        # convert to abs bboxes
        diff_bboxes_abs = diff_bboxes * image_size

        metainfo = dict(time=time.squeeze(-1))
        pred_instances = InstanceData(metainfo=metainfo)
        pred_instances.diff_bboxes = diff_bboxes
        pred_instances.diff_bboxes_abs = diff_bboxes_abs
        pred_instances.noise = noise
        return pred_instances
# forward diffusion
def q_sample(self, x_start, time, noise=None):
if noise is None:
noise = torch.randn_like(x_start)
x_start_shape = x_start.shape
sqrt_alphas_cumprod_t = extract(self.sqrt_alphas_cumprod, time,
x_start_shape)
sqrt_one_minus_alphas_cumprod_t = extract(
self.sqrt_one_minus_alphas_cumprod, time, x_start_shape)
return sqrt_alphas_cumprod_t * x_start + \
sqrt_one_minus_alphas_cumprod_t * noise
def predict(self,
x: Tuple[Tensor],
batch_data_samples: SampleList,
rescale: bool = False) -> InstanceList:
"""Perform forward propagation of the detection head and predict
detection results on the features of the upstream network.
Args:
x (tuple[Tensor]): Multi-level features from the
upstream network, each is a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[obj:`InstanceData`]: Detection results of each image
after the post process.
"""
# hard-setting seed to keep results same (if necessary)
# seed = 0
# random.seed(seed)
# torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
device = x[-1].device
batch_img_metas = [
data_samples.metainfo for data_samples in batch_data_samples
]
(time_pairs, batch_noise_bboxes, batch_noise_bboxes_raw,
batch_image_size) = self.prepare_testing_targets(
batch_img_metas, device)
predictions = self.predict_by_feat(
x,
time_pairs=time_pairs,
batch_noise_bboxes=batch_noise_bboxes,
batch_noise_bboxes_raw=batch_noise_bboxes_raw,
batch_image_size=batch_image_size,
device=device,
batch_img_metas=batch_img_metas)
return predictions
    def predict_by_feat(self,
                        x,
                        time_pairs,
                        batch_noise_bboxes,
                        batch_noise_bboxes_raw,
                        batch_image_size,
                        device,
                        batch_img_metas=None,
                        cfg=None,
                        rescale=True):
        """Iteratively denoise random boxes with DDIM and decode detections.

        For each (t, t_next) pair the head predicts boxes from the current
        noisy proposals, estimates the noise, and takes one DDIM step;
        optionally low-score boxes are dropped and replenished with fresh
        Gaussian boxes (`box_renewal`), and per-step detections can be
        ensembled across sampling steps.

        NOTE(review): the DDIM step below indexes `pred_noise_list` etc.,
        which are only filled when `self.box_renewal` is True — presumably
        multi-step sampling requires `box_renewal`; confirm.
        """
        batch_size = len(batch_img_metas)

        cfg = self.test_cfg if cfg is None else cfg
        cfg = copy.deepcopy(cfg)

        ensemble_score, ensemble_label, ensemble_coord = [], [], []
        for time, time_next in time_pairs:
            batch_time = torch.full((batch_size, ),
                                    time,
                                    device=device,
                                    dtype=torch.long)
            # self_condition = x_start if self.self_condition else None
            pred_logits, pred_bboxes = self(x, batch_noise_bboxes, batch_time)

            # Last-stage boxes are treated as the predicted clean signal x0.
            x_start = pred_bboxes[-1]

            # Map absolute xyxy boxes back into the diffusion signal range
            # [-snr_scale, snr_scale] (normalized cxcywh).
            x_start = x_start / batch_image_size[:, None, :]
            x_start = bbox_xyxy_to_cxcywh(x_start)
            x_start = (x_start * 2 - 1.) * self.snr_scale
            x_start = torch.clamp(
                x_start, min=-1 * self.snr_scale, max=self.snr_scale)
            pred_noise = self.predict_noise_from_start(batch_noise_bboxes_raw,
                                                       batch_time, x_start)
            pred_noise_list, x_start_list = [], []
            noise_bboxes_list, num_remain_list = [], []
            if self.box_renewal:  # filter
                score_thr = cfg.get('score_thr', 0)
                for img_id in range(batch_size):
                    score_per_image = pred_logits[-1][img_id]

                    # Keep only proposals whose best class score clears the
                    # threshold; the rest are discarded for this step.
                    score_per_image = torch.sigmoid(score_per_image)
                    value, _ = torch.max(score_per_image, -1, keepdim=False)
                    keep_idx = value > score_thr

                    num_remain_list.append(torch.sum(keep_idx))
                    pred_noise_list.append(pred_noise[img_id, keep_idx, :])
                    x_start_list.append(x_start[img_id, keep_idx, :])
                    noise_bboxes_list.append(batch_noise_bboxes[img_id,
                                                                keep_idx, :])
            if time_next < 0:
                # Final step: no further denoising, only (optional)
                # ensembling of this step's decoded detections.
                # Not same as original DiffusionDet
                if self.use_ensemble and self.sampling_timesteps > 1:
                    box_pred_per_image, scores_per_image, labels_per_image = \
                        self.inference(
                            box_cls=pred_logits[-1],
                            box_pred=pred_bboxes[-1],
                            cfg=cfg,
                            device=device)
                    ensemble_score.append(scores_per_image)
                    ensemble_label.append(labels_per_image)
                    ensemble_coord.append(box_pred_per_image)
                continue

            # Standard DDIM update coefficients for the step t -> t_next.
            alpha = self.alphas_cumprod[time]
            alpha_next = self.alphas_cumprod[time_next]

            sigma = self.ddim_sampling_eta * ((1 - alpha / alpha_next) *
                                              (1 - alpha_next) /
                                              (1 - alpha)).sqrt()
            c = (1 - alpha_next - sigma**2).sqrt()

            batch_noise_bboxes_list = []
            batch_noise_bboxes_raw_list = []
            for idx in range(batch_size):
                pred_noise = pred_noise_list[idx]
                x_start = x_start_list[idx]
                noise_bboxes = noise_bboxes_list[idx]
                num_remain = num_remain_list[idx]
                noise = torch.randn_like(noise_bboxes)

                # DDIM step: x_{t_next} from predicted x0, predicted noise
                # and fresh stochastic noise.
                noise_bboxes = x_start * alpha_next.sqrt() + \
                    c * pred_noise + sigma * noise

                if self.box_renewal:  # filter
                    # replenish with randn boxes
                    if num_remain < self.num_proposals:
                        noise_bboxes = torch.cat(
                            (noise_bboxes,
                             torch.randn(
                                 self.num_proposals - num_remain,
                                 4,
                                 device=device)),
                            dim=0)
                    else:
                        select_mask = [True] * self.num_proposals + \
                                      [False] * (num_remain -
                                                 self.num_proposals)
                        random.shuffle(select_mask)
                        noise_bboxes = noise_bboxes[select_mask]

                # raw noise boxes
                batch_noise_bboxes_raw_list.append(noise_bboxes)
                # resize to xyxy
                noise_bboxes = torch.clamp(
                    noise_bboxes,
                    min=-1 * self.snr_scale,
                    max=self.snr_scale)
                noise_bboxes = ((noise_bboxes / self.snr_scale) + 1) / 2
                noise_bboxes = bbox_cxcywh_to_xyxy(noise_bboxes)
                noise_bboxes = noise_bboxes * batch_image_size[idx]
                batch_noise_bboxes_list.append(noise_bboxes)
            batch_noise_bboxes = torch.stack(batch_noise_bboxes_list)
            batch_noise_bboxes_raw = torch.stack(batch_noise_bboxes_raw_list)
            if self.use_ensemble and self.sampling_timesteps > 1:
                box_pred_per_image, scores_per_image, labels_per_image = \
                    self.inference(
                        box_cls=pred_logits[-1],
                        box_pred=pred_bboxes[-1],
                        cfg=cfg,
                        device=device)
                ensemble_score.append(scores_per_image)
                ensemble_label.append(labels_per_image)
                ensemble_coord.append(box_pred_per_image)
        if self.use_ensemble and self.sampling_timesteps > 1:
            # Merge the per-step detections of each image and run one final
            # NMS over the union.
            steps = len(ensemble_score)
            results_list = []
            for idx in range(batch_size):
                ensemble_score_per_img = [
                    ensemble_score[i][idx] for i in range(steps)
                ]
                ensemble_label_per_img = [
                    ensemble_label[i][idx] for i in range(steps)
                ]
                ensemble_coord_per_img = [
                    ensemble_coord[i][idx] for i in range(steps)
                ]

                scores_per_image = torch.cat(ensemble_score_per_img, dim=0)
                labels_per_image = torch.cat(ensemble_label_per_img, dim=0)
                box_pred_per_image = torch.cat(ensemble_coord_per_img, dim=0)

                if self.use_nms:
                    det_bboxes, keep_idxs = batched_nms(
                        box_pred_per_image, scores_per_image, labels_per_image,
                        cfg.nms)
                    box_pred_per_image = box_pred_per_image[keep_idxs]
                    labels_per_image = labels_per_image[keep_idxs]
                    scores_per_image = det_bboxes[:, -1]
                results = InstanceData()
                results.bboxes = box_pred_per_image
                results.scores = scores_per_image
                results.labels = labels_per_image
                results_list.append(results)
        else:
            box_cls = pred_logits[-1]
            box_pred = pred_bboxes[-1]
            results_list = self.inference(box_cls, box_pred, cfg, device)
        if rescale:
            results_list = self.do_results_post_process(
                results_list, cfg, batch_img_metas=batch_img_metas)
        return results_list
@staticmethod
def do_results_post_process(results_list, cfg, batch_img_metas=None):
processed_results = []
for results, img_meta in zip(results_list, batch_img_metas):
assert img_meta.get('scale_factor') is not None
scale_factor = [1 / s for s in img_meta['scale_factor']]
results.bboxes = scale_boxes(results.bboxes, scale_factor)
# clip w, h
h, w = img_meta['ori_shape']
results.bboxes[:, 0::2] = results.bboxes[:, 0::2].clamp(
min=0, max=w)
results.bboxes[:, 1::2] = results.bboxes[:, 1::2].clamp(
min=0, max=h)
# filter small size bboxes
if cfg.get('min_bbox_size', 0) >= 0:
w, h = get_box_wh(results.bboxes)
valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)
if not valid_mask.all():
results = results[valid_mask]
processed_results.append(results)
return processed_results
def prepare_testing_targets(self, batch_img_metas, device):
# [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == timesteps
times = torch.linspace(
-1, self.timesteps - 1, steps=self.sampling_timesteps + 1)
times = list(reversed(times.int().tolist()))
# [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)]
time_pairs = list(zip(times[:-1], times[1:]))
noise_bboxes_list = []
noise_bboxes_raw_list = []
image_size_list = []
for img_meta in batch_img_metas:
h, w = img_meta['img_shape']
image_size = torch.tensor([w, h, w, h],
dtype=torch.float32,
device=device)
noise_bboxes_raw = torch.randn((self.num_proposals, 4),
device=device)
noise_bboxes = torch.clamp(
noise_bboxes_raw, min=-1 * self.snr_scale, max=self.snr_scale)
noise_bboxes = ((noise_bboxes / self.snr_scale) + 1) / 2
noise_bboxes = bbox_cxcywh_to_xyxy(noise_bboxes)
noise_bboxes = noise_bboxes * image_size
noise_bboxes_raw_list.append(noise_bboxes_raw)
noise_bboxes_list.append(noise_bboxes)
image_size_list.append(image_size[None])
batch_noise_bboxes = torch.stack(noise_bboxes_list)
batch_image_size = torch.cat(image_size_list)
batch_noise_bboxes_raw = torch.stack(noise_bboxes_raw_list)
return (time_pairs, batch_noise_bboxes, batch_noise_bboxes_raw,
batch_image_size)
def predict_noise_from_start(self, x_t, t, x0):
results = (extract(
self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) / \
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
return results
def inference(self, box_cls, box_pred, cfg, device):
"""
Args:
box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).
The tensor predicts the classification probability for
each proposal.
box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).
The tensor predicts 4-vector (x,y,w,h) box
regression values for every proposal
Returns:
results (List[Instances]): a list of #images elements.
"""
results = []
if self.use_focal_loss or self.use_fed_loss:
scores = torch.sigmoid(box_cls)
labels = torch.arange(
self.num_classes,
device=device).unsqueeze(0).repeat(self.num_proposals,
1).flatten(0, 1)
box_pred_list = []
scores_list = []
labels_list = []
for i, (scores_per_image,
box_pred_per_image) in enumerate(zip(scores, box_pred)):
scores_per_image, topk_indices = scores_per_image.flatten(
0, 1).topk(
self.num_proposals, sorted=False)
labels_per_image = labels[topk_indices]
box_pred_per_image = box_pred_per_image.view(-1, 1, 4).repeat(
1, self.num_classes, 1).view(-1, 4)
box_pred_per_image = box_pred_per_image[topk_indices]
if self.use_ensemble and self.sampling_timesteps > 1:
box_pred_list.append(box_pred_per_image)
scores_list.append(scores_per_image)
labels_list.append(labels_per_image)
continue
if self.use_nms:
det_bboxes, keep_idxs = batched_nms(
box_pred_per_image, scores_per_image, labels_per_image,
cfg.nms)
box_pred_per_image = box_pred_per_image[keep_idxs]
labels_per_image = labels_per_image[keep_idxs]
# some nms would reweight the score, such as softnms
scores_per_image = det_bboxes[:, -1]
result = InstanceData()
result.bboxes = box_pred_per_image
result.scores = scores_per_image
result.labels = labels_per_image
results.append(result)
else:
# For each box we assign the best class or the second
# best if the best on is `no_object`.
scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1)
for i, (scores_per_image, labels_per_image,
box_pred_per_image) in enumerate(
zip(scores, labels, box_pred)):
if self.use_ensemble and self.sampling_timesteps > 1:
return box_pred_per_image, scores_per_image, \
labels_per_image
if self.use_nms:
det_bboxes, keep_idxs = batched_nms(
box_pred_per_image, scores_per_image, labels_per_image,
cfg.nms)
box_pred_per_image = box_pred_per_image[keep_idxs]
labels_per_image = labels_per_image[keep_idxs]
# some nms would reweight the score, such as softnms
scores_per_image = det_bboxes[:, -1]
result = InstanceData()
result.bboxes = box_pred_per_image
result.scores = scores_per_image
result.labels = labels_per_image
results.append(result)
if self.use_ensemble and self.sampling_timesteps > 1:
return box_pred_list, scores_list, labels_list
else:
return results
@MODELS.register_module()
class SingleDiffusionDetHead(nn.Module):
    """One refinement stage of the DiffusionDet dynamic head.

    Pools RoI features for the current (noisy) proposal boxes, fuses them
    with the per-proposal features via self-attention and dynamic
    convolution (``DynamicConv``), modulates the result with the diffusion
    timestep embedding (scale-and-shift), and predicts per-proposal class
    logits and refined boxes.

    Args:
        num_classes (int): Number of object categories.
        feat_channels (int): Channel width of proposal/RoI features.
        dim_feedforward (int): Hidden width of the position-wise FFN.
        num_cls_convs (int): Linear-LN-ReLU blocks before the class logits.
        num_reg_convs (int): Linear-LN-ReLU blocks before the box deltas.
        num_heads (int): Attention heads in the proposal self-attention.
        dropout (float): Dropout rate used throughout the head.
        pooler_resolution (int): Spatial output size of the RoI pooler.
        scale_clamp (float): Upper bound on predicted log-scale deltas so
            that ``torch.exp`` in :meth:`apply_deltas` stays finite.
        bbox_weights (tuple): (wx, wy, ww, wh) normalization weights for
            the box deltas.
        use_focal_loss (bool): If True, the classifier has no extra
            background class (sigmoid-based loss).
        use_fed_loss (bool): Same effect on the classifier output size.
        act_cfg (dict): Activation config for the FFN.
        dynamic_conv (dict): Config forwarded to ``DynamicConv``.
    """

    def __init__(
        self,
        num_classes=80,
        feat_channels=256,
        dim_feedforward=2048,
        num_cls_convs=1,
        num_reg_convs=3,
        num_heads=8,
        dropout=0.0,
        pooler_resolution=7,
        scale_clamp=_DEFAULT_SCALE_CLAMP,
        bbox_weights=(2.0, 2.0, 1.0, 1.0),
        use_focal_loss=True,
        use_fed_loss=False,
        act_cfg=dict(type='ReLU', inplace=True),
        dynamic_conv=dict(dynamic_dim=64, dynamic_num=2)
    ) -> None:
        super().__init__()
        self.feat_channels = feat_channels
        # Dynamic
        self.self_attn = nn.MultiheadAttention(
            feat_channels, num_heads, dropout=dropout)
        # Proposal-feature / RoI-feature interaction.
        self.inst_interact = DynamicConv(
            feat_channels=feat_channels,
            pooler_resolution=pooler_resolution,
            dynamic_dim=dynamic_conv['dynamic_dim'],
            dynamic_num=dynamic_conv['dynamic_num'])
        # Position-wise feed-forward network (transformer-style).
        self.linear1 = nn.Linear(feat_channels, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, feat_channels)
        self.norm1 = nn.LayerNorm(feat_channels)
        self.norm2 = nn.LayerNorm(feat_channels)
        self.norm3 = nn.LayerNorm(feat_channels)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = build_activation_layer(act_cfg)
        # block time mlp: maps the timestep embedding (feat_channels * 4)
        # to a per-proposal (scale, shift) pair of width feat_channels each.
        self.block_time_mlp = nn.Sequential(
            nn.SiLU(), nn.Linear(feat_channels * 4, feat_channels * 2))
        # cls.
        cls_module = list()
        for _ in range(num_cls_convs):
            cls_module.append(nn.Linear(feat_channels, feat_channels, False))
            cls_module.append(nn.LayerNorm(feat_channels))
            cls_module.append(nn.ReLU(inplace=True))
        self.cls_module = nn.ModuleList(cls_module)
        # reg.
        reg_module = list()
        for _ in range(num_reg_convs):
            reg_module.append(nn.Linear(feat_channels, feat_channels, False))
            reg_module.append(nn.LayerNorm(feat_channels))
            reg_module.append(nn.ReLU(inplace=True))
        self.reg_module = nn.ModuleList(reg_module)
        # pred.
        self.use_focal_loss = use_focal_loss
        self.use_fed_loss = use_fed_loss
        if self.use_focal_loss or self.use_fed_loss:
            # Sigmoid-based losses: no explicit background class.
            self.class_logits = nn.Linear(feat_channels, num_classes)
        else:
            # Softmax classification: last slot is the background class.
            self.class_logits = nn.Linear(feat_channels, num_classes + 1)
        self.bboxes_delta = nn.Linear(feat_channels, 4)
        self.scale_clamp = scale_clamp
        self.bbox_weights = bbox_weights

    def forward(self, features, bboxes, pro_features, pooler, time_emb):
        """Run one refinement step.

        :param features: multi-level feature maps fed to ``pooler``.
        :param bboxes: (N, num_boxes, 4)
        :param pro_features: (N, num_boxes, feat_channels), or None on the
            first stage (then initialized from the pooled RoI features).
        :param pooler: RoI feature extractor.
        :param time_emb: diffusion timestep embedding,
            shape (N, feat_channels * 4).
        :return: tuple of class logits (N, num_boxes, -1), refined boxes
            (N, num_boxes, 4) and the fused object features.
        """
        N, num_boxes = bboxes.shape[:2]
        # roi_feature.
        proposal_boxes = list()
        for b in range(N):
            proposal_boxes.append(bboxes[b])
        rois = bbox2roi(proposal_boxes)
        roi_features = pooler(features, rois)
        if pro_features is None:
            # First stage: bootstrap proposal features by spatially
            # averaging the pooled RoI features.
            pro_features = roi_features.view(N, num_boxes, self.feat_channels,
                                             -1).mean(-1)
        # (N*num_boxes, C, HW) -> (HW, N*num_boxes, C) for DynamicConv.
        roi_features = roi_features.view(N * num_boxes, self.feat_channels,
                                         -1).permute(2, 0, 1)
        # self_att.
        pro_features = pro_features.view(N, num_boxes,
                                         self.feat_channels).permute(1, 0, 2)
        pro_features2 = self.self_attn(
            pro_features, pro_features, value=pro_features)[0]
        pro_features = pro_features + self.dropout1(pro_features2)
        pro_features = self.norm1(pro_features)
        # inst_interact.
        pro_features = pro_features.view(
            num_boxes, N,
            self.feat_channels).permute(1, 0,
                                        2).reshape(1, N * num_boxes,
                                                   self.feat_channels)
        pro_features2 = self.inst_interact(pro_features, roi_features)
        pro_features = pro_features + self.dropout2(pro_features2)
        obj_features = self.norm2(pro_features)
        # obj_feature.
        obj_features2 = self.linear2(
            self.dropout(self.activation(self.linear1(obj_features))))
        obj_features = obj_features + self.dropout3(obj_features2)
        obj_features = self.norm3(obj_features)
        fc_feature = obj_features.transpose(0, 1).reshape(N * num_boxes, -1)
        # Timestep conditioning: scale-and-shift the fused features with a
        # per-image (scale, shift) pair broadcast to every proposal.
        scale_shift = self.block_time_mlp(time_emb)
        scale_shift = torch.repeat_interleave(scale_shift, num_boxes, dim=0)
        scale, shift = scale_shift.chunk(2, dim=1)
        fc_feature = fc_feature * (scale + 1) + shift
        cls_feature = fc_feature.clone()
        reg_feature = fc_feature.clone()
        for cls_layer in self.cls_module:
            cls_feature = cls_layer(cls_feature)
        for reg_layer in self.reg_module:
            reg_feature = reg_layer(reg_feature)
        class_logits = self.class_logits(cls_feature)
        bboxes_deltas = self.bboxes_delta(reg_feature)
        pred_bboxes = self.apply_deltas(bboxes_deltas, bboxes.view(-1, 4))
        return (class_logits.view(N, num_boxes,
                                  -1), pred_bboxes.view(N, num_boxes,
                                                        -1), obj_features)

    def apply_deltas(self, deltas, boxes):
        """Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.
        Args:
            deltas (Tensor): transformation deltas of shape (N, k*4),
                where k >= 1. deltas[i] represents k potentially
                different class-specific box transformations for
                the single box boxes[i].
            boxes (Tensor): boxes to transform, of shape (N, 4)
        """
        boxes = boxes.to(deltas.dtype)
        # Convert (x1, y1, x2, y2) to center/size form.
        widths = boxes[:, 2] - boxes[:, 0]
        heights = boxes[:, 3] - boxes[:, 1]
        ctr_x = boxes[:, 0] + 0.5 * widths
        ctr_y = boxes[:, 1] + 0.5 * heights
        # Undo the delta normalization weights.
        wx, wy, ww, wh = self.bbox_weights
        dx = deltas[:, 0::4] / wx
        dy = deltas[:, 1::4] / wy
        dw = deltas[:, 2::4] / ww
        dh = deltas[:, 3::4] / wh
        # Prevent sending too large values into torch.exp()
        dw = torch.clamp(dw, max=self.scale_clamp)
        dh = torch.clamp(dh, max=self.scale_clamp)
        pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
        pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
        pred_w = torch.exp(dw) * widths[:, None]
        pred_h = torch.exp(dh) * heights[:, None]
        # Back to corner form.
        pred_boxes = torch.zeros_like(deltas)
        pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w  # x1
        pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h  # y1
        pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w  # x2
        pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h  # y2
        return pred_boxes
class DynamicConv(nn.Module):
    """Dynamic-convolution interaction between proposal and RoI features.

    Each proposal predicts two small weight matrices that are applied as
    back-to-back 1x1 "convolutions" (batched matmuls) over its own pooled
    RoI features; the result is flattened and projected back to
    ``feat_channels``.
    """

    def __init__(self,
                 feat_channels: int,
                 dynamic_dim: int = 64,
                 dynamic_num: int = 2,
                 pooler_resolution: int = 7) -> None:
        super().__init__()
        self.feat_channels = feat_channels
        self.dynamic_dim = dynamic_dim
        self.dynamic_num = dynamic_num
        # Size of one generated weight matrix (C x dynamic_dim).
        self.num_params = feat_channels * dynamic_dim
        # Generates all dynamic parameters from the proposal feature.
        self.dynamic_layer = nn.Linear(feat_channels,
                                       dynamic_num * self.num_params)
        self.norm1 = nn.LayerNorm(dynamic_dim)
        self.norm2 = nn.LayerNorm(feat_channels)
        self.activation = nn.ReLU(inplace=True)
        flattened_size = feat_channels * pooler_resolution**2
        self.out_layer = nn.Linear(flattened_size, feat_channels)
        self.norm3 = nn.LayerNorm(feat_channels)

    def forward(self, pro_features: Tensor, roi_features: Tensor) -> Tensor:
        """Forward function.
        Args:
            pro_features: (1, N * num_boxes, self.feat_channels)
            roi_features: (49, N * num_boxes, self.feat_channels)
        Returns:
            Tensor of shape (N * num_boxes, self.feat_channels).
        """
        # (HW, B, C) -> (B, HW, C) so each proposal owns one bmm batch.
        x = roi_features.permute(1, 0, 2)
        params = self.dynamic_layer(pro_features).permute(1, 0, 2)
        w1 = params[:, :, :self.num_params].view(-1, self.feat_channels,
                                                 self.dynamic_dim)
        w2 = params[:, :, self.num_params:].view(-1, self.dynamic_dim,
                                                 self.feat_channels)
        # Two dynamic 1x1 "conv" steps: C -> dynamic_dim -> C.
        x = self.activation(self.norm1(torch.bmm(x, w1)))
        x = self.activation(self.norm2(torch.bmm(x, w2)))
        # Flatten spatial positions and project back to feat_channels.
        x = self.activation(self.norm3(self.out_layer(x.flatten(1))))
        return x
| 43,032 | 40.577778 | 106 | py |
ERD | ERD-main/projects/DiffusionDet/configs/diffusiondet_r50_fpn_500-proposals_1-step_crop-ms-480-800-450k_coco.py | _base_ = [
'mmdet::_base_/datasets/coco_detection.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# Register the DiffusionDet project modules with the mmdet registries.
custom_imports = dict(
    imports=['projects.DiffusionDet.diffusiondet'], allow_failed_imports=False)
# model settings
# DiffusionDet with ResNet-50 + FPN, 500 proposals and a single DDIM
# sampling step (sampling_timesteps=1).
model = dict(
    type='DiffusionDet',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=4),
    bbox_head=dict(
        type='DynamicDiffusionDetHead',
        num_classes=80,
        feat_channels=256,
        num_proposals=500,
        num_heads=6,
        deep_supervision=True,
        prior_prob=0.01,
        snr_scale=2.0,
        sampling_timesteps=1,
        ddim_sampling_eta=1.0,
        single_head=dict(
            type='SingleDiffusionDetHead',
            num_cls_convs=1,
            num_reg_convs=3,
            dim_feedforward=2048,
            num_heads=8,
            dropout=0.0,
            act_cfg=dict(type='ReLU', inplace=True),
            dynamic_conv=dict(dynamic_dim=64, dynamic_num=2)),
        roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # criterion
        criterion=dict(
            type='DiffusionDetCriterion',
            num_classes=80,
            assigner=dict(
                type='DiffusionDetMatcher',
                match_costs=[
                    dict(
                        type='FocalLossCost',
                        alpha=0.25,
                        gamma=2.0,
                        weight=2.0,
                        eps=1e-8),
                    dict(type='BBoxL1Cost', weight=5.0, box_format='xyxy'),
                    dict(type='IoUCost', iou_mode='giou', weight=2.0)
                ],
                center_radius=2.5,
                candidate_topk=5),
            loss_cls=dict(
                type='FocalLoss',
                use_sigmoid=True,
                alpha=0.25,
                gamma=2.0,
                reduction='sum',
                loss_weight=2.0),
            loss_bbox=dict(type='L1Loss', reduction='sum', loss_weight=5.0),
            loss_giou=dict(type='GIoULoss', reduction='sum',
                           loss_weight=2.0))),
    test_cfg=dict(
        use_nms=True,
        score_thr=0.5,
        min_bbox_size=0,
        nms=dict(type='nms', iou_threshold=0.5),
    ))
backend = 'pillow'
# Multi-scale (480-800) training with optional resize-crop-resize branch.
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        backend_args=_base_.backend_args,
        imdecode_backend=backend),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', prob=0.5),
    dict(
        type='RandomChoice',
        transforms=[[
            dict(
                type='RandomChoiceResize',
                scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                        (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                        (736, 1333), (768, 1333), (800, 1333)],
                keep_ratio=True,
                backend=backend),
        ],
                    [
                        dict(
                            type='RandomChoiceResize',
                            scales=[(400, 1333), (500, 1333), (600, 1333)],
                            keep_ratio=True,
                            backend=backend),
                        dict(
                            type='RandomCrop',
                            crop_type='absolute_range',
                            crop_size=(384, 600),
                            allow_negative_crop=True),
                        dict(
                            type='RandomChoiceResize',
                            scales=[(480, 1333), (512, 1333), (544, 1333),
                                    (576, 1333), (608, 1333), (640, 1333),
                                    (672, 1333), (704, 1333), (736, 1333),
                                    (768, 1333), (800, 1333)],
                            keep_ratio=True,
                            backend=backend)
                    ]]),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        backend_args=_base_.backend_args,
        imdecode_backend=backend),
    dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend=backend),
    # If you don't have a gt annotation, delete the pipeline
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
# InfiniteSampler pairs with the iteration-based training loop below;
# images without GT boxes are kept (filter_empty_gt=False).
train_dataloader = dict(
    sampler=dict(type='InfiniteSampler'),
    dataset=dict(
        filter_cfg=dict(filter_empty_gt=False, min_size=1e-5),
        pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(
        _delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001),
    clip_grad=dict(max_norm=1.0, norm_type=2))
# Iteration-based schedule: 450k iterations, validation every 75k.
train_cfg = dict(
    _delete_=True,
    type='IterBasedTrainLoop',
    max_iters=450000,
    val_interval=75000)
# learning rate
param_scheduler = [
    dict(
        type='LinearLR', start_factor=0.01, by_epoch=False, begin=0, end=1000),
    dict(
        type='MultiStepLR',
        begin=0,
        end=450000,
        by_epoch=False,
        milestones=[350000, 420000],
        gamma=0.1)
]
default_hooks = dict(
    checkpoint=dict(by_epoch=False, interval=75000, max_keep_ckpts=3))
log_processor = dict(by_epoch=False)
| 6,186 | 32.263441 | 79 | py |
ERD | ERD-main/projects/DiffusionDet/model_converters/diffusiondet_resnet_to_mmdet.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import numpy as np
import torch
from mmengine.fileio import load
def convert(src, dst):
    """Convert a Detectron2 DiffusionDet checkpoint to MMDetection format.

    Renames backbone/FPN/head keys from the detectron2 layout to the
    mmdet layout and saves ``dict(state_dict=..., meta=dict())``.

    Args:
        src (str): Source checkpoint path. ``*.pth`` files are read with
            ``torch.load``; anything else goes through
            ``mmengine.fileio.load``.
        dst (str): Output path for the converted checkpoint.

    Raises:
        ValueError: If a converted value is neither a numpy array nor a
            torch tensor.
    """
    if src.endswith('pth'):
        # map_location='cpu' so checkpoints saved on GPU convert on
        # CPU-only machines instead of trying to allocate CUDA memory.
        src_model = torch.load(src, map_location='cpu')
    else:
        src_model = load(src)
    dst_state_dict = OrderedDict()
    for k, v in src_model['model'].items():
        key_name_split = k.split('.')
        if 'backbone.fpn_lateral' in k:
            # d2 FPN convs are numbered from stage 2; mmdet from index 0.
            lateral_id = int(key_name_split[-2][-1])
            name = f'neck.lateral_convs.{lateral_id - 2}.' \
                   f'conv.{key_name_split[-1]}'
        elif 'backbone.fpn_output' in k:
            lateral_id = int(key_name_split[-2][-1])
            name = f'neck.fpn_convs.{lateral_id - 2}.conv.' \
                   f'{key_name_split[-1]}'
        elif 'backbone.bottom_up.stem.conv1.norm.' in k:
            name = f'backbone.bn1.{key_name_split[-1]}'
        elif 'backbone.bottom_up.stem.conv1.' in k:
            name = f'backbone.conv1.{key_name_split[-1]}'
        elif 'backbone.bottom_up.res' in k:
            # d2 res2..res5 -> mmdet layer1..layer4
            res_id = int(key_name_split[2][-1]) - 1
            # deal with short cut
            if 'shortcut' in key_name_split[4]:
                if 'shortcut' == key_name_split[-2]:
                    name = f'backbone.layer{res_id}.' \
                           f'{key_name_split[3]}.downsample.0.' \
                           f'{key_name_split[-1]}'
                elif 'shortcut' == key_name_split[-3]:
                    name = f'backbone.layer{res_id}.' \
                           f'{key_name_split[3]}.downsample.1.' \
                           f'{key_name_split[-1]}'
                else:
                    # Fix: skip malformed keys instead of falling through
                    # and writing under a stale `name` from a previous
                    # iteration (or raising NameError on the first one).
                    print(f'Invalid key {k}')
                    continue
            # deal with conv
            elif 'conv' in key_name_split[-2]:
                conv_id = int(key_name_split[-2][-1])
                name = f'backbone.layer{res_id}.{key_name_split[3]}' \
                       f'.conv{conv_id}.{key_name_split[-1]}'
            # deal with BN
            elif key_name_split[-2] == 'norm':
                conv_id = int(key_name_split[-3][-1])
                name = f'backbone.layer{res_id}.{key_name_split[3]}.' \
                       f'bn{conv_id}.{key_name_split[-1]}'
            else:
                # Fix: same stale-`name` fall-through as above.
                print(f'{k} is invalid')
                continue
        elif key_name_split[0] == 'head':
            # d2: head.xxx -> mmdet: bbox_head.xxx
            name = f'bbox_{k}'
        else:
            # some base parameters such as beta will not convert
            print(f'{k} is not converted!!')
            continue
        if not isinstance(v, np.ndarray) and not isinstance(v, torch.Tensor):
            raise ValueError(
                'Unsupported type found in checkpoint! {}: {}'.format(
                    k, type(v)))
        if not isinstance(v, torch.Tensor):
            dst_state_dict[name] = torch.from_numpy(v)
        else:
            dst_state_dict[name] = v
    mmdet_model = dict(state_dict=dst_state_dict, meta=dict())
    torch.save(mmdet_model, dst)
def main():
    """Command-line entry point: parse src/dst paths and run conversion."""
    arg_parser = argparse.ArgumentParser(description='Convert model keys')
    arg_parser.add_argument('src', help='src detectron model path')
    arg_parser.add_argument('dst', help='save path')
    cli_args = arg_parser.parse_args()
    convert(cli_args.src, cli_args.dst)
if __name__ == '__main__':
    main()
| 3,395 | 37.157303 | 77 | py |
ERD | ERD-main/projects/SparseInst/sparseinst/sparseinst.py | # Copyright (c) Tianheng Cheng and its affiliates. All Rights Reserved
from typing import List, Tuple, Union
import torch
import torch.nn.functional as F
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models import BaseDetector
from mmdet.models.utils import unpack_gt_instances
from mmdet.registry import MODELS
from mmdet.structures import OptSampleList, SampleList
from mmdet.utils import ConfigType, OptConfigType
@torch.jit.script
def rescoring_mask(scores, mask_pred, masks):
    """Rescale per-instance scores by the mean soft-mask value inside the
    binarized prediction ("maskness")."""
    binarized = mask_pred.float()
    overlap = (masks * binarized).sum([1, 2])
    pred_area = binarized.sum([1, 2]) + 1e-6
    return scores * (overlap / pred_area)
@MODELS.register_module()
class SparseInst(BaseDetector):
    """Implementation of `SparseInst <https://arxiv.org/abs/2203.12827>`_

    Args:
        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
            :class:`DetDataPreprocessor` to process the input data.
            Defaults to None.
        backbone (:obj:`ConfigDict` or dict): The backbone module.
        encoder (:obj:`ConfigDict` or dict): The encoder module.
        decoder (:obj:`ConfigDict` or dict): The decoder module.
        criterion (:obj:`ConfigDict` or dict, optional): The training matcher
            and losses. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of SparseInst. Defaults to None.
        init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
            the initialization. Defaults to None.
    """
    def __init__(self,
                 data_preprocessor: ConfigType,
                 backbone: ConfigType,
                 encoder: ConfigType,
                 decoder: ConfigType,
                 criterion: OptConfigType = None,
                 test_cfg: OptConfigType = None,
                 init_cfg: OptConfigType = None):
        super().__init__(
            data_preprocessor=data_preprocessor, init_cfg=init_cfg)
        # backbone
        self.backbone = MODELS.build(backbone)
        # encoder & decoder
        self.encoder = MODELS.build(encoder)
        self.decoder = MODELS.build(decoder)
        # matcher & loss (matcher is built in loss)
        self.criterion = MODELS.build(criterion)
        # inference
        self.cls_threshold = test_cfg.score_thr
        self.mask_threshold = test_cfg.mask_thr_binary
    def _forward(
            self,
            batch_inputs: Tensor,
            batch_data_samples: OptSampleList = None) -> Tuple[List[Tensor]]:
        """Network forward process. Usually includes backbone, neck and head
        forward without any post-processing.
        Args:
            batch_inputs (Tensor): Inputs with shape (N, C, H, W).
        Returns:
            tuple[list]: A tuple of features from ``bbox_head`` forward.
        """
        x = self.backbone(batch_inputs)
        x = self.encoder(x)
        results = self.decoder(x)
        return results
    def predict(self,
                batch_inputs: Tensor,
                batch_data_samples: SampleList,
                rescale: bool = True) -> SampleList:
        """Predict results from a batch of inputs and data samples with post-
        processing.
        Args:
            batch_inputs (Tensor): Inputs with shape (N, C, H, W).
            batch_data_samples (List[:obj:`DetDataSample`]): The Data
                Samples. It usually includes information such as
                `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
            rescale (bool): Whether to rescale the results.
                Defaults to True.
        Returns:
            list[:obj:`DetDataSample`]: Detection results of the
            input images. Each DetDataSample usually contain
            'pred_instances'. And the ``pred_instances`` usually
            contains following keys.
                - scores (Tensor): Classification scores, has a shape
                    (num_instance, )
                - labels (Tensor): Labels of bboxes, has a shape
                    (num_instances, ).
                - bboxes (Tensor): Has a shape (num_instances, 4),
                    the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        # Padded network input size; masks are first upsampled to this.
        max_shape = batch_inputs.shape[-2:]
        output = self._forward(batch_inputs)
        pred_scores = output['pred_logits'].sigmoid()
        pred_masks = output['pred_masks'].sigmoid()
        pred_objectness = output['pred_scores'].sigmoid()
        # Fuse class probability with IoU-aware objectness (geometric mean).
        pred_scores = torch.sqrt(pred_scores * pred_objectness)
        results_list = []
        for batch_idx, (scores_per_image, mask_pred_per_image,
                        datasample) in enumerate(
                            zip(pred_scores, pred_masks, batch_data_samples)):
            result = InstanceData()
            # max/argmax
            scores, labels = scores_per_image.max(dim=-1)
            # cls threshold
            keep = scores > self.cls_threshold
            scores = scores[keep]
            labels = labels[keep]
            mask_pred_per_image = mask_pred_per_image[keep]
            if scores.size(0) == 0:
                # No instance survived thresholding: emit empty result.
                result.scores = scores
                result.labels = labels
                results_list.append(result)
                continue
            img_meta = datasample.metainfo
            # rescoring mask using maskness
            scores = rescoring_mask(scores,
                                    mask_pred_per_image > self.mask_threshold,
                                    mask_pred_per_image)
            h, w = img_meta['img_shape'][:2]
            # Upsample to the padded input size, then crop away the padding.
            mask_pred_per_image = F.interpolate(
                mask_pred_per_image.unsqueeze(1),
                size=max_shape,
                mode='bilinear',
                align_corners=False)[:, :, :h, :w]
            if rescale:
                ori_h, ori_w = img_meta['ori_shape'][:2]
                mask_pred_per_image = F.interpolate(
                    mask_pred_per_image,
                    size=(ori_h, ori_w),
                    mode='bilinear',
                    align_corners=False).squeeze(1)
            mask_pred = mask_pred_per_image > self.mask_threshold
            result.masks = mask_pred
            result.scores = scores
            result.labels = labels
            # create an empty bbox in InstanceData to avoid bugs when
            # calculating metrics.
            result.bboxes = result.scores.new_zeros(len(scores), 4)
            results_list.append(result)
        batch_data_samples = self.add_pred_to_datasample(
            batch_data_samples, results_list)
        return batch_data_samples
    def loss(self, batch_inputs: Tensor,
             batch_data_samples: SampleList) -> Union[dict, list]:
        """Calculate losses from a batch of inputs and data samples.
        Args:
            batch_inputs (Tensor): Input images of shape (N, C, H, W).
                These should usually be mean centered and std scaled.
            batch_data_samples (list[:obj:`DetDataSample`]): The batch
                data samples. It usually includes information such
                as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
        Returns:
            dict: A dictionary of loss components.
        """
        outs = self._forward(batch_inputs)
        (batch_gt_instances, batch_gt_instances_ignore,
         batch_img_metas) = unpack_gt_instances(batch_data_samples)
        losses = self.criterion(outs, batch_gt_instances, batch_img_metas,
                                batch_gt_instances_ignore)
        return losses
    def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
        """Extract features.
        Args:
            batch_inputs (Tensor): Image tensor with shape (N, C, H ,W).
        Returns:
            tuple[Tensor]: Multi-level features that may have
            different resolutions.
        """
        x = self.backbone(batch_inputs)
        x = self.encoder(x)
        return x
| 7,972 | 37.516908 | 78 | py |
ERD | ERD-main/projects/SparseInst/sparseinst/loss.py | # Copyright (c) Tianheng Cheng and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch.cuda.amp import autocast
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.utils import reduce_mean
def compute_mask_iou(inputs, targets):
    """Per-instance IoU between predicted mask logits and target masks.

    Predictions are binarized at sigmoid >= 0.4, targets at > 0.5; IoU is
    computed over the last (flattened-pixel) dimension.
    """
    pred_bin = (inputs.sigmoid() >= 0.4).float()
    gt_bin = (targets > 0.5).float()
    inter = (pred_bin * gt_bin).sum(-1)
    union = pred_bin.sum(-1) + gt_bin.sum(-1) - inter
    return inter / (union + 1e-6)
def dice_score(inputs, targets):
    """Pairwise soft-dice affinity between all predictions and all targets.

    Returns a (num_preds, num_targets) matrix; inputs are logits and are
    passed through sigmoid first.
    """
    probs = inputs.sigmoid()
    numerator = 2 * torch.matmul(probs, targets.t())
    denominator = (probs * probs).sum(-1)[:, None] + (targets *
                                                      targets).sum(-1)
    return numerator / (denominator + 1e-4)
@MODELS.register_module()
class SparseInstCriterion(nn.Module):
    """This part is partially derived from:
    https://github.com/facebookresearch/detr/blob/main/models/detr.py.

    Computes the SparseInst training losses (classification, objectness,
    mask BCE and dice) after Hungarian matching by ``assigner``.
    """
    def __init__(
        self,
        num_classes,
        assigner,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            alpha=0.25,
            gamma=2.0,
            reduction='sum',
            loss_weight=2.0),
        loss_obj=dict(
            type='CrossEntropyLoss',
            use_sigmoid=True,
            reduction='mean',
            loss_weight=1.0),
        loss_mask=dict(
            type='CrossEntropyLoss',
            use_sigmoid=True,
            reduction='mean',
            loss_weight=5.0),
        loss_dice=dict(
            type='DiceLoss',
            use_sigmoid=True,
            reduction='sum',
            eps=5e-5,
            loss_weight=2.0),
    ):
        super().__init__()
        self.matcher = TASK_UTILS.build(assigner)
        self.num_classes = num_classes
        self.loss_cls = MODELS.build(loss_cls)
        self.loss_obj = MODELS.build(loss_obj)
        self.loss_mask = MODELS.build(loss_mask)
        self.loss_dice = MODELS.build(loss_dice)
    def _get_src_permutation_idx(self, indices):
        # permute predictions following indices
        batch_idx = torch.cat(
            [torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
        src_idx = torch.cat([src for (src, _) in indices])
        return batch_idx, src_idx
    def _get_tgt_permutation_idx(self, indices):
        # permute targets following indices
        batch_idx = torch.cat(
            [torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
        return batch_idx, tgt_idx
    def loss_classification(self, outputs, batch_gt_instances, indices,
                            num_instances):
        """Focal classification loss over all predictions; unmatched
        predictions are assigned the background index ``num_classes``."""
        assert 'pred_logits' in outputs
        src_logits = outputs['pred_logits']
        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat(
            [gt.labels[J] for gt, (_, J) in zip(batch_gt_instances, indices)])
        target_classes = torch.full(
            src_logits.shape[:2],
            self.num_classes,
            dtype=torch.int64,
            device=src_logits.device)
        target_classes[idx] = target_classes_o
        src_logits = src_logits.flatten(0, 1)
        target_classes = target_classes.flatten(0, 1)
        # comp focal loss.
        class_loss = self.loss_cls(
            src_logits,
            target_classes,
        ) / num_instances
        return class_loss
    def loss_masks_with_iou_objectness(self, outputs, batch_gt_instances,
                                       indices, num_instances):
        """Mask BCE + dice losses on matched pairs, plus an objectness loss
        regressing each matched prediction's mask IoU with its target."""
        src_idx = self._get_src_permutation_idx(indices)
        tgt_idx = self._get_tgt_permutation_idx(indices)
        # Bx100xHxW
        assert 'pred_masks' in outputs
        assert 'pred_scores' in outputs
        src_iou_scores = outputs['pred_scores']
        src_masks = outputs['pred_masks']
        with torch.no_grad():
            target_masks = torch.cat([
                gt.masks.to_tensor(
                    dtype=src_masks.dtype, device=src_masks.device)
                for gt in batch_gt_instances
            ])
        num_masks = [len(gt.masks) for gt in batch_gt_instances]
        target_masks = target_masks.to(src_masks)
        if len(target_masks) == 0:
            # No GT in the whole batch: zero losses that keep the graph.
            loss_dice = src_masks.sum() * 0.0
            loss_mask = src_masks.sum() * 0.0
            loss_objectness = src_iou_scores.sum() * 0.0
            return loss_objectness, loss_dice, loss_mask
        src_masks = src_masks[src_idx]
        target_masks = F.interpolate(
            target_masks[:, None],
            size=src_masks.shape[-2:],
            mode='bilinear',
            align_corners=False).squeeze(1)
        src_masks = src_masks.flatten(1)
        # FIXME: tgt_idx
        # Convert per-image target indices to offsets into the batch-wide
        # concatenated target_masks tensor.
        mix_tgt_idx = torch.zeros_like(tgt_idx[1])
        cum_sum = 0
        for num_mask in num_masks:
            mix_tgt_idx[cum_sum:cum_sum + num_mask] = cum_sum
            cum_sum += num_mask
        mix_tgt_idx += tgt_idx[1]
        target_masks = target_masks[mix_tgt_idx].flatten(1)
        with torch.no_grad():
            # IoU targets for the objectness branch (no gradient).
            ious = compute_mask_iou(src_masks, target_masks)
        tgt_iou_scores = ious
        src_iou_scores = src_iou_scores[src_idx]
        tgt_iou_scores = tgt_iou_scores.flatten(0)
        src_iou_scores = src_iou_scores.flatten(0)
        loss_objectness = self.loss_obj(src_iou_scores, tgt_iou_scores)
        loss_dice = self.loss_dice(src_masks, target_masks) / num_instances
        loss_mask = self.loss_mask(src_masks, target_masks)
        return loss_objectness, loss_dice, loss_mask
    def forward(self, outputs, batch_gt_instances, batch_img_metas,
                batch_gt_instances_ignore):
        """Match predictions to targets and compute all losses.

        Returns:
            dict: ``loss_cls``, ``loss_obj``, ``loss_dice``, ``loss_mask``.
        """
        # Retrieve the matching between the outputs of
        # the last layer and the targets
        indices = self.matcher(outputs, batch_gt_instances)
        # Compute the average number of target boxes
        # across all nodes, for normalization purposes
        num_instances = sum(gt.labels.shape[0] for gt in batch_gt_instances)
        num_instances = torch.as_tensor([num_instances],
                                        dtype=torch.float,
                                        device=next(iter(
                                            outputs.values())).device)
        num_instances = reduce_mean(num_instances).clamp_(min=1).item()
        # Compute all the requested losses
        loss_cls = self.loss_classification(outputs, batch_gt_instances,
                                            indices, num_instances)
        loss_obj, loss_dice, loss_mask = self.loss_masks_with_iou_objectness(
            outputs, batch_gt_instances, indices, num_instances)
        return dict(
            loss_cls=loss_cls,
            loss_obj=loss_obj,
            loss_dice=loss_dice,
            loss_mask=loss_mask)
@TASK_UTILS.register_module()
class SparseInstMatcher(nn.Module):
    """Hungarian matcher for SparseInst.

    The matching cost combines a soft-dice mask affinity (weighted by
    ``alpha``) with the predicted class probability of the target's label
    (weighted by ``beta``): C = dice**alpha * prob**beta, maximized per
    image with ``scipy.optimize.linear_sum_assignment``.
    """
    def __init__(self, alpha=0.8, beta=0.2):
        super().__init__()
        self.alpha = alpha
        self.beta = beta
        self.mask_score = dice_score
    def forward(self, outputs, batch_gt_instances):
        """Return per-image (pred_idx, tgt_idx) index tensors."""
        with torch.no_grad():
            B, N, H, W = outputs['pred_masks'].shape
            pred_masks = outputs['pred_masks']
            pred_logits = outputs['pred_logits'].sigmoid()
            device = pred_masks.device
            tgt_ids = torch.cat([gt.labels for gt in batch_gt_instances])
            if tgt_ids.shape[0] == 0:
                # No GT in the batch: return empty matchings for each image.
                return [(torch.as_tensor([]).to(pred_logits),
                         torch.as_tensor([]).to(pred_logits))] * B
            tgt_masks = torch.cat([
                gt.masks.to_tensor(dtype=pred_masks.dtype, device=device)
                for gt in batch_gt_instances
            ])
            # Resize GT masks to the prediction resolution before scoring.
            tgt_masks = F.interpolate(
                tgt_masks[:, None],
                size=pred_masks.shape[-2:],
                mode='bilinear',
                align_corners=False).squeeze(1)
            pred_masks = pred_masks.view(B * N, -1)
            tgt_masks = tgt_masks.flatten(1)
            # Force fp32 for the cost computation even under AMP.
            with autocast(enabled=False):
                pred_masks = pred_masks.float()
                tgt_masks = tgt_masks.float()
                pred_logits = pred_logits.float()
                mask_score = self.mask_score(pred_masks, tgt_masks)
                # Nx(Number of gts)
                matching_prob = pred_logits.view(B * N, -1)[:, tgt_ids]
                C = (mask_score**self.alpha) * (matching_prob**self.beta)
            C = C.view(B, N, -1).cpu()
            # hungarian matching
            sizes = [len(gt.masks) for gt in batch_gt_instances]
            indices = [
                linear_sum_assignment(c[i], maximize=True)
                for i, c in enumerate(C.split(sizes, -1))
            ]
            indices = [(torch.as_tensor(i, dtype=torch.int64),
                        torch.as_tensor(j, dtype=torch.int64))
                       for i, j in indices]
            return indices
| 9,212 | 35.852 | 79 | py |
ERD | ERD-main/projects/SparseInst/sparseinst/encoder.py | # Copyright (c) Tianheng Cheng and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.model.weight_init import caffe2_xavier_init, kaiming_init
from mmdet.registry import MODELS
class PyramidPoolingModule(nn.Module):
    """PSPNet-style pyramid pooling: pool the input at several grid sizes,
    project each to ``channels``, upsample back, concatenate with the input
    and fuse through a 1x1 bottleneck."""

    def __init__(self,
                 in_channels,
                 channels=512,
                 sizes=(1, 2, 3, 6),
                 act_cfg=dict(type='ReLU')):
        super().__init__()
        self.stages = nn.ModuleList(
            [self._make_stage(in_channels, channels, s) for s in sizes])
        self.bottleneck = nn.Conv2d(in_channels + len(sizes) * channels,
                                    in_channels, 1)
        self.act = MODELS.build(act_cfg)

    def _make_stage(self, features, out_features, size):
        # One branch: adaptive-average-pool to (size, size), then 1x1 conv.
        pool = nn.AdaptiveAvgPool2d(output_size=(size, size))
        proj = nn.Conv2d(features, out_features, 1)
        return nn.Sequential(pool, proj)

    def forward(self, feats):
        height, width = feats.size(2), feats.size(3)
        branches = []
        for stage in self.stages:
            upsampled = F.interpolate(
                input=self.act(stage(feats)),
                size=(height, width),
                mode='bilinear',
                align_corners=False)
            branches.append(upsampled)
        branches.append(feats)
        return self.act(self.bottleneck(torch.cat(branches, 1)))
@MODELS.register_module()
class InstanceContextEncoder(nn.Module):
    """
    Instance Context Encoder
    1. construct feature pyramids from ResNet
    2. enlarge receptive fields (ppm)
    3. multi-scale fusion
    """
    def __init__(self,
                 in_channels,
                 out_channels=256,
                 with_ppm=True,
                 act_cfg=dict(type='ReLU')):
        super().__init__()
        self.num_channels = out_channels
        self.in_channels = in_channels
        self.with_ppm = with_ppm
        fpn_laterals = []
        fpn_outputs = []
        # Built deepest-first to match the top-down pass in forward().
        for in_channel in reversed(self.in_channels):
            lateral_conv = nn.Conv2d(in_channel, self.num_channels, 1)
            output_conv = nn.Conv2d(
                self.num_channels, self.num_channels, 3, padding=1)
            caffe2_xavier_init(lateral_conv)
            caffe2_xavier_init(output_conv)
            fpn_laterals.append(lateral_conv)
            fpn_outputs.append(output_conv)
        self.fpn_laterals = nn.ModuleList(fpn_laterals)
        self.fpn_outputs = nn.ModuleList(fpn_outputs)
        # ppm
        if self.with_ppm:
            self.ppm = PyramidPoolingModule(
                self.num_channels, self.num_channels // 4, act_cfg=act_cfg)
        # final fusion
        self.fusion = nn.Conv2d(self.num_channels * 3, self.num_channels, 1)
        kaiming_init(self.fusion)
    def forward(self, features):
        # Reverse to deepest-first order for the top-down FPN pass.
        features = features[::-1]
        prev_features = self.fpn_laterals[0](features[0])
        if self.with_ppm:
            # Enlarge the receptive field of the deepest level.
            prev_features = self.ppm(prev_features)
        outputs = [self.fpn_outputs[0](prev_features)]
        for feature, lat_conv, output_conv in zip(features[1:],
                                                  self.fpn_laterals[1:],
                                                  self.fpn_outputs[1:]):
            lat_features = lat_conv(feature)
            top_down_features = F.interpolate(
                prev_features, scale_factor=2.0, mode='nearest')
            prev_features = lat_features + top_down_features
            outputs.insert(0, output_conv(prev_features))
        # Upsample every level to the finest resolution and fuse by concat.
        size = outputs[0].shape[2:]
        features = [outputs[0]] + [
            F.interpolate(x, size, mode='bilinear', align_corners=False)
            for x in outputs[1:]
        ]
        features = self.fusion(torch.cat(features, dim=1))
        return features
| 3,806 | 35.961165 | 78 | py |
ERD | ERD-main/projects/SparseInst/sparseinst/decoder.py | # Copyright (c) Tianheng Cheng and its affiliates. All Rights Reserved
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmengine.model.weight_init import caffe2_xavier_init, kaiming_init
from torch.nn import init
from mmdet.registry import MODELS
def _make_stack_3x3_convs(num_convs,
                          in_channels,
                          out_channels,
                          act_cfg=dict(type='ReLU', inplace=True)):
    """Build ``num_convs`` stacked (3x3 conv + activation) pairs.

    The first conv maps ``in_channels`` -> ``out_channels``; all later
    convs keep ``out_channels``.
    """
    layers = []
    channels = in_channels
    for _ in range(num_convs):
        layers.append(nn.Conv2d(channels, out_channels, 3, padding=1))
        layers.append(MODELS.build(act_cfg))
        channels = out_channels
    return nn.Sequential(*layers)
class InstanceBranch(nn.Module):
    """Instance branch of the IAM decoder.

    Predicts instance activation maps (IAMs) with a single conv, uses the
    normalized IAMs to aggregate per-instance features, and from those
    predicts class scores, mask kernels and objectness.
    """
    def __init__(self,
                 in_channels,
                 dim=256,
                 num_convs=4,
                 num_masks=100,
                 num_classes=80,
                 kernel_dim=128,
                 act_cfg=dict(type='ReLU', inplace=True)):
        super().__init__()
        # NOTE(review): redundant self-assignment kept from the original.
        num_masks = num_masks
        self.num_classes = num_classes
        self.inst_convs = _make_stack_3x3_convs(num_convs, in_channels, dim,
                                                act_cfg)
        # iam prediction, a simple conv
        self.iam_conv = nn.Conv2d(dim, num_masks, 3, padding=1)
        # outputs
        self.cls_score = nn.Linear(dim, self.num_classes)
        self.mask_kernel = nn.Linear(dim, kernel_dim)
        self.objectness = nn.Linear(dim, 1)
        # Prior probability for the focal-style bias init below.
        self.prior_prob = 0.01
        self._init_weights()
    def _init_weights(self):
        for m in self.inst_convs.modules():
            if isinstance(m, nn.Conv2d):
                kaiming_init(m)
        # Bias init so initial sigmoid outputs equal prior_prob.
        bias_value = -math.log((1 - self.prior_prob) / self.prior_prob)
        for module in [self.iam_conv, self.cls_score]:
            init.constant_(module.bias, bias_value)
        init.normal_(self.iam_conv.weight, std=0.01)
        init.normal_(self.cls_score.weight, std=0.01)
        init.normal_(self.mask_kernel.weight, std=0.01)
        init.constant_(self.mask_kernel.bias, 0.0)
    def forward(self, features):
        """Return (pred_logits, pred_kernel, pred_scores, iam)."""
        # instance features (x4 convs)
        features = self.inst_convs(features)
        # predict instance activation maps
        iam = self.iam_conv(features)
        iam_prob = iam.sigmoid()
        B, N = iam_prob.shape[:2]
        C = features.size(1)
        # BxNxHxW -> BxNx(HW)
        iam_prob = iam_prob.view(B, N, -1)
        # Normalize each IAM so aggregation is a weighted average.
        normalizer = iam_prob.sum(-1).clamp(min=1e-6)
        iam_prob = iam_prob / normalizer[:, :, None]
        # aggregate features: BxCxHxW -> Bx(HW)xC
        inst_features = torch.bmm(iam_prob,
                                  features.view(B, C, -1).permute(0, 2, 1))
        # predict classification & segmentation kernel & objectness
        pred_logits = self.cls_score(inst_features)
        pred_kernel = self.mask_kernel(inst_features)
        pred_scores = self.objectness(inst_features)
        return pred_logits, pred_kernel, pred_scores, iam
class MaskBranch(nn.Module):
    """Mask branch: stacked 3x3 convs followed by a 1x1 projection to
    ``kernel_dim`` channels."""

    def __init__(self,
                 in_channels,
                 dim=256,
                 num_convs=4,
                 kernel_dim=128,
                 act_cfg=dict(type='ReLU', inplace=True)):
        super().__init__()
        self.mask_convs = _make_stack_3x3_convs(num_convs, in_channels, dim,
                                                act_cfg)
        self.projection = nn.Conv2d(dim, kernel_dim, kernel_size=1)
        self._init_weights()

    def _init_weights(self):
        """Kaiming-initialize every conv layer in this branch."""
        for module in self.mask_convs.modules():
            if isinstance(module, nn.Conv2d):
                kaiming_init(module)
        kaiming_init(self.projection)

    def forward(self, features):
        """Run the conv stack, then project to ``kernel_dim`` channels."""
        x = self.mask_convs(features)
        x = self.projection(x)
        return x
@MODELS.register_module()
class BaseIAMDecoder(nn.Module):
    """SparseInst decoder combining an IAM instance branch and a mask branch.

    ``in_channels`` is expected to already include +2 channels for the
    normalized (x, y) coordinate maps that ``forward`` concatenates to the
    encoder features.
    """

    def __init__(self,
                 in_channels,
                 num_classes,
                 ins_dim=256,
                 ins_conv=4,
                 mask_dim=256,
                 mask_conv=4,
                 kernel_dim=128,
                 scale_factor=2.0,
                 output_iam=False,
                 num_masks=100,
                 act_cfg=dict(type='ReLU', inplace=True)):
        super().__init__()
        # NOTE: removed a dead no-op statement (`in_channels = in_channels`)
        # that was present here; behavior is unchanged. The caller is
        # responsible for adding +2 for the coordinate channels.
        self.scale_factor = scale_factor
        self.output_iam = output_iam
        self.inst_branch = InstanceBranch(
            in_channels,
            dim=ins_dim,
            num_convs=ins_conv,
            num_masks=num_masks,
            num_classes=num_classes,
            kernel_dim=kernel_dim,
            act_cfg=act_cfg)
        self.mask_branch = MaskBranch(
            in_channels,
            dim=mask_dim,
            num_convs=mask_conv,
            kernel_dim=kernel_dim,
            act_cfg=act_cfg)

    @torch.no_grad()
    def compute_coordinates_linspace(self, x):
        """Coordinate maps in [-1, 1] via linspace (kept for reference).

        linspace is not supported in ONNX, hence the arange-based variant
        below is the one actually used.
        """
        h, w = x.size(2), x.size(3)
        y_loc = torch.linspace(-1, 1, h, device=x.device)
        x_loc = torch.linspace(-1, 1, w, device=x.device)
        y_loc, x_loc = torch.meshgrid(y_loc, x_loc)
        y_loc = y_loc.expand([x.shape[0], 1, -1, -1])
        x_loc = x_loc.expand([x.shape[0], 1, -1, -1])
        locations = torch.cat([x_loc, y_loc], 1)
        return locations.to(x)

    @torch.no_grad()
    def compute_coordinates(self, x):
        """Return a Bx2xHxW map of (x, y) coordinates normalized to [-1, 1]."""
        h, w = x.size(2), x.size(3)
        y_loc = -1.0 + 2.0 * torch.arange(h, device=x.device) / (h - 1)
        x_loc = -1.0 + 2.0 * torch.arange(w, device=x.device) / (w - 1)
        y_loc, x_loc = torch.meshgrid(y_loc, x_loc)
        y_loc = y_loc.expand([x.shape[0], 1, -1, -1])
        x_loc = x_loc.expand([x.shape[0], 1, -1, -1])
        locations = torch.cat([x_loc, y_loc], 1)
        return locations.to(x)

    def forward(self, features):
        """Decode features into class logits, masks and objectness scores.

        Returns a dict with 'pred_logits', 'pred_masks', 'pred_scores' and,
        if ``output_iam`` is set, 'pred_iam'.
        """
        coord_features = self.compute_coordinates(features)
        features = torch.cat([coord_features, features], dim=1)
        pred_logits, pred_kernel, pred_scores, iam = self.inst_branch(features)
        mask_features = self.mask_branch(features)

        N = pred_kernel.shape[1]
        # mask_features: BxCxHxW; masks are a linear combination of the
        # kernel-dim mask features per instance.
        B, C, H, W = mask_features.shape
        pred_masks = torch.bmm(pred_kernel,
                               mask_features.view(B, C,
                                                  H * W)).view(B, N, H, W)

        pred_masks = F.interpolate(
            pred_masks,
            scale_factor=self.scale_factor,
            mode='bilinear',
            align_corners=False)

        output = {
            'pred_logits': pred_logits,
            'pred_masks': pred_masks,
            'pred_scores': pred_scores,
        }

        if self.output_iam:
            iam = F.interpolate(
                iam,
                scale_factor=self.scale_factor,
                mode='bilinear',
                align_corners=False)
            output['pred_iam'] = iam

        return output
class GroupInstanceBranch(nn.Module):
    """Instance branch with grouped IAM prediction.

    The IAM conv predicts ``num_groups`` activation maps per instance
    candidate (a grouped conv); the per-group aggregated features are
    concatenated and fused by a fully-connected layer before the output
    heads.
    """

    def __init__(self,
                 in_channels,
                 num_groups=4,
                 dim=256,
                 num_convs=4,
                 num_masks=100,
                 num_classes=80,
                 kernel_dim=128,
                 act_cfg=dict(type='ReLU', inplace=True)):
        super().__init__()
        self.num_groups = num_groups
        self.num_classes = num_classes
        self.inst_convs = _make_stack_3x3_convs(
            num_convs, in_channels, dim, act_cfg=act_cfg)
        # iam prediction, a group conv
        expand_dim = dim * self.num_groups
        self.iam_conv = nn.Conv2d(
            dim,
            num_masks * self.num_groups,
            3,
            padding=1,
            groups=self.num_groups)
        # outputs
        self.fc = nn.Linear(expand_dim, expand_dim)
        self.cls_score = nn.Linear(expand_dim, self.num_classes)
        self.mask_kernel = nn.Linear(expand_dim, kernel_dim)
        self.objectness = nn.Linear(expand_dim, 1)
        # prior probability used for focal-loss style bias initialization
        self.prior_prob = 0.01
        self._init_weights()

    def _init_weights(self):
        """Kaiming-init convs; bias-init iam/cls heads with the class prior."""
        for m in self.inst_convs.modules():
            if isinstance(m, nn.Conv2d):
                kaiming_init(m)
        bias_value = -math.log((1 - self.prior_prob) / self.prior_prob)
        for module in [self.iam_conv, self.cls_score]:
            init.constant_(module.bias, bias_value)
        init.normal_(self.iam_conv.weight, std=0.01)
        init.normal_(self.cls_score.weight, std=0.01)
        init.normal_(self.mask_kernel.weight, std=0.01)
        init.constant_(self.mask_kernel.bias, 0.0)
        caffe2_xavier_init(self.fc)

    def forward(self, features):
        """Return ``(pred_logits, pred_kernel, pred_scores, iam)``."""
        # instance features (x4 convs)
        features = self.inst_convs(features)
        # predict instance activation maps
        iam = self.iam_conv(features)
        iam_prob = iam.sigmoid()

        B, N = iam_prob.shape[:2]
        C = features.size(1)
        # BxNxHxW -> BxNx(HW)
        iam_prob = iam_prob.view(B, N, -1)
        normalizer = iam_prob.sum(-1).clamp(min=1e-6)
        iam_prob = iam_prob / normalizer[:, :, None]
        # aggregate features: BxCxHxW -> Bx(HW)xC
        inst_features = torch.bmm(iam_prob,
                                  features.view(B, C, -1).permute(0, 2, 1))
        # BUGFIX: the group dimension was hard-coded to 4 instead of
        # self.num_groups, which silently produced wrong feature grouping
        # whenever num_groups != 4 (the soft variant below already used
        # self.num_groups here).
        inst_features = inst_features.reshape(B, self.num_groups,
                                              N // self.num_groups,
                                              -1).transpose(1, 2).reshape(
                                                  B, N // self.num_groups, -1)

        inst_features = F.relu_(self.fc(inst_features))
        # predict classification & segmentation kernel & objectness
        pred_logits = self.cls_score(inst_features)
        pred_kernel = self.mask_kernel(inst_features)
        pred_scores = self.objectness(inst_features)
        return pred_logits, pred_kernel, pred_scores, iam
@MODELS.register_module()
class GroupIAMDecoder(BaseIAMDecoder):
    """IAM decoder whose instance branch uses grouped IAM convolutions."""

    def __init__(self,
                 in_channels,
                 num_classes,
                 num_groups=4,
                 ins_dim=256,
                 ins_conv=4,
                 mask_dim=256,
                 mask_conv=4,
                 kernel_dim=128,
                 scale_factor=2.0,
                 output_iam=False,
                 num_masks=100,
                 act_cfg=dict(type='ReLU', inplace=True)):
        base_kwargs = dict(
            in_channels=in_channels,
            num_classes=num_classes,
            ins_dim=ins_dim,
            ins_conv=ins_conv,
            mask_dim=mask_dim,
            mask_conv=mask_conv,
            kernel_dim=kernel_dim,
            scale_factor=scale_factor,
            output_iam=output_iam,
            num_masks=num_masks,
            act_cfg=act_cfg)
        super().__init__(**base_kwargs)
        # Swap the plain instance branch for the grouped variant.
        self.inst_branch = GroupInstanceBranch(
            in_channels,
            num_groups=num_groups,
            dim=ins_dim,
            num_convs=ins_conv,
            num_masks=num_masks,
            num_classes=num_classes,
            kernel_dim=kernel_dim,
            act_cfg=act_cfg)
class GroupInstanceSoftBranch(GroupInstanceBranch):
    """Grouped instance branch using a spatial softmax (with a learnable
    bias) instead of normalized sigmoid IAMs."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Learnable additive bias applied to the IAM logits before softmax.
        self.softmax_bias = nn.Parameter(torch.ones([1]))

    def forward(self, features):
        """Return ``(pred_logits, pred_kernel, pred_scores, iam)``."""
        # instance features (x4 convs)
        feats = self.inst_convs(features)
        # predict instance activation maps
        iam = self.iam_conv(feats)

        batch, num_all = iam.shape[:2]
        chans = feats.size(1)
        # spatial softmax over each map: BxNxHxW -> BxNx(HW)
        iam_prob = F.softmax(
            iam.view(batch, num_all, -1) + self.softmax_bias, dim=-1)
        # aggregate features: BxCxHxW -> Bx(HW)xC
        inst_feats = torch.bmm(
            iam_prob,
            feats.view(batch, chans, -1).permute(0, 2, 1))

        groups = self.num_groups
        per_group = num_all // groups
        inst_feats = inst_feats.reshape(batch, groups, per_group,
                                        -1).transpose(1, 2).reshape(
                                            batch, per_group, -1)

        inst_feats = F.relu_(self.fc(inst_feats))
        # predict classification & segmentation kernel & objectness
        pred_logits = self.cls_score(inst_feats)
        pred_kernel = self.mask_kernel(inst_feats)
        pred_scores = self.objectness(inst_feats)
        return pred_logits, pred_kernel, pred_scores, iam
@MODELS.register_module()
class GroupIAMSoftDecoder(BaseIAMDecoder):
    """IAM decoder using the softmax-based grouped instance branch."""

    def __init__(self,
                 in_channels,
                 num_classes,
                 num_groups=4,
                 ins_dim=256,
                 ins_conv=4,
                 mask_dim=256,
                 mask_conv=4,
                 kernel_dim=128,
                 scale_factor=2.0,
                 output_iam=False,
                 num_masks=100,
                 act_cfg=dict(type='ReLU', inplace=True)):
        base_kwargs = dict(
            in_channels=in_channels,
            num_classes=num_classes,
            ins_dim=ins_dim,
            ins_conv=ins_conv,
            mask_dim=mask_dim,
            mask_conv=mask_conv,
            kernel_dim=kernel_dim,
            scale_factor=scale_factor,
            output_iam=output_iam,
            num_masks=num_masks,
            act_cfg=act_cfg)
        super().__init__(**base_kwargs)
        # Swap the plain instance branch for the soft grouped variant.
        self.inst_branch = GroupInstanceSoftBranch(
            in_channels,
            num_groups=num_groups,
            dim=ins_dim,
            num_convs=ins_conv,
            num_masks=num_masks,
            num_classes=num_classes,
            kernel_dim=kernel_dim,
            act_cfg=act_cg if False else act_cfg)
| 13,792 | 33.396509 | 79 | py |
ERD | ERD-main/projects/SparseInst/configs/sparseinst_r50_iam_8xb8-ms-270k_coco.py | _base_ = [
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# Register the SparseInst project modules so the registry can build them.
custom_imports = dict(
    imports=['projects.SparseInst.sparseinst'], allow_failed_imports=False)

# Model: SparseInst with a frozen-BN ResNet-50 backbone, instance context
# encoder and the base IAM decoder (in_channels = encoder dim + 2 coord maps).
model = dict(
    type='SparseInst',
    data_preprocessor=dict(
        type='DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_mask=True,
        pad_size_divisor=32),
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(1, 2, 3),
        frozen_stages=0,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    encoder=dict(
        type='InstanceContextEncoder',
        in_channels=[512, 1024, 2048],
        out_channels=256),
    decoder=dict(
        type='BaseIAMDecoder',
        in_channels=256 + 2,  # encoder channels + 2 coordinate maps
        num_classes=80,
        ins_dim=256,
        ins_conv=4,
        mask_dim=256,
        mask_conv=4,
        kernel_dim=128,
        scale_factor=2.0,
        output_iam=False,
        num_masks=100),
    # Training criterion: bipartite matching + focal / dice / mask losses.
    criterion=dict(
        type='SparseInstCriterion',
        num_classes=80,
        assigner=dict(type='SparseInstMatcher', alpha=0.8, beta=0.2),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            alpha=0.25,
            gamma=2.0,
            reduction='sum',
            loss_weight=2.0),
        loss_obj=dict(
            type='CrossEntropyLoss',
            use_sigmoid=True,
            reduction='mean',
            loss_weight=1.0),
        loss_mask=dict(
            type='CrossEntropyLoss',
            use_sigmoid=True,
            reduction='mean',
            loss_weight=5.0),
        loss_dice=dict(
            type='DiceLoss',
            use_sigmoid=True,
            reduction='sum',
            eps=5e-5,
            loss_weight=2.0),
    ),
    test_cfg=dict(score_thr=0.005, mask_thr_binary=0.45))

# Data pipelines: multi-scale training (shorter side 416-640), fixed-scale test.
backend = 'pillow'
train_pipeline = [
    dict(
        type='LoadImageFromFile',
        backend_args={{_base_.backend_args}},
        imdecode_backend=backend),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='RandomChoiceResize',
        scales=[(416, 853), (448, 853), (480, 853), (512, 853), (544, 853),
                (576, 853), (608, 853), (640, 853)],
        keep_ratio=True,
        backend=backend),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PackDetInputs')
]
test_pipeline = [
    dict(
        type='LoadImageFromFile',
        backend_args={{_base_.backend_args}},
        imdecode_backend=backend),
    dict(type='Resize', scale=(640, 853), keep_ratio=True, backend=backend),
    dict(
        type='PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
train_dataloader = dict(
    batch_size=8,
    num_workers=8,
    sampler=dict(type='InfiniteSampler'),
    dataset=dict(pipeline=train_pipeline))
test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
val_dataloader = test_dataloader
# Only mask AP is evaluated (SparseInst is a mask-only detector).
val_evaluator = dict(metric='segm')
test_evaluator = val_evaluator

# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(_delete_=True, type='AdamW', lr=0.00005, weight_decay=0.05))

# Iteration-based schedule: 270k iters, eval every 10k.
train_cfg = dict(
    _delete_=True,
    type='IterBasedTrainLoop',
    max_iters=270000,
    val_interval=10000)

# learning rate
param_scheduler = [
    dict(
        type='MultiStepLR',
        begin=0,
        end=270000,
        by_epoch=False,
        milestones=[210000, 250000],
        gamma=0.1)
]
default_hooks = dict(
    checkpoint=dict(by_epoch=False, interval=10000, max_keep_ckpts=3))
log_processor = dict(by_epoch=False)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64, enable=True)
| 4,133 | 27.122449 | 79 | py |
ERD | ERD-main/projects/EfficientDet/convert_tf_to_pt.py | import argparse
import numpy as np
import torch
from tensorflow.python.training import py_checkpoint_reader
torch.set_printoptions(precision=20)
def tf2pth(v):
    """Convert a TF weight array to PyTorch memory layout.

    4-D conv kernels are permuted HWIO -> OIHW, 2-D matrices are
    transposed, and everything else is returned unchanged. Permuted
    results are made C-contiguous.
    """
    ndim = v.ndim
    if ndim == 4:
        permuted = v.transpose(3, 2, 0, 1)
    elif ndim == 2:
        permuted = v.transpose()
    else:
        return v
    return np.ascontiguousarray(permuted)
def convert_key(model_name, bifpn_repeats, weights):
    """Map TF EfficientDet checkpoint variables to MMDet state-dict keys.

    Args:
        model_name (str): backbone scope name in the checkpoint,
            e.g. 'efficientnet-b0'.
        bifpn_repeats (int): number of BiFPN stages in the model.
        weights (dict): checkpoint variable name -> torch.Tensor, already in
            PyTorch memory layout (see ``tf2pth``).

    Returns:
        dict: PyTorch state-dict style mapping of parameter names to tensors.
    """
    # BiFPN fusion weights are stored as separate scalars (WSM, WSM_1, ...)
    # in the checkpoint; gather them per stage into small vectors.
    # -1e4 acts as a "not yet filled" sentinel; once all entries of a vector
    # exceed it, the assembled parameter is written to the output dict.
    p6_w1 = [
        torch.tensor([-1e4, -1e4], dtype=torch.float64)
        for _ in range(bifpn_repeats)
    ]
    p5_w1 = [
        torch.tensor([-1e4, -1e4], dtype=torch.float64)
        for _ in range(bifpn_repeats)
    ]
    p4_w1 = [
        torch.tensor([-1e4, -1e4], dtype=torch.float64)
        for _ in range(bifpn_repeats)
    ]
    p3_w1 = [
        torch.tensor([-1e4, -1e4], dtype=torch.float64)
        for _ in range(bifpn_repeats)
    ]
    p4_w2 = [
        torch.tensor([-1e4, -1e4, -1e4], dtype=torch.float64)
        for _ in range(bifpn_repeats)
    ]
    p5_w2 = [
        torch.tensor([-1e4, -1e4, -1e4], dtype=torch.float64)
        for _ in range(bifpn_repeats)
    ]
    p6_w2 = [
        torch.tensor([-1e4, -1e4, -1e4], dtype=torch.float64)
        for _ in range(bifpn_repeats)
    ]
    p7_w2 = [
        torch.tensor([-1e4, -1e4], dtype=torch.float64)
        for _ in range(bifpn_repeats)
    ]
    # TF block index -> 'stage.block' suffix of the MMDet backbone layer.
    # NOTE(review): covers 16 MBConv blocks (efficientnet-b0 layout);
    # verify it matches larger backbone variants before reuse.
    idx2key = {
        0: '1.0',
        1: '2.0',
        2: '2.1',
        3: '3.0',
        4: '3.1',
        5: '4.0',
        6: '4.1',
        7: '4.2',
        8: '4.3',
        9: '4.4',
        10: '4.5',
        11: '5.0',
        12: '5.1',
        13: '5.2',
        14: '5.3',
        15: '5.4'
    }
    m = dict()
    for k, v in weights.items():
        # Skip EMA shadow variables and the training step counter.
        if 'Exponential' in k or 'global_step' in k:
            continue

        seg = k.split('/')
        if len(seg) == 1:
            continue
        if seg[2] == 'depthwise_conv2d':
            v = v.transpose(1, 0)
        # --- backbone (EfficientNet) variables -------------------------
        if seg[0] == model_name:
            if seg[1] == 'stem':
                prefix = 'backbone.layers.0'
                mapping = {
                    'conv2d/kernel': 'conv.weight',
                    'tpu_batch_normalization/beta': 'bn.bias',
                    'tpu_batch_normalization/gamma': 'bn.weight',
                    'tpu_batch_normalization/moving_mean': 'bn.running_mean',
                    'tpu_batch_normalization/moving_variance':
                    'bn.running_var',
                }
                suffix = mapping['/'.join(seg[2:])]
                m[prefix + '.' + suffix] = v

            elif seg[1].startswith('blocks_'):
                idx = int(seg[1][7:])
                prefix = '.'.join(['backbone', 'layers', idx2key[idx]])
                base_mapping = {
                    'depthwise_conv2d/depthwise_kernel':
                    'depthwise_conv.conv.weight',
                    'se/conv2d/kernel': 'se.conv1.conv.weight',
                    'se/conv2d/bias': 'se.conv1.conv.bias',
                    'se/conv2d_1/kernel': 'se.conv2.conv.weight',
                    'se/conv2d_1/bias': 'se.conv2.conv.bias'
                }
                # Block 0 has no expand conv, so its BN numbering differs.
                if idx == 0:
                    mapping = {
                        'conv2d/kernel':
                        'linear_conv.conv.weight',
                        'tpu_batch_normalization/beta':
                        'depthwise_conv.bn.bias',
                        'tpu_batch_normalization/gamma':
                        'depthwise_conv.bn.weight',
                        'tpu_batch_normalization/moving_mean':
                        'depthwise_conv.bn.running_mean',
                        'tpu_batch_normalization/moving_variance':
                        'depthwise_conv.bn.running_var',
                        'tpu_batch_normalization_1/beta':
                        'linear_conv.bn.bias',
                        'tpu_batch_normalization_1/gamma':
                        'linear_conv.bn.weight',
                        'tpu_batch_normalization_1/moving_mean':
                        'linear_conv.bn.running_mean',
                        'tpu_batch_normalization_1/moving_variance':
                        'linear_conv.bn.running_var',
                    }
                else:
                    mapping = {
                        'depthwise_conv2d/depthwise_kernel':
                        'depthwise_conv.conv.weight',
                        'conv2d/kernel':
                        'expand_conv.conv.weight',
                        'conv2d_1/kernel':
                        'linear_conv.conv.weight',
                        'tpu_batch_normalization/beta':
                        'expand_conv.bn.bias',
                        'tpu_batch_normalization/gamma':
                        'expand_conv.bn.weight',
                        'tpu_batch_normalization/moving_mean':
                        'expand_conv.bn.running_mean',
                        'tpu_batch_normalization/moving_variance':
                        'expand_conv.bn.running_var',
                        'tpu_batch_normalization_1/beta':
                        'depthwise_conv.bn.bias',
                        'tpu_batch_normalization_1/gamma':
                        'depthwise_conv.bn.weight',
                        'tpu_batch_normalization_1/moving_mean':
                        'depthwise_conv.bn.running_mean',
                        'tpu_batch_normalization_1/moving_variance':
                        'depthwise_conv.bn.running_var',
                        'tpu_batch_normalization_2/beta':
                        'linear_conv.bn.bias',
                        'tpu_batch_normalization_2/gamma':
                        'linear_conv.bn.weight',
                        'tpu_batch_normalization_2/moving_mean':
                        'linear_conv.bn.running_mean',
                        'tpu_batch_normalization_2/moving_variance':
                        'linear_conv.bn.running_var',
                    }
                mapping.update(base_mapping)
                suffix = mapping['/'.join(seg[2:])]
                m[prefix + '.' + suffix] = v
        # --- P5 -> P6 resampling block ---------------------------------
        elif seg[0] == 'resample_p6':
            prefix = 'neck.bifpn.0.p5_to_p6.0'
            mapping = {
                'conv2d/kernel': 'down_conv.weight',
                'conv2d/bias': 'down_conv.bias',
                'bn/beta': 'bn.bias',
                'bn/gamma': 'bn.weight',
                'bn/moving_mean': 'bn.running_mean',
                'bn/moving_variance': 'bn.running_var',
            }
            suffix = mapping['/'.join(seg[1:])]
            m[prefix + '.' + suffix] = v
        # --- BiFPN cells: one branch per fusion node (fnode0..fnode7) ---
        elif seg[0] == 'fpn_cells':
            fpn_idx = int(seg[1][5:])
            prefix = '.'.join(['neck', 'bifpn', str(fpn_idx)])
            fnode_id = int(seg[2][5])
            if fnode_id == 0:
                mapping = {
                    'op_after_combine5/conv/depthwise_kernel':
                    'conv6_up.depthwise_conv.weight',
                    'op_after_combine5/conv/pointwise_kernel':
                    'conv6_up.pointwise_conv.weight',
                    'op_after_combine5/conv/bias':
                    'conv6_up.pointwise_conv.bias',
                    'op_after_combine5/bn/beta':
                    'conv6_up.bn.bias',
                    'op_after_combine5/bn/gamma':
                    'conv6_up.bn.weight',
                    'op_after_combine5/bn/moving_mean':
                    'conv6_up.bn.running_mean',
                    'op_after_combine5/bn/moving_variance':
                    'conv6_up.bn.running_var',
                }
                if seg[3] != 'WSM' and seg[3] != 'WSM_1':
                    suffix = mapping['/'.join(seg[3:])]
                    if 'depthwise_conv' in suffix:
                        v = v.transpose(1, 0)
                    m[prefix + '.' + suffix] = v
                elif seg[3] == 'WSM':
                    p6_w1[fpn_idx][0] = v
                elif seg[3] == 'WSM_1':
                    p6_w1[fpn_idx][1] = v
                if torch.min(p6_w1[fpn_idx]) > -1e4:
                    m[prefix + '.p6_w1'] = p6_w1[fpn_idx]

            elif fnode_id == 1:
                base_mapping = {
                    'op_after_combine6/conv/depthwise_kernel':
                    'conv5_up.depthwise_conv.weight',
                    'op_after_combine6/conv/pointwise_kernel':
                    'conv5_up.pointwise_conv.weight',
                    'op_after_combine6/conv/bias':
                    'conv5_up.pointwise_conv.bias',
                    'op_after_combine6/bn/beta':
                    'conv5_up.bn.bias',
                    'op_after_combine6/bn/gamma':
                    'conv5_up.bn.weight',
                    'op_after_combine6/bn/moving_mean':
                    'conv5_up.bn.running_mean',
                    'op_after_combine6/bn/moving_variance':
                    'conv5_up.bn.running_var',
                }
                # The first BiFPN stage also owns the channel-reduction
                # convs from the backbone outputs.
                if fpn_idx == 0:
                    mapping = {
                        'resample_0_2_6/conv2d/kernel':
                        'p5_down_channel.down_conv.weight',
                        'resample_0_2_6/conv2d/bias':
                        'p5_down_channel.down_conv.bias',
                        'resample_0_2_6/bn/beta':
                        'p5_down_channel.bn.bias',
                        'resample_0_2_6/bn/gamma':
                        'p5_down_channel.bn.weight',
                        'resample_0_2_6/bn/moving_mean':
                        'p5_down_channel.bn.running_mean',
                        'resample_0_2_6/bn/moving_variance':
                        'p5_down_channel.bn.running_var',
                    }
                    base_mapping.update(mapping)
                if seg[3] != 'WSM' and seg[3] != 'WSM_1':
                    suffix = base_mapping['/'.join(seg[3:])]
                    if 'depthwise_conv' in suffix:
                        v = v.transpose(1, 0)
                    m[prefix + '.' + suffix] = v
                elif seg[3] == 'WSM':
                    p5_w1[fpn_idx][0] = v
                elif seg[3] == 'WSM_1':
                    p5_w1[fpn_idx][1] = v
                if torch.min(p5_w1[fpn_idx]) > -1e4:
                    m[prefix + '.p5_w1'] = p5_w1[fpn_idx]

            elif fnode_id == 2:
                base_mapping = {
                    'op_after_combine7/conv/depthwise_kernel':
                    'conv4_up.depthwise_conv.weight',
                    'op_after_combine7/conv/pointwise_kernel':
                    'conv4_up.pointwise_conv.weight',
                    'op_after_combine7/conv/bias':
                    'conv4_up.pointwise_conv.bias',
                    'op_after_combine7/bn/beta':
                    'conv4_up.bn.bias',
                    'op_after_combine7/bn/gamma':
                    'conv4_up.bn.weight',
                    'op_after_combine7/bn/moving_mean':
                    'conv4_up.bn.running_mean',
                    'op_after_combine7/bn/moving_variance':
                    'conv4_up.bn.running_var',
                }
                if fpn_idx == 0:
                    mapping = {
                        'resample_0_1_7/conv2d/kernel':
                        'p4_down_channel.down_conv.weight',
                        'resample_0_1_7/conv2d/bias':
                        'p4_down_channel.down_conv.bias',
                        'resample_0_1_7/bn/beta':
                        'p4_down_channel.bn.bias',
                        'resample_0_1_7/bn/gamma':
                        'p4_down_channel.bn.weight',
                        'resample_0_1_7/bn/moving_mean':
                        'p4_down_channel.bn.running_mean',
                        'resample_0_1_7/bn/moving_variance':
                        'p4_down_channel.bn.running_var',
                    }
                    base_mapping.update(mapping)
                if seg[3] != 'WSM' and seg[3] != 'WSM_1':
                    suffix = base_mapping['/'.join(seg[3:])]
                    if 'depthwise_conv' in suffix:
                        v = v.transpose(1, 0)
                    m[prefix + '.' + suffix] = v
                elif seg[3] == 'WSM':
                    p4_w1[fpn_idx][0] = v
                elif seg[3] == 'WSM_1':
                    p4_w1[fpn_idx][1] = v
                if torch.min(p4_w1[fpn_idx]) > -1e4:
                    m[prefix + '.p4_w1'] = p4_w1[fpn_idx]

            elif fnode_id == 3:
                base_mapping = {
                    'op_after_combine8/conv/depthwise_kernel':
                    'conv3_up.depthwise_conv.weight',
                    'op_after_combine8/conv/pointwise_kernel':
                    'conv3_up.pointwise_conv.weight',
                    'op_after_combine8/conv/bias':
                    'conv3_up.pointwise_conv.bias',
                    'op_after_combine8/bn/beta':
                    'conv3_up.bn.bias',
                    'op_after_combine8/bn/gamma':
                    'conv3_up.bn.weight',
                    'op_after_combine8/bn/moving_mean':
                    'conv3_up.bn.running_mean',
                    'op_after_combine8/bn/moving_variance':
                    'conv3_up.bn.running_var',
                }
                if fpn_idx == 0:
                    mapping = {
                        'resample_0_0_8/conv2d/kernel':
                        'p3_down_channel.down_conv.weight',
                        'resample_0_0_8/conv2d/bias':
                        'p3_down_channel.down_conv.bias',
                        'resample_0_0_8/bn/beta':
                        'p3_down_channel.bn.bias',
                        'resample_0_0_8/bn/gamma':
                        'p3_down_channel.bn.weight',
                        'resample_0_0_8/bn/moving_mean':
                        'p3_down_channel.bn.running_mean',
                        'resample_0_0_8/bn/moving_variance':
                        'p3_down_channel.bn.running_var',
                    }
                    base_mapping.update(mapping)
                if seg[3] != 'WSM' and seg[3] != 'WSM_1':
                    suffix = base_mapping['/'.join(seg[3:])]
                    if 'depthwise_conv' in suffix:
                        v = v.transpose(1, 0)
                    m[prefix + '.' + suffix] = v
                elif seg[3] == 'WSM':
                    p3_w1[fpn_idx][0] = v
                elif seg[3] == 'WSM_1':
                    p3_w1[fpn_idx][1] = v
                if torch.min(p3_w1[fpn_idx]) > -1e4:
                    m[prefix + '.p3_w1'] = p3_w1[fpn_idx]
            elif fnode_id == 4:
                base_mapping = {
                    'op_after_combine9/conv/depthwise_kernel':
                    'conv4_down.depthwise_conv.weight',
                    'op_after_combine9/conv/pointwise_kernel':
                    'conv4_down.pointwise_conv.weight',
                    'op_after_combine9/conv/bias':
                    'conv4_down.pointwise_conv.bias',
                    'op_after_combine9/bn/beta':
                    'conv4_down.bn.bias',
                    'op_after_combine9/bn/gamma':
                    'conv4_down.bn.weight',
                    'op_after_combine9/bn/moving_mean':
                    'conv4_down.bn.running_mean',
                    'op_after_combine9/bn/moving_variance':
                    'conv4_down.bn.running_var',
                }
                if fpn_idx == 0:
                    mapping = {
                        'resample_0_1_9/conv2d/kernel':
                        'p4_level_connection.down_conv.weight',
                        'resample_0_1_9/conv2d/bias':
                        'p4_level_connection.down_conv.bias',
                        'resample_0_1_9/bn/beta':
                        'p4_level_connection.bn.bias',
                        'resample_0_1_9/bn/gamma':
                        'p4_level_connection.bn.weight',
                        'resample_0_1_9/bn/moving_mean':
                        'p4_level_connection.bn.running_mean',
                        'resample_0_1_9/bn/moving_variance':
                        'p4_level_connection.bn.running_var',
                    }
                    base_mapping.update(mapping)
                # Bottom-up nodes fuse three inputs, hence WSM_2 as well.
                if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':
                    suffix = base_mapping['/'.join(seg[3:])]
                    if 'depthwise_conv' in suffix:
                        v = v.transpose(1, 0)
                    m[prefix + '.' + suffix] = v
                elif seg[3] == 'WSM':
                    p4_w2[fpn_idx][0] = v
                elif seg[3] == 'WSM_1':
                    p4_w2[fpn_idx][1] = v
                elif seg[3] == 'WSM_2':
                    p4_w2[fpn_idx][2] = v
                if torch.min(p4_w2[fpn_idx]) > -1e4:
                    m[prefix + '.p4_w2'] = p4_w2[fpn_idx]

            elif fnode_id == 5:
                base_mapping = {
                    'op_after_combine10/conv/depthwise_kernel':
                    'conv5_down.depthwise_conv.weight',
                    'op_after_combine10/conv/pointwise_kernel':
                    'conv5_down.pointwise_conv.weight',
                    'op_after_combine10/conv/bias':
                    'conv5_down.pointwise_conv.bias',
                    'op_after_combine10/bn/beta':
                    'conv5_down.bn.bias',
                    'op_after_combine10/bn/gamma':
                    'conv5_down.bn.weight',
                    'op_after_combine10/bn/moving_mean':
                    'conv5_down.bn.running_mean',
                    'op_after_combine10/bn/moving_variance':
                    'conv5_down.bn.running_var',
                }
                if fpn_idx == 0:
                    mapping = {
                        'resample_0_2_10/conv2d/kernel':
                        'p5_level_connection.down_conv.weight',
                        'resample_0_2_10/conv2d/bias':
                        'p5_level_connection.down_conv.bias',
                        'resample_0_2_10/bn/beta':
                        'p5_level_connection.bn.bias',
                        'resample_0_2_10/bn/gamma':
                        'p5_level_connection.bn.weight',
                        'resample_0_2_10/bn/moving_mean':
                        'p5_level_connection.bn.running_mean',
                        'resample_0_2_10/bn/moving_variance':
                        'p5_level_connection.bn.running_var',
                    }
                    base_mapping.update(mapping)
                if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':
                    suffix = base_mapping['/'.join(seg[3:])]
                    if 'depthwise_conv' in suffix:
                        v = v.transpose(1, 0)
                    m[prefix + '.' + suffix] = v
                elif seg[3] == 'WSM':
                    p5_w2[fpn_idx][0] = v
                elif seg[3] == 'WSM_1':
                    p5_w2[fpn_idx][1] = v
                elif seg[3] == 'WSM_2':
                    p5_w2[fpn_idx][2] = v
                if torch.min(p5_w2[fpn_idx]) > -1e4:
                    m[prefix + '.p5_w2'] = p5_w2[fpn_idx]

            elif fnode_id == 6:
                base_mapping = {
                    'op_after_combine11/conv/depthwise_kernel':
                    'conv6_down.depthwise_conv.weight',
                    'op_after_combine11/conv/pointwise_kernel':
                    'conv6_down.pointwise_conv.weight',
                    'op_after_combine11/conv/bias':
                    'conv6_down.pointwise_conv.bias',
                    'op_after_combine11/bn/beta':
                    'conv6_down.bn.bias',
                    'op_after_combine11/bn/gamma':
                    'conv6_down.bn.weight',
                    'op_after_combine11/bn/moving_mean':
                    'conv6_down.bn.running_mean',
                    'op_after_combine11/bn/moving_variance':
                    'conv6_down.bn.running_var',
                }
                if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':
                    suffix = base_mapping['/'.join(seg[3:])]
                    if 'depthwise_conv' in suffix:
                        v = v.transpose(1, 0)
                    m[prefix + '.' + suffix] = v
                elif seg[3] == 'WSM':
                    p6_w2[fpn_idx][0] = v
                elif seg[3] == 'WSM_1':
                    p6_w2[fpn_idx][1] = v
                elif seg[3] == 'WSM_2':
                    p6_w2[fpn_idx][2] = v
                if torch.min(p6_w2[fpn_idx]) > -1e4:
                    m[prefix + '.p6_w2'] = p6_w2[fpn_idx]

            elif fnode_id == 7:
                base_mapping = {
                    'op_after_combine12/conv/depthwise_kernel':
                    'conv7_down.depthwise_conv.weight',
                    'op_after_combine12/conv/pointwise_kernel':
                    'conv7_down.pointwise_conv.weight',
                    'op_after_combine12/conv/bias':
                    'conv7_down.pointwise_conv.bias',
                    'op_after_combine12/bn/beta':
                    'conv7_down.bn.bias',
                    'op_after_combine12/bn/gamma':
                    'conv7_down.bn.weight',
                    'op_after_combine12/bn/moving_mean':
                    'conv7_down.bn.running_mean',
                    'op_after_combine12/bn/moving_variance':
                    'conv7_down.bn.running_var',
                }
                if seg[3] != 'WSM' and seg[3] != 'WSM_1' and seg[3] != 'WSM_2':
                    suffix = base_mapping['/'.join(seg[3:])]
                    if 'depthwise_conv' in suffix:
                        v = v.transpose(1, 0)
                    m[prefix + '.' + suffix] = v
                elif seg[3] == 'WSM':
                    p7_w2[fpn_idx][0] = v
                elif seg[3] == 'WSM_1':
                    p7_w2[fpn_idx][1] = v
                if torch.min(p7_w2[fpn_idx]) > -1e4:
                    m[prefix + '.p7_w2'] = p7_w2[fpn_idx]
        # --- box regression head ---------------------------------------
        elif seg[0] == 'box_net':
            if 'box-predict' in seg[1]:
                prefix = '.'.join(['bbox_head', 'reg_header'])
                base_mapping = {
                    'depthwise_kernel': 'depthwise_conv.weight',
                    'pointwise_kernel': 'pointwise_conv.weight',
                    'bias': 'pointwise_conv.bias'
                }
                suffix = base_mapping['/'.join(seg[2:])]
                if 'depthwise_conv' in suffix:
                    v = v.transpose(1, 0)
                m[prefix + '.' + suffix] = v

            elif 'bn' in seg[1]:
                bbox_conv_idx = int(seg[1][4])
                # TF names the per-level BNs starting at 3; shift to 0-based.
                bbox_bn_idx = int(seg[1][9]) - 3
                prefix = '.'.join([
                    'bbox_head', 'reg_bn_list',
                    str(bbox_conv_idx),
                    str(bbox_bn_idx)
                ])
                base_mapping = {
                    'beta': 'bias',
                    'gamma': 'weight',
                    'moving_mean': 'running_mean',
                    'moving_variance': 'running_var'
                }
                suffix = base_mapping['/'.join(seg[2:])]
                m[prefix + '.' + suffix] = v

            else:
                bbox_conv_idx = int(seg[1][4])
                prefix = '.'.join(
                    ['bbox_head', 'reg_conv_list',
                     str(bbox_conv_idx)])
                base_mapping = {
                    'depthwise_kernel': 'depthwise_conv.weight',
                    'pointwise_kernel': 'pointwise_conv.weight',
                    'bias': 'pointwise_conv.bias'
                }
                suffix = base_mapping['/'.join(seg[2:])]
                if 'depthwise_conv' in suffix:
                    v = v.transpose(1, 0)
                m[prefix + '.' + suffix] = v
        # --- classification head ---------------------------------------
        elif seg[0] == 'class_net':
            if 'class-predict' in seg[1]:
                prefix = '.'.join(['bbox_head', 'cls_header'])
                base_mapping = {
                    'depthwise_kernel': 'depthwise_conv.weight',
                    'pointwise_kernel': 'pointwise_conv.weight',
                    'bias': 'pointwise_conv.bias'
                }
                suffix = base_mapping['/'.join(seg[2:])]
                if 'depthwise_conv' in suffix:
                    v = v.transpose(1, 0)
                m[prefix + '.' + suffix] = v

            elif 'bn' in seg[1]:
                cls_conv_idx = int(seg[1][6])
                cls_bn_idx = int(seg[1][11]) - 3
                prefix = '.'.join([
                    'bbox_head', 'cls_bn_list',
                    str(cls_conv_idx),
                    str(cls_bn_idx)
                ])
                base_mapping = {
                    'beta': 'bias',
                    'gamma': 'weight',
                    'moving_mean': 'running_mean',
                    'moving_variance': 'running_var'
                }
                suffix = base_mapping['/'.join(seg[2:])]
                m[prefix + '.' + suffix] = v
            else:
                cls_conv_idx = int(seg[1][6])
                prefix = '.'.join(
                    ['bbox_head', 'cls_conv_list',
                     str(cls_conv_idx)])
                base_mapping = {
                    'depthwise_kernel': 'depthwise_conv.weight',
                    'pointwise_kernel': 'pointwise_conv.weight',
                    'bias': 'pointwise_conv.bias'
                }
                suffix = base_mapping['/'.join(seg[2:])]
                if 'depthwise_conv' in suffix:
                    v = v.transpose(1, 0)
                m[prefix + '.' + suffix] = v
    return m
def parse_args():
    """Parse command-line options for the TF -> PyTorch weight converter."""
    parser = argparse.ArgumentParser(
        description='convert efficientdet weight from tensorflow to pytorch')
    parser.add_argument(
        '--backbone',
        type=str,
        help='efficientnet model name, like efficientnet-b0')
    parser.add_argument(
        '--tensorflow_weight',
        type=str,
        help='efficientdet tensorflow weight name, like efficientdet-d0/model')
    parser.add_argument(
        '--out_weight',
        type=str,
        help='efficientdet pytorch weight name like demo.pth')
    return parser.parse_args()
def main():
    """Entry point: load a TF EfficientDet checkpoint and save a .pth file."""
    args = parse_args()
    model_name = args.backbone
    ori_weight_name = args.tensorflow_weight
    out_name = args.out_weight
    # EfficientDet scaling index (d0-d7) -> number of BiFPN repeats.
    repeat_map = {
        0: 3,
        1: 4,
        2: 5,
        3: 6,
        4: 7,
        5: 7,
        6: 8,
        7: 8,
    }
    reader = py_checkpoint_reader.NewCheckpointReader(ori_weight_name)
    # Read every checkpoint variable and convert it to PyTorch layout.
    weights = {
        n: torch.as_tensor(tf2pth(reader.get_tensor(n)))
        for (n, _) in reader.get_variable_to_shape_map().items()
    }
    # model_name[14] is the digit in 'efficientnet-bX'; assumes the backbone
    # and detector share the same scaling index -- TODO confirm for
    # non-standard backbone/detector pairings.
    bifpn_repeats = repeat_map[int(model_name[14])]
    out = convert_key(model_name, bifpn_repeats, weights)
    result = {'state_dict': out}
    torch.save(result, out_name)


if __name__ == '__main__':
    main()
| 26,971 | 42.017544 | 79 | py |
ERD | ERD-main/projects/EfficientDet/efficientdet/bifpn.py | from typing import List
import torch
import torch.nn as nn
from mmcv.cnn.bricks import Swish
from mmengine.model import BaseModule
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig, OptConfigType
from .utils import DepthWiseConvBlock, DownChannelBlock, MaxPool2dSamePadding
class BiFPNStage(nn.Module):
"""
in_channels: List[int], input dim for P3, P4, P5
out_channels: int, output dim for P2 - P7
first_time: int, whether is the first bifpnstage
conv_bn_act_pattern: bool, whether use conv_bn_act_pattern
norm_cfg: (:obj:`ConfigDict` or dict, optional): Config dict for
normalization layer.
epsilon: float, hyperparameter in fusion features
"""
    def __init__(self,
                 in_channels: List[int],
                 out_channels: int,
                 first_time: bool = False,
                 apply_bn_for_resampling: bool = True,
                 conv_bn_act_pattern: bool = False,
                 norm_cfg: OptConfigType = dict(
                     type='BN', momentum=1e-2, eps=1e-3),
                 epsilon: float = 1e-4) -> None:
        """Build channel-reduction, resampling, fusion convs and weights."""
        super().__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.first_time = first_time
        self.apply_bn_for_resampling = apply_bn_for_resampling
        self.conv_bn_act_pattern = conv_bn_act_pattern
        self.norm_cfg = norm_cfg
        self.epsilon = epsilon

        # The first stage receives raw backbone features (P3-P5): reduce
        # their channels, derive P6/P7, and keep extra "level connection"
        # paths for the bottom-up pass.
        if self.first_time:
            self.p5_down_channel = DownChannelBlock(
                self.in_channels[-1],
                self.out_channels,
                apply_norm=self.apply_bn_for_resampling,
                conv_bn_act_pattern=self.conv_bn_act_pattern,
                norm_cfg=norm_cfg)
            self.p4_down_channel = DownChannelBlock(
                self.in_channels[-2],
                self.out_channels,
                apply_norm=self.apply_bn_for_resampling,
                conv_bn_act_pattern=self.conv_bn_act_pattern,
                norm_cfg=norm_cfg)
            self.p3_down_channel = DownChannelBlock(
                self.in_channels[-3],
                self.out_channels,
                apply_norm=self.apply_bn_for_resampling,
                conv_bn_act_pattern=self.conv_bn_act_pattern,
                norm_cfg=norm_cfg)
            self.p5_to_p6 = nn.Sequential(
                DownChannelBlock(
                    self.in_channels[-1],
                    self.out_channels,
                    apply_norm=self.apply_bn_for_resampling,
                    conv_bn_act_pattern=self.conv_bn_act_pattern,
                    norm_cfg=norm_cfg), MaxPool2dSamePadding(3, 2))
            self.p6_to_p7 = MaxPool2dSamePadding(3, 2)
            self.p4_level_connection = DownChannelBlock(
                self.in_channels[-2],
                self.out_channels,
                apply_norm=self.apply_bn_for_resampling,
                conv_bn_act_pattern=self.conv_bn_act_pattern,
                norm_cfg=norm_cfg)
            self.p5_level_connection = DownChannelBlock(
                self.in_channels[-1],
                self.out_channels,
                apply_norm=self.apply_bn_for_resampling,
                conv_bn_act_pattern=self.conv_bn_act_pattern,
                norm_cfg=norm_cfg)

        # top to bottom: feature map up-sample modules
        self.p6_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.p5_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.p4_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.p3_upsample = nn.Upsample(scale_factor=2, mode='nearest')

        # bottom to up: feature map down_sample module
        self.p4_down_sample = MaxPool2dSamePadding(3, 2)
        self.p5_down_sample = MaxPool2dSamePadding(3, 2)
        self.p6_down_sample = MaxPool2dSamePadding(3, 2)
        self.p7_down_sample = MaxPool2dSamePadding(3, 2)

        # Fuse Conv Layers
        self.conv6_up = DepthWiseConvBlock(
            out_channels,
            out_channels,
            apply_norm=self.apply_bn_for_resampling,
            conv_bn_act_pattern=self.conv_bn_act_pattern,
            norm_cfg=norm_cfg)
        self.conv5_up = DepthWiseConvBlock(
            out_channels,
            out_channels,
            apply_norm=self.apply_bn_for_resampling,
            conv_bn_act_pattern=self.conv_bn_act_pattern,
            norm_cfg=norm_cfg)
        self.conv4_up = DepthWiseConvBlock(
            out_channels,
            out_channels,
            apply_norm=self.apply_bn_for_resampling,
            conv_bn_act_pattern=self.conv_bn_act_pattern,
            norm_cfg=norm_cfg)
        self.conv3_up = DepthWiseConvBlock(
            out_channels,
            out_channels,
            apply_norm=self.apply_bn_for_resampling,
            conv_bn_act_pattern=self.conv_bn_act_pattern,
            norm_cfg=norm_cfg)
        self.conv4_down = DepthWiseConvBlock(
            out_channels,
            out_channels,
            apply_norm=self.apply_bn_for_resampling,
            conv_bn_act_pattern=self.conv_bn_act_pattern,
            norm_cfg=norm_cfg)
        self.conv5_down = DepthWiseConvBlock(
            out_channels,
            out_channels,
            apply_norm=self.apply_bn_for_resampling,
            conv_bn_act_pattern=self.conv_bn_act_pattern,
            norm_cfg=norm_cfg)
        self.conv6_down = DepthWiseConvBlock(
            out_channels,
            out_channels,
            apply_norm=self.apply_bn_for_resampling,
            conv_bn_act_pattern=self.conv_bn_act_pattern,
            norm_cfg=norm_cfg)
        self.conv7_down = DepthWiseConvBlock(
            out_channels,
            out_channels,
            apply_norm=self.apply_bn_for_resampling,
            conv_bn_act_pattern=self.conv_bn_act_pattern,
            norm_cfg=norm_cfg)

        # weights
        # Learnable fusion weights for each node: 2 inputs on the top-down
        # path (and P7), 3 inputs on the bottom-up path; ReLU keeps them
        # non-negative before normalization in `forward`.
        self.p6_w1 = nn.Parameter(
            torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p6_w1_relu = nn.ReLU()
        self.p5_w1 = nn.Parameter(
            torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p5_w1_relu = nn.ReLU()
        self.p4_w1 = nn.Parameter(
            torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p4_w1_relu = nn.ReLU()
        self.p3_w1 = nn.Parameter(
            torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p3_w1_relu = nn.ReLU()

        self.p4_w2 = nn.Parameter(
            torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.p4_w2_relu = nn.ReLU()
        self.p5_w2 = nn.Parameter(
            torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.p5_w2_relu = nn.ReLU()
        self.p6_w2 = nn.Parameter(
            torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.p6_w2_relu = nn.ReLU()
        self.p7_w2 = nn.Parameter(
            torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p7_w2_relu = nn.ReLU()

        self.swish = Swish()
def combine(self, x):
if not self.conv_bn_act_pattern:
x = self.swish(x)
return x
    def forward(self, x):
        """Run one BiFPN fusion pass (top-down then bottom-up).

        Args:
            x (tuple[Tensor]): ``(P3, P4, P5)`` backbone features when
                ``self.first_time`` is True, otherwise the five maps
                ``(P3, P4, P5, P6, P7)`` produced by the previous stage.

        Returns:
            tuple[Tensor]: Fused ``(P3, P4, P5, P6, P7)`` feature maps.
        """
        if self.first_time:
            p3, p4, p5 = x
            # build feature map P6
            p6_in = self.p5_to_p6(p5)
            # build feature map P7
            p7_in = self.p6_to_p7(p6_in)
            p3_in = self.p3_down_channel(p3)
            p4_in = self.p4_down_channel(p4)
            p5_in = self.p5_down_channel(p5)
        else:
            p3_in, p4_in, p5_in, p6_in, p7_in = x
        # Top-down pathway: each level is fused with the upsampled level
        # above it; fusion weights are ReLU'd then normalized to sum to 1
        # (epsilon avoids division by zero — "fast normalized fusion").
        # Weights for P6_0 and P7_0 to P6_1
        p6_w1 = self.p6_w1_relu(self.p6_w1)
        weight = p6_w1 / (torch.sum(p6_w1, dim=0) + self.epsilon)
        # Connections for P6_0 and P7_0 to P6_1 respectively
        p6_up = self.conv6_up(
            self.combine(weight[0] * p6_in +
                         weight[1] * self.p6_upsample(p7_in)))
        # Weights for P5_0 and P6_1 to P5_1
        p5_w1 = self.p5_w1_relu(self.p5_w1)
        weight = p5_w1 / (torch.sum(p5_w1, dim=0) + self.epsilon)
        # Connections for P5_0 and P6_1 to P5_1 respectively
        p5_up = self.conv5_up(
            self.combine(weight[0] * p5_in +
                         weight[1] * self.p5_upsample(p6_up)))
        # Weights for P4_0 and P5_1 to P4_1
        p4_w1 = self.p4_w1_relu(self.p4_w1)
        weight = p4_w1 / (torch.sum(p4_w1, dim=0) + self.epsilon)
        # Connections for P4_0 and P5_1 to P4_1 respectively
        p4_up = self.conv4_up(
            self.combine(weight[0] * p4_in +
                         weight[1] * self.p4_upsample(p5_up)))
        # Weights for P3_0 and P4_1 to P3_2
        p3_w1 = self.p3_w1_relu(self.p3_w1)
        weight = p3_w1 / (torch.sum(p3_w1, dim=0) + self.epsilon)
        # Connections for P3_0 and P4_1 to P3_2 respectively
        p3_out = self.conv3_up(
            self.combine(weight[0] * p3_in +
                         weight[1] * self.p3_upsample(p4_up)))
        if self.first_time:
            # On the first stage, P4/P5 skip connections come from the raw
            # backbone features via their own down-channel convs.
            p4_in = self.p4_level_connection(p4)
            p5_in = self.p5_level_connection(p5)
        # Bottom-up pathway: three-way fusion of the original input, the
        # top-down intermediate, and the downsampled level below.
        # Weights for P4_0, P4_1 and P3_2 to P4_2
        p4_w2 = self.p4_w2_relu(self.p4_w2)
        weight = p4_w2 / (torch.sum(p4_w2, dim=0) + self.epsilon)
        # Connections for P4_0, P4_1 and P3_2 to P4_2 respectively
        p4_out = self.conv4_down(
            self.combine(weight[0] * p4_in + weight[1] * p4_up +
                         weight[2] * self.p4_down_sample(p3_out)))
        # Weights for P5_0, P5_1 and P4_2 to P5_2
        p5_w2 = self.p5_w2_relu(self.p5_w2)
        weight = p5_w2 / (torch.sum(p5_w2, dim=0) + self.epsilon)
        # Connections for P5_0, P5_1 and P4_2 to P5_2 respectively
        p5_out = self.conv5_down(
            self.combine(weight[0] * p5_in + weight[1] * p5_up +
                         weight[2] * self.p5_down_sample(p4_out)))
        # Weights for P6_0, P6_1 and P5_2 to P6_2
        p6_w2 = self.p6_w2_relu(self.p6_w2)
        weight = p6_w2 / (torch.sum(p6_w2, dim=0) + self.epsilon)
        # Connections for P6_0, P6_1 and P5_2 to P6_2 respectively
        p6_out = self.conv6_down(
            self.combine(weight[0] * p6_in + weight[1] * p6_up +
                         weight[2] * self.p6_down_sample(p5_out)))
        # Weights for P7_0 and P6_2 to P7_2
        p7_w2 = self.p7_w2_relu(self.p7_w2)
        weight = p7_w2 / (torch.sum(p7_w2, dim=0) + self.epsilon)
        # Connections for P7_0 and P6_2 to P7_2
        p7_out = self.conv7_down(
            self.combine(weight[0] * p7_in +
                         weight[1] * self.p7_down_sample(p6_out)))
        return p3_out, p4_out, p5_out, p6_out, p7_out
@MODELS.register_module()
class BiFPN(BaseModule):
    """Stack of :class:`BiFPNStage` modules run sequentially.

    Args:
        num_stages (int): Number of BiFPN repeats.
        in_channels (List[int]): Input dims for P3, P4, P5.
        out_channels (int): Output dim for P2 - P7.
        start_level (int): Index of the first backbone feature to use.
        epsilon (float): Hyperparameter used in feature fusion.
        apply_bn_for_resampling (bool): Whether to use BN after resampling.
        conv_bn_act_pattern (bool): Whether to use the conv-bn-act pattern.
        norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for the
            normalization layer.
        init_cfg (MultiConfig): Initialization config.
    """

    def __init__(self,
                 num_stages: int,
                 in_channels: List[int],
                 out_channels: int,
                 start_level: int = 0,
                 epsilon: float = 1e-4,
                 apply_bn_for_resampling: bool = True,
                 conv_bn_act_pattern: bool = False,
                 norm_cfg: OptConfigType = dict(
                     type='BN', momentum=1e-2, eps=1e-3),
                 init_cfg: MultiConfig = None) -> None:
        super().__init__(init_cfg=init_cfg)
        self.start_level = start_level
        # Only the first stage consumes raw backbone features
        # (``first_time=True``); later stages take the previous stage's
        # five fused levels.
        stages = [
            BiFPNStage(
                in_channels=in_channels,
                out_channels=out_channels,
                first_time=(stage_idx == 0),
                apply_bn_for_resampling=apply_bn_for_resampling,
                conv_bn_act_pattern=conv_bn_act_pattern,
                norm_cfg=norm_cfg,
                epsilon=epsilon) for stage_idx in range(num_stages)
        ]
        self.bifpn = nn.Sequential(*stages)

    def forward(self, x):
        """Drop levels below ``start_level`` and run the BiFPN stack."""
        feats = x[self.start_level:]
        return self.bifpn(feats)
| 12,443 | 39.534202 | 77 | py |
ERD | ERD-main/projects/EfficientDet/efficientdet/utils.py | import math
from typing import Tuple, Union
import torch
import torch.nn as nn
from mmcv.cnn.bricks import Swish, build_norm_layer
from torch.nn import functional as F
from torch.nn.init import _calculate_fan_in_and_fan_out, trunc_normal_
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType
def variance_scaling_trunc(tensor, gain=1.):
    """Fill ``tensor`` in place with fan-in variance-scaled truncated normal.

    Args:
        tensor (Tensor): Tensor to initialize (modified in place).
        gain (float): Scaling factor applied before fan-in normalization.

    Returns:
        Tensor: The initialized ``tensor``.
    """
    fan_in, _ = _calculate_fan_in_and_fan_out(tensor)
    scaled_gain = gain / max(1.0, fan_in)
    # The magic constant corrects the std for the truncation of the
    # normal distribution (TensorFlow's variance_scaling convention).
    std = math.sqrt(scaled_gain) / .87962566103423978
    return trunc_normal_(tensor, 0., std)
@MODELS.register_module()
class Conv2dSamePadding(nn.Conv2d):
    """Conv2d with TensorFlow-style dynamic 'SAME' padding.

    The built-in padding is forced to zero and the input is padded
    (asymmetrically when the total pad is odd) at forward time so that
    the output size is ``ceil(input_size / stride)``.  The ``padding``
    constructor argument is accepted for signature compatibility but is
    not used.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Tuple[int, int]],
                 stride: Union[int, Tuple[int, int]] = 1,
                 padding: Union[int, Tuple[int, int]] = 0,
                 dilation: Union[int, Tuple[int, int]] = 1,
                 groups: int = 1,
                 bias: bool = True):
        # Pass padding=0 to the parent; actual padding happens in forward().
        super().__init__(in_channels, out_channels, kernel_size, stride, 0,
                         dilation, groups, bias)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        in_h, in_w = x.size()[-2:]
        kernel_h, kernel_w = self.weight.size()[-2:]
        # Total padding needed so output covers ceil(size / stride) steps.
        pad_w = (math.ceil(in_w / self.stride[1]) -
                 1) * self.stride[1] - in_w + kernel_w
        pad_h = (math.ceil(in_h / self.stride[0]) -
                 1) * self.stride[0] - in_h + kernel_h
        pad_left = pad_w // 2
        pad_top = pad_h // 2
        x = F.pad(x, [pad_left, pad_w - pad_left, pad_top, pad_h - pad_top])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)
class MaxPool2dSamePadding(nn.Module):
    """Max pooling with TensorFlow-style dynamic 'SAME' padding.

    The input is zero-padded (asymmetrically when the total pad is odd)
    so that the output size equals ``ceil(input_size / stride)``.
    """

    def __init__(self,
                 kernel_size: Union[int, Tuple[int, int]] = 3,
                 stride: Union[int, Tuple[int, int]] = 2,
                 **kwargs):
        super().__init__()
        self.pool = nn.MaxPool2d(kernel_size, stride, **kwargs)
        # Normalize stride / kernel_size to (h, w) pairs so forward()
        # can index them uniformly.
        stride_ = self.pool.stride
        kernel_ = self.pool.kernel_size
        self.stride = [stride_] * 2 if isinstance(stride_, int) else stride_
        self.kernel_size = ([kernel_] * 2
                            if isinstance(kernel_, int) else kernel_)

    def forward(self, x):
        in_h, in_w = x.shape[-2:]
        # Total padding needed per axis for 'SAME' output size.
        pad_w = (math.ceil(in_w / self.stride[1]) -
                 1) * self.stride[1] - in_w + self.kernel_size[1]
        pad_h = (math.ceil(in_h / self.stride[0]) -
                 1) * self.stride[0] - in_h + self.kernel_size[0]
        pad_left = pad_w // 2
        pad_top = pad_h // 2
        padded = F.pad(x, [pad_left, pad_w - pad_left, pad_top,
                           pad_h - pad_top])
        return self.pool(padded)
class DepthWiseConvBlock(nn.Module):
    """Depthwise-separable conv block: 3x3 depthwise + 1x1 pointwise.

    Optionally followed by a normalization layer and a Swish activation
    (the latter only when ``conv_bn_act_pattern`` is set).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        apply_norm: bool = True,
        conv_bn_act_pattern: bool = False,
        norm_cfg: OptConfigType = dict(type='BN', momentum=1e-2, eps=1e-3)
    ) -> None:
        super().__init__()
        # groups=in_channels -> one 3x3 filter per input channel.
        self.depthwise_conv = Conv2dSamePadding(
            in_channels,
            in_channels,
            kernel_size=3,
            stride=1,
            groups=in_channels,
            bias=False)
        # 1x1 conv mixes channels and maps to out_channels.
        self.pointwise_conv = Conv2dSamePadding(
            in_channels, out_channels, kernel_size=1, stride=1)
        self.apply_norm = apply_norm
        if apply_norm:
            self.bn = build_norm_layer(norm_cfg, num_features=out_channels)[1]
        self.apply_activation = conv_bn_act_pattern
        if conv_bn_act_pattern:
            self.swish = Swish()

    def forward(self, x):
        out = self.pointwise_conv(self.depthwise_conv(x))
        if self.apply_norm:
            out = self.bn(out)
        if self.apply_activation:
            out = self.swish(out)
        return out
class DownChannelBlock(nn.Module):
    """1x1 conv channel projector with optional norm and Swish.

    Used to align backbone channel counts with the BiFPN width.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        apply_norm: bool = True,
        conv_bn_act_pattern: bool = False,
        norm_cfg: OptConfigType = dict(type='BN', momentum=1e-2, eps=1e-3)
    ) -> None:
        super().__init__()
        self.down_conv = Conv2dSamePadding(in_channels, out_channels, 1)
        self.apply_norm = apply_norm
        if apply_norm:
            self.bn = build_norm_layer(norm_cfg, num_features=out_channels)[1]
        self.apply_activation = conv_bn_act_pattern
        if conv_bn_act_pattern:
            self.swish = Swish()

    def forward(self, x):
        out = self.down_conv(x)
        if self.apply_norm:
            out = self.bn(out)
        if self.apply_activation:
            out = self.swish(out)
        return out
| 4,897 | 30.6 | 78 | py |
ERD | ERD-main/projects/EfficientDet/efficientdet/huber_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor
from mmdet.models.losses.utils import weighted_loss
from mmdet.registry import MODELS
@weighted_loss
def huber_loss(pred: Tensor, target: Tensor, beta: float = 1.0) -> Tensor:
    """Huber loss: quadratic within ``+/-beta``, linear outside.

    Args:
        pred (Tensor): The prediction.
        target (Tensor): The learning target of the prediction.
        beta (float, optional): The threshold in the piecewise function.
            Defaults to 1.0.

    Returns:
        Tensor: Element-wise loss (reduction is handled by the
        ``weighted_loss`` decorator).
    """
    assert beta > 0
    if target.numel() == 0:
        # Empty target: keep a graph-connected zero so backward works.
        return pred.sum() * 0
    assert pred.size() == target.size()
    abs_err = torch.abs(pred - target)
    return torch.where(abs_err < beta, 0.5 * abs_err * abs_err,
                       beta * abs_err - 0.5 * beta * beta)
@MODELS.register_module()
class HuberLoss(nn.Module):
    """Huber loss module wrapping :func:`huber_loss`.

    Args:
        beta (float, optional): Threshold of the piecewise function.
            Defaults to 1.0.
        reduction (str, optional): Reduction method, one of "none",
            "mean" and "sum". Defaults to "mean".
        loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
    """

    def __init__(self,
                 beta: float = 1.0,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0) -> None:
        super().__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred: Tensor,
                target: Tensor,
                weight: Optional[Tensor] = None,
                avg_factor: Optional[int] = None,
                reduction_override: Optional[str] = None,
                **kwargs) -> Tensor:
        """Compute the weighted Huber loss.

        Args:
            pred (Tensor): The prediction.
            target (Tensor): The learning target of the prediction.
            weight (Tensor, optional): Per-prediction loss weight.
                Defaults to None.
            avg_factor (int, optional): Average factor used to average
                the loss. Defaults to None.
            reduction_override (str, optional): Overrides the configured
                reduction method for this call. Defaults to None.

        Returns:
            Tensor: Calculated loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override \
            else self.reduction
        raw_loss = huber_loss(
            pred,
            target,
            weight,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return self.loss_weight * raw_loss
| 2,888 | 30.402174 | 78 | py |
ERD | ERD-main/projects/EfficientDet/efficientdet/efficientdet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch
import torch.nn as nn
from mmcv.cnn.bricks import Swish, build_norm_layer
from mmengine.model import bias_init_with_prob
from torch import Tensor
from mmdet.models.dense_heads.anchor_head import AnchorHead
from mmdet.models.utils import images_to_levels, multi_apply
from mmdet.registry import MODELS
from mmdet.structures.bbox import cat_boxes, get_box_tensor
from mmdet.utils import (InstanceList, OptConfigType, OptInstanceList,
OptMultiConfig, reduce_mean)
from .utils import DepthWiseConvBlock
@MODELS.register_module()
class EfficientDetSepBNHead(AnchorHead):
    """EfficientDetHead with separate BN.

    The conv towers are shared across pyramid levels, but every level
    keeps its own BatchNorm layers (hence "separate BN").

    Args:
        num_classes (int): Number of categories.
        num_ins (int): Number of input feature maps (pyramid levels).
        in_channels (int): Number of channels in the input feature map.
        feat_channels (int): Number of hidden channels.
        stacked_convs (int): Number of repetitions of conv in each tower.
        norm_cfg (dict): Config dict for the normalization layer.
        init_cfg (dict or list[dict], optional): Initialization config.
        **kwargs: Forwarded to :class:`AnchorHead` (anchor_generator,
            bbox_coder, loss_cls, loss_bbox, train_cfg, test_cfg, ...).
    """

    def __init__(self,
                 num_classes: int,
                 num_ins: int,
                 in_channels: int,
                 feat_channels: int,
                 stacked_convs: int = 3,
                 norm_cfg: OptConfigType = dict(
                     type='BN', momentum=1e-2, eps=1e-3),
                 init_cfg: OptMultiConfig = None,
                 **kwargs) -> None:
        self.num_ins = num_ins
        self.stacked_convs = stacked_convs
        self.norm_cfg = norm_cfg
        super().__init__(
            num_classes=num_classes,
            in_channels=in_channels,
            feat_channels=feat_channels,
            init_cfg=init_cfg,
            **kwargs)

    def _init_layers(self) -> None:
        """Initialize layers of the head."""
        # Conv towers shared by all pyramid levels (no norm inside; BN is
        # applied per level from the lists below).
        self.reg_conv_list = nn.ModuleList()
        self.cls_conv_list = nn.ModuleList()
        for i in range(self.stacked_convs):
            channels = self.in_channels if i == 0 else self.feat_channels
            self.reg_conv_list.append(
                DepthWiseConvBlock(
                    channels, self.feat_channels, apply_norm=False))
            self.cls_conv_list.append(
                DepthWiseConvBlock(
                    channels, self.feat_channels, apply_norm=False))
        # One BN per (conv, level) pair — the "separate BN" of the class
        # name: indexed as [conv_idx][level_idx].
        self.reg_bn_list = nn.ModuleList([
            nn.ModuleList([
                build_norm_layer(
                    self.norm_cfg, num_features=self.feat_channels)[1]
                for j in range(self.num_ins)
            ]) for i in range(self.stacked_convs)
        ])
        self.cls_bn_list = nn.ModuleList([
            nn.ModuleList([
                build_norm_layer(
                    self.norm_cfg, num_features=self.feat_channels)[1]
                for j in range(self.num_ins)
            ]) for i in range(self.stacked_convs)
        ])
        # NOTE(review): the headers take ``in_channels`` as input although
        # the towers output ``feat_channels`` — this assumes
        # in_channels == feat_channels; confirm against the configs.
        self.cls_header = DepthWiseConvBlock(
            self.in_channels,
            self.num_base_priors * self.cls_out_channels,
            apply_norm=False)
        self.reg_header = DepthWiseConvBlock(
            self.in_channels, self.num_base_priors * 4, apply_norm=False)
        self.swish = Swish()

    def init_weights(self) -> None:
        """Initialize weights of the head."""
        for m in self.reg_conv_list:
            nn.init.constant_(m.pointwise_conv.bias, 0.0)
        for m in self.cls_conv_list:
            nn.init.constant_(m.pointwise_conv.bias, 0.0)
        # Focal-loss-style prior so classification starts near p=0.01.
        bias_cls = bias_init_with_prob(0.01)
        nn.init.constant_(self.cls_header.pointwise_conv.bias, bias_cls)
        nn.init.constant_(self.reg_header.pointwise_conv.bias, 0.0)

    def forward_single_bbox(self, feat: Tensor, level_id: int,
                            i: int) -> Tensor:
        """Apply the ``i``-th shared reg conv plus this level's own BN."""
        conv_op = self.reg_conv_list[i]
        bn = self.reg_bn_list[i][level_id]
        feat = conv_op(feat)
        feat = bn(feat)
        feat = self.swish(feat)
        return feat

    def forward_single_cls(self, feat: Tensor, level_id: int,
                           i: int) -> Tensor:
        """Apply the ``i``-th shared cls conv plus this level's own BN."""
        conv_op = self.cls_conv_list[i]
        bn = self.cls_bn_list[i][level_id]
        feat = conv_op(feat)
        feat = bn(feat)
        feat = self.swish(feat)
        return feat

    def forward(self, feats: Tuple[Tensor]) -> tuple:
        """Return per-level classification scores and bbox predictions.

        Args:
            feats (tuple[Tensor]): Multi-level features, one per pyramid
                level.

        Returns:
            tuple: ``(cls_scores, bbox_preds)``, each a list with one
            tensor per level.
        """
        cls_scores = []
        bbox_preds = []
        for level_id in range(self.num_ins):
            feat = feats[level_id]
            for i in range(self.stacked_convs):
                feat = self.forward_single_bbox(feat, level_id, i)
            bbox_pred = self.reg_header(feat)
            bbox_preds.append(bbox_pred)
        for level_id in range(self.num_ins):
            feat = feats[level_id]
            for i in range(self.stacked_convs):
                feat = self.forward_single_cls(feat, level_id, i)
            cls_score = self.cls_header(feat)
            cls_scores.append(cls_score)
        return cls_scores, bbox_preds

    def loss_by_feat(
            self,
            cls_scores: List[Tensor],
            bbox_preds: List[Tensor],
            batch_gt_instances: InstanceList,
            batch_img_metas: List[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Calculate the loss based on the features extracted by the detection
        head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                has shape (N, num_anchors * num_classes, H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict: A dictionary of loss components.
        """
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.prior_generator.num_levels
        device = cls_scores[0].device
        anchor_list, valid_flag_list = self.get_anchors(
            featmap_sizes, batch_img_metas, device=device)
        cls_reg_targets = self.get_targets(
            anchor_list,
            valid_flag_list,
            batch_gt_instances,
            batch_img_metas,
            batch_gt_instances_ignore=batch_gt_instances_ignore)
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         avg_factor) = cls_reg_targets
        # anchor number of multi levels
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        # concat all level anchors and flags to a single tensor
        concat_anchor_list = []
        for i in range(len(anchor_list)):
            concat_anchor_list.append(cat_boxes(anchor_list[i]))
        all_anchor_list = images_to_levels(concat_anchor_list,
                                           num_level_anchors)
        # Average over all ranks for consistent distributed training.
        avg_factor = reduce_mean(
            torch.tensor(avg_factor, dtype=torch.float, device=device)).item()
        avg_factor = max(avg_factor, 1.0)
        losses_cls, losses_bbox = multi_apply(
            self.loss_by_feat_single,
            cls_scores,
            bbox_preds,
            all_anchor_list,
            labels_list,
            label_weights_list,
            bbox_targets_list,
            bbox_weights_list,
            avg_factor=avg_factor)
        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)

    def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor,
                            anchors: Tensor, labels: Tensor,
                            label_weights: Tensor, bbox_targets: Tensor,
                            bbox_weights: Tensor, avg_factor: int) -> tuple:
        """Calculate the loss of a single scale level based on the features
        extracted by the detection head.

        Args:
            cls_score (Tensor): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W).
            bbox_pred (Tensor): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W).
            anchors (Tensor): Box reference for each scale level with shape
                (N, num_total_anchors, 4).
            labels (Tensor): Labels of each anchors with shape
                (N, num_total_anchors).
            label_weights (Tensor): Label weights of each anchor with shape
                (N, num_total_anchors)
            bbox_targets (Tensor): BBox regression targets of each anchor
                weight shape (N, num_total_anchors, 4).
            bbox_weights (Tensor): BBox regression loss weights of each anchor
                with shape (N, num_total_anchors, 4).
            avg_factor (int): Average factor that is used to average the loss.

        Returns:
            tuple: loss components.
        """
        # classification loss
        labels = labels.reshape(-1)
        label_weights = label_weights.reshape(-1)
        cls_score = cls_score.permute(0, 2, 3,
                                      1).reshape(-1, self.cls_out_channels)
        loss_cls = self.loss_cls(
            cls_score, labels, label_weights, avg_factor=avg_factor)
        # regression loss
        target_dim = bbox_targets.size(-1)
        bbox_targets = bbox_targets.reshape(-1, target_dim)
        bbox_weights = bbox_weights.reshape(-1, target_dim)
        bbox_pred = bbox_pred.permute(0, 2, 3,
                                      1).reshape(-1,
                                                 self.bbox_coder.encode_size)
        if self.reg_decoded_bbox:
            # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
            # is applied directly on the decoded bounding boxes, it
            # decodes the already encoded coordinates to absolute format.
            anchors = anchors.reshape(-1, anchors.size(-1))
            bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
            bbox_pred = get_box_tensor(bbox_pred)
        loss_bbox = self.loss_bbox(
            bbox_pred, bbox_targets, bbox_weights, avg_factor=avg_factor * 4)
        return loss_cls, loss_bbox
| 10,986 | 40.935115 | 79 | py |
ERD | ERD-main/projects/EfficientDet/efficientdet/tensorflow/anchor_generator.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple, Union
import torch
from torch import Tensor
from mmdet.models.task_modules.prior_generators.anchor_generator import \
AnchorGenerator
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes
DeviceType = Union[str, torch.device]
@TASK_UTILS.register_module()
class YXYXAnchorGenerator(AnchorGenerator):
    """Anchor generator that outputs anchors in ``(y1, x1, y2, x2)`` order,
    matching the TensorFlow EfficientDet convention (the parent class uses
    ``(x1, y1, x2, y2)``)."""

    def gen_single_level_base_anchors(self,
                                      base_size: Union[int, float],
                                      scales: Tensor,
                                      ratios: Tensor,
                                      center: Optional[Tuple[float]] = None) \
            -> Tensor:
        """Generate base anchors of a single level.

        Args:
            base_size (int | float): Basic size of an anchor.
            scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between the height
                and width of anchors in a single level.
            center (tuple[float], optional): The center of the base anchor
                related to a single feature grid. Defaults to None.

        Returns:
            torch.Tensor: Anchors in a single-level feature maps, in
            ``(y1, x1, y2, x2)`` order.
        """
        w = base_size
        h = base_size
        if center is None:
            x_center = self.center_offset * w
            y_center = self.center_offset * h
        else:
            x_center, y_center = center
        h_ratios = torch.sqrt(ratios)
        w_ratios = 1 / h_ratios
        # NOTE(review): both branches below are identical (scales-major
        # ordering), unlike the parent class where ``scale_major`` swaps
        # the multiplication order — confirm this is intentional.
        if self.scale_major:
            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
        else:
            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
        # use float anchor and the anchor's center is aligned with the
        # pixel center
        base_anchors = [
            y_center - 0.5 * hs,
            x_center - 0.5 * ws,
            y_center + 0.5 * hs,
            x_center + 0.5 * ws,
        ]
        base_anchors = torch.stack(base_anchors, dim=-1)
        return base_anchors

    def single_level_grid_priors(self,
                                 featmap_size: Tuple[int, int],
                                 level_idx: int,
                                 dtype: torch.dtype = torch.float32,
                                 device: DeviceType = 'cuda') -> Tensor:
        """Generate grid anchors of a single level.

        Note:
            This function is usually called by method ``self.grid_priors``.

        Args:
            featmap_size (tuple[int, int]): Size of the feature maps.
            level_idx (int): The index of corresponding feature map level.
            dtype (obj:`torch.dtype`): Date type of points.Defaults to
                ``torch.float32``.
            device (str | torch.device): The device the tensor will be put on.
                Defaults to 'cuda'.

        Returns:
            torch.Tensor: Anchors in the overall feature maps, in
            ``(y1, x1, y2, x2)`` order.
        """
        base_anchors = self.base_anchors[level_idx].to(device).to(dtype)
        feat_h, feat_w = featmap_size
        stride_w, stride_h = self.strides[level_idx]
        # First create Range with the default dtype, than convert to
        # target `dtype` for onnx exporting.
        shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w
        shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h
        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        # Shifts stacked as (y, x, y, x) to match the yxyx base anchors.
        shifts = torch.stack([shift_yy, shift_xx, shift_yy, shift_xx], dim=-1)
        # first feat_w elements correspond to the first row of shifts
        # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
        # shifted anchors (K, A, 4), reshape to (K*A, 4)
        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
        all_anchors = all_anchors.view(-1, 4)
        # first A rows correspond to A anchors of (0, 0) in feature map,
        # then (0, 1), (0, 2), ...
        if self.use_box_type:
            all_anchors = HorizontalBoxes(all_anchors)
        return all_anchors
| 4,261 | 37.745455 | 78 | py |
ERD | ERD-main/projects/EfficientDet/efficientdet/tensorflow/yxyx_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import numpy as np
import torch
from mmdet.models.task_modules.coders.delta_xywh_bbox_coder import \
DeltaXYWHBBoxCoder
from mmdet.registry import TASK_UTILS
from mmdet.structures.bbox import HorizontalBoxes, get_box_tensor
@TASK_UTILS.register_module()
class YXYXDeltaXYWHBBoxCoder(DeltaXYWHBBoxCoder):
    """Delta coder whose priors are in ``(y1, x1, y2, x2)`` order and whose
    deltas are ``(dy, dx, dh, dw)`` — the TensorFlow EfficientDet layout.
    Decoded boxes are returned in standard ``(x1, y1, x2, y2)`` order."""

    def encode(self, bboxes, gt_bboxes):
        """Get box regression transformation deltas that can be used to
        transform the ``bboxes`` into the ``gt_bboxes``.

        Args:
            bboxes (torch.Tensor or :obj:`BaseBoxes`): Source boxes,
                e.g., object proposals (read in yxyx order by
                ``YXbbox2delta``).
            gt_bboxes (torch.Tensor or :obj:`BaseBoxes`): Target of the
                transformation, e.g., ground-truth boxes.

        Returns:
            torch.Tensor: Box transformation deltas
        """
        bboxes = get_box_tensor(bboxes)
        gt_bboxes = get_box_tensor(gt_bboxes)
        assert bboxes.size(0) == gt_bboxes.size(0)
        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
        encoded_bboxes = YXbbox2delta(bboxes, gt_bboxes, self.means, self.stds)
        return encoded_bboxes

    def decode(self,
               bboxes,
               pred_bboxes,
               max_shape=None,
               wh_ratio_clip=16 / 1000):
        """Apply transformation `pred_bboxes` to `boxes`.

        Args:
            bboxes (torch.Tensor or :obj:`BaseBoxes`): Basic boxes. Shape
                (B, N, 4) or (N, 4)
            pred_bboxes (Tensor): Encoded offsets with respect to each roi.
                Has shape (B, N, num_classes * 4) or (B, N, 4) or
                (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
                when rois is a grid of anchors.Offset encoding follows [1]_.
            max_shape (Sequence[int] or torch.Tensor or Sequence[
                Sequence[int]],optional): Maximum bounds for boxes, specifies
                (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
                the max_shape should be a Sequence[Sequence[int]]
                and the length of max_shape should also be B.
            wh_ratio_clip (float, optional): The allowed ratio between
                width and height. Note that ``YXdelta2bbox`` applies
                ``abs(log(...))``, so 16/1000 and 1000/16 clamp identically.

        Returns:
            Union[torch.Tensor, :obj:`BaseBoxes`]: Decoded boxes.
        """
        bboxes = get_box_tensor(bboxes)
        assert pred_bboxes.size(0) == bboxes.size(0)
        if pred_bboxes.ndim == 3:
            assert pred_bboxes.size(1) == bboxes.size(1)
        if pred_bboxes.ndim == 2 and not torch.onnx.is_in_onnx_export():
            # single image decode
            decoded_bboxes = YXdelta2bbox(bboxes, pred_bboxes, self.means,
                                          self.stds, max_shape, wh_ratio_clip,
                                          self.clip_border, self.add_ctr_clamp,
                                          self.ctr_clamp)
        else:
            # Batched (or ONNX-export) decode path.
            if pred_bboxes.ndim == 3 and not torch.onnx.is_in_onnx_export():
                warnings.warn(
                    'DeprecationWarning: onnx_delta2bbox is deprecated '
                    'in the case of batch decoding and non-ONNX, '
                    'please use “delta2bbox” instead. In order to improve '
                    'the decoding speed, the batch function will no '
                    'longer be supported. ')
            decoded_bboxes = YXonnx_delta2bbox(bboxes, pred_bboxes, self.means,
                                               self.stds, max_shape,
                                               wh_ratio_clip, self.clip_border,
                                               self.add_ctr_clamp,
                                               self.ctr_clamp)
        if self.use_box_type:
            assert decoded_bboxes.size(-1) == 4, \
                ('Cannot warp decoded boxes with box type when decoded boxes'
                 'have shape of (N, num_classes * 4)')
            decoded_bboxes = HorizontalBoxes(decoded_bboxes)
        return decoded_bboxes
def YXdelta2bbox(rois,
                 deltas,
                 means=(0., 0., 0., 0.),
                 stds=(1., 1., 1., 1.),
                 max_shape=None,
                 hw_ratio_clip=1000 / 16,
                 clip_border=True,
                 add_ctr_clamp=False,
                 ctr_clamp=32):
    """Apply ``(dy, dx, dh, dw)`` deltas to yxyx rois; return xyxy boxes.

    Args:
        rois (Tensor): Base boxes in ``(y1, x1, y2, x2)`` order, shape (N, 4).
        deltas (Tensor): Encoded offsets ``(dy, dx, dh, dw)`` per class,
            shape (N, num_classes * 4) or (N, 4).
        means (Sequence[float]): Denormalizing means for delta coordinates.
        stds (Sequence[float]): Denormalizing stds for delta coordinates.
        max_shape (tuple[int, int], optional): (H, W) bound used for
            clipping decoded boxes when ``clip_border`` is True.
        hw_ratio_clip (float): Clamp for ``dh``/``dw``; the effective
            bound is ``abs(log(hw_ratio_clip))``, so the value and its
            reciprocal are equivalent.
        clip_border (bool): Whether to clip boxes to ``max_shape``.
        add_ctr_clamp (bool): Clamp the center shift to ``ctr_clamp``
            pixels (YOLOF-style).
        ctr_clamp (int): Maximum center shift in pixels.

    Returns:
        Tensor: Decoded boxes in ``(x1, y1, x2, y2)`` order, shape
        (N, num_classes * 4) or (N, 4).
    """
    num_bboxes = deltas.size(0)
    num_classes = deltas.size(1) // 4
    if num_bboxes == 0:
        return deltas
    flat_deltas = deltas.reshape(-1, 4)
    mean_t = flat_deltas.new_tensor(means).view(1, -1)
    std_t = flat_deltas.new_tensor(stds).view(1, -1)
    denorm = flat_deltas * std_t + mean_t
    delta_ctr = denorm[:, :2]   # (dy, dx)
    delta_size = denorm[:, 2:]  # (dh, dw)
    # Tile each roi once per class so rows align with the flattened deltas.
    tiled_rois = rois.repeat(1, num_classes).reshape(-1, 4)
    ctr = (tiled_rois[:, :2] + tiled_rois[:, 2:]) * 0.5  # (cy, cx)
    size = tiled_rois[:, 2:] - tiled_rois[:, :2]         # (h, w)
    ctr_shift = size * delta_ctr
    max_ratio = np.abs(np.log(hw_ratio_clip))
    if add_ctr_clamp:
        ctr_shift = torch.clamp(ctr_shift, max=ctr_clamp, min=-ctr_clamp)
        delta_size = torch.clamp(delta_size, max=max_ratio)
    else:
        delta_size = delta_size.clamp(min=-max_ratio, max=max_ratio)
    new_ctr = ctr + ctr_shift
    new_size = size * delta_size.exp()
    tl = new_ctr - (new_size * 0.5)  # (ymin, xmin)
    br = new_ctr + (new_size * 0.5)  # (ymax, xmax)
    ymin = tl[:, 0].reshape(-1, 1)
    xmin = tl[:, 1].reshape(-1, 1)
    ymax = br[:, 0].reshape(-1, 1)
    xmax = br[:, 1].reshape(-1, 1)
    # Reorder into the standard xyxy layout for the output.
    bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1)
    if clip_border and max_shape is not None:
        bboxes[..., 0::2].clamp_(min=0, max=max_shape[1])
        bboxes[..., 1::2].clamp_(min=0, max=max_shape[0])
    return bboxes.reshape(num_bboxes, -1)
def YXbbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):
    """Compute ``(dy, dx, dh, dw)`` regression targets of proposals w.r.t gt.

    NOTE(review): the two inputs are read in different layouts —
    ``proposals`` as ``(y1, x1, y2, x2)`` and ``gt`` as
    ``(x1, y1, x2, y2)``; confirm against the callers (yxyx anchors vs
    standard xyxy ground truth).

    Args:
        proposals (Tensor): Boxes to be transformed, shape (N, ..., 4),
            in ``(y1, x1, y2, x2)`` order.
        gt (Tensor): Gt bboxes used as regression base, shape (N, ..., 4),
            in ``(x1, y1, x2, y2)`` order.
        means (Sequence[float]): Denormalizing means for delta coordinates.
        stds (Sequence[float]): Denormalizing stds for delta coordinates.

    Returns:
        Tensor: Deltas with shape (N, 4), columns ``(dy, dx, dh, dw)``.
    """
    assert proposals.size() == gt.size()
    prop = proposals.float()
    gt_boxes = gt.float()
    # Proposal centers / sizes from the yxyx layout.
    prop_cy = (prop[..., 0] + prop[..., 2]) * 0.5
    prop_cx = (prop[..., 1] + prop[..., 3]) * 0.5
    prop_h = prop[..., 2] - prop[..., 0]
    prop_w = prop[..., 3] - prop[..., 1]
    # Gt centers / sizes from the xyxy layout.
    gt_cx = (gt_boxes[..., 0] + gt_boxes[..., 2]) * 0.5
    gt_cy = (gt_boxes[..., 1] + gt_boxes[..., 3]) * 0.5
    gt_w = gt_boxes[..., 2] - gt_boxes[..., 0]
    gt_h = gt_boxes[..., 3] - gt_boxes[..., 1]
    dx = (gt_cx - prop_cx) / prop_w
    dy = (gt_cy - prop_cy) / prop_h
    dw = torch.log(gt_w / prop_w)
    dh = torch.log(gt_h / prop_h)
    deltas = torch.stack([dy, dx, dh, dw], dim=-1)
    mean_t = deltas.new_tensor(means).unsqueeze(0)
    std_t = deltas.new_tensor(stds).unsqueeze(0)
    return (deltas - mean_t) / std_t
def YXonnx_delta2bbox(rois,
                      deltas,
                      means=(0., 0., 0., 0.),
                      stds=(1., 1., 1., 1.),
                      max_shape=None,
                      wh_ratio_clip=16 / 1000,
                      clip_border=True,
                      add_ctr_clamp=False,
                      ctr_clamp=32):
    """Apply deltas to shift/scale base boxes (batched / ONNX-export path).

    Rois are read in ``(y1, x1, y2, x2)`` order and deltas in
    ``(dy, dx, dh, dw)`` order; decoded boxes are returned in standard
    ``(x1, y1, x2, y2)`` order.

    Typically the rois are anchor or proposed bounding boxes and the deltas
    are network outputs used to shift/scale those boxes.
    This is the inverse function of :func:`bbox2delta`.

    Args:
        rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4)
        deltas (Tensor): Encoded offsets with respect to each roi.
            Has shape (B, N, num_classes * 4) or (B, N, 4) or
            (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
            when rois is a grid of anchors.Offset encoding follows [1]_.
        means (Sequence[float]): Denormalizing means for delta coordinates.
            Default (0., 0., 0., 0.).
        stds (Sequence[float]): Denormalizing standard deviation for delta
            coordinates. Default (1., 1., 1., 1.).
        max_shape (Sequence[int] or torch.Tensor or Sequence[
            Sequence[int]],optional): Maximum bounds for boxes, specifies
            (H, W, C) or (H, W). If rois shape is (B, N, 4), then
            the max_shape should be a Sequence[Sequence[int]]
            and the length of max_shape should also be B. Default None.
        wh_ratio_clip (float): Maximum aspect ratio for boxes; the bound
            used is ``abs(log(wh_ratio_clip))``. Default 16 / 1000.
        clip_border (bool, optional): Whether clip the objects outside the
            border of the image. Default True.
        add_ctr_clamp (bool): Whether to add center clamp, when added, the
            predicted box is clamped is its center is too far away from
            the original anchor's center. Only used by YOLOF. Default False.
        ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
            Default 32.

    Returns:
        Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or
           (N, num_classes * 4) or (N, 4), where 4 represent
           tl_x, tl_y, br_x, br_y.

    References:
        .. [1] https://arxiv.org/abs/1311.2524
    """
    # Tile means/stds so they broadcast over all num_classes delta groups.
    means = deltas.new_tensor(means).view(1,
                                          -1).repeat(1,
                                                     deltas.size(-1) // 4)
    stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4)
    denorm_deltas = deltas * stds + means
    # Delta layout is (dy, dx, dh, dw) per group of four.
    dy = denorm_deltas[..., 0::4]
    dx = denorm_deltas[..., 1::4]
    dh = denorm_deltas[..., 2::4]
    dw = denorm_deltas[..., 3::4]
    # Rois are (y1, x1, y2, x2).
    y1, x1 = rois[..., 0], rois[..., 1]
    y2, x2 = rois[..., 2], rois[..., 3]
    # Compute center of each roi
    px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx)
    py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy)
    # Compute width/height of each roi
    pw = (x2 - x1).unsqueeze(-1).expand_as(dw)
    ph = (y2 - y1).unsqueeze(-1).expand_as(dh)
    dx_width = pw * dx
    dy_height = ph * dy
    max_ratio = np.abs(np.log(wh_ratio_clip))
    if add_ctr_clamp:
        dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp)
        dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp)
        dw = torch.clamp(dw, max=max_ratio)
        dh = torch.clamp(dh, max=max_ratio)
    else:
        dw = dw.clamp(min=-max_ratio, max=max_ratio)
        dh = dh.clamp(min=-max_ratio, max=max_ratio)
    # Use exp(network energy) to enlarge/shrink each roi
    gw = pw * dw.exp()
    gh = ph * dh.exp()
    # Use network energy to shift the center of each roi
    gx = px + dx_width
    gy = py + dy_height
    # Convert center-xy/width/height to top-left, bottom-right
    x1 = gx - gw * 0.5
    y1 = gy - gh * 0.5
    x2 = gx + gw * 0.5
    y2 = gy + gh * 0.5
    # Output is interleaved back into xyxy order.
    bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
    if clip_border and max_shape is not None:
        # clip bboxes with dynamic `min` and `max` for onnx
        if torch.onnx.is_in_onnx_export():
            # NOTE(review): `mmdet.core.export` is the legacy (2.x) module
            # path — verify it still exists in this mmdet version.
            from mmdet.core.export import dynamic_clip_for_onnx
            x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)
            bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
            return bboxes
        if not isinstance(max_shape, torch.Tensor):
            max_shape = x1.new_tensor(max_shape)
        max_shape = max_shape[..., :2].type_as(x1)
        if max_shape.ndim == 2:
            assert bboxes.ndim == 3
            assert max_shape.size(0) == bboxes.size(0)
        min_xy = x1.new_tensor(0)
        # flip turns (H, W) into (W, H) so the repeated bound lines up
        # with the interleaved x/y coordinates.
        max_xy = torch.cat(
            [max_shape] * (deltas.size(-1) // 2),
            dim=-1).flip(-1).unsqueeze(-2)
        bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
        bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
    return bboxes
| 15,367 | 40.535135 | 79 | py |
ERD | ERD-main/projects/EfficientDet/efficientdet/tensorflow/trans_max_iou_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
from mmengine.structures import InstanceData
from mmdet.models.task_modules.assigners.assign_result import AssignResult
from mmdet.models.task_modules.assigners.max_iou_assigner import MaxIoUAssigner
from mmdet.registry import TASK_UTILS
@TASK_UTILS.register_module()
class TransMaxIoUAssigner(MaxIoUAssigner):
    """Max-IoU assigner that transposes prior coordinates before matching.

    Behaves exactly like :class:`MaxIoUAssigner`, except that every prior's
    ``(x1, y1, x2, y2)`` coordinates are swapped to ``(y1, x1, y2, x2)``
    before the IoU computation, so priors stored in (y, x) order can be
    matched against ground-truth boxes stored in (x, y) order.
    """

    def assign(self,
               pred_instances: InstanceData,
               gt_instances: InstanceData,
               gt_instances_ignore: Optional[InstanceData] = None,
               **kwargs) -> AssignResult:
        """Assign a ground-truth box (or background) to every prior.

        Each prior receives -1 (ignore), 0 (background / negative) or a
        positive integer which is the 1-based index of the assigned gt box.
        The actual matching rules are implemented in
        :meth:`MaxIoUAssigner.assign_wrt_overlaps`; this override only
        prepares the (transposed) overlap matrix.

        Args:
            pred_instances (:obj:`InstanceData`): Must contain ``priors``
                with shape (n, 4); coordinates are interpreted in the
                transposed ``(y1, x1, y2, x2)`` order.
            gt_instances (:obj:`InstanceData`): Must contain ``bboxes``
                with shape (k, 4) and ``labels`` with shape (k, ).
            gt_instances_ignore (:obj:`InstanceData`, optional): Boxes to
                be ignored during training. Defaults to None.

        Returns:
            :obj:`AssignResult`: The assignment result.
        """
        boxes_gt = gt_instances.bboxes
        labels_gt = gt_instances.labels
        priors = pred_instances.priors
        boxes_ignore = (gt_instances_ignore.bboxes
                        if gt_instances_ignore is not None else None)

        # With very many gt boxes the overlap matrix can exhaust GPU
        # memory, so optionally fall back to the CPU for the assignment.
        use_cpu = (self.gpu_assign_thr > 0
                   and boxes_gt.shape[0] > self.gpu_assign_thr)
        if use_cpu:
            orig_device = priors.device
            priors = priors.cpu()
            boxes_gt = boxes_gt.cpu()
            labels_gt = labels_gt.cpu()
            if boxes_ignore is not None:
                boxes_ignore = boxes_ignore.cpu()

        # Swap each prior from (x1, y1, x2, y2) to (y1, x1, y2, x2).
        swapped_priors = torch.cat([
            priors[..., 1].view(-1, 1), priors[..., 0].view(-1, 1),
            priors[..., 3].view(-1, 1), priors[..., 2].view(-1, 1)
        ],
                                   dim=-1)

        overlaps = self.iou_calculator(boxes_gt, swapped_priors)

        # Mark priors that overlap ignored regions so they are excluded.
        if (self.ignore_iof_thr > 0 and boxes_ignore is not None
                and boxes_ignore.numel() > 0 and swapped_priors.numel() > 0):
            if self.ignore_wrt_candidates:
                iof = self.iou_calculator(
                    swapped_priors, boxes_ignore, mode='iof')
                iof_max, _ = iof.max(dim=1)
            else:
                iof = self.iou_calculator(
                    boxes_ignore, swapped_priors, mode='iof')
                iof_max, _ = iof.max(dim=0)
            overlaps[:, iof_max > self.ignore_iof_thr] = -1

        result = self.assign_wrt_overlaps(overlaps, labels_gt)
        if use_cpu:
            # Move the result back to the device the priors came from.
            result.gt_inds = result.gt_inds.to(orig_device)
            result.max_overlaps = result.max_overlaps.to(orig_device)
            if result.labels is not None:
                result.labels = result.labels.to(orig_device)
        return result
| 5,094 | 44.900901 | 79 | py |
ERD | ERD-main/.dev_scripts/download_checkpoints.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import math
import os
import os.path as osp
from multiprocessing import Pool
import torch
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
def download(url, out_file, min_bytes=math.pow(1024, 2), progress=True):
    """Download ``url`` to ``out_file``, retrying with curl on failure.

    First tries ``torch.hub.download_url_to_file``; if that raises or the
    resulting file is missing or too small, retries the download with
    ``curl`` (with retry and resume). Any remaining partial download not
    larger than ``min_bytes`` is removed before returning.

    Args:
        url (str): Source URL (http(s) or file scheme).
        out_file (str): Destination file path.
        min_bytes (float): Minimum acceptable file size in bytes; a file
            of this size or smaller is treated as a failed download.
            Defaults to 1 MB (``math.pow(1024, 2)``).
        progress (bool): Whether to display a progress bar. Defaults to
            True.
    """
    # math.pow(1024, 2) is mean 1 MB
    assert_msg = f"Downloaded url '{url}' does not exist " \
                 f'or size is < min_bytes={min_bytes}'
    try:
        print(f'Downloading {url} to {out_file}...')
        torch.hub.download_url_to_file(url, str(out_file), progress=progress)
        # Use an explicit raise instead of ``assert`` so the size check is
        # not stripped under ``python -O`` (which would silently skip the
        # curl fallback below).
        if not (osp.exists(out_file) and osp.getsize(out_file) > min_bytes):
            raise OSError(assert_msg)
    except Exception as e:
        if osp.exists(out_file):
            os.remove(out_file)
        print(f'ERROR: {e}\nRe-attempting {url} to {out_file} ...')
        os.system(f"curl -L '{url}' -o '{out_file}' --retry 3 -C -"
                  )  # curl download, retry and resume on fail
    finally:
        # ``<=`` keeps this consistent with the ``> min_bytes`` success
        # condition above (the original used ``<`` and so kept a file of
        # exactly min_bytes that the success check had rejected).
        if osp.exists(out_file) and osp.getsize(out_file) <= min_bytes:
            os.remove(out_file)  # remove partial downloads
        if not osp.exists(out_file):
            print(f'ERROR: {assert_msg}\n')
        print('=========================================\n')
def parse_args():
    """Build the CLI parser and parse ``sys.argv``.

    Returns:
        argparse.Namespace: Parsed options with attributes ``config``
        (positional), ``out`` (positional), ``nproc`` (int, default 16)
        and ``intranet`` (bool flag).
    """
    cli = argparse.ArgumentParser(description='Download checkpoints')
    cli.add_argument('config', help='test config file path')
    cli.add_argument(
        'out', type=str, help='output dir of checkpoints to be stored')
    cli.add_argument('--nproc', type=int, default=16, help='num of Processes')
    cli.add_argument(
        '--intranet',
        action='store_true',
        help='switch to internal network url')
    return cli.parse_args()
if __name__ == '__main__':
    # Entry point: read the model-zoo config, collect the checkpoints that
    # are not yet present in ``args.out``, and download them in parallel.
    args = parse_args()
    mkdir_or_exist(args.out)
    cfg = Config.fromfile(args.config)

    checkpoint_url_list = []
    checkpoint_out_list = []
    for model in cfg:
        model_infos = cfg[model]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            checkpoint = model_info['checkpoint']
            out_file = osp.join(args.out, checkpoint)
            if not osp.exists(out_file):
                url = model_info['url']
                if args.intranet is True:
                    # Rewrite the public URL to the intranet mirror.
                    url = url.replace('.com', '.sensetime.com')
                    url = url.replace('https', 'http')
                checkpoint_url_list.append(url)
                checkpoint_out_list.append(out_file)

    if len(checkpoint_url_list) > 0:
        # Use the pool as a context manager so the worker processes are
        # terminated and cleaned up even if a download raises (the
        # original never closed/joined the pool).
        with Pool(min(os.cpu_count(), args.nproc)) as pool:
            pool.starmap(download,
                         zip(checkpoint_url_list, checkpoint_out_list))
    else:
        print('No files to download!')
| 2,822 | 32.607143 | 77 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.