repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_chatglm/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Config-descriptor rule for the horizontal ChatGLM label trainer.
# Fix: the Optional() names in "__rule__" must match the keys declared in
# the "input" section below.  The originals — "adater_model" and
# "pretrain_model" — were typos of "adapter_model"/"pretrained_model" and
# could never match a real key, making those sections unvalidatable.
horizontal_chatglm_label_trainer_rule = {
    "identity": "label_trainer",
    "model_info": {
        "name": "horizontal_chatglm"
    },
    "input": {
        # Every name listed here must be a key of this "input" dict.
        "__rule__": [Optional("trainset"), Optional("adapter_model"), Optional("pretrained_model")],
        "trainset": [
            {
                "type": "QA",
                "path": String()
            }
        ],
        "pretrained_model": {
            "path": String()
        },
        "adapter_model": {
            "path": String()
        }
    },
    "output": {
        # [JOB_ID]/[NODE_ID] placeholders are substituted at runtime.
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]")
    },
    "train_info": {
        "train_params": {
            # Subset of HuggingFace TrainingArguments exposed to the config.
            "trainer": {
                "per_device_train_batch_size": Integer(1),
                "gradient_accumulation_steps": Integer(4),
                "save_strategy": OneOf("steps", "no"),
                "torch_compile": Bool(False),
                "no_cuda": Bool(False)
            }
        }
    }
}
| 1,127 | 27.2 | 97 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_chatglm/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.utils.auto_descriptor.torch.optimizer import optimizer
from common.utils.auto_descriptor.torch.lr_scheduler import lr_scheduler
from common.utils.auto_descriptor.torch.lossfunc import lossfunc
from common.utils.auto_descriptor.torch.metrics import metrics
from common.utils.utils import update_dict
from algorithm.core.metrics import metric_dict
# Config-descriptor rule for the horizontal ChatGLM assist trainer.
# Fixes:
#  * "__rule__" Optional() names now match the declared input keys
#    ("adapter_model", "pretrained_model"); the originals were typos.
#  * peft rule name "ADALOARA" corrected to "ADALORA" to match the key
#    defined below — otherwise the ADALORA branch could never be selected.
#  * LoRA "bias" option "loral_only" corrected to peft's "lora_only".
horizontal_chatglm_assist_trainer_rule = {
    "identity": "assist_trainer",
    "model_info": {
        "name": "horizontal_chatglm"
    },
    "input": {
        "__rule__": [Optional("trainset"), Optional("adapter_model"), Optional("pretrained_model")],
        "trainset": [
            {
                "type": "QA",
                "path": String()
            }
        ],
        "pretrained_model": {
            "path": String()
        },
        "adapter_model": {
            "path": String()
        }
    },
    "output": {
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]")
    },
    "train_info": {
        "train_params": {
            "aggregation": {
                # NOTE(review): plain float(0.2), unlike the Float(...)
                # descriptors used everywhere else — confirm whether a
                # literal default (not a checked field) is intended here.
                "agg_steps": float(0.2)
            },
            "encryption": {
                "__rule__": OneOf("otp", "plain").set_default("otp"),
                "otp": {
                    "key_bitlength": OneOf(64, 128).set_default(64),
                    "data_type": "torch.Tensor",
                    "key_exchange": {
                        "key_bitlength": OneOf(3072, 4096, 6144, 8192),
                        "optimized": Bool(True)
                    },
                    "csprng": {
                        "name": OneOf("hmac_drbg").set_default("hmac_drbg"),
                        "method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
                    }
                },
                "plain": {}
            },
            # Parameter-efficient fine-tuning method selection; keys mirror
            # the peft config classes.
            "peft": {
                "__rule__": OneOf("LORA", "PREFIX_TUNING", "ADALORA"),
                "LORA": {
                    "task_type": "CAUSAL_LM",
                    "r": Integer(8),
                    "target_modules": ["query_key_value"],
                    "lora_alpha": Integer(32),
                    "lora_dropout": Float(0.1),
                    "fan_in_fan_out": Bool(False),
                    "bias": OneOf("none", "all", "lora_only"),
                    "modules_to_save": None
                },
                "PREFIX_TUNING": {
                    "task_type": "CAUSAL_LM",
                    "pre_seq_len": Integer(20),
                    "prefix_projection": Bool(False)
                },
                "ADALORA": {
                    "task_type": "CAUSAL_LM",
                    "r": Integer(8),
                    "target_modules": ["query_key_value"],
                    "lora_alpha": Integer(32),
                    "lora_dropout": Float(0.1),
                    "fan_in_fan_out": Bool(False),
                    "bias": OneOf("none", "all", "lora_only"),
                    "modules_to_save": None,
                    "target_r": Integer(8),
                    "init_r": Integer(12),
                    "tinit": Integer(0),
                    "tfinal": Integer(0),
                    "deltaT": Integer(1),
                    "beta1": Float(0.85),
                    "beta2": Float(0.85),
                    "orth_reg_weight": Float(0.5)
                }
            },
            "trainer": {
                "per_device_train_batch_size": Integer(1),
                "gradient_accumulation_steps": Integer(4),
                "learning_rate": Float(1e-4),
                "weight_decay": Float(0),
                "adam_beta1": Float(0.9),
                "adam_beta2": Float(0.999),
                "adam_epsilon": Float(1e-8),
                "max_grad_norm": Float(1.0),
                "num_train_epochs": Integer(2),
                "save_strategy": OneOf("steps", "no"),
                "torch_compile": Bool(False),
                "no_cuda": Bool(False),
                "seed": Integer(42)
            },
            "dataset": {
                "max_src_length": Integer(100),
                "max_dst_length": Integer(100),
                "ignore_pad_token_for_loss": Bool(True)
            }
        }
    }
}
| 4,378 | 37.412281 | 109 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_xgboost/label_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Config-descriptor rule for the vertical XGBoost label trainer.
# Fix: the default name for "ks_plot_val" was "xgb_ks_plot_val[STAGE_ID].csv",
# missing the underscore before [STAGE_ID] that every sibling output uses.
vertical_xgboost_label_trainer_rule = {
    "identity": "label_trainer",
    "model_info": {
        "name": "vertical_xgboost"
    },
    "input": {
        "trainset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(True)
                }
            ).set_default_index(0)
        ],
        "valset": [
            RepeatableSomeOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(True)
                }
            ).set_default_indices(0)
        ]
    },
    "output": {
        # At least one of "model" / "proto_model" must be configured.
        "__rule__": [SomeOf("model", "proto_model")],
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "model": {
            "name": String("vertical_xgboost_[STAGE_ID].model")
        },
        "proto_model": {
            "name": String("vertical_xgboost_[STAGE_ID].pmodel")
        },
        "metric_train": {
            "name": String("xgb_metric_train_[STAGE_ID].csv")
        },
        "metric_val": {
            "name": String("xgb_metric_val_[STAGE_ID].csv")
        },
        "prediction_train": {
            "name": String("xgb_prediction_train_[STAGE_ID].csv")
        },
        "prediction_val": {
            "name": String("xgb_prediction_val_[STAGE_ID].csv")
        },
        "ks_plot_train": {
            "name": String("xgb_ks_plot_train_[STAGE_ID].csv")
        },
        "ks_plot_val": {
            "name": String("xgb_ks_plot_val_[STAGE_ID].csv")
        },
        "decision_table_train": {
            "name": String("xgb_decision_table_train_[STAGE_ID].csv")
        },
        "decision_table_val": {
            "name": String("xgb_decision_table_val_[STAGE_ID].csv")
        },
        "feature_importance": {
            "name": String("xgb_feature_importance_[STAGE_ID].csv")
        },
        "plot_ks": {
            "name": "xgb_plot_ks_[STAGE_ID].json"
        },
        "plot_roc": {
            "name": "xgb_plot_roc_[STAGE_ID].json"
        },
        "plot_lift": {
            "name": "xgb_plot_lift_[STAGE_ID].json"
        },
        "plot_gain": {
            "name": "xgb_plot_gain_[STAGE_ID].json"
        },
        "plot_precision_recall": {
            "name": "xgb_plot_precision_recall_[STAGE_ID].json"
        },
        "plot_feature_importance": {
            "name": "xgb_plot_feature_importance_[STAGE_ID].json"
        },
        "plot_loss": {
            "name": "xgb_plot_loss_[STAGE_ID].json"
        }
    },
    "train_info": {
        "interaction_params": {
            # -1 disables periodic checkpointing.
            "save_frequency": Integer(-1).ge(-1),
            "echo_training_metrics": Bool(True),
            "write_training_prediction": Bool(True),
            "write_validation_prediction": Bool(True)
        },
        "train_params": {
            "lossfunc": {
                "__rule__": OneOf("BCEWithLogitsLoss").set_default_index(0),
                "BCEWithLogitsLoss": {}
            },
            "num_trees": Integer(30).ge(1),
            "learning_rate": Float(0.3).gt(0),
            "gamma": Float(0),
            "lambda_": Float(1.0),
            "max_depth": Integer(3).ge(1),
            "num_bins": Integer(16).ge(2).le(65535),
            "min_split_gain": Float(0).ge(0),
            "min_sample_split": Integer(20).ge(1),
            "feature_importance_type": OneOf("gain", "split").set_default_index(0),
            "max_num_cores": Integer(999).ge(1),
            "batch_size_val": Integer(40960).ge(1),
            "downsampling": {
                "column": {
                    "rate": Float(1.0).gt(0).le(1)
                },
                "row": {
                    "run_goss": Bool(True),
                    "top_rate": Float(0.4).gt(0).le(1),
                    # Cross-field constraint: GOSS sampling rates must not
                    # add up to more than the whole dataset.
                    "other_rate": Float(0.4).gt(0).le(1).add_rule(lambda x, y: x + y["train_info"]["train_params"]["downsampling"]["row"]["top_rate"] <= 1, "top_rate + other_rate <=1")
                }
            },
            "category": {
                "cat_smooth": Float(1.0),
                "cat_features": {
                    "col_index": String(""),
                    "col_names": [Optional(RepeatableSomeOf(String("")))],
                    "max_num_value": Integer(0).ge(0),
                    "col_index_type": OneOf("inclusive", "exclusive").set_default_index(0),
                    "col_names_type": OneOf("inclusive", "exclusive").set_default_index(0),
                    "max_num_value_type": OneOf("intersection", "union").set_default_index(1)
                }
            },
            "metric": {
                "__rule__": [Optional("decision_table"), Required("acc", "precision", "recall", "f1_score", "auc", "ks")],
                "acc": {},
                "precision": {},
                "recall": {},
                "f1_score": {},
                "auc": {},
                "ks": {},
                "decision_table": {
                    "method": OneOf("equal_frequency", "equal_width").set_default_index(0),
                    "bins": Integer(10).ge(2)
                }
            },
            "early_stopping": {
                # The key here must be one of the keys configured under "metric".
                "key": OneOf("acc", "precision", "recall", "f1_score", "auc", "ks").set_default_index(-1).add_rule(lambda x, y: x in y["train_info"]["train_params"]["metric"].keys(), "should in metric"),
                "patience": Integer(10).ge(-1),
                "delta": Float(0.001).gt(0)
            },
            "encryption": {
                "__rule__": OneOf("paillier", "plain").set_default_index(0),
                "paillier": {
                    "key_bit_size": OneOf(2048, 4096, 8192).set_default_index(0),
                    "precision": Optional(Integer(7).ge(1)).set_default_not_none(),
                    "djn_on": Bool(True),
                    "parallelize_on": Bool(True)
                },
                "plain": {}
            }
        }
    }
}
| 6,331 | 36.247059 | 203 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_xgboost/sync.py | from x_types import String, Bool, Integer, Float, All
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Descriptor of the vertical-xgboost config items shared between parties.
# All() appears to mark a value/subtree that is accepted as-is during the
# sync check — presumably these fields must agree across label trainer and
# trainers; confirm against the checker implementation.
vertical_xgboost_sync_rule = {
    "train_info": {
        "interaction_params": All(),
        "train_params": {
            "lossfunc": All(),
            "num_trees": All(),
            "num_bins": All(),
            "batch_size_val": All(),
            "downsampling": {
                "row": {
                    # only GOSS on/off is synchronized; the rates are local
                    "run_goss": All()
                }
            },
            "encryption": All()
        }
    }
}
| 569 | 22.75 | 89 | py |
XFL | XFL-master/python/algorithm/config_descriptor/vertical_xgboost/trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
# Config-descriptor rule for the vertical XGBoost (non-label) trainer.
# Mirrors the label-trainer rule but without labels, metrics, or loss
# configuration — those live on the label-trainer side only.
vertical_xgboost_trainer_rule = {
    "identity": "trainer",
    "model_info": {
        "name": "vertical_xgboost"
    },
    "input": {
        # Trainer-side datasets carry no label column (has_label False).
        "trainset": [
            OneOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(False)
                }
            ).set_default_index(0)
        ],
        "valset": [
            RepeatableSomeOf(
                {
                    "type": "csv",
                    "path": String(""),
                    "name": String(""),
                    "has_id": Bool(True),
                    "has_label": Bool(False)
                }
            ).set_default_indices(0)
        ]
    },
    "output": {
        # At least one of "model" / "proto_model" must be configured.
        "__rule__": [SomeOf("model", "proto_model")],
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "model": {
            "name": String("vertical_xgboost_[STAGE_ID].model")
        },
        "proto_model": {
            "name": String("vertical_xgboost_[STAGE_ID].pmodel")
        }
    },
    "train_info": {
        "train_params": {
            "max_num_cores": Integer(999).ge(1),
            "downsampling": {
                "column": {
                    # Fraction of feature columns to sample, in (0, 1].
                    "rate": Float(1.0).gt(0).le(1)
                }
            },
            "category": {
                "cat_features": {
                    "col_index": String(""),
                    "col_names": [Optional(RepeatableSomeOf(String("")))],
                    "max_num_value": Integer(0).ge(0),
                    "col_index_type": OneOf("inclusive", "exclusive").set_default_index(0),
                    "col_names_type": OneOf("inclusive", "exclusive").set_default_index(0),
                    "max_num_value_type": OneOf("intersection", "union").set_default_index(1)
                }
            },
            # Batch sizes for internal processing, not sample counts.
            "advanced": {
                "row_batch": Integer(40000).ge(1),
                "col_batch": Integer(64).ge(1)
            }
        }
    }
}
| 2,239 | 31.463768 | 93 | py |
XFL | XFL-master/python/algorithm/config_descriptor/horizontal_binning_woe_iv/assist_trainer.py | from common.checker.x_types import String, Bool, Integer, Float, Any
from common.checker.qualifiers import OneOf, SomeOf, RepeatableSomeOf, Required, Optional
from common.utils.auto_descriptor.torch.optimizer import optimizer
from common.utils.auto_descriptor.torch.lr_scheduler import lr_scheduler
from common.utils.auto_descriptor.torch.lossfunc import lossfunc
from common.utils.auto_descriptor.torch.metrics import metrics
from common.utils.utils import update_dict
from algorithm.core.metrics import metric_dict
# Config-descriptor rule for the horizontal binning WOE/IV assist trainer.
# Note it has no "input" section: this operator aggregates statistics from
# the other parties and only writes a result file.
horizontal_binning_woe_iv_assist_trainer_rule = {
    "identity": "assist_trainer",
    "model_info": {
        "name": "horizontal_binning_woe_iv"
    },
    "output": {
        "path": String("/opt/checkpoints/[JOB_ID]/[NODE_ID]"),
        "result": {
            "name": String("woe_iv_result_[STAGE_ID].json")
        }
    },
    "train_info": {
        "train_params": {
            # One-time-pad or plaintext aggregation; mirrors the encryption
            # section used by the other horizontal operators, except the
            # payload here is numpy arrays rather than torch tensors.
            "encryption": {
                "__rule__": OneOf("otp", "plain").set_default("otp"),
                "otp": {
                    "key_bitlength": OneOf(64, 128).set_default(64),
                    "data_type": "numpy.ndarray",
                    "key_exchange": {
                        "key_bitlength": OneOf(3072, 4096, 6144, 8192),
                        "optimized": Bool(True)
                    },
                    "csprng": {
                        "name": OneOf("hmac_drbg").set_default("hmac_drbg"),
                        "method": OneOf("sha1", "sha224", "sha256", "sha384", "sha512").set_default("sha256")
                    }
                },
                "plain": {}
            },
        }
    }
}
| 1,640 | 37.162791 | 109 | py |
XFL | XFL-master/python/algorithm/core/encryption_param.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Union, Dict, Any
from common.utils.constants import CKKS, PAILLIER, PLAIN, OTP
# used by xgboost
# Empty marker base for the encryption parameter classes below.
# NOTE(review): none of the concrete *Param classes actually inherit from
# it — confirm whether that is intentional or an oversight.
class EncryptionParam(object):
    pass
class PaillierParam(object):
    """Configuration holder for Paillier homomorphic encryption.

    Stores the key size, fixed-point precision, and the DJN / parallel
    execution switches; the method tag is always PAILLIER.
    """

    def __init__(self,
                 key_bit_size: int = 2048,
                 precision: Optional[int] = 7,
                 djn_on: bool = True,
                 parallelize_on: bool = False):
        self.method = PAILLIER
        # Plain attribute copies; assignment order is irrelevant here.
        self.parallelize_on = parallelize_on
        self.djn_on = djn_on
        self.precision = precision
        self.key_bit_size = key_bit_size
class CKKSParam(object):
    """Configuration holder for the CKKS homomorphic encryption scheme.

    Fix: the constructor stored the polynomial modulus degree only under
    the misspelled attribute ``poly_modulus_degress``.  The correctly
    spelled ``poly_modulus_degree`` is now set as well; the misspelled
    name is kept as a deprecated alias so any existing readers of it
    keep working.
    """

    def __init__(self,
                 poly_modulus_degree: int = 8192,
                 coeff_mod_bit_sizes: List[int] = [60, 40, 40, 60],
                 global_scale_bit_size: int = 40):
        self.method = CKKS
        self.poly_modulus_degree = poly_modulus_degree
        # Deprecated misspelled alias, retained for backward compatibility.
        self.poly_modulus_degress = poly_modulus_degree
        self.coeff_mod_bit_sizes = coeff_mod_bit_sizes
        self.global_scale_bit_size = global_scale_bit_size
class OTPParam(object):
    """Configuration holder for one-time-pad (OTP) aggregation encryption.

    Fix: the constructor stored the payload type only under the misspelled
    attribute ``data_tyep``.  The correctly spelled ``data_type`` is now
    set as well; the misspelled name is kept as a deprecated alias so any
    existing readers of it keep working.
    """

    def __init__(self,
                 key_bitlength: int = 64,
                 data_type: str = "torch.Tensor",
                 key_exchange: Dict[str, Any] = None,
                 csprng: Dict[str, Any] = None):
        self.method = OTP
        self.key_bitlength = key_bitlength
        self.data_type = data_type
        # Deprecated misspelled alias, retained for backward compatibility.
        self.data_tyep = data_type
        self.key_exchange = key_exchange
        self.csprng = csprng
class PlainParam(object):
    """Parameter holder for plaintext (no-encryption) mode."""

    def __init__(self):
        # Only the method tag is needed; plain mode has no tuning knobs.
        self.method = PLAIN
def get_encryption_param(method: str, params: Optional[dict] = None) -> Union[PlainParam, PaillierParam, CKKSParam, OTPParam]:
    """Build the parameter object matching an encryption method name.

    Args:
        method: one of the PLAIN / PAILLIER / CKKS / OTP constants.
        params: keyword arguments for the chosen parameter class; None
            means "use the class defaults".

    Returns:
        The corresponding *Param instance.

    Raises:
        ValueError: if ``method`` is not a supported encryption method.

    Fixes: the original return annotation referenced the string constants
    PAILLIER/CKKS instead of the parameter classes, and a ``params=None``
    call crashed with ``**None`` on every branch except plain.
    """
    kwargs = params or {}
    if method == PLAIN:
        return PlainParam()
    elif method == PAILLIER:
        return PaillierParam(**kwargs)
    elif method == CKKS:
        return CKKSParam(**kwargs)
    elif method == OTP:
        return OTPParam(**kwargs)
    else:
        raise ValueError(f"Encryption method {method} not supported.")
| 2,593 | 31.425 | 106 | py |
XFL | XFL-master/python/algorithm/core/data_io.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
class CsvReader(object):
    """Thin pandas-backed reader for the project's CSV layout.

    Column convention: an optional id column first (becomes ``self.ids``),
    then an optional label column, then the feature columns.
    """

    def __init__(self,
                 path: str,
                 has_id: bool = True,
                 has_label: bool = True):
        # index_col=False tells pandas there is no index column at all.
        frame: pd.DataFrame = pd.read_csv(path, index_col=0 if has_id else False)
        self.table = frame
        self.ids = frame.index.to_numpy()
        frame.reset_index(drop=True, inplace=True)
        self.has_id = has_id
        self.has_label = has_label
        # Position of the label column: 0 when present, -1 sentinel otherwise.
        self.label_col = 0 if has_label else -1

    def features(self, type: str = "numpy.ndarray"):
        """Feature columns as a float32 ndarray, or a DataFrame otherwise."""
        block = self.table.iloc[:, self.label_col + 1:]
        if type != "numpy.ndarray":  # pandas.dataframe
            return block
        return block.to_numpy().astype(np.float32)

    def label(self, type: str = "numpy.ndarray"):
        """Label column (float32 ndarray or Series), or None when absent."""
        if self.label_col != 0:
            return None
        column = self.table.iloc[:, 0]
        if type == "numpy.ndarray":
            return column.to_numpy().astype(np.float32)
        return column

    def col_names(self):
        """All column names (label included, id excluded)."""
        return self.table.columns.tolist()

    def feature_names(self):
        """Feature column names; skips the label column when present."""
        start = 1 if self.has_label else 0
        return self.table.columns.tolist()[start:]

    def label_name(self):
        """Name of the label column, or None when absent."""
        if self.label_col != 0:
            return None
        return self.table.columns.tolist()[0]
class NpzReader(object):
    """Reader for .npz archives holding "data" and "labels" arrays."""

    def __init__(self,
                 path: str):
        # allow_pickle so archives containing object arrays still load
        self.data = np.load(path, allow_pickle=True)

    def features(self):
        """Feature matrix from the "data" entry, cast to float."""
        return self.data["data"].astype(float)

    def label(self):
        """Label vector from the "labels" entry, cast to float."""
        return self.data["labels"].astype(float)
class NdarrayIterator():
    """Fixed-size batch iterator over a numpy array.

    Unlike a standard Python iterator, exhaustion resets the cursor so the
    same object can be iterated again from the start.
    """

    def __init__(self, data: np.ndarray, batch_size: int):
        self.data = data
        self.bs = batch_size
        self.index = 0

    def __len__(self):
        # Number of rows, not number of batches.
        return len(self.data)

    def __iter__(self):
        return self

    def __next__(self):
        if self.index >= len(self.data):
            # Rewind for the next full pass, then signal end of this one.
            self.index = 0
            raise StopIteration
        start = self.index
        self.index = start + self.bs
        return self.data[start: start + self.bs]
class QADataset(Dataset):
    """Question/answer dataset read from one JSON file or a directory of them.

    Each JSON file holds ``{"instruction": str, "instances": [{key_query:
    str, key_answer: str}, ...]}``.  Every instance is rendered through
    ``prompt_pattern`` (instruction + query) and paired with its answer.

    Fix: the directory branch and the single-file branch duplicated the
    same parsing code; both now go through the shared ``_load_file`` helper.
    """

    def __init__(self,
                 file_name_or_path,
                 tokenizer,
                 max_src_length=200,
                 max_dst_length=500,
                 prompt_pattern="{}:\n问:{}\n答:",
                 key_query='input',
                 key_answer='output'):
        super().__init__()
        if os.path.isdir(file_name_or_path):
            data = []
            for file_name in os.listdir(file_name_or_path):
                data.extend(self._load_file(
                    os.path.join(file_name_or_path, file_name),
                    prompt_pattern, key_query, key_answer))
        elif os.path.isfile(file_name_or_path):
            data = self._load_file(
                file_name_or_path, prompt_pattern, key_query, key_answer)
        else:
            raise ValueError(f"Dataset path {file_name_or_path} is not a dir or a file name.")

        self.data = data
        self.tokenizer = tokenizer
        # self.prefix = prefix
        self.max_src_length = max_src_length
        self.max_dst_length = max_dst_length
        self.key_query = key_query
        self.key_answer = key_answer

    @staticmethod
    def _load_file(path, prompt_pattern, key_query, key_answer):
        # Parse one JSON file into a list of {"Q": prompt, "A": answer} dicts.
        with open(path, 'r') as fp:
            content = json.load(fp)
        instruction = content["instruction"]
        instances = content["instances"]
        return [
            {
                "Q": prompt_pattern.format(instruction, item[key_query]),
                "A": item[key_answer]
            }
            for item in instances
        ]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        """Tokenize one Q/A pair; source tokens are masked out (-100) in labels."""
        query, answer = self.data[index]["Q"], self.data[index]["A"]
        src_ids = self.tokenizer.encode(text=query, max_length=self.max_src_length, truncation=True)
        dst_ids = self.tokenizer.encode(text=answer, max_length=self.max_dst_length, truncation=True, add_special_tokens=False)
        input_ids = src_ids + dst_ids + [self.tokenizer.eos_token_id]
        # -100 is the conventional "ignore" index for cross-entropy loss.
        labels = [-100] * len(src_ids) + dst_ids + [self.tokenizer.eos_token_id]
        return {"input_ids": input_ids, "labels": labels}
# class QADataset(Dataset):
# def __init__(self,
# file_name_or_path,
# tokenizer,
# max_src_length=200,
# max_dst_length=500,
# ignore_pad_token_for_loss=True,
# prompt_pattern="{}:\n问:{}\n答:",
# key_query='input',
# key_answer='output'):
# super().__init__()
# if os.path.isdir(file_name_or_path):
# data = []
# for file_name in os.listdir(file_name_or_path):
# with open(os.path.join(file_name_or_path, file_name), 'r') as fp:
# content = json.load(fp)
# instruction = content["instruction"]
# instances = content["instances"]
# for item in instances:
# data.append(
# {
# "Q": prompt_pattern.format(instruction, item[key_query]),
# "A": item[key_answer]
# }
# )
# elif os.path.isfile(file_name_or_path):
# data = []
# with open(file_name_or_path, 'r') as fp:
# content = json.load(fp)
# instruction = content["instruction"]
# instances = content["instances"]
# for item in instances:
# data.append(
# {
# "Q": prompt_pattern.format(instruction, item[key_query]),
# "A": item[key_answer]
# }
# )
# else:
# raise ValueError(f"Dataset path {file_name_or_path} is not a dir or a file name.")
# self.data = data
# self.tokenizer = tokenizer
# # self.prefix = prefix
# self.max_src_length = max_src_length
# self.max_dst_length = max_dst_length
# self.ignore_pad_token_for_loss = ignore_pad_token_for_loss
# self.key_query = key_query
# self.key_answer = key_answer
# def __len__(self):
# return len(self.data)
# def __getitem__(self, index):
# query, answer = self.data[index]["Q"], self.data[index]["A"]
# # prompt_ids = tokenizer.encode(prompt, max_length=max_seq_length, truncation=True)
# # target_ids = tokenizer.encode(
# # target,
# # max_length=max_seq_length,
# # truncation=True,
# # add_special_tokens=False)
# # input_ids = prompt_ids + target_ids + [config.eos_token_id]
# src_ids = self.tokenizer.encode(text=query, add_special_tokens=False)
# dst_ids = self.tokenizer.encode(text=answer, add_special_tokens=False)
# if len(src_ids) > self.max_src_length - 1:
# src_ids = src_ids[: self.max_src_length - 1]
# if len(dst_ids) > self.max_dst_length - 2:
# dst_ids = dst_ids[: self.max_dst_length - 2]
# input_ids = self.tokenizer.build_inputs_with_special_tokens(src_ids, dst_ids)
# context_length = input_ids.index(self.tokenizer.bos_token_id)
# mask_position = context_length - 1
# labels = [-100] * context_length + input_ids[mask_position+1:]
# # from original project code, is it necessary?
# max_seq_length = self.max_src_length + self.max_dst_length
# pad_len = max_seq_length - len(input_ids)
# input_ids += [self.tokenizer.pad_token_id] * pad_len
# labels += [self.tokenizer.pad_token_id] * pad_len
# if self.ignore_pad_token_for_loss:
# labels = [(l if l != self.tokenizer.pad_token_id else -100) for l in labels]
# out = {
# "input_ids": input_ids,
# "labels": labels
# }
# return out
# class QADataset(Dataset):
# """
# [
# {
# "Q": "",
# "A": ""
# }
# ]
# """
# def __init__(self,
# file_name_or_path,
# tokenizer,
# max_src_length=200,
# max_dst_length=500,
# ignore_pad_token_for_loss=True,
# key_query='input',
# key_answer='output'):
# super().__init__()
# if os.path.isdir(file_name_or_path):
# data = []
# for file_name in os.listdir(file_name_or_path):
# with open(os.path.join(file_name_or_path, file_name), 'r') as fp:
# content = json.load(fp)
# instruction = content["instruction"]
# instances = content["instances"]
# for item in instances:
# data.append(
# {
# key_query: "{}:\n问:{}\n答:".format(instruction, item[key_query]),
# key_answer: item[key_answer]
# }
# )
# elif os.path.isfile(file_name_or_path):
# data = []
# with open(file_name_or_path, 'r') as fp:
# content = json.load(fp)
# instruction = content["instruction"]
# instances = content["instances"]
# for item in instances:
# data.append(
# {
# key_query: "{}:\n问:{}\n答:".format(instruction, item["input"]),
# key_answer: item["output"]
# }
# )
# else:
# raise ValueError(f"Dataset path {file_name_or_path} is not a dir or a file name.")
# self.data = data
# self.tokenizer = tokenizer
# # self.prefix = prefix
# self.max_src_length = max_src_length
# self.max_dst_length = max_dst_length
# self.ignore_pad_token_for_loss = ignore_pad_token_for_loss
# self.key_query = key_query
# self.key_answer = key_answer
# def __len__(self):
# return len(self.data)
# def __getitem__(self, index):
# query, answer = self.data[index][self.key_query], self.data[index][self.key_answer]
# # if self.prefix:
# # query = self.prefix + query
# src_ids = self.tokenizer.encode(text=query, add_special_tokens=False)
# dst_ids = self.tokenizer.encode(text=answer, add_special_tokens=False)
# if len(src_ids) > self.max_src_length - 1:
# src_ids = src_ids[: self.max_src_length - 1]
# if len(dst_ids) > self.max_dst_length - 2:
# dst_ids = dst_ids[: self.max_dst_length - 2]
# input_ids = self.tokenizer.build_inputs_with_special_tokens(src_ids, dst_ids)
# context_length = input_ids.index(self.tokenizer.bos_token_id)
# mask_position = context_length - 1
# labels = [-100] * context_length + input_ids[mask_position+1:]
# # from original project code, is it necessary?
# max_seq_length = self.max_src_length + self.max_dst_length
# pad_len = max_seq_length - len(input_ids)
# input_ids += [self.tokenizer.pad_token_id] * pad_len
# labels += [self.tokenizer.pad_token_id] * pad_len
# if self.ignore_pad_token_for_loss:
# labels = [(l if l != self.tokenizer.pad_token_id else -100) for l in labels]
# out = {
# "input_ids": input_ids,
# "labels": labels
# }
# return out
# def collate_fn_for_qa(batch):
# input_ids = []
# # attention_mask = []
# labels = []
# # position_ids = []
# for obj in batch:
# input_ids.append(obj['input_ids'])
# labels.append(obj['labels'])
# # attention_mask.append(obj['attention_mask'])
# # position_ids.append(obj['position_ids'])
# return {
# 'input_ids': torch.stack(input_ids),
# 'attention_mask': torch.stack(attention_mask),
# 'labels': torch.stack(labels),
# 'position_ids':torch.stack(position_ids)
# } | 13,829 | 36.177419 | 127 | py |
XFL | XFL-master/python/algorithm/core/metrics.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
import numpy as np
from sklearn import metrics as sklearn_metrics
from common.xregister import xregister
from common.utils.auto_descriptor.torch.metrics import metrics
# Maps the short metric aliases used in configs to resolvable function
# names: either a function in sklearn.metrics or one defined in this
# module (e.g. "ks" / "rmse" -> root_mean_squared_error below).
metric_dict = {
    # "accuracy": "accuracy_score",
    "acc": "accuracy_score",
    "precision": "precision_score",
    "recall": "recall_score",
    "auc": "roc_auc_score",
    "mape": "mean_absolute_percentage_error",
    "mse": "mean_squared_error",
    "mae": "mean_absolute_error",
    "r2": "r2_score",
    "median_ae": "median_absolute_error",
    "rmse": "root_mean_squared_error"
}
def get_metric(name: str):
    """Resolve a metric name or alias to a callable.

    Lookup order: alias table (metric_dict) -> sklearn.metrics ->
    functions defined in this module -> the xregister registry.

    Raises:
        ValueError: when the name cannot be resolved anywhere.
    """
    # Fix: the original "else: name = name" branch was a no-op;
    # dict.get with a fallback expresses the alias step directly.
    name = metric_dict.get(name, name)
    if name in dir(sklearn_metrics):
        metric = getattr(sklearn_metrics, name)
    elif name in dir(sys.modules[__name__]):
        metric = getattr(sys.modules[__name__], name)
    elif name in xregister.registered_object:
        metric = xregister(name)
    else:
        raise ValueError(f"Metric {name} is not defined.")
    return metric
# def list_metrics():
# names = set(metric_dict.keys()) + \
# set(metrics.keys()) - set(metric_dict.values()) + \
# set(dir(sys.modules[__name__])) + \ # 不太对,多了
# set(xregister.registered_object.keys())
# return names
def ks(y_true, y_pred):
    """Kolmogorov-Smirnov statistic: the largest TPR-FPR gap, floored at 0."""
    fpr, tpr, _ = sklearn_metrics.roc_curve(y_true, y_pred)
    gap = tpr - fpr
    return max(np.max(gap), 0)
def root_mean_squared_error(y_true, y_pred):
    """RMSE: square root of sklearn's mean_squared_error."""
    return math.sqrt(sklearn_metrics.mean_squared_error(y_true, y_pred))
# if __name__ == "__main__":
# print(dir(sys.modules[__name__]))
| 2,348 | 29.506494 | 74 | py |
XFL | XFL-master/python/algorithm/core/lr_scheduler/jax_lr_scheduler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import optax
from common.xregister import xregister
def get_lr_scheduler(name: str):
    """Resolve a learning-rate schedule by name for the jax backend.

    Lookup order: optax -> functions in this module -> xregister registry.
    Raises ValueError when the name resolves nowhere.
    """
    if name in dir(optax):
        return getattr(optax, name)
    this_module = sys.modules[__name__]
    if name in dir(this_module):
        return getattr(this_module, name)
    if name in xregister.registered_object:
        return xregister(name)
    raise ValueError(f"Scheduler {name} is not supported in jax.")
return scheduler | 1,057 | 34.266667 | 74 | py |
XFL | XFL-master/python/algorithm/core/lr_scheduler/paddle_lr_scheduler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle.optimizer.lr as paddle_lr_scheduler
from common.xregister import xregister
def get_lr_scheduler(name: str):
    """Resolve a learning-rate scheduler by name for the paddle backend.

    Lookup order: paddle.optimizer.lr -> functions in this module ->
    the xregister registry.

    Raises:
        ValueError: when the name resolves nowhere.
    """
    if name in dir(paddle_lr_scheduler):
        scheduler = getattr(paddle_lr_scheduler, name)
    elif name in dir(sys.modules[__name__]):
        scheduler = getattr(sys.modules[__name__], name)
    elif name in xregister.registered_object:
        scheduler = xregister(name)
    else:
        # Fix: the message said "torch" (copy-paste from the torch variant)
        # although this resolver is for paddle.
        raise ValueError(f"Scheduler {name} is not supported in paddle.")
    return scheduler
return scheduler | 1,124 | 36.5 | 74 | py |
XFL | XFL-master/python/algorithm/core/lr_scheduler/torch_lr_scheduler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import torch.optim.lr_scheduler as lr_scheduler
from common.xregister import xregister
def get_lr_scheduler(name: str):
    """Resolve a torch learning-rate scheduler class by name.

    Lookup order: torch.optim.lr_scheduler -> functions in this module ->
    the xregister registry.  Raises ValueError when nothing matches.
    """
    if name in dir(lr_scheduler):
        return getattr(lr_scheduler, name)
    if name in dir(sys.modules[__name__]):
        return getattr(sys.modules[__name__], name)
    if name in xregister.registered_object:
        return xregister(name)
    raise ValueError(f"Scheduler {name} is not supported in torch.")
return scheduler | 1,108 | 35.966667 | 74 | py |
XFL | XFL-master/python/algorithm/core/horizontal/aggregation/aggregation_plain.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, OrderedDict, Tuple
import torch
import numpy as np
from service.fed_config import FedConfig
from .aggregation_base import AggregationRootBase, AggregationLeafBase
class AggregationPlainLeaf(AggregationLeafBase):
    """Leaf-side participant for plain (unencrypted) aggregation."""

    def __init__(self, sec_conf: dict, root_id: str = '', leaf_ids: list[str] = []) -> None:
        # super().__init__(sec_conf, root_id, leaf_ids)
        super().__init__(sec_conf, root_id, FedConfig.node_id)
        # Default the leaf set to all label trainers + trainers in the job.
        self.leaf_ids = leaf_ids or FedConfig.get_label_trainer() + FedConfig.get_trainer()

    def _calc_upload_value(self, parameters: OrderedDict, parameters_weight: float) -> Tuple[OrderedDict, float]:
        """Scale each parameter by its weight (torch tensors moved to CPU first)."""
        weighted = OrderedDict()
        for key, value in parameters.items():
            if isinstance(value, torch.Tensor):
                value = value.cpu()
            weighted[key] = value * parameters_weight
        return (weighted, parameters_weight)
class AggregationPlainRoot(AggregationRootBase):
    """Root-side plain (unencrypted) weighted aggregation."""

    def __init__(self, sec_conf: dict, root_id: str = '', leaf_ids: list[str] = []) -> None:
        super().__init__(sec_conf, root_id, leaf_ids)

    def _calc_aggregated_params(self, received_value: List, average=True) -> OrderedDict:
        """Sum the leaves' weighted parameters and optionally divide by total weight.

        ``received_value`` is a list of (parameters, weight) tuples as
        produced by AggregationPlainLeaf._calc_upload_value.  The sums are
        accumulated IN PLACE into received_value[0][0], which is also the
        returned object.
        """
        total_weight = sum([item[1] for item in received_value])
        # NOTE(review): `parameters` only determines which key set is
        # iterated (initial_parameters if set); its VALUES are never used —
        # the result always accumulates into received_value[0][0].
        # Presumably intentional, but worth confirming.
        if self.initial_parameters is not None:
            parameters = self.initial_parameters
        else:
            parameters = received_value[0][0]
        for k in parameters.keys():
            for item in received_value[1:]:
                received_value[0][0][k] += item[0][k]
            # Case 1: numpy float arrays — divide in place directly.
            if received_value[0][0][k].dtype in [np.float32, np.float64]:
                if average:
                    received_value[0][0][k] /= total_weight
            # Case 2: values whose dtype is neither numpy-float nor
            # torch-float (e.g. integer torch tensors): cast to float32,
            # divide, then cast back to the original dtype.
            elif received_value[0][0][k].dtype not in [torch.float32, torch.float64]:
                ori_dtype = received_value[0][0][k].dtype
                received_value[0][0][k] = received_value[0][0][k].to(dtype=torch.float32)
                if average:
                    received_value[0][0][k] /= total_weight
                received_value[0][0][k] = received_value[0][0][k].to(dtype=ori_dtype)
            # Case 3: torch float32/float64 tensors — divide directly.
            else:
                if average:
                    received_value[0][0][k] /= total_weight
        return received_value[0][0]
| 2,982 | 39.310811 | 113 | py |
XFL | XFL-master/python/algorithm/core/horizontal/aggregation/aggregation_otp.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from functools import reduce
from itertools import combinations
from typing import Dict, List, OrderedDict, Tuple
import numpy as np
import torch
from common.communication.gRPC.python.commu import Commu
from common.crypto.csprng.drbg import get_drbg_inst
from common.crypto.csprng.drbg_base import DRBGBase
from common.crypto.key_agreement.diffie_hellman import DiffieHellman
from common.crypto.one_time_pad.component import OneTimePadContext, OneTimeKey, OneTimePadCiphertext
from common.crypto.one_time_pad.one_time_add import OneTimeAdd
from service.fed_config import FedConfig
from .aggregation_base import AggregationRootBase, AggregationLeafBase
# {
# "method": "otp",
# "key_bitlength": 128,
# "data_type": "torch.Tensor",
# "key_exchange": {
# "key_bitlength": 3072,
# "optimized": True
# },
# "csprng": {
# "name": "hmac_drbg",
# "method": "sha512",
# }
# }
def split_bytes(x: bytes, out_shape: Tuple[int]):
    """Recursively partition a byte string into a nested list of big-endian
    integers shaped like ``out_shape``.

    An empty shape yields a single integer from all of ``x``; otherwise ``x``
    is cut into ``out_shape[0]`` equal-length chunks and each chunk is split
    against the remaining dimensions.
    """
    if not out_shape:
        return int.from_bytes(x, 'big')
    chunk = len(x) // out_shape[0]
    pieces = [x[chunk * i: chunk * (i + 1)] for i in range(out_shape[0])]
    if len(out_shape) == 1:
        return [int.from_bytes(p, 'big') for p in pieces]
    return [split_bytes(p, out_shape[1:]) for p in pieces]
class AggregationOTPLeaf(AggregationLeafBase):
    """Leaf (trainer) side of one-time-pad (OTP) secure aggregation.

    On construction, this node runs a Diffie-Hellman key exchange with every
    other leaf (in parallel threads) to obtain shared entropy, which seeds one
    deterministic CSPRNG per peer. On upload, each weighted parameter tensor
    is masked with the pairwise one-time keys via OneTimeAdd, so the root only
    sees masked values.
    """
    def __init__(self, sec_conf: dict, root_id: str = '', leaf_ids: list[str] = []) -> None:
        """
        Args:
            sec_conf: security config with "key_exchange", "csprng",
                "key_bitlength" and "data_type" entries (see the example
                config comment above this class).
            root_id: id of the aggregation root node.
            leaf_ids: participating leaf ids; defaults to FedConfig's label
                trainers plus trainers.
        """
        # super().__init__(sec_conf, root_id, leaf_ids)
        super().__init__(sec_conf, root_id, FedConfig.node_id)
        self.leaf_ids = leaf_ids or FedConfig.get_label_trainer() + FedConfig.get_trainer()
        leaf_pairs = combinations(self.leaf_ids, 2)
        # key exchange: one Diffie-Hellman protocol per (this node, peer) pair
        key_exchange_conf = sec_conf["key_exchange"]
        df_protocols: Dict[str, DiffieHellman] = {}
        for _leaf_ids in leaf_pairs:
            if Commu.node_id in _leaf_ids:
                df_protocol = DiffieHellman(list(_leaf_ids),
                                            key_bitlength=key_exchange_conf['key_bitlength'],
                                            optimized=key_exchange_conf["optimized"],
                                            channel_name="otp_diffie_hellman")
                df_protocols[df_protocol.chan.remote_id] = df_protocol
        entropys: Dict[str, bytes] = {remote_id: None for remote_id in df_protocols}
        # sequential
        # for id in df_protocols:
        #     entropys[id] = df_protocols[id].exchange(out_bytes=True)
        # Run all pairwise exchanges concurrently, one thread per peer; each
        # thread writes its own distinct dict slot, so no lock is needed.
        def func(id):
            entropys[id] = df_protocols[id].exchange(out_bytes=True)
        thread_list = []
        for id in df_protocols:
            task = threading.Thread(target=func, args=(id,))
            thread_list.append(task)
        for task in thread_list:
            task.start()
        for task in thread_list:
            task.join()
        # csprng: seed one deterministic generator per peer with the shared entropy
        csprng_conf = sec_conf["csprng"]
        self.csprngs: OrderedDict[str, DRBGBase] = OrderedDict()
        # Per-peer mask sign, ordered like self.csprngs: the node with the
        # smaller id adds the pairwise key, the larger subtracts it —
        # presumably so the masks cancel in the root's sum (confirm in
        # OneTimeAdd).
        self.is_addition = []
        for remote_id in self.leaf_ids:
            if remote_id != Commu.node_id:
                self.csprngs[remote_id] = get_drbg_inst(name=csprng_conf["name"],
                                                        entropy=entropys[remote_id],
                                                        method=csprng_conf["method"],
                                                        nonce=b'',
                                                        additional_data=b'')
                self.is_addition.append(Commu.node_id < remote_id)
        # one-time-pad
        self.otp_context = OneTimePadContext(modulus_exp=sec_conf["key_bitlength"],
                                             data_type=sec_conf["data_type"])
    def _calc_upload_value(self, parameters: OrderedDict, parameters_weight: float) -> Tuple[OrderedDict, float]:
        """Weight the parameters and mask them with pairwise one-time keys.

        Args:
            parameters: model state to upload (name -> tensor/array).
            parameters_weight: this party's aggregation weight.

        Returns:
            (encrypted_parameters, parameters_weight), where each value is a
            (non-serialized) OneTimePad ciphertext of the weighted tensor.
        """
        # calculate total number of bytes of weights
        def f(t):
            return reduce(lambda x, y: x * y, t.shape, 1) * self.otp_context.modulus_exp // 8
        num_bytes_array = list(map(f, parameters.values()))
        csprng_generators = []
        for remote_id in self.csprngs:
            generator = self.csprngs[remote_id].generator(num_bytes=num_bytes_array,
                                                          additional_data=b'')
            csprng_generators.append(generator)
        weighted_parameters = OrderedDict()
        encrypted_parameters = OrderedDict()
        for k, v in parameters.items():
            if isinstance(v, torch.Tensor):
                v = v.cpu()
            weighted_parameters[k] = v * float(parameters_weight)
            # one_time_key = [np.array(split_bytes(bytes(next(g)), v.shape)) for g in csprng_generators]
            # Draw the next key block from every peer's CSPRNG and reshape the
            # raw bytes into big-endian integers matching the tensor's shape.
            one_time_key = []
            for g in csprng_generators:
                x = bytearray(next(g))
                y = split_bytes(x, v.shape)
                one_time_key.append(np.array(y))
            one_time_key = OneTimeKey(one_time_key, self.otp_context.modulus_exp)
            encrypted_parameters[k] = OneTimeAdd.encrypt(context_=self.otp_context,
                                                         data=weighted_parameters[k],
                                                         one_time_key=one_time_key,
                                                         is_addition=self.is_addition,
                                                         serialized=False)
        return (encrypted_parameters, parameters_weight)
class AggregationOTPRoot(AggregationRootBase):
    """Root (server) side of one-time-pad secure aggregation: sums the masked
    uploads from all leaves, decodes the accumulated ciphertext, and
    optionally divides by the total weight."""
    def __init__(self, sec_conf: dict, root_id: str = '', leaf_ids: list[str] = []) -> None:
        super().__init__(sec_conf, root_id, leaf_ids)
        # # one-time-pad
        # self.otp_context = OneTimePadContext(modulus_exp=sec_conf["key_bitlength"],
        #                                      data_type=sec_conf["data_type"])
        # modulus_exp=sec_conf["key_bitlength"]
        # self.dtype = np.uint64 if self.otp_context.modulus_exp == 64 else object
    def _calc_aggregated_params(self, received_value: List, average=True) -> OrderedDict:
        """Aggregate the (masked) uploads from all leaves in place.

        Args:
            received_value: list of (parameters, weight) tuples, one per leaf.
            average: if True, divide the decoded sum by the total weight.

        Returns:
            The aggregated parameter dict, accumulated into
            ``received_value[0][0]``.
        """
        total_weight = sum([item[1] for item in received_value])
        if self.initial_parameters is not None:
            parameters = self.initial_parameters
        else:
            parameters = received_value[0][0]
        # all_cipher: whether the reference entry already holds OTP
        # ciphertexts. If not, entry 0 appears to be treated as plaintext and
        # the ciphertext accumulation starts at entry 1 — NOTE(review):
        # confirm against the callers that mix plaintext initial parameters
        # with ciphertext uploads.
        all_cipher = True if isinstance(list(parameters.values())[0], OneTimePadCiphertext) else False
        idx = 1 if all_cipher else 2
        for k in parameters.keys():
            # Sum all remaining ciphertexts into entry idx-1, then decode the
            # accumulated ciphertext (the pairwise masks cancel in the sum).
            for item in received_value[idx:]:
                received_value[idx-1][0][k] += item[0][k]
            received_value[idx-1][0][k] = received_value[idx-1][0][k].decode()
            if average:
                if all_cipher:
                    received_value[0][0][k] /= total_weight
                else:
                    # Plaintext entry 0 plus the decoded ciphertext sum at 1.
                    received_value[0][0][k] = (received_value[0][0][k] + received_value[1][0][k]) / total_weight
        return received_value[0][0]
| 7,803 | 40.73262 | 113 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/agg_type.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def register_agg_type_for_assist_trainer(trainer: object, framework: str, agg_type: str):
    """Attach the aggregation-strategy hooks to an assist trainer.

    Imports are deliberately local so that only the selected framework's
    template module is loaded.

    Args:
        trainer: trainer object the strategy hooks are registered on.
        framework: deep-learning framework, one of "torch", "tensorflow", "jax".
        agg_type: aggregation algorithm; "torch" supports fedavg/fedprox/
            scaffold, "tensorflow" and "jax" support fedavg only.

    Raises:
        ValueError: if the framework or agg_type is not supported.
    """
    if framework == 'torch':
        if agg_type == "fedavg":
            from algorithm.core.horizontal.template.torch.fedavg.assist_trainer import FedAvgAssistTrainer
            FedAvgAssistTrainer(trainer).register()
        elif agg_type == "fedprox":
            from algorithm.core.horizontal.template.torch.fedprox.assist_trainer import FedProxAssistTrainer
            FedProxAssistTrainer(trainer).register()
        elif agg_type == "scaffold":
            from algorithm.core.horizontal.template.torch.scaffold.assist_trainer import SCAFFOLDAssistTrainer
            SCAFFOLDAssistTrainer(trainer).register()
        else:
            raise ValueError(f"Aggregation agg_type {agg_type} is not valid. Accepted agg_types are fedavg, fedprox, scaffold.")
    elif framework == 'tensorflow':
        if agg_type == "fedavg":
            from algorithm.core.horizontal.template.tensorflow.fedavg.assist_trainer import FedAvgAssistTrainer
            FedAvgAssistTrainer(trainer).register()
        else:
            raise ValueError(f"Aggregation agg_type {agg_type} is not valid. Accepted agg_types are fedavg.")
    elif framework == 'jax':
        if agg_type == "fedavg":
            from algorithm.core.horizontal.template.jax.fedavg.assist_trainer import FedAvgAssistTrainer
            FedAvgAssistTrainer(trainer).register()
        else:
            raise ValueError(f"Aggregation agg_type {agg_type} is not valid. Accepted agg_types are fedavg.")
    else:
        # Previously an unknown framework was silently ignored, leaving the
        # trainer without any aggregation hooks; fail fast instead.
        raise ValueError(f"Framework {framework} is not valid. Accepted frameworks are torch, tensorflow, jax.")
def register_agg_type_for_label_trainer(trainer: object, framework: str, agg_type: str):
    """Attach the aggregation-strategy hooks to a label trainer.

    Imports are deliberately local so that only the selected framework's
    template module is loaded.

    Args:
        trainer: trainer object the strategy hooks are registered on.
        framework: deep-learning framework, one of "torch", "tensorflow", "jax".
        agg_type: aggregation algorithm; "torch" supports fedavg/fedprox/
            scaffold, "tensorflow" and "jax" support fedavg only.

    Raises:
        ValueError: if the framework or agg_type is not supported.
    """
    if framework == 'torch':
        if agg_type == "fedavg":
            from algorithm.core.horizontal.template.torch.fedavg.label_trainer import FedAvgLabelTrainer
            FedAvgLabelTrainer(trainer).register()
        elif agg_type == "fedprox":
            from algorithm.core.horizontal.template.torch.fedprox.label_trainer import FedProxLabelTrainer
            FedProxLabelTrainer(trainer).register()
        elif agg_type == "scaffold":
            from algorithm.core.horizontal.template.torch.scaffold.label_trainer import SCAFFOLDLabelTrainer
            SCAFFOLDLabelTrainer(trainer).register()
        else:
            raise ValueError(f"Aggregation agg_type {agg_type} is not valid. Accepted agg_types are fedavg, fedprox, scaffold.")
    elif framework == 'tensorflow':
        if agg_type == "fedavg":
            from algorithm.core.horizontal.template.tensorflow.fedavg.label_trainer import FedAvgLabelTrainer
            FedAvgLabelTrainer(trainer).register()
        else:
            raise ValueError(f"Aggregation agg_type {agg_type} is not valid. Accepted agg_types are fedavg.")
    elif framework == 'jax':
        if agg_type == "fedavg":
            from algorithm.core.horizontal.template.jax.fedavg.label_trainer import FedAvgLabelTrainer
            FedAvgLabelTrainer(trainer).register()
        else:
            raise ValueError(f"Aggregation agg_type {agg_type} is not valid. Accepted agg_types are fedavg.")
    else:
        # Previously an unknown framework was silently ignored, leaving the
        # trainer without any aggregation hooks; fail fast instead.
        raise ValueError(f"Framework {framework} is not valid. Accepted frameworks are torch, tensorflow, jax.")
| 3,704 | 53.485294 | 128 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/torch/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import inspect
from functools import partial
from typing import OrderedDict
import torch.nn as nn
from algorithm.core.horizontal.aggregation.api import get_aggregation_root_inst
from algorithm.core.horizontal.aggregation.api import get_aggregation_leaf_inst
from algorithm.core.loss.torch_loss import get_lossfunc
from algorithm.core.metrics import get_metric
from algorithm.core.optimizer.torch_optimizer import get_optimizer
from algorithm.core.lr_scheduler.torch_lr_scheduler import get_lr_scheduler
from common.utils.config_parser import CommonConfigParser
from common.utils.algo_utils import earlyStoppingH
from common.utils.logger import logger
from common.utils.model_io import ModelIO
from algorithm.core.horizontal.template.hooker import Hooker
class BaseTrainer(Hooker):
    """Common scaffolding for horizontal (data-parallel) federated trainers
    built on PyTorch.

    Subclasses supply the model, dataloaders and a ``train_loop``; this base
    class builds loss/optimizer/scheduler/metrics from the parsed config,
    creates the aggregation endpoint matching this party's identity, and
    drives the global/local epoch loop in ``fit`` with hook points around
    every phase (strategy templates such as FedAvg/FedProx register at them).
    """
    def __init__(self, train_conf: dict):
        """
        Args:
            train_conf: raw stage configuration dict; parsed by
                CommonConfigParser.
        """
        Hooker.__init__(self)
        self.common_config = CommonConfigParser(train_conf)
        self.device = self.common_config.device
        # if self.common_config.early_stopping:
        self.earlystopping = earlyStoppingH(
            key=self.common_config.early_stopping.get("key", "acc"),
            patience=self.common_config.early_stopping.get("patience", -1),
            delta=self.common_config.early_stopping.get("delta", 0)
        )
        # Hook points fired by fit(), in execution order.
        self.declare_hooks([
            "before_global_epoch", "before_local_epoch", "before_train_loop",
            "after_train_loop", "after_local_epoch", "after_global_epoch"
        ])
        self._init_context()
        self.model = self._set_model()
        self.train_dataloader = self._set_train_dataloader()
        self.val_dataloader = self._set_val_dataloader()
        self.lossfunc = self._set_lossfunc()
        self.optimizer = self._set_optimizer()
        self.lr_scheduler = self._set_lr_scheduler(self.optimizer)
        self.metrics = self._set_metrics()
        self.aggregator = self._set_aggregator(self.common_config.identity)
    def _init_context(self):
        """Seed the shared hook context with epoch counters, config and
        early-stop bookkeeping."""
        self.context['g_epoch'] = 0
        self.context['l_epoch'] = 0
        self.context["config"] = self.common_config.config
        self.context["global_epoch_num"] = self.common_config.train_params.get("global_epoch", 0)
        self.context["local_epoch_num"] = self.common_config.train_params.get("local_epoch", 0)
        self.context["early_stop_flag"] = False
        self.context["early_stop_epoch"] = 0
    def _set_aggregator(self, party_type: str):
        """Build the aggregation endpoint: root for the assist_trainer,
        leaf for every other party type."""
        if party_type == "assist_trainer":
            aggregator = get_aggregation_root_inst(self.common_config.encryption)
        else:
            aggregator = get_aggregation_leaf_inst(self.common_config.encryption)
        return aggregator
    def _set_model(self) -> nn.Module:
        """Subclass hook: build and return the torch model."""
        raise NotImplementedError("The _set_model method is not implemented.")
    def _set_train_dataloader(self):
        """Subclass hook: build and return the training dataloader."""
        raise NotImplementedError(
            "The _set_train_dataloader method is not implemented.")
    def _set_val_dataloader(self):
        """Subclass hook: build and return the validation dataloader."""
        raise NotImplementedError(
            "The _set_val_dataloader method is not implemented.")
    def _save_model(self, final: bool, context: dict):
        """Persist the model as a torch state dict and/or an ONNX export.

        Args:
            final: True for the end-of-training save. If early stopping fired
                (and recorded a best epoch), the best epoch's checkpoint is
                copied as the final model; otherwise the current weights are
                saved. False for periodic saves every ``save_frequency``
                global epochs (-1 disables periodic saving).
            context: hook context (reads g_epoch / early_stop_flag /
                early_stop_epoch).
        """
        if not os.path.exists(self.common_config.output_dir):
            os.makedirs(self.common_config.output_dir)
        if final:
            # Bitwise & on two bools behaves like `and` here.
            if context["early_stop_flag"] & (context["early_stop_epoch"] > 0):
                # Promote the best-epoch checkpoint to be the final model.
                if self.common_config.output_model_name != "":
                    ModelIO.copy_best_model(
                        save_dir=self.common_config.output_dir,
                        model_name=self.common_config.output_model_name,
                        epoch=context["early_stop_epoch"],
                    )
                if self.common_config.output_onnx_model_name != "":
                    ModelIO.copy_best_model(
                        save_dir=self.common_config.output_dir,
                        model_name=self.common_config.output_onnx_model_name,
                        epoch=context["early_stop_epoch"],
                    )
            else:
                # No early stop: save the current weights without an epoch tag.
                if self.common_config.output_model_name != "":
                    ModelIO.save_torch_model(
                        state_dict=self.model.state_dict(),
                        save_dir=self.common_config.output_dir,
                        model_name=self.common_config.output_model_name,
                    )
                if self.common_config.output_onnx_model_name != "":
                    # ONNX export needs the input dimension from the model config.
                    input_dim = self.common_config.model_conf.get("input_dim")
                    if input_dim is None:
                        raise ValueError("input_dim is None")
                    ModelIO.save_torch_onnx(
                        model=self.model,
                        input_dim=(input_dim,),
                        save_dir=self.common_config.output_dir,
                        model_name=self.common_config.output_onnx_model_name,
                    )
        else:
            # Periodic checkpointing, tagged with the global epoch.
            if self.common_config.save_frequency == -1:
                return
            if context["g_epoch"] % self.common_config.save_frequency == 0:
                if self.common_config.output_model_name != "":
                    ModelIO.save_torch_model(
                        state_dict=self.model.state_dict(),
                        save_dir=self.common_config.output_dir,
                        model_name=self.common_config.output_model_name,
                        epoch=context["g_epoch"],
                    )
                if self.common_config.output_onnx_model_name != "":
                    input_dim = self.common_config.model_conf.get("input_dim")
                    if input_dim is None:
                        raise ValueError("input_dim is None")
                    ModelIO.save_torch_onnx(
                        model=self.model,
                        input_dim=(input_dim,),
                        save_dir=self.common_config.output_dir,
                        model_name=self.common_config.output_onnx_model_name,
                        epoch=context["g_epoch"],
                    )
    def _load_model(self, context: dict):
        """Load pretrained weights into the model if a pretrain path is
        configured; otherwise do nothing."""
        if self.common_config.pretrain_model_path != "":
            path = os.path.join(
                self.common_config.pretrain_model_path,
                self.common_config.pretrain_model_name
            )
            state_dict = ModelIO.load_torch_model(path, device=self.device)
            self.model.load_state_dict(state_dict)
    def _set_optimizer(self):
        """ Define self.optimizer """
        # Build one optimizer per configured name; kwargs are filtered down to
        # the parameters the optimizer's constructor actually accepts.
        optimizer_conf = OrderedDict(self.common_config.optimizer)
        optimizer = OrderedDict()
        for k, v in optimizer_conf.items():
            params = list(inspect.signature(get_optimizer(k)).parameters.values())
            accepted_keys = [param.name for param in params]
            # The comprehension's `k` is scoped to the comprehension and does
            # not clobber the loop variable.
            v = {k: v[k] for k in v if k in accepted_keys}
            optimizer[k] = get_optimizer(k)(self.model.parameters(), **v)
        return optimizer
    def _set_lossfunc(self):
        """ Define self.lossfunc """
        # One loss function per configured name, with kwargs filtered to the
        # factory's accepted parameters.
        lossfunc_conf = OrderedDict(self.common_config.lossfunc)
        lossfunc = OrderedDict()
        for k, v in lossfunc_conf.items():
            params = list(inspect.signature(get_lossfunc(k)).parameters.values())
            accepted_keys = [param.name for param in params]
            v = {k: v[k] for k in v if k in accepted_keys}
            lossfunc[k] = get_lossfunc(k)(**v)
        return lossfunc
    def _set_lr_scheduler(self, optimizer):
        """Build one LR scheduler per configured name, paired positionally
        with the optimizers (scheduler i wraps optimizer i)."""
        lr_scheduler_conf = OrderedDict(self.common_config.lr_scheduler)
        lr_scheduler = OrderedDict()
        for (k, v), o in zip(lr_scheduler_conf.items(), optimizer.values()):
            params = list(inspect.signature(get_lr_scheduler(k)).parameters.values())
            accepted_keys = [param.name for param in params]
            v = {k: v[k] for k in v if k in accepted_keys}
            lr_scheduler[k] = get_lr_scheduler(k)(o, **v)
        return lr_scheduler
    def _set_metrics(self):
        """ Define metric """
        # Each metric is a partial with its config kwargs pre-bound.
        metrics = {}
        metrics_conf: dict = self.common_config.metric
        for k, v in metrics_conf.items():
            params = list(inspect.signature(get_metric(k)).parameters.values())
            accepted_keys = [param.name for param in params]
            v = {k: v[k] for k in v if k in accepted_keys}
            metric = get_metric(k)
            metrics[k] = partial(metric, **v)
        return metrics
    def _state_dict_to_device(
        self, params: OrderedDict, device: str, inline: bool = True) -> OrderedDict:
        """Move every tensor of a state dict to ``device``.

        Args:
            params: state dict to move.
            device: target device string.
            inline: if True, mutate ``params``; otherwise operate on (and
                return) a deep copy.
        """
        if not inline:
            params = copy.deepcopy(params)
        for k, v in params.items():
            params[k] = v.to(device)
        return params
    def train_loop(self):
        """Subclass hook: one pass over the training data."""
        raise NotImplementedError("The train_loop method is not implemented.")
    def fit(self):
        """Run the nested global/local epoch loop, firing hooks around every
        phase. A truthy result from the before/after_local_epoch hooks (e.g.
        the early-stop flag) aborts the corresponding loop."""
        global_epoch_num = self.context["global_epoch_num"]
        local_epoch_num = self.context["local_epoch_num"]
        self.execute_hook_at("before_global_epoch")
        for g_epoch in range(1, global_epoch_num + 1):
            logger.info(f"global epoch {g_epoch}/{global_epoch_num} start...")
            self.context['g_epoch'] = g_epoch
            if self.execute_hook_at("before_local_epoch"):
                break
            for l_epoch in range(1, local_epoch_num + 1):
                logger.info(
                    f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} start...")
                self.context['l_epoch'] = l_epoch
                self.execute_hook_at("before_train_loop")
                self.train_loop()
                self.execute_hook_at("after_train_loop")
                logger.info(
                    f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} finished.")
            if self.execute_hook_at("after_local_epoch"):
                break
            logger.info(f"global epoch {g_epoch}/{global_epoch_num} finished.")
        self.execute_hook_at("after_global_epoch")
| 10,692 | 41.264822 | 99 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/torch/fedtype.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from service.fed_config import FedConfig
def _get_assist_trainer():
    """Resolve the assist-trainer template class selected by the current
    stage's aggregation_config; any unrecognized type falls back to FedAvg."""
    agg_conf = FedConfig.stage_config["train_info"]["params"]["aggregation_config"]
    agg_type = agg_conf.get("type")
    if agg_type == "fedprox":
        from algorithm.core.horizontal.template.torch.fedprox.assist_trainer import FedProxAssistTrainer
        return FedProxAssistTrainer
    if agg_type == "scaffold":
        from algorithm.core.horizontal.template.torch.scaffold.assist_trainer import SCAFFOLDAssistTrainer
        return SCAFFOLDAssistTrainer
    from algorithm.core.horizontal.template.torch.fedavg.assist_trainer import FedAvgAssistTrainer
    return FedAvgAssistTrainer
def _get_label_trainer():
    """Resolve the label-trainer template class selected by the current
    stage's aggregation_config; any unrecognized type falls back to FedAvg."""
    agg_conf = FedConfig.stage_config["train_info"]["params"]["aggregation_config"]
    agg_type = agg_conf.get("type")
    if agg_type == "fedprox":
        from algorithm.core.horizontal.template.torch.fedprox.label_trainer import FedProxLabelTrainer
        return FedProxLabelTrainer
    if agg_type == "scaffold":
        from algorithm.core.horizontal.template.torch.scaffold.label_trainer import SCAFFOLDLabelTrainer
        return SCAFFOLDLabelTrainer
    from algorithm.core.horizontal.template.torch.fedavg.label_trainer import FedAvgLabelTrainer
    return FedAvgLabelTrainer
XFL | XFL-master/python/algorithm/core/horizontal/template/torch/fedprox/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import OrderedDict
from algorithm.core.loss.torch_loss import get_lossfunc
from ..base import BaseTrainer
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationLeafBase
class FedProxLabelTrainer:
    """FedProx strategy hooks for a label trainer (leaf).

    Downloads the global model before each local-epoch round, uploads the
    local model afterwards, and provides a loss wrapper that adds the FedProx
    proximal term (mu/2) * ||w - w_global||^2.
    """
    def __init__(self, trainer: BaseTrainer):
        self.trainer = trainer
        # Proximal-term coefficient; 0 disables the FedProx regularization.
        self.mu = self.trainer.common_config.aggregation.get("mu", 0)
    def register(self):
        """Attach the FedProx hooks; lower rank runs earlier, so the order is
        sync-flag -> download model -> snapshot global params."""
        self.trainer.register_hook(
            place="before_local_epoch", rank=-3,
            func=self._sync_early_stop_flag, desc="sync early stop flag"
        )
        self.trainer.register_hook(
            place="before_local_epoch", rank=-2, func=self._download_model,
            desc="download global model"
        )
        self.trainer.register_hook(
            place="before_local_epoch", rank=-1, func=self._update_gmodel_params,
            desc="Update gmodel param"
        )
        self.trainer.register_hook(
            place="after_local_epoch", rank=-1, func=self._upload_model,
            desc="upload local model"
        )
    # if get True, means the training is finished
    def _sync_early_stop_flag(self, context: dict):
        """Receive the early-stop flag from the root; returning True makes
        fit() break out of the epoch loop."""
        aggregator: AggregationLeafBase = self.trainer.aggregator
        early_stop_flag = aggregator.download()
        assert isinstance(early_stop_flag, bool)
        return early_stop_flag
    def _download_model(self, context: dict):
        """Replace the local model weights with the downloaded global ones."""
        aggregator: AggregationLeafBase = self.trainer.aggregator
        new_state_dict = aggregator.download()
        self.trainer._state_dict_to_device(new_state_dict, self.trainer.device, inline=True)
        self.trainer.model.load_state_dict(new_state_dict)
    def _upload_model(self, context: dict):
        """Upload the local state dict (moved to CPU first) together with this
        party's aggregation weight (configured value, or the dataloader
        length as fallback)."""
        aggregator: AggregationLeafBase = self.trainer.aggregator
        if self.trainer.device != "cpu":
            state_dict = self.trainer._state_dict_to_device(
                self.trainer.model.state_dict(), "cpu", inline=False
            )
        else:
            state_dict = self.trainer.model.state_dict()
        weight = self.trainer.common_config.aggregation.get("weight") or \
            len(self.trainer.train_dataloader)
        aggregator.upload(state_dict, weight)
    def _update_gmodel_params(self, context):
        """Snapshot the freshly downloaded global weights; the proximal term
        in fedprox_loss is computed against this snapshot."""
        self.gmodel_params = \
            [param.data.detach().clone() for param in self.trainer.model.parameters()]
        return
    def _set_lossfunc(self):
        """ Define self.lossfunc """
        # NOTE(review): builds FedProx-wrapped loss functions from the config,
        # but register() does not install this override on the trainer —
        # confirm where/whether this is invoked.
        lossfunc_conf = OrderedDict(self.trainer.common_config.lossfunc)
        lossfunc = OrderedDict()
        for k, v in lossfunc_conf.items():
            lossfunc[k] = self._get_fedprox_loss(k, v)
        return lossfunc
    def _get_fedprox_loss(self, k, v):
        """Wrap base loss ``k`` (constructed with kwargs ``v``) so that the
        proximal penalty (mu/2) * sum ||w - w_global||_F^2 is added."""
        def fedprox_loss(pred, label):
            reg = 0.0
            for w_prev, w in zip(self.gmodel_params, self.trainer.model.parameters()):
                reg += torch.pow(torch.norm(w - w_prev, p='fro'), 2)
            loss = get_lossfunc(k)(**v)(pred, label) + self.mu * reg / 2
            return loss
        return fedprox_loss
XFL | XFL-master/python/algorithm/core/horizontal/template/torch/scaffold/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from copy import deepcopy
from typing import OrderedDict, List, Optional
from torch.optim.optimizer import Optimizer, required
from ..base import BaseTrainer
from algorithm.core.horizontal.aggregation.aggregation_base import AggregationLeafBase
class SCAFFOLDLabelTrainer:
    """SCAFFOLD strategy hooks for a label trainer (leaf).

    Tracks global-model and local-model drift between aggregation rounds
    (``gmodel_grad`` / ``lmodel_grad``) and replaces the trainer's optimizer
    with a SCAFFOLDOptimizer that applies the control-variate correction.
    """
    def __init__(self, trainer: BaseTrainer):
        self.trainer = trainer
        # Global model snapshots from the previous / current round.
        self.prev_gmodel_params = None
        self.gmodel_params = None
        # Round-over-round drift of the global and local models; shared with
        # the SCAFFOLDOptimizer by reference (see _set_optimizer).
        self.gmodel_grad = []
        self.lmodel_grad = []
    def register(self):
        """Attach the SCAFFOLD hooks (lower rank runs earlier) and swap in the
        SCAFFOLD optimizer."""
        self.trainer.register_hook(
            place="before_local_epoch", rank=-3,
            func=self._sync_early_stop_flag, desc="sync early stop flag"
        )
        self.trainer.register_hook(
            place="before_local_epoch", rank=-2,
            func=self._download_model, desc="download global model"
        )
        self.trainer.register_hook(
            place="before_local_epoch", rank=-1,
            func=self._update_gmodel_grad, desc="Update gmodel grad"
        )
        self.trainer.register_hook(
            place="after_local_epoch", rank=-2,
            func=self._update_lmodel_grad, desc="Update lmodel grad"
        )
        self.trainer.register_hook(
            place="after_local_epoch", rank=-1,
            func=self._upload_model, desc="upload local model"
        )
        self._set_optimizer()
    # if get True, means the training is finished
    def _sync_early_stop_flag(self, context: dict):
        """Receive the early-stop flag from the root; returning True makes
        fit() break out of the epoch loop."""
        aggregator: AggregationLeafBase = self.trainer.aggregator
        early_stop_flag = aggregator.download()
        assert isinstance(early_stop_flag, bool)
        return early_stop_flag
    def _download_model(self, context: dict):
        """Replace the local model weights with the downloaded global ones."""
        aggregator: AggregationLeafBase = self.trainer.aggregator
        new_state_dict = aggregator.download()
        self.trainer._state_dict_to_device(
            new_state_dict, self.trainer.device, inline=True)
        self.trainer.model.load_state_dict(new_state_dict)
    def _upload_model(self, context: dict):
        """Upload the local state dict (moved to CPU first) with this party's
        aggregation weight (configured value, or the dataloader length)."""
        aggregator: AggregationLeafBase = self.trainer.aggregator
        if self.trainer.device != "cpu":
            state_dict = self.trainer._state_dict_to_device(
                self.trainer.model.state_dict(), "cpu", inline=False)
        else:
            state_dict = self.trainer.model.state_dict()
        weight = self.trainer.common_config.aggregation.get("weight") or \
            len(self.trainer.train_dataloader)
        aggregator.upload(state_dict, weight)
    def _update_gmodel_grad(self, context):
        """After downloading the global model, record its drift since the
        previous round: gmodel_grad = w_global_now - w_global_prev. The list
        is mutated in place so the optimizer sees the update."""
        self.gmodel_grad.clear()
        if self.gmodel_params:
            self.prev_gmodel_params = deepcopy(self.gmodel_params)
        self.gmodel_params = [p.data.detach().clone()
                              for p in self.trainer.model.parameters()]
        if self.prev_gmodel_params:
            for w, prev_w in zip(self.gmodel_params, self.prev_gmodel_params):
                self.gmodel_grad.append(w.sub(prev_w))
        return
    def _update_lmodel_grad(self, context):
        """After local training, accumulate the local model's drift relative
        to the global snapshot into lmodel_grad (in place, so the optimizer
        sees it)."""
        if len(self.lmodel_grad) == 0:
            # First round: drift is simply w_local - w_global.
            for l_w, g_w in zip(self.trainer.model.parameters(), self.gmodel_params):
                self.lmodel_grad.append(l_w.sub(g_w))
        else:
            # NOTE: rebuilds the full parameter list once per index; O(n^2) in
            # the number of parameter tensors, but behavior-relevant order is
            # preserved.
            for i in range(len(self.lmodel_grad)):
                self.lmodel_grad[i] += -self.gmodel_grad[i] + \
                    [p.data.detach() for p in self.trainer.model.parameters()][i] - \
                    self.gmodel_params[i]
        return
    def _set_optimizer(self):
        """ Define self.optimizer """
        # Replace the trainer's optimizers with SCAFFOLDOptimizer instances.
        # gmodel_grad / lmodel_grad are passed by reference: later in-place
        # updates by the hooks above are visible inside the optimizer. The
        # iter_num argument is local_epoch * batches-per-epoch.
        optimizer_conf = OrderedDict(
            self.trainer.common_config.optimizer
        )
        optimizer = OrderedDict()
        for k, v in optimizer_conf.items():
            optimizer[k] = SCAFFOLDOptimizer(
                self.trainer.model.parameters(), self.gmodel_grad, self.lmodel_grad,
                self.trainer.common_config.train_params.get("local_epoch", 0) \
                *len(self.trainer.train_dataloader), **v
            )
        self.trainer.optimizer = optimizer
        self.trainer.lr_scheduler = self.trainer._set_lr_scheduler(self.trainer.optimizer)
class SCAFFOLDOptimizer(Optimizer):
    """SGD-style optimizer with the SCAFFOLD control-variate correction.

    Each step delegates to ``sgdfold``, which applies a standard SGD update
    (weight decay / momentum / nesterov) corrected by the difference between
    the local and global model drifts scaled by ``lr_sum`` (the learning
    rates accumulated over one round of ``iter_num`` steps).
    """
    def __init__(
        self, params, gmodel_grad, lmodel_grad, iter_num, lr=required,
        weight_decay=0, maximize=False, momentum=0, dampening=0,
        nesterov=False, amsgrad=False
    ):
        """
        Args:
            params: iterable of parameters to optimize.
            gmodel_grad: global-model drift tensors, updated in place by
                SCAFFOLDLabelTrainer between rounds (shared by reference).
            lmodel_grad: local-model drift tensors, likewise shared.
            iter_num: steps per round; when lr_history reaches this length,
                lr_sum is refreshed and the history reset.
            lr: learning rate (required).
            amsgrad: accepted for config compatibility but unused here.
        """
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if weight_decay < 0.0:
            raise ValueError(
                "Invalid weight_decay value: {}".format(weight_decay))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        defaults = dict(gmodel_grad=gmodel_grad, lmodel_grad=lmodel_grad, iter_num=iter_num, lr_history=[], lr_sum=1, lr=lr,
                        weight_decay=weight_decay, maximize=maximize, momentum=momentum, dampening=dampening, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError(
                "Nesterov momentum requires a momentum and zero dampening")
        super().__init__(params, defaults)
    @torch.no_grad()
    def step(self):
        """Perform a single optimization step; returns None (no closure
        support)."""
        loss = None
        for group in self.param_groups:
            params_with_grad = []
            d_p_list = []
            momentum_buffer_list = []
            # Collect parameters that received gradients, along with their
            # momentum buffers (None on the first step).
            for p in group['params']:
                if p.grad is not None:
                    params_with_grad.append(p)
                    d_p_list.append(p.grad)
                    state = self.state[p]
                    if 'momentum_buffer' not in state:
                        momentum_buffer_list.append(None)
                    else:
                        momentum_buffer_list.append(state['momentum_buffer'])
            sgdfold(
                params_with_grad, d_p_list, momentum_buffer_list,
                gmodel_grad=group['gmodel_grad'], lmodel_grad=group['lmodel_grad'],
                lr_sum=group['lr_sum'], lr=group['lr'], weight_decay=group['weight_decay'],
                maximize=group['maximize'], momentum=group['momentum'],
                dampening=group['dampening'], nesterov=group['nesterov']
            )
            # Accumulate the lr used this step; once a full round's worth has
            # been recorded, refresh lr_sum and start a new history window.
            group['lr_history'].append(group['lr'])
            if len(group['lr_history']) == group['iter_num']:
                group['lr_sum'] = sum(group['lr_history'])
                group['lr_history'].clear()
            # update momentum_buffers in state
            for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
                state = self.state[p]
                state['momentum_buffer'] = momentum_buffer
        return loss
def sgdfold(
    params: List[Tensor], d_p_list: List[Tensor],
    momentum_buffer_list: List[Optional[Tensor]],
    gmodel_grad: List[Tensor], lmodel_grad: List[Tensor],
    lr_sum: float, lr: float, weight_decay: float, maximize: bool,
    momentum: float, dampening: float, nesterov: bool
):
    """Functional SGD update with the SCAFFOLD control-variate correction.

    Mutates ``params`` in place (and fills ``momentum_buffer_list`` on the
    first call when momentum is enabled). When ``gmodel_grad`` is non-empty,
    each step is corrected by (lmodel_grad - gmodel_grad) / lr_sum.
    """
    step_sign = lr if maximize else -lr
    round_sign = lr_sum if maximize else -lr_sum
    for idx, param in enumerate(params):
        update = d_p_list[idx]
        if weight_decay != 0:
            # L2 regularization folded into the gradient.
            update = update.add(param, alpha=weight_decay)
        if momentum != 0:
            buf = momentum_buffer_list[idx]
            if buf is None:
                # First step: the buffer starts as a detached gradient copy.
                buf = torch.clone(update).detach()
                momentum_buffer_list[idx] = buf
            else:
                buf.mul_(momentum).add_(update, alpha=1 - dampening)
            update = update.add(buf, alpha=momentum) if nesterov else buf
        if gmodel_grad:
            # SCAFFOLD correction: subtract the local/global drift difference
            # scaled by the accumulated learning rate of the round.
            correction = (lmodel_grad[idx] - gmodel_grad[idx]) / round_sign
            param.add_(update - correction, alpha=step_sign)
        else:
            param.add_(update, alpha=step_sign)
| 8,527 | 39.803828 | 128 | py |
XFL | XFL-master/python/algorithm/core/horizontal/template/jax/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
from typing import OrderedDict
import flax.linen as nn
import optax
from flax.training import train_state, checkpoints
from typing import Any
from algorithm.core.horizontal.aggregation.api import get_aggregation_root_inst
from algorithm.core.horizontal.aggregation.api import get_aggregation_leaf_inst
from algorithm.core.loss.jax_loss import get_lossfunc
from algorithm.core.lr_scheduler.jax_lr_scheduler import get_lr_scheduler
from algorithm.core.optimizer.jax_optimizer import get_optimizer
from algorithm.core.metrics import get_metric
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from algorithm.core.horizontal.template.hooker import Hooker
class BaseTrainer(Hooker, TrainConfigParser):
    """Base class for horizontal federated trainers built on JAX/Flax.
    Parses the train config, builds dataloaders/model/loss/lr-schedule/
    optimizer/metrics plus the secure aggregator, and drives the
    global/local epoch loop in ``fit``. Subclasses must implement
    ``_set_model``, ``_set_train_dataloader``, ``_set_val_dataloader``
    and ``train_loop``.
    """
    def __init__(self, train_conf: dict):
        # The two bases have unrelated __init__ signatures, so they are
        # initialized explicitly instead of via cooperative super().
        Hooker.__init__(self)
        TrainConfigParser.__init__(self, train_conf)
        # Hook points fired by fit(); subclasses register callbacks on these.
        self.declare_hooks(["before_global_epoch", "before_local_epoch", "before_train_loop",
                            "after_train_loop", "after_local_epoch", "after_global_epoch"])
        # NOTE(review): _set_optimizer below reads self.init_params and
        # self.init_batch_stats, which are never assigned in this class —
        # presumably a subclass setter above creates them; confirm in subclasses.
        self.train_dataloader, self.exmp_label = self._set_train_dataloader()
        self.val_dataloader, self.exmp_assist = self._set_val_dataloader()
        self.model = self._set_model()
        self.loss_func = self._set_lossfunc()
        self.lr_scheduler = self._set_lr_scheduler()
        self.state = self._set_optimizer()
        self.metrics = self._set_metrics()
        self.aggregator = self._set_aggregator(self.identity)
    def _set_aggregator(self, party_type: str):
        """Build the aggregation endpoint for this party.
        The assist trainer acts as the aggregation root; every other party
        is a leaf. Encryption settings come from
        ``train_params["aggregation_config"]["encryption"]``.
        """
        aggregation_config = self.train_params.get("aggregation_config", {})
        encryption_params = aggregation_config.get("encryption")
        if party_type == "assist_trainer":
            aggregator = get_aggregation_root_inst(encryption_params)
        else:
            aggregator = get_aggregation_leaf_inst(encryption_params)
        return aggregator
    def _set_model(self) -> nn.Module:
        """Subclass hook: build and return the Flax module."""
        raise NotImplementedError("The _set_model method is not implemented.")
    def _set_train_dataloader(self):
        """Subclass hook: return (train_dataloader, example_batch)."""
        raise NotImplementedError(
            "The _set_train_dataloader method is not implemented.")
    def _set_val_dataloader(self):
        """Subclass hook: return (val_dataloader, example_batch)."""
        raise NotImplementedError(
            "The _set_val_dataloader method is not implemented.")
    def _save_model(self, context: dict):
        """Checkpoint params and batch stats under output["model"]["path"].
        Only type == "file" is supported. ``name`` is read but not used:
        flax's checkpoints module picks the file name itself.
        """
        path = self.output["model"]["path"]
        name = self.output["model"]["name"]
        type = self.output["model"]["type"]
        if not os.path.exists(path):
            os.makedirs(path)
        if type == "file":
            checkpoints.save_checkpoint(
                ckpt_dir=path,
                target={'params': self.state.params, 'batch_stats': self.state.batch_stats},
                step=0,
                overwrite=True,
            )
        else:
            raise NotImplementedError(f"Type {type} not supported.")
    def _set_lossfunc(self):
        """Build the loss function from train_params["lossfunc_config"].
        If several losses are configured, only the last one takes effect;
        its key is remembered in ``self.loss_func_name``.
        """
        loss_func = None
        loss_func_conf = OrderedDict(self.train_params.get("lossfunc_config", {}))
        for k in loss_func_conf.keys():
            self.loss_func_name = k
            loss_func = get_lossfunc(k)
        return loss_func
    def _set_lr_scheduler(self):
        """Build the LR schedule; if several are configured, the last wins."""
        lr_scheduler = None
        lr_scheduler_conf = OrderedDict(self.train_params.get("lr_scheduler_config", {}))
        for k, v in lr_scheduler_conf.items():
            lr_scheduler = get_lr_scheduler(k)(**v)
        return lr_scheduler
    def _set_optimizer(self):
        """Build the optax optimizer (last configured one wins) and wrap it,
        together with the initial params/batch stats, in a TrainState.
        Gradients are always clipped to 1.0 before the optimizer update.
        Returns None when no optimizer is configured.
        """
        optimizer_conf = OrderedDict(self.train_params.get("optimizer_config", {}))
        optimizer = None
        for k, v in optimizer_conf.items():
            opt_class = get_optimizer(k)
            if self.lr_scheduler:
                optimizer = optax.chain(optax.clip(1.0), opt_class(self.lr_scheduler, **v))
            else:
                optimizer = optax.chain(optax.clip(1.0), opt_class(**v))
        state = None
        if optimizer:
            # TrainState is defined at the bottom of this module.
            state = TrainState.create(
                apply_fn=self.model.apply,
                params=self.init_params,
                batch_stats=self.init_batch_stats,
                tx=optimizer
            )
        return state
    def _set_metrics(self):
        """Map metric name -> callable with its config kwargs pre-bound."""
        metrics = {}
        metrics_conf: dict = self.train_params.get("metric_config", {})
        for k, v in metrics_conf.items():
            metric = get_metric(k)
            metrics[k] = partial(metric, **v)
        return metrics
    def train_loop(self):
        """Subclass hook: run one local-epoch training pass."""
        raise NotImplementedError("The train_loop method is not implemented.")
    def fit(self):
        """Run global_epoch x local_epoch training with hook callbacks.
        A truthy return from the "before_local_epoch" or "after_local_epoch"
        hooks breaks out of the global loop early (early stopping).
        """
        current_epoch = 1
        self.context["current_epoch"] = current_epoch
        self.context["train_conf"] = self.train_conf
        global_epoch_num = self.train_params.get("global_epoch", 0)
        local_epoch_num = self.train_params.get("local_epoch", 0)
        self.execute_hook_at("before_global_epoch")
        for g_epoch in range(1, global_epoch_num + 1):
            logger.info(f"global epoch {g_epoch}/{global_epoch_num} start...")
            self.context['g_epoch'] = g_epoch
            if self.execute_hook_at("before_local_epoch"):
                break
            for l_epoch in range(1, local_epoch_num + 1):
                logger.info(
                    f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} start...")
                self.context['l_epoch'] = l_epoch
                self.execute_hook_at("before_train_loop")
                self.train_loop()
                self.execute_hook_at("after_train_loop")
                # current_epoch counts local epochs across all global epochs.
                current_epoch += 1
                self.context["current_epoch"] = current_epoch
                logger.info(
                    f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} finished.")
            if self.execute_hook_at("after_local_epoch"):
                break
            logger.info(f"global epoch {g_epoch}/{global_epoch_num} finished.")
        self.execute_hook_at("after_global_epoch")
class TrainState(train_state.TrainState):
    """Flax TrainState extended with BatchNorm batch statistics.

    ``batch_stats`` carries the mutable model collection (e.g. BatchNorm
    running averages) alongside params and optimizer state so it is
    checkpointed and aggregated together with them (see BaseTrainer above).
    """

    # Mutable model state that is not updated by the optimizer.
    # (Original line carried fused extraction residue after `Any`; repaired.)
    batch_stats: Any
XFL | XFL-master/python/algorithm/core/horizontal/template/jax/fedtype.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from service.fed_config import FedConfig
def _get_assist_trainer():
    """Return the assist-trainer class for the horizontal JAX template.

    Only FedAvg is implemented, so the aggregation ``type`` field is not
    dispatched on yet; the stage config is still indexed so a malformed
    config fails fast here. (Removed a dead local that shadowed the
    builtin ``type``.)
    """
    # Fail-fast access; dispatch on aggregation_config["type"] is not
    # implemented yet.
    aggregation_config = FedConfig.stage_config["train_info"]["params"]["aggregation_config"]
    # Imported locally to avoid a circular import at module load time.
    from algorithm.core.horizontal.template.jax.fedavg.assist_trainer import FedAvgAssistTrainer
    return FedAvgAssistTrainer
def _get_label_trainer():
    """Return the label-trainer class for the horizontal JAX template.

    Only FedAvg is implemented, so the aggregation ``type`` field is not
    dispatched on yet; the stage config is still indexed so a malformed
    config fails fast here. (Removed a dead local shadowing the builtin
    ``type``; stripped fused extraction residue from the return line.)
    """
    # Fail-fast access; dispatch on aggregation_config["type"] is not
    # implemented yet.
    aggregation_config = FedConfig.stage_config["train_info"]["params"]["aggregation_config"]
    # Imported locally to avoid a circular import at module load time.
    from algorithm.core.horizontal.template.jax.fedavg.label_trainer import FedAvgLabelTrainer
    return FedAvgLabelTrainer
XFL | XFL-master/python/algorithm/core/horizontal/template/tensorflow/fedavg/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from functools import partial
from typing import OrderedDict
from algorithm.core.horizontal.aggregation.api import get_aggregation_root_inst
from algorithm.core.horizontal.aggregation.api import get_aggregation_leaf_inst
from algorithm.core.loss.tf_loss import get_lossfunc
from algorithm.core.metrics import get_metric
from algorithm.core.optimizer.tf_optimizer import get_optimizer
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from algorithm.core.horizontal.template.hooker import Hooker
import tensorflow.keras as keras
class BaseTrainer(Hooker, TrainConfigParser):
    """Base class for horizontal federated trainers built on TensorFlow/Keras.
    Parses the train config, builds model/dataloaders/loss/optimizer/metrics
    plus the secure aggregator, and drives the global/local epoch loop in
    ``fit``. Subclasses must implement ``_set_model``,
    ``_set_train_dataloader``, ``_set_val_dataloader`` and ``train_loop``.
    """
    def __init__(self, train_conf: dict):
        # The two bases have unrelated __init__ signatures, so they are
        # initialized explicitly instead of via cooperative super().
        Hooker.__init__(self)
        TrainConfigParser.__init__(self, train_conf)
        # Hook points fired by fit(); subclasses register callbacks on these.
        self.declare_hooks(["before_global_epoch", "before_local_epoch", "before_train_loop",
                            "after_train_loop", "after_local_epoch", "after_global_epoch"])
        self.model = self._set_model()
        self.train_dataloader = self._set_train_dataloader()
        self.val_dataloader = self._set_val_dataloader()
        self.loss_func = self._set_lossfunc()
        self.optimizer = self._set_optimizer()
        self.metrics = self._set_metrics()
        self.aggregator = self._set_aggregator(self.identity)
    def _set_aggregator(self, party_type: str):
        """Aggregation root for the assist trainer, leaf for every other party."""
        aggregation_config = self.train_params.get("aggregation_config", {})
        encryption_params = aggregation_config.get("encryption")
        if party_type == "assist_trainer":
            aggregator = get_aggregation_root_inst(encryption_params)
        else:
            aggregator = get_aggregation_leaf_inst(encryption_params)
        return aggregator
    def _set_model(self) -> keras.Model:
        """Subclass hook: build and return the Keras model."""
        raise NotImplementedError("The _set_model method is not implemented.")
    def _set_train_dataloader(self):
        """Subclass hook: return the training data iterator."""
        raise NotImplementedError(
            "The _set_train_dataloader method is not implemented.")
    def _set_val_dataloader(self):
        """Subclass hook: return the validation data iterator."""
        raise NotImplementedError(
            "The _set_val_dataloader method is not implemented.")
    def _save_model(self, context: dict):
        """Save model weights as output["model"]["path"]/name (type "file" only)."""
        path = self.output["model"]["path"]
        name = self.output["model"]["name"]
        type = self.output["model"]["type"]
        if not os.path.exists(path):
            os.makedirs(path)
        path = os.path.join(path, name)
        if type == "file":
            self.model.save_weights(path)
        else:
            raise NotImplementedError(f"Type {type} not supported.")
    def _load_model(self, context: dict):
        """Load pretrained weights when input["pretrain_model"] is configured."""
        pretrain_model_conf = self.input["pretrain_model"]
        if pretrain_model_conf != {}:
            path = os.path.join(
                pretrain_model_conf["path"], pretrain_model_conf["name"])
            self.model.load_weights(path)
    def _set_optimizer(self):
        """Map optimizer name -> instantiated optimizer from config."""
        optimizer_conf = OrderedDict(
            self.train_params.get("optimizer_config", {}))
        optimizer = OrderedDict()
        for k, v in optimizer_conf.items():
            optimizer[k] = get_optimizer(k)(**v)
        return optimizer
    def _set_lossfunc(self):
        """Map loss name -> instantiated loss function from config."""
        loss_func_conf = OrderedDict(
            self.train_params.get("lossfunc_config", {}))
        loss_func = OrderedDict()
        for k, v in loss_func_conf.items():
            loss_func[k] = get_lossfunc(k)(**v)
        return loss_func
    def _set_metrics(self):
        """Map metric name -> callable with its config kwargs pre-bound."""
        metrics = {}
        metrics_conf: dict = self.train_params.get("metric_config", {})
        for k, v in metrics_conf.items():
            metric = get_metric(k)
            metrics[k] = partial(metric, **v)
        return metrics
    def train_loop(self):
        """Subclass hook: run one local-epoch training pass."""
        raise NotImplementedError("The train_loop method is not implemented.")
    def fit(self):
        """Run global_epoch x local_epoch training with hook callbacks.
        A truthy return from the "before_local_epoch" or "after_local_epoch"
        hooks breaks out of the global loop early (early stopping).
        """
        current_epoch = 1
        self.context["current_epoch"] = current_epoch
        self.context["train_conf"] = self.train_conf
        global_epoch_num = self.train_params.get("global_epoch", 0)
        local_epoch_num = self.train_params.get("local_epoch", 0)
        self.execute_hook_at("before_global_epoch")
        for g_epoch in range(1, global_epoch_num + 1):
            logger.info(f"global epoch {g_epoch}/{global_epoch_num} start...")
            self.context['g_epoch'] = g_epoch
            if self.execute_hook_at("before_local_epoch"):
                break
            for l_epoch in range(1, local_epoch_num + 1):
                logger.info(
                    f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} start...")
                self.context['l_epoch'] = l_epoch
                self.execute_hook_at("before_train_loop")
                self.train_loop()
                self.execute_hook_at("after_train_loop")
                # current_epoch counts local epochs across all global epochs.
                current_epoch += 1
                self.context["current_epoch"] = current_epoch
                logger.info(
                    f"local epoch {l_epoch}/{local_epoch_num} of global epoch {g_epoch} finished.")
            if self.execute_hook_at("after_local_epoch"):
                break
            logger.info(f"global epoch {g_epoch}/{global_epoch_num} finished.")
        self.execute_hook_at("after_global_epoch")
| 5,968 | 36.074534 | 99 | py |
XFL | XFL-master/python/algorithm/core/tree_ray/xgb_actor.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
from typing import Dict, List, Tuple, Optional, Union
import numpy as np
import pandas as pd
import ray
from algorithm.core.tree_ray.big_feature import Feature
from algorithm.core.tree.xgboost_loss import XGBLoss
from algorithm.core.tree.tree_structure import Tree, BoostingTree, Node
from algorithm.core.paillier_acceleration import embed, unpack
from algorithm.core.tree.gain_calc import cal_cat_rank, cal_gain, cal_weight
from common.crypto.paillier.paillier import Paillier, PaillierContext
"""
Note: Typings in Actors are for better understanding, they are ActorHandle actually.
"""
class XgbBaseActor:
    """Shared state container for all XGBoost Ray actors.

    Holds the per-block feature/label tables, the gradient-augmented
    "big feature" tables, split-point bookkeeping and crypto/hyperparameter
    configuration shared across the mixin actor classes.
    """
    def __init__(self):
        # Raw (later binned) feature blocks and labels, keyed by block id.
        self.features: Dict[int, pd.DataFrame] = {}
        self.label: Dict[int, Optional[np.ndarray]] = {}
        self.loss_method: Optional[str] = None
        self.loss_func: Optional[XGBLoss] = None
        self.boosting_tree: Optional[BoostingTree] = None
        # Validation / test feature blocks (labels only kept for validation).
        self.val_features: Dict[int, pd.DataFrame] = {}
        self.val_label: Dict[int, np.ndarray] = {}
        self.test_features: Dict[int, pd.DataFrame] = {}
        # Features joined with grad/hess columns, per block; node_big_feature
        # caches the per-tree-node slices of big_feature.
        self.big_feature: Dict[int, Feature] = {}
        self.node_big_feature: Dict[str, Dict[int, Feature]] = {}
        # Split points per feature, and the raw-value -> bin-index lookup.
        self.split_points: Dict[str, list] = {}
        # Fixed: original annotation was Dict[float: int] (a slice, invalid
        # as a typing parameterization); the intent is a float -> int map.
        self.split_point_bin_map: Dict[str, Dict[float, int]] = {}
        self.paillier_context: Optional[PaillierContext] = None
        self.cat_names: List[str] = []
        self.cat_smooth: Optional[float] = None
        self.lambda_: Optional[float] = None
        # Identity of this actor/node within the Ray cluster.
        self.actor_id = ray.get_runtime_context().get_actor_id()
        self.node_id = ray.get_runtime_context().get_node_id()
    def report_actor_id(self):
        """Return this actor's Ray actor id."""
        return self.actor_id
    def report_node_id(self):
        """Return the Ray node id this actor runs on."""
        return self.node_id
class RayCentralCsvActor:
    """Helper for ingesting centrally-loaded CSV data blocks.

    ``recv_data`` splits one (block_id, DataFrame) pair into a feature
    table and an optional label vector, zero-filling missing values.
    """
    def __init__(self):
        super().__init__()

    @classmethod
    def recv_data(cls,
                  data: list,  # [block_id: int, block: pd.DataFrame]
                  has_label: bool,
                  missing_values: List[float]):
        """Split one data block into features and label.

        Args:
            data: two-element list ``[block_id, dataframe]``; when
                ``has_label`` is True the first column holds the label.
            has_label: whether the block's first column is the label.
            missing_values: sentinel values replaced by 0 in the features.

        Returns:
            (features, label): dicts keyed by block_id; ``label[block_id]``
            is an ndarray, or None for an unlabeled block.
        """
        block_id, block = data  # clearer than repeated data[0]/data[1] indexing
        features: Dict[int, pd.DataFrame] = {}
        label: Dict[int, Optional[np.ndarray]] = {}
        if has_label:
            features[block_id] = block.iloc[:, 1:]
            label[block_id] = block.iloc[:, 0].to_numpy()
        else:
            features[block_id] = block
            label[block_id] = None
        if missing_values:  # idiomatic truthiness instead of `!= []`
            # Non-inplace replace: the feature table may be a view of the
            # caller's frame, so reassign instead of mutating in place.
            features[block_id] = features[block_id].replace({k: 0 for k in missing_values})
        return features, label
class XgbDataFrameActor(XgbBaseActor):
    """Data-preparation mixin for the XGBoost Ray actor: per-column
    statistics, categorical bookkeeping and value-to-bin encoding over
    the per-block pandas feature tables."""
    def __init__(self):
        super().__init__()
    def unique(self, cols: Optional[List[Union[bool, int]]] = None):
        """Return per-column unique values across all local data blocks.
        Args:
            cols: optional column selector passed to ``df[cols]``; all
                columns are used when None.
        Returns:
            One-row result mapping each column to ``[ndarray of uniques]``,
            or None when this actor holds no data.
        """
        res = None
        # NOTE: an earlier block-by-block incremental merge of unique values
        # saved memory but was much slower, so it was removed. Returning a
        # bare ndarray from DataFrame.apply is dangerous when per-column
        # result lengths differ, hence the one-element-list wrapping below.
        res = []
        for _, df in self.features.items():
            if cols is None:
                res.append(df)
            else:
                res.append(df[cols])
        if res == []:
            return None
        else:
            # Concatenate all blocks, then wrap each column's uniques in a
            # single-element list so apply() keeps a rectangular result.
            res = pd.concat(res).apply(lambda x: [np.unique(x.to_numpy())])
        return res
    def set_cat_features(self, names: List[str]):
        """Record which feature columns are categorical.
        (Casting the columns to the pandas 'category' dtype was considered
        and intentionally not done.)
        """
        self.cat_names = names
        return
    def set_split_points(self, split_points: Dict[str, list]):
        """Store split points and build raw-value -> bin-index lookups.
        A categorical bin may carry a list of raw values; every value in
        the list maps to that same bin index.
        """
        self.split_points = split_points
        for feature_name in split_points:
            self.split_point_bin_map[feature_name] = {}
            for bin, split_point in enumerate(split_points[feature_name]):
                if isinstance(split_point, list):
                    for v in split_point:
                        self.split_point_bin_map[feature_name][v] = bin
                else:
                    self.split_point_bin_map[feature_name][split_point] = bin
        return
    def xgb_binning_phase1(self):
        """Collect per-column statistics across all local blocks.
        Categorical columns yield value counts (a pd.Series); continuous
        columns yield a (min, max) tuple. Per-block results are merged
        incrementally via ``g``.
        """
        def f(x: pd.Series):
            # Membership in self.cat_names (not the dtype) decides whether a
            # column is treated as categorical.
            if x.name in self.cat_names:
                return [x.value_counts()]
            else:
                return [(x.min(), x.max())]
        res = None
        def g(x: pd.Series):
            """Merge one column's stat into the accumulated ``res``:
            value-count Series are summed via an outer join; (min, max)
            tuples are combined element-wise.
            """
            if isinstance(x.iloc[0], pd.Series):
                y = pd.merge(x.iloc[0], res[x.name].iloc[0], how='outer', left_index=True, right_index=True).fillna(0)
                counted_values = y[x.iloc[0].name+'_x'] + y[x.iloc[0].name+'_y']
                counted_values.rename(x.iloc[0].name, inplace=True)
                return [counted_values]
            else:
                a, b = x.iloc[0]
                c, d = res[x.name].iloc[0]
                min_v = min(a, c)
                max_v = max(b, d)
                return [(min_v, max_v)]
        for _, features in self.features.items():
            out = features.apply(f)
            if res is None:
                res = out
            else:
                res = out.apply(g)
        return res
    def xgb_binning_phase2(self,
                           num_bins: int,
                           split_points_df: pd.DataFrame):
        """Replace raw feature values with bin codes, in place.
        Codes are stored in the smallest unsigned integer dtype that can
        hold ``num_bins`` distinct values.
        """
        if num_bins <= 256:
            dtype = np.uint8
        elif num_bins <= 2 ** 16:
            dtype = np.uint16
        else:
            dtype = np.uint32
        def f(x: pd.Series):
            """Turn one column's split points into binning info.
            object dtype (categorical): a value->bin dict; a trailing list
            groups its values into the last bin. Otherwise (continuous,
            np.float64): bin edges padded with +/-inf for pd.cut.
            """
            x = x.iloc[0]
            if x.dtype == object:
                if isinstance(x[-1], list):
                    value_map = {v: i for i, v in enumerate(x[:-1])}
                    value_map.update({v: len(x)-1 for v in x[-1]})
                else:
                    value_map = {v: i for i, v in enumerate(x)}
                return [value_map]
            else:
                bins = [-float('inf')] + x.tolist() + [float('inf')]
                return [bins]
        split_points_df = split_points_df.apply(f)
        def g(x: pd.Series):
            # dict -> categorical mapping; list -> pd.cut on continuous edges.
            binning_info = split_points_df[x.name].iloc[0]
            if isinstance(binning_info, dict):
                codes = x.map(binning_info)
            else:
                codes = pd.cut(x, bins=binning_info, labels=range(len(binning_info)-1))
            return codes
        for block_idx, features in self.features.items():
            self.features[block_idx] = features.apply(g).astype(dtype)
        return
class XgbTrainActor(XgbBaseActor):
    """Training mixin for the XGBoost Ray actor.
    Maintains gradient-augmented feature tables, builds per-node grad/hess
    histograms, evaluates candidate splits (optionally over Paillier
    ciphertexts received from the remote party), and turns tree split
    indicators into predictions.
    """
    def __init__(self):
        super().__init__()
    def recv_all_trees(self, boosting_tree: BoostingTree):
        """Replace the local copy of the whole boosting model."""
        self.boosting_tree = boosting_tree
    def recv_latest_tree(self, tree: Tree, lr: float, max_depth: int):
        """Append the newly finished tree to the local boosting model."""
        self.boosting_tree.append(tree, lr, max_depth)
    def sync_config(self,
                    paillier_context: PaillierContext,
                    cat_smooth: float,
                    lambda_: float):
        """Receive crypto context and split hyperparameters from the scheduler."""
        self.paillier_context = paillier_context
        self.cat_smooth = cat_smooth
        self.lambda_ = lambda_
    def update_big_feature(self,
                           indices: Dict[int, Optional[np.ndarray]],
                           columns: Optional[List[str]],
                           grad: Optional[Dict[int, np.ndarray]],
                           hess: Optional[Dict[int, np.ndarray]],
                           grad_hess: Optional[Dict[int, np.ndarray]],
                           create_new: bool):
        """Join feature blocks with their grad/hess (or packed grad_hess) columns.
        For the label trainer a fresh big_feature is created (create_new=True).
        For a trainer the grad/hess/grad_hess values are supposed to be
        ciphertext, so only a portion is updated per call.
        """
        if create_new:
            self.big_feature = {}
            gc.collect()
        for block_id, features in self.features.items():
            if indices is not None and block_id not in indices:
                # This block is not used because of sampling
                continue
            if grad_hess is None:
                self.big_feature[block_id] = Feature.create(values=features,
                                                            indices=None if indices is None else indices[block_id],
                                                            columns=columns,
                                                            grad=grad[block_id],
                                                            hess=hess[block_id],
                                                            grad_hess=None)
            else:
                self.big_feature[block_id] = Feature.create(values=features,
                                                            indices=None if indices is None else indices[block_id],
                                                            columns=columns,
                                                            grad=None,
                                                            hess=None,
                                                            grad_hess=grad_hess[block_id])
        return
    def cal_hist_for_node(self,
                          node_id: str,
                          packed: bool,
                          calc_count: bool,
                          indices: Optional[Dict[int, np.ndarray]],
                          col_section: Optional[Tuple[int, int]]):
        """ Calculate hist for this node_big_feature on selected feature columns.
        Note: Categorial feature hist is not sorted here.
        Args:
            node_id (str): node's id in a tree.
            packed (bool): if true, calc hist of column 'xfl_grad_hess', else, calc hist of columns 'xfl_grad' and 'xfl_hess'
            calc_count (bool): if true, also aggregate per-bin sample counts.
            indices (Optional[Dict[int, np.ndarray]]): selected sample indices of the node.
                if indices is None, create a new self.node_big_feature equals to self.big_feature.
            col_section (Optional[Tuple[int, int]]): a section for feature columns on the node_big_feature.
        Returns:
            Dict feature name -> grouped grad/hess aggregation, or None when
            this actor holds no data.
        """
        if len(self.big_feature.keys()) == 0:
            return None
        if node_id not in self.node_big_feature:
            # Cache the per-node slice of big_feature; freed later via
            # free_node_big_feature().
            if indices is None:
                self.node_big_feature[node_id] = self.big_feature
            else:
                self.node_big_feature[node_id] = {}
                for block_idx in indices:
                    feature = self.big_feature[block_idx]
                    self.node_big_feature[node_id][block_idx] = feature.slice_by_indices(indices[block_idx])
        node_big_feature = self.node_big_feature[node_id]
        # Leading non-feature columns: 1 packed column, or grad + hess.
        first_feature_col = 1 if packed else 2
        if col_section is None:
            columns = node_big_feature[list(node_big_feature.keys())[0]].data.columns.tolist()[first_feature_col:]  # for grad and hess
        else:
            columns = node_big_feature[
                list(node_big_feature.keys())[0]].data.columns.tolist()[col_section[0]+first_feature_col:col_section[1]+first_feature_col]
        agg_arg = {'sum', 'count'} if calc_count else {'sum'}
        # NOTE: an earlier per-block groupby-then-merge implementation was
        # removed in favor of one concat + groupby over all blocks.
        agg_feature = pd.concat([feature.data for feature in node_big_feature.values()])
        hist: Dict[str, pd.DataFrame] = {name: None for name in columns}
        for name in columns:
            if not packed:
                hist[name] = agg_feature.groupby([name], observed=True)[['xfl_grad', 'xfl_hess']].agg(agg_arg)
            else:
                hist[name] = agg_feature.groupby([name], observed=True)[['xfl_grad_hess']].agg(agg_arg)
        return hist
    def encrypt_grad_hess(self,
                          packed: bool,
                          block_id: int,
                          context: PaillierContext,
                          precision: Optional[float]):
        """Encrypt one block's grad/hess columns with Paillier.
        When ``packed``, grad and hess are embedded into one big integer per
        sample and encrypted together; otherwise each is encrypted separately.
        Returns serialized ciphertexts (one array, or a [grad, hess] pair).
        """
        if block_id not in self.big_feature:
            # Actually not reach
            if packed:
                return np.array([])
            else:
                return [np.array([]), np.array([])]
        big_feature_df: pd.DataFrame = self.big_feature[block_id].data
        if packed:
            grad = big_feature_df["xfl_grad"].to_numpy()
            hess = big_feature_df["xfl_hess"].to_numpy()
            data = embed([grad, hess], interval=(1 << 128), precision=64)
            res = Paillier.encrypt(data=data,
                                   context=context,
                                   precision=0,  # must be 0 if data is packed grad and hess
                                   obfuscation=True,
                                   num_cores=1)
            res = Paillier.serialize(res, compression=False)
        else:
            data_grad = big_feature_df["xfl_grad"].to_numpy()
            data_hess = big_feature_df["xfl_hess"].to_numpy()
            data = [data_grad, data_hess]
            res = []
            for d in data:
                out = Paillier.encrypt(data=d,
                                       context=context,
                                       precision=precision,  # caller-chosen precision for unpacked values
                                       obfuscation=True,
                                       num_cores=1)
                res.append(out)
            res = [Paillier.serialize(i, compression=False) for i in res]
        return res
    def filter_sample_index(self,
                            node_id: str,
                            feature_name: str,
                            condition: Union[int, List[int]]):
        """Select the samples of a node that go to the left child.
        Args:
            node_id (str): node id
            feature_name (str): feature name
            condition (Union[int, List[int]]): if is cat feature, condition is List[int], else is int.
        Returns:
            Dict block_id -> list of row indices satisfying the condition.
        """
        if node_id not in self.node_big_feature.keys():
            # No data in this actor
            return {}
        sample_index: Dict[int, list] = {}
        for block_id, feature in self.node_big_feature[node_id].items():
            # Categorical columns test set membership; continuous ones use <=.
            if feature_name in self.cat_names:
                filter = feature.data[feature_name].isin(condition)
            else:
                filter = feature.data[feature_name] <= condition
            if len(feature.data[filter]) != 0:
                sample_index[block_id] = feature.data[filter].index.astype('int').tolist()
        return sample_index
    def free_node_big_feature(self, node_id: str):
        """Drop the cached per-node feature slice and reclaim memory."""
        if node_id in self.node_big_feature:
            del self.node_big_feature[node_id]
            gc.collect()
        return
    def merge_hist(self, hist_list_dict: Dict[str, List[pd.DataFrame]]):
        """Sum partial histograms per feature by concatenating and grouping on the bin index."""
        out_hist_dict: Dict[str, pd.DataFrame] = {}
        for col_name in hist_list_dict:
            hist_df = pd.concat(hist_list_dict[col_name])
            # numeric_only=False !!! Pandas bug.
            hist_df = hist_df.groupby(hist_df.index).sum(numeric_only=False)
            out_hist_dict[col_name] = hist_df
        return out_hist_dict
    def calc_split_info(self,
                        is_remote: bool,
                        hist_dict: Dict[str, pd.DataFrame],
                        cat_names: Optional[List[str]]):
        """Pick the best split over per-feature histograms.
        When ``is_remote`` and a Paillier context is set, the histogram sums
        are ciphertexts and are decrypted (and unpacked, if packed) first.
        Returns a dict describing the best split found; fields marked "fake"
        are placeholders when the split belongs to the remote party.
        """
        def f(x):
            # Decrypt one packed ciphertext and unpack it into (grad, hess).
            y = Paillier.decrypt(self.paillier_context, x, num_cores=1, out_origin=True)
            z = unpack(y, num=2)
            return z
        hint_split_info = {
            'max_gain': -float('inf'),
            "feature_name": None,  # fake name for remote party
            'split_bin': None,  # fake bin for remote party
            "left_cat": None,  # fake cat for remote party
            "left_weight": None,
            "right_weight": None,
            'num_left_sample': None,
            'num_right_sample': None
        }
        if not is_remote and cat_names is None:
            cat_names = self.cat_names
        for feature_name, feature_hist in hist_dict.items():
            if len(feature_hist) <= 1:
                # no split for this feature
                continue
            if is_remote and self.paillier_context:
                if ('xfl_grad_hess', 'sum') in feature_hist.columns:
                    # Packed ciphertexts: decrypt+unpack, then rebuild a plain
                    # (grad sum, hess sum, count) frame.
                    feature_hist[('xfl_grad_hess', 'sum')] = feature_hist[('xfl_grad_hess', 'sum')].apply(f)
                    grad_hess_ndarray = np.array(feature_hist[('xfl_grad_hess', 'sum')].to_list()).astype(np.float32)
                    count = feature_hist[('xfl_grad_hess', 'count')].to_numpy()
                    feature_hist = pd.DataFrame(
                        np.concatenate([grad_hess_ndarray, count[:, np.newaxis]], axis=1),
                        index=feature_hist.index,
                        columns=pd.MultiIndex.from_tuples([('xfl_grad', 'sum'), ('xfl_hess', 'sum'), ('xfl_grad', 'count')])
                    )
                else:
                    feature_hist[[('xfl_grad', 'sum'), ('xfl_hess', 'sum')]] = \
                        feature_hist[[('xfl_grad', 'sum'), ('xfl_hess', 'sum')]].apply(lambda x: Paillier.decrypt(self.paillier_context, x, num_cores=1, out_origin=False))
            is_category = feature_name in cat_names
            if is_category:
                # Order categories by their grad/hess ratio (LightGBM-style
                # cat_smooth ranking) before taking the cumulative sums.
                cat_rank = cal_cat_rank(feature_hist[('xfl_grad', 'sum')],
                                        feature_hist[('xfl_hess', 'sum')],
                                        self.cat_smooth)
                cat_rank.sort_values(inplace=True)
                feature_hist = feature_hist.loc[cat_rank.index]
            feature_hist = feature_hist.cumsum(axis=0)
            feature_hist.rename(columns={"sum": "cum_sum", "count": "cum_count"}, inplace=True)
            cum_grad = feature_hist[('xfl_grad', 'cum_sum')].to_numpy()
            cum_hess = feature_hist[('xfl_hess', 'cum_sum')].to_numpy()
            gains = cal_gain(cum_grad, cum_hess, self.lambda_)
            max_gain_index = np.argmax(gains)
            feature_max_gain = gains[max_gain_index]
            if feature_max_gain > hint_split_info['max_gain']:
                count_hist = feature_hist[('xfl_grad', 'cum_count')]
                num_left_sample = count_hist.iloc[max_gain_index]
                num_right_sample = count_hist.iloc[-1] - count_hist.iloc[max_gain_index]
                if is_category:
                    # NOTE(review): self.out_left_cat is assigned here but never
                    # read in this class — looks like leftover state; verify.
                    self.out_left_cat = []
                    left_cat = feature_hist.index.to_list()[:max_gain_index + 1]
                    split_bin = None
                else:
                    left_cat = None
                    # convert to global index of split points of this feature, only for continuous feature
                    split_bin = int(feature_hist.index[max_gain_index])
                left_weight = cal_weight(cum_grad[max_gain_index],
                                         cum_hess[max_gain_index],
                                         self.lambda_)
                right_weight = cal_weight(cum_grad[-1] - cum_grad[max_gain_index],
                                          cum_hess[-1] - cum_hess[max_gain_index],
                                          self.lambda_)
                hint_split_info['max_gain'] = feature_max_gain
                hint_split_info['feature_name'] = feature_name
                hint_split_info['split_bin'] = split_bin
                hint_split_info['left_cat'] = left_cat
                hint_split_info['is_category'] = is_category
                hint_split_info['left_weight'] = left_weight
                hint_split_info['right_weight'] = right_weight
                hint_split_info['num_left_sample'] = num_left_sample
                hint_split_info['num_right_sample'] = num_right_sample
        return hint_split_info
    def make_indicator_for_prediction_on_tree(self, tree: Tree, local_party_id: str, dataset_type: str):
        """Build left/right boolean indicators for this party's split nodes in one tree.
        On the 'train' set raw split values are first mapped to bin codes via
        split_point_bin_map (train features are binned); val/test use raw values.
        NOTE(review): this logic is duplicated in the two sibling
        make_indicator_* methods below — consider a shared helper.
        """
        if dataset_type == "train":
            dataset = self.features
        elif dataset_type == "val":
            dataset = self.val_features
        elif dataset_type == 'test':
            dataset = self.test_features
        else:
            raise ValueError(f"Dataset type {dataset_type} is not valid, supported types are 'train' and 'val'.")
        # Dict[node_id, Dict[block_id, indicator]]
        indicator: Dict[str, Dict[int, np.ndarray]] = {}
        for node_id, node in tree.nodes.items():
            if not node.is_leaf and node.split_info.owner_id == local_party_id:
                indicator[node_id] = {}
                feature_name = node.split_info.feature_name
                if node.split_info.is_category:
                    if dataset_type == 'train':
                        left_cat = list(set([self.split_point_bin_map[feature_name][v] for v in node.split_info.left_cat]))
                    else:
                        left_cat = node.split_info.left_cat
                    for block_id, features in dataset.items():
                        data = features[feature_name].to_numpy()
                        indicator[node_id][block_id] = np.isin(data, left_cat)
                else:
                    if dataset_type == 'train':
                        split_point = self.split_point_bin_map[feature_name][node.split_info.split_point]
                        for block_id, features in dataset.items():
                            data = features[feature_name].to_numpy()
                            indicator[node_id][block_id] = (data <= split_point)
                    else:
                        split_point = node.split_info.split_point
                        for block_id, features in dataset.items():
                            data = features[feature_name].to_numpy()
                            indicator[node_id][block_id] = (data <= split_point)
        # Dict[node_id, Dict[block_id, indicator]] -> Dict[block_id, Dict[node_id, indicator]]
        out_indicator: Dict[int, Dict[str, np.ndarray]] = {}
        for node_id in indicator:
            for block_id, data in indicator[node_id].items():
                if block_id not in out_indicator:
                    out_indicator[block_id] = {}
                out_indicator[block_id][node_id] = data
        return out_indicator
    def make_indicator_for_prediction_on_boosting_tree(self, boosting_tree: BoostingTree, local_party_id: str, dataset_type: str):
        """Same as make_indicator_for_prediction_on_tree, but over every
        tree of the boosting model (node ids are unique across trees)."""
        if dataset_type == "train":
            dataset = self.features
        elif dataset_type == "val":
            dataset = self.val_features
        elif dataset_type == 'test':
            dataset = self.test_features
        else:
            raise ValueError(f"Dataset type {dataset_type} is not valid, supported types are 'train' and 'val'.")
        # Dict[node_id, Dict[block_id, indicator]]
        indicator: Dict[str, Dict[int, np.ndarray]] = {}
        for tree in boosting_tree.trees:
            for node_id, node in tree.nodes.items():
                if not node.is_leaf and node.split_info.owner_id == local_party_id:
                    indicator[node_id] = {}
                    feature_name = node.split_info.feature_name
                    if node.split_info.is_category:
                        if dataset_type == 'train':
                            left_cat = list(set([self.split_point_bin_map[feature_name][v] for v in node.split_info.left_cat]))
                        else:
                            left_cat = node.split_info.left_cat
                        for block_id, features in dataset.items():
                            data = features[feature_name].to_numpy()
                            indicator[node_id][block_id] = np.isin(data, left_cat)
                    else:
                        if dataset_type == 'train':
                            split_point = self.split_point_bin_map[feature_name][node.split_info.split_point]
                            for block_id, features in dataset.items():
                                data = features[feature_name].to_numpy()
                                indicator[node_id][block_id] = (data <= split_point)
                        else:
                            split_point = node.split_info.split_point
                            for block_id, features in dataset.items():
                                data = features[feature_name].to_numpy()
                                indicator[node_id][block_id] = (data <= split_point)
        # Dict[node_id, Dict[block_id, indicator]] -> Dict[block_id, Dict[node_id, indicator]]
        out_indicator: Dict[int, Dict[str, np.ndarray]] = {}
        for node_id in indicator:
            for block_id, data in indicator[node_id].items():
                if block_id not in out_indicator:
                    out_indicator[block_id] = {}
                out_indicator[block_id][node_id] = data
        return out_indicator
    def make_indicator_for_prediction_on_nodes(self, nodes: Dict[str, Node], dataset_type: str):
        """Same indicator construction for an explicit set of split nodes
        (no ownership filtering — every given node is evaluated locally)."""
        if dataset_type == "train":
            dataset = self.features
        elif dataset_type == "val":
            dataset = self.val_features
        elif dataset_type == 'test':
            dataset = self.test_features
        else:
            raise ValueError(f"Dataset type {dataset_type} is not valid, supported types are 'train' and 'val'.")
        # Dict[node_id, Dict[block_id, indicator]]
        indicator: Dict[str, Dict[int, np.ndarray]] = {}
        for node_id, node in nodes.items():
            indicator[node_id] = {}
            feature_name = node.split_info.feature_name
            if node.split_info.is_category:
                if dataset_type == 'train':
                    left_cat = list(set([self.split_point_bin_map[feature_name][v] for v in node.split_info.left_cat]))
                else:
                    left_cat = node.split_info.left_cat
                for block_id, features in dataset.items():
                    data = features[feature_name].to_numpy()
                    indicator[node_id][block_id] = np.isin(data, left_cat)
            else:
                if dataset_type == 'train':
                    split_point = self.split_point_bin_map[feature_name][node.split_info.split_point]
                    for block_id, features in dataset.items():
                        data = features[feature_name].to_numpy()
                        indicator[node_id][block_id] = (data <= split_point)
                else:
                    split_point = node.split_info.split_point
                    for block_id, features in dataset.items():
                        data = features[feature_name].to_numpy()
                        indicator[node_id][block_id] = (data <= split_point)
        # Dict[node_id, Dict[block_id, indicator]] -> Dict[block_id, Dict[node_id, indicator]]
        out_indicator: Dict[int, Dict[str, np.ndarray]] = {}
        for node_id in indicator:
            for block_id, data in indicator[node_id].items():
                if block_id not in out_indicator:
                    out_indicator[block_id] = {}
                out_indicator[block_id][node_id] = data
        return out_indicator
    def _gen_prediction(self, tree: Tree, indicator: Dict[str, np.ndarray]):
        """Route samples through one tree breadth-first using per-node
        left/right indicators; leaves write their weight into the result."""
        num_samples = list(indicator.values())[0].shape[0]
        prediction = np.zeros((num_samples,), dtype=np.float32)
        depth = 0
        sample_in_node = {}
        while True:
            node_list = tree.search_nodes(depth)
            if not node_list:
                break
            for node in node_list:
                if node.is_leaf:
                    prediction[sample_in_node[node.id]] = node.weight
                else:
                    # indicator == 1 sends a sample to the left child.
                    if depth == 0:
                        sample_in_node[node.left_node_id] = np.where(indicator[node.id] == 1)[0]
                        sample_in_node[node.right_node_id] = np.where(indicator[node.id] == 0)[0]
                    else:
                        sample_in_node[node.left_node_id] = np.intersect1d(
                            sample_in_node[node.id], np.where(indicator[node.id] == 1)[0])
                        sample_in_node[node.right_node_id] = np.intersect1d(
                            sample_in_node[node.id], np.where(indicator[node.id] == 0)[0])
            depth += 1
        return prediction
    def predict_on_tree(self, tree: Tree, indicator: Dict[int, Dict[str, np.ndarray]]):
        """Per-block raw predictions of a single tree (no learning rate applied)."""
        prediction: Dict[int, np.ndarray] = {}
        for block_id, indicator_dict in indicator.items():
            prediction[block_id] = self._gen_prediction(tree, indicator_dict)
        return prediction
    def predict_on_boosting_tree(self, boosting_tree: BoostingTree, indicator: Dict[int, Dict[str, np.ndarray]]):
        """Per-block predictions of the whole model: learning-rate-weighted
        sum of every tree's prediction."""
        prediction: Dict[int, np.ndarray] = {}
        for tree_idx, tree in enumerate(boosting_tree.trees):
            for block_id, indicator_dict in indicator.items():
                p = self._gen_prediction(tree, indicator_dict)
                if block_id not in prediction:
                    prediction[block_id] = p * boosting_tree.lr[tree_idx]
                else:
                    prediction[block_id] += p * boosting_tree.lr[tree_idx]
        return prediction
@ray.remote(num_cpus=1)
class XgbActor(XgbDataFrameActor, XgbTrainActor):
    """Ray actor combining dataframe storage with xgboost training behaviour."""

    def __init__(self):
        super().__init__()

    def recv_data(self,
                  data,
                  file_type: str,
                  is_centralized: bool,
                  dataset_type: str,
                  has_label: bool,
                  missing_values: List[float]):
        """Ingest a centralized csv batch into the train/val/test stores.

        Only centralized csv input is implemented; any other combination
        raises NotImplementedError, as before.
        """
        if not is_centralized:
            raise NotImplementedError
        if file_type != 'csv':
            raise NotImplementedError
        parsed_features, parsed_label = RayCentralCsvActor.recv_data(
            data, has_label, missing_values)
        if dataset_type == 'train':
            self.features.update(parsed_features)
            self.label.update(parsed_label)
        elif dataset_type == 'val':
            self.val_features.update(parsed_features)
            self.val_label.update(parsed_label)
        else:
            # Labels of the test split are intentionally not stored
            # (the original kept this update commented out).
            self.test_features.update(parsed_features)
        return
| 34,789 | 43.375 | 171 | py |
XFL | XFL-master/python/algorithm/core/tree/xgboost_loss.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import numpy as np
import torch
from common.utils.constants import BCEWithLogitsLoss, MSELoss
from ..activation import sigmoid
def get_xgb_loss_inst(name: str, params: Optional[dict] = None):
    """Instantiate the xgboost loss matching ``name`` (case-insensitive).

    Args:
        name: loss identifier, e.g. "BCEWithLogitsLoss" or "MSELoss".
        params: optional loss-specific parameters forwarded to the loss.

    Returns:
        A concrete ``XGBLoss`` subclass instance.

    Raises:
        NotImplementedError: if ``name`` matches no known loss.
    """
    name = name.lower()
    if name == BCEWithLogitsLoss.lower():
        return XGBBCEWithLogitsLoss(params)
    elif name == MSELoss.lower():
        # Bug fix: this previously returned the abstract base ``XGBLoss``,
        # whose cal_grad/cal_hess/cal_loss all raise NotImplementedError.
        return XGBMSELoss(params)
    else:
        raise NotImplementedError(f"Loss {name} not implemented.")
class XGBLoss(object):
    """Abstract base class for xgboost losses.

    ``params`` carries optional loss-specific configuration; subclasses
    provide the gradient, hessian and loss computations.
    """

    def __init__(self, params: Optional[dict] = None):
        self.name = 'Loss'
        self.params = params

    def cal_grad(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = True):
        """Return the first-order derivative of the loss w.r.t. the prediction."""
        raise NotImplementedError("Method cal_grad not implemented.")

    def cal_hess(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = True):
        """Return the second-order derivative of the loss w.r.t. the prediction."""
        raise NotImplementedError("Method cal_hess not implemented.")

    def cal_loss(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = False):
        """Return the scalar loss value."""
        raise NotImplementedError("Method cal_loss not implemented.")
class XGBBCEWithLogitsLoss(XGBLoss):
    """Binary cross-entropy (with logits) loss for xgboost training.

    ``after_prediction`` tells whether ``y_pred`` has already been passed
    through the sigmoid.
    """

    def __init__(self, params: Optional[dict] = None):
        super().__init__(params)
        self.name = BCEWithLogitsLoss

    def cal_grad(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = True):
        """First derivative w.r.t. the raw score: p - y."""
        p = y_pred if after_prediction else sigmoid(y_pred)
        return p - y

    def cal_hess(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = True):
        """Second derivative w.r.t. the raw score: p * (1 - p)."""
        p = y_pred if after_prediction else sigmoid(y_pred)
        return p * (1 - p)

    def predict(self, raw_value: np.ndarray):
        """Map raw scores to probabilities."""
        return sigmoid(raw_value)

    def cal_loss(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = False):
        """Cross-entropy via torch; picks the logits/probability variant."""
        loss_cls = torch.nn.BCELoss if after_prediction else torch.nn.BCEWithLogitsLoss
        return loss_cls()(torch.tensor(y_pred), torch.tensor(y)).item()
class XGBMSELoss(XGBLoss):
    """Mean squared error loss for xgboost training."""

    def __init__(self, params: Optional[dict] = None):
        super().__init__(params)
        self.name = MSELoss

    def cal_grad(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = True):
        """First-order derivative of (y - y_pred)^2 w.r.t. y_pred.

        Fix: ``after_prediction`` was missing here although the base class
        declares it; it is accepted for signature consistency. Prediction is
        the identity for regression, so the flag has no effect.
        """
        return -2 * (y - y_pred)

    def cal_hess(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = True):
        """Second-order derivative: the constant 2.

        NOTE(review): a scalar is returned rather than a per-sample array;
        callers presumably broadcast it — kept as-is for compatibility.
        """
        return 2

    def predict(self, raw_value: np.ndarray):
        """Identity transform: raw scores are the regression output."""
        return raw_value

    def cal_loss(self, y: np.ndarray, y_pred: np.ndarray, after_prediction: bool = False):
        """Mean squared error computed via torch."""
        loss_func = torch.nn.MSELoss()
        return loss_func(torch.tensor(y_pred), torch.tensor(y)).item()
| 3,740 | 33.962617 | 90 | py |
XFL | XFL-master/python/algorithm/core/loss/torch_loss.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import torch.nn as torch_nn
from torch.nn import Module
import torch
from common.xregister import xregister
def get_lossfunc(name: str):
    """Resolve a loss-function class by name.

    Lookup order: ``torch.nn``, then this module, then the user registry.

    Raises:
        ValueError: when no source defines ``name``.
    """
    if name in dir(torch_nn):
        return getattr(torch_nn, name)
    this_module = sys.modules[__name__]
    if name in dir(this_module):
        return getattr(this_module, name)
    if name in xregister.registered_object:
        return xregister(name)
    raise ValueError(f"Loss function {name} is not supported in torch.")
class MapeLoss(Module):
    """Mean absolute percentage error; samples with a zero label are ignored
    so the relative error is always well-defined."""

    def __init__(self):
        super().__init__()

    def forward(self, preds: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
        nonzero = labels != 0
        relative_error = torch.abs(preds - labels) / torch.abs(labels)
        return torch.mean(relative_error[nonzero])
| 1,428 | 32.232558 | 81 | py |
XFL | XFL-master/python/algorithm/core/loss/jax_loss.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import optax
from common.xregister import xregister
def get_lossfunc(name: str):
    """Resolve a loss function by name.

    Search order: ``optax``, then this module, then the user registry.

    Raises:
        ValueError: when no source defines ``name``.
    """
    for namespace in (optax, sys.modules[__name__]):
        if name in dir(namespace):
            return getattr(namespace, name)
    if name in xregister.registered_object:
        return xregister(name)
    raise ValueError(f"Loss function {name} is not supported in jax.")
| 1,062 | 33.290323 | 74 | py |
XFL | XFL-master/python/algorithm/core/loss/tf_loss.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow.keras.losses as tf_loss
from common.xregister import xregister
def get_lossfunc(name: str):
    """Resolve a loss function by name.

    Search order: ``tensorflow.keras.losses``, then this module, then the
    user registry.

    Raises:
        ValueError: when ``name`` is found nowhere.
    """
    if name in dir(tf_loss):
        return getattr(tf_loss, name)
    here = sys.modules[__name__]
    if name in dir(here):
        return getattr(here, name)
    if name in xregister.registered_object:
        return xregister(name)
    raise ValueError(f"Loss function {name} is not supported in tensorflow.")
| 1,098 | 34.451613 | 81 | py |
XFL | XFL-master/python/algorithm/core/optimizer/paddle_optimizer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle.optimizer as pd_optim
from common.xregister import xregister
def get_optimizer(name: str):
    """Resolve an optimizer class by name.

    Lookup order: ``paddle.optimizer``, then this module, then the user
    registry.

    Raises:
        ValueError: if no source provides ``name``.
    """
    if name in dir(pd_optim):
        return getattr(pd_optim, name)
    if name in dir(sys.modules[__name__]):
        return getattr(sys.modules[__name__], name)
    if name in xregister.registered_object:
        return xregister(name)
    # Bug fix: the message previously said "torch" (copy-paste from the torch
    # variant) although this module resolves paddle optimizers.
    raise ValueError(f"Optimizer {name} is not supported in paddle.")
| 1,087 | 33 | 74 | py |
XFL | XFL-master/python/algorithm/core/optimizer/torch_optimizer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import torch.optim as torch_optim
from common.xregister import xregister
def get_optimizer(name: str):
    """Resolve an optimizer class by name.

    Lookup order: ``torch.optim``, then this module, then the user registry.

    Raises:
        ValueError: if no source provides ``name``.
    """
    for source in (torch_optim, sys.modules[__name__]):
        if name in dir(source):
            return getattr(source, name)
    if name in xregister.registered_object:
        return xregister(name)
    raise ValueError(f"Optimizer {name} is not supported in torch.")
| 1,091 | 33.125 | 74 | py |
XFL | XFL-master/python/algorithm/core/optimizer/jax_optimizer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import optax
from common.xregister import xregister
def get_optimizer(name: str):
    """Resolve an optax optimizer (or locally defined / registered one) by name.

    Raises:
        ValueError: if ``name`` is unknown to every source.
    """
    if name in dir(optax):
        return getattr(optax, name)
    this_module = sys.modules[__name__]
    if name in dir(this_module):
        return getattr(this_module, name)
    if name in xregister.registered_object:
        return xregister(name)
    raise ValueError(f"Optimizer {name} is not supported in jax.")
| 1,056 | 32.03125 | 74 | py |
XFL | XFL-master/python/algorithm/core/optimizer/tf_optimizer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tensorflow.keras.optimizers as tf_optim
from common.xregister import xregister
def get_optimizer(name: str):
    """Resolve an optimizer class by name.

    Lookup order: ``tensorflow.keras.optimizers``, then this module, then the
    user registry.

    Raises:
        ValueError: if no source provides ``name``.
    """
    for source in (tf_optim, sys.modules[__name__]):
        if name in dir(source):
            return getattr(source, name)
    if name in xregister.registered_object:
        return xregister(name)
    raise ValueError(f"Optimizer {name} is not supported in tensorflow.")
| 1,104 | 32.484848 | 77 | py |
XFL | XFL-master/python/algorithm/model/bert_torch.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import BertConfig, BertForSequenceClassification
import torch.nn as nn
class BertForSst2Torch(nn.Module):
    """HuggingFace BERT sequence classifier for SST-2.

    ``forward`` returns (loss, logits, softmax probabilities).
    """

    def __init__(self, from_pretrained=True, num_labels=None, **kwargs):
        super().__init__()
        if from_pretrained:
            config = BertConfig.from_pretrained("bert-base-uncased", num_labels=num_labels)
            self.bert = BertForSequenceClassification.from_pretrained("bert-base-uncased", config=config)
        else:
            # Randomly initialised weights; extra kwargs go to the config.
            config = BertConfig(num_labels=num_labels, **kwargs)
            self.bert = BertForSequenceClassification(config=config)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, input_ids, attention_mask, token_type_ids, labels):
        outputs = self.bert(input_ids=input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids,
                            labels=labels)
        loss, logits = outputs[0], outputs[1]
        return loss, logits, self.softmax(logits)
XFL | XFL-master/python/algorithm/model/bert.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import BertConfig, BertForSequenceClassification
import torch.nn as nn
class BertForSst2Torch(nn.Module):
    """Wrapper around HuggingFace BERT for SST-2 sentiment classification.

    ``forward`` yields (loss, logits, softmax probabilities).
    """

    def __init__(self, from_pretrained=True, num_labels=None, **kwargs):
        super().__init__()
        if from_pretrained:
            cfg = BertConfig.from_pretrained("bert-base-uncased", num_labels=num_labels)
            model = BertForSequenceClassification.from_pretrained("bert-base-uncased", config=cfg)
        else:
            # Fresh weights; any extra kwargs are forwarded to the config.
            cfg = BertConfig(num_labels=num_labels, **kwargs)
            model = BertForSequenceClassification(config=cfg)
        self.bert = model
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, input_ids, attention_mask, token_type_ids, labels):
        loss, logits = self.bert(input_ids=input_ids,
                                 attention_mask=attention_mask,
                                 token_type_ids=token_type_ids,
                                 labels=labels)[:2]
        prob = self.softmax(logits)
        return loss, logits, prob
XFL | XFL-master/python/algorithm/model/horizontal_k_means.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
class HorizontalKMeans(nn.Module):
    """K-means model for horizontal FL: holds a trainable centroid matrix.

    The forward pass is the identity; cluster assignment happens outside
    this module.
    """

    def __init__(self, input_dim, num_clusters) -> None:
        super().__init__()
        initial_centroids = torch.rand(num_clusters, input_dim)
        self.centroids = nn.Parameter(initial_centroids)

    def forward(self, x):
        return x
| 922 | 30.827586 | 74 | py |
XFL | XFL-master/python/algorithm/model/resnet.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This model contains a PyTorch implementation of the paper "Deep Residual Learning for Image Recognition."[1]
# [1]He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 770-778).
from collections import OrderedDict
import torch.nn as nn
class ConvBlock(nn.Module):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1) as in ResNet."""

    expansion = 4  # channel multiplier of the last 1x1 conv

    def __init__(self, in_channels, out_channels, downsample=None, stride=1):
        super().__init__()
        # NOTE: the OrderedDict keys fix the state_dict parameter names and
        # therefore must not be renamed.
        stages = OrderedDict([
            ("conv1", nn.Conv2d(in_channels, out_channels,
                                kernel_size=1, stride=1, padding=0)),
            ("batch_norm1", nn.BatchNorm2d(out_channels, track_running_stats=True)),
            ("relu1", nn.ReLU()),
            ("conv2", nn.Conv2d(out_channels, out_channels,
                                kernel_size=3, stride=stride, padding=1)),
            ("batch_norm2", nn.BatchNorm2d(out_channels, track_running_stats=True)),
            ("relu2", nn.ReLU()),
            ("conv3", nn.Conv2d(out_channels, out_channels * self.expansion,
                                kernel_size=1, stride=1, padding=0)),
            ("batch_norm3", nn.BatchNorm2d(out_channels * self.expansion, track_running_stats=True)),
        ])
        self.stem = nn.Sequential(stages)
        self.downsample = downsample
        self.stride = stride
        self.relu = nn.ReLU()

    def forward(self, x):
        # Projection shortcut when shape/channel count changes, else identity.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.stem(x)
        out += shortcut
        return self.relu(out)
class Resnet(nn.Module):
    """Bottleneck ResNet backbone: stem conv -> 4 residual stages -> pool -> fc.

    Constructor args:
        ResBlock: residual block class; its ``expansion`` attribute (4 for
            ConvBlock) scales the stage output channels.
        block_list: number of residual blocks in each of the 4 stages.
        num_classes: output dimension of the final linear layer.
    """
    def __init__(self, ResBlock, block_list, num_classes):
        super().__init__()
        # 3x3 stride-1 stem (the paper uses 7x7 stride 2; presumably adapted
        # for small images — TODO confirm intended input resolution).
        self.stem = nn.Sequential(OrderedDict([
            ("conv1", nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)),
            ("batch_norm1", nn.BatchNorm2d(64, track_running_stats=True)),
            ("relu", nn.ReLU())
        ]))
        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Each stage's inplanes equals the previous stage's outplanes * expansion.
        self.layers1 = self._make_layers(
            ResBlock, block_list[0], inplanes=64, outplanes=64, stride=1)
        self.layers2 = self._make_layers(
            ResBlock, block_list[1], inplanes=256, outplanes=128, stride=2)
        self.layers3 = self._make_layers(
            ResBlock, block_list[2], inplanes=512, outplanes=256, stride=2)
        self.layers4 = self._make_layers(
            ResBlock, block_list[3], inplanes=1024, outplanes=512, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512*ResBlock.expansion, num_classes)
    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        x = self.stem(x)
        x = self.max_pool(x)
        x = self.layers1(x)
        x = self.layers2(x)
        x = self.layers3(x)
        x = self.layers4(x)
        x = self.avgpool(x)
        x = x.reshape(x.shape[0], -1)
        x = self.fc(x)
        return x
    def _make_layers(self, ResBlock, blocks, inplanes, outplanes, stride=1):
        """Build one stage: the first block may downsample, the rest keep shape."""
        layers =[]
        downsample = None
        # A projection shortcut is required when the stride or channel count changes.
        if stride != 1 or inplanes != outplanes*ResBlock.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, outplanes*ResBlock.expansion,
                          kernel_size=1, stride=stride),
                nn.BatchNorm2d(outplanes*ResBlock.expansion, track_running_stats=True)
            )
        layers.append(ResBlock(inplanes, outplanes,
                               downsample=downsample, stride=stride))
        for i in range(1, blocks):
            layers.append(ResBlock(outplanes*ResBlock.expansion, outplanes))
        return nn.Sequential(*layers)
def ResNet(num_classes, layers):
    """Build a bottleneck ResNet.

    Args:
        num_classes: size of the final classification layer.
        layers: depth selector (18, 50, 101, 152) or 'unit_test' for a tiny net.

    Raises:
        NotImplementedError: for unsupported depths.
    """
    block_lists = {
        18: [2, 2, 2, 2],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        'unit_test': [2, 2, 2, 2],
    }
    if layers not in block_lists:
        # Bug fix: the old message omitted ResNet18, which is supported above.
        raise NotImplementedError(
            "Only support ResNet18, ResNet50, ResNet101, ResNet152 currently, please change layers")
    # NOTE(review): depth 18 reuses the bottleneck ConvBlock, so this is not
    # the canonical basic-block ResNet-18 — kept as-is for compatibility.
    return Resnet(ConvBlock, block_lists[layers], num_classes)
# if __name__ == "__main__":
# import torch
# from thop import profile, clever_format
# input = torch.randn(1, 3, 224, 224)
# model = ResNet(10,50)
# macs, params = profile(model, inputs=(input, ))
# macs, params = clever_format([macs, params], "%.3f")
# print(macs, params) | 5,173 | 37.902256 | 192 | py |
XFL | XFL-master/python/algorithm/model/vgg_jax.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This model contains a PyTorch implementation of the paper "Very Deep Convolutional Networks for Large-Scale Image Recognition."[1]
# [1]Simonyan, K., & Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.
import jax.numpy as jnp
import flax.linen as nn
# VGG stage layouts: integer entries are conv output channel counts, 'max'
# inserts a 2x2 max-pooling step. 'unit_test' is a tiny layout for fast tests.
layers_cfg = {
    'VGG11': [64, 'max', 128, 'max', 256, 256, 'max', 512, 512, 'max', 512, 512, 'max'],
    'VGG13': [64, 64, 'max', 128, 128, 'max', 256, 256, 'max', 512, 512, 'max', 512, 512, 'max'],
    'VGG16': [64, 64, 'max', 128, 128, 'max', 256, 256, 256, 'max', 512, 512, 512, 'max', 512, 512, 512, 'max'],
    'VGG19': [64, 64, 'max', 128, 128, 'max', 256, 256, 256, 256, 'max', 512, 512, 512, 512, 'max', 512, 512, 512, 512, 'max'],
    'unit_test': [64, 'max', 128, 'max', 256, 'max', 512, 'max']
}
class VggJax(nn.Module):
    """Flax (linen) VGG classifier assembled from ``layers_cfg``.

    Attributes:
        vgg_name: key into ``layers_cfg`` selecting the stage layout.
        num_classes: output width of the final dense layer.
    """
    vgg_name: str
    num_classes: int
    @nn.compact
    def __call__(self, x, train=True):
        """Return logits for input ``x``; ``train`` switches batch-norm mode."""
        def adaptive_avg_pool(x):
            # Global average pool: the window spans the full spatial extent
            # (dims 1 and 2, i.e. NHWC layout as used by flax pooling ops).
            return nn.avg_pool(x, window_shape=(x.shape[1], x.shape[2]), strides=(1,1))
        def seq_max_pool(x):
            return nn.max_pool(x, window_shape=(2, 2), strides=(2, 2), padding='VALID')
        layers = []
        for outplanes in layers_cfg[self.vgg_name]:
            if outplanes == 'max':
                layers.append(seq_max_pool)
            else:
                # conv -> batch-norm -> relu triplet for every integer entry
                layers.extend([
                    nn.Conv(features=outplanes, kernel_size=(3, 3), padding=(1, 1)),
                    nn.BatchNorm(use_running_average=not train, momentum=0.9, epsilon=1e-5, dtype=jnp.float32),
                    nn.relu
                ])
        layers.append(adaptive_avg_pool)
        model = nn.Sequential(layers)
        fc = nn.Dense(self.num_classes)
        x = model(x)
        x = x.reshape((x.shape[0], -1))
        x = fc(x)
        return x
def vggjax(num_classes, layers):
    """Build a flax VGG variant.

    Args:
        num_classes: size of the classifier output.
        layers: depth selector (11, 13, 16, 19) or 'unit_test'.

    Raises:
        NotImplementedError: for unsupported depths.
    """
    depth_to_name = {
        11: "VGG11",
        13: "VGG13",
        16: "VGG16",
        19: "VGG19",
        "unit_test": "unit_test",
    }
    if layers not in depth_to_name:
        raise NotImplementedError("Only support VGG11, VGG13, VGG16, VGG19 currently, please change layers")
    return VggJax(depth_to_name[layers], num_classes)
XFL | XFL-master/python/algorithm/model/vgg.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This model contains a PyTorch implementation of the paper "Very Deep Convolutional Networks for Large-Scale Image Recognition."[1]
# [1]Simonyan, K., & Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.
import torch
import torch.nn as nn
# VGG stage layouts: integer entries are conv output channel counts, 'max'
# inserts a 2x2 max-pooling step. 'unit_test' is a tiny layout for fast tests.
layers_cfg = {
    'VGG11': [64, 'max', 128, 'max', 256, 256, 'max', 512, 512, 'max', 512, 512, 'max'],
    'VGG13': [64, 64, 'max', 128, 128, 'max', 256, 256, 'max', 512, 512, 'max', 512, 512, 'max'],
    'VGG16': [64, 64, 'max', 128, 128, 'max', 256, 256, 256, 'max', 512, 512, 512, 'max', 512, 512, 512, 'max'],
    'VGG19': [64, 64, 'max', 128, 128, 'max', 256, 256, 256, 256, 'max', 512, 512, 512, 512, 'max', 512, 512, 512, 512, 'max'],
    'unit_test': [64, 'max', 128, 'max', 256, 'max', 512, 'max']
}
class Vgg(nn.Module):
    """VGG backbone built from ``layers_cfg`` plus a single linear classifier."""

    def __init__(self, vgg_name, num_classes):
        super().__init__()
        self.stem = self._make_layers(layers_cfg[vgg_name])
        self.fc = nn.Linear(512, num_classes)

    def forward(self, x):
        features = self.stem(x)
        flat = features.view(features.size(0), -1)
        return self.fc(flat)

    def _make_layers(self, cfg):
        """Translate a config list into a Sequential of conv/pool layers."""
        modules = []
        in_planes = 3
        for spec in cfg:
            if spec == 'max':
                modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
                continue
            # conv -> batch-norm -> relu triplet for every integer entry
            modules.append(nn.Conv2d(in_planes, spec, kernel_size=3, padding=1))
            modules.append(nn.BatchNorm2d(spec))
            modules.append(nn.ReLU(inplace=True))
            in_planes = spec
        # Global average pool so the classifier input is always 512 wide.
        modules.append(nn.AdaptiveAvgPool2d((1, 1)))
        return nn.Sequential(*modules)
def VGG(num_classes, layers):
    """Build a VGG variant.

    Args:
        num_classes: size of the classifier output.
        layers: depth selector (11, 13, 16, 19) or 'unit_test'.

    Raises:
        NotImplementedError: for unsupported depths.
    """
    depth_to_name = {
        11: "VGG11",
        13: "VGG13",
        16: "VGG16",
        19: "VGG19",
        "unit_test": "unit_test",
    }
    if layers not in depth_to_name:
        # Bug fix: the message previously contained the typo "VGG!9".
        raise NotImplementedError("Only support VGG11, VGG13, VGG16, VGG19 currently, please change layers")
    return Vgg(depth_to_name[layers], num_classes)
XFL | XFL-master/python/algorithm/model/densenet.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This model contains a PyTorch implementation of the paper "Densely Connected Convolutional Networks."[1]
# [1]Huang, G., Liu, Z., Weinberger, K. Q., & van der Maaten, L. (2016). Densely connected convolutional networks. arXiv preprint arXiv:1608.06993.
from collections import OrderedDict
import math
import torch.nn as nn
import torch
import torch.nn.functional as F
class BottleNeckBlock(nn.Module):
    """DenseNet bottleneck layer: BN-ReLU-1x1 conv then BN-ReLU-3x3 conv.

    The layer's ``growth_rate`` output channels are concatenated onto its
    input (dense connectivity).
    """

    expansion = 4  # width multiplier of the intermediate 1x1 conv

    def __init__(self, in_planes, growth_rate, drop_out=0.0):
        super().__init__()
        # NOTE: the OrderedDict keys fix the state_dict parameter names and
        # therefore must not be renamed.
        self.conv_block1 = nn.Sequential(OrderedDict([
            ("batch_norm1", nn.BatchNorm2d(in_planes, track_running_stats=True)),
            ("relu1", nn.ReLU()),
            ("conv1", nn.Conv2d(in_planes, self.expansion*growth_rate, kernel_size=1, stride=1, bias=False))
        ]))
        self.conv_block2 = nn.Sequential(OrderedDict([
            ("batch_norm2", nn.BatchNorm2d(self.expansion*growth_rate, track_running_stats=True)),
            ("relu2", nn.ReLU()),
            ("conv2", nn.Conv2d(self.expansion*growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)),
        ]))
        self.drop_out = drop_out

    def forward(self, x):
        branch = self.conv_block1(x)
        if self.drop_out:
            branch = F.dropout(branch, p=self.drop_out, training=self.training)
        branch = self.conv_block2(branch)
        if self.drop_out:
            branch = F.dropout(branch, p=self.drop_out, training=self.training)
        # Dense connection: stack the new features onto the input channels.
        return torch.cat([x, branch], 1)
class TransitionBlock(nn.Module):
    """Compression layer between dense blocks: BN-ReLU-1x1 conv, 2x2 avg-pool."""

    def __init__(self, in_planes, out_planes, drop_out=0.0):
        super().__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU()
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.drop_out = drop_out

    def forward(self, x):
        out = self.bn(x)
        out = self.relu(out)
        out = self.conv(out)
        if self.drop_out:
            out = F.dropout(out, p=self.drop_out, training=self.training)
        # Halve the spatial resolution.
        return F.avg_pool2d(out, 2)
class Densenet(nn.Module):
    """DenseNet (Huang et al., 2016): four dense blocks joined by transitions.

    Constructor args:
        block: dense-layer class, e.g. ``BottleNeckBlock``.
        block_list: number of dense layers per block (length 4).
        num_classes: classifier output size.
        growth_rate: channels each dense layer appends to its input.
        reduction: channel keep-ratio applied by every transition layer.
        drop_out: dropout probability forwarded to blocks and transitions (0 = off).
    """
    def __init__(self, block, block_list, num_classes, growth_rate=12, reduction=0.5, drop_out=0.0):
        super().__init__()
        self.growth_rate = growth_rate
        self.drop_out = drop_out
        in_planes = 2 * growth_rate
        self.conv = nn.Conv2d(3, in_planes, kernel_size=3, padding=1, bias=False)
        # Each dense block grows the channel count by blocks*growth_rate;
        # each transition then compresses it by ``reduction``.
        self.dense_layer1 = self._make_layers(block, block_list[0], in_planes)
        in_planes += block_list[0]*growth_rate
        self.transition1 = TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), drop_out=drop_out)
        in_planes = int(math.floor(in_planes*reduction))
        self.dense_layer2 = self._make_layers(block, block_list[1], in_planes)
        in_planes += block_list[1]*growth_rate
        self.transition2 = TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), drop_out=drop_out)
        in_planes = int(math.floor(in_planes*reduction))
        self.dense_layer3 = self._make_layers(block, block_list[2], in_planes)
        in_planes += block_list[2]*growth_rate
        self.transition3 = TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), drop_out=drop_out)
        in_planes = int(math.floor(in_planes*reduction))
        self.dense_layer4 = self._make_layers(block, block_list[3], in_planes)
        in_planes += block_list[3]*growth_rate
        # Final BN-ReLU before pooling (no transition after the last block).
        self.batchnorm = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU()
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(in_planes, num_classes)
    def forward(self, x):
        """Return class logits for a batch of 3-channel images."""
        x = self.conv(x)
        x = self.transition1(self.dense_layer1(x))
        x = self.transition2(self.dense_layer2(x))
        x = self.transition3(self.dense_layer3(x))
        x = self.dense_layer4(x)
        x = self.relu(self.batchnorm(x))
        x = self.avgpool(x)
        x = x.view(x.shape[0], -1)
        x = self.fc(x)
        return x
    def _make_layers(self, block, blocks, in_planes):
        """Stack ``blocks`` dense layers; input channels grow by growth_rate each."""
        layers = []
        for i in range(blocks):
            layers.append(block(in_planes, self.growth_rate, drop_out=self.drop_out))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)
def DenseNet(num_classes, layers):
    """Factory for the standard DenseNet depths (plus a tiny unit-test config).

    Args:
        num_classes: size of the classifier output.
        layers: one of 121, 169, 201, 264 or the string 'unit_test'.

    Raises:
        NotImplementedError: for any unsupported ``layers`` value.
    """
    # (block_list, growth_rate) per supported depth.
    configs = {
        121: ([6, 12, 24, 16], 32),
        169: ([6, 12, 32, 32], 32),
        201: ([6, 12, 48, 32], 32),
        264: ([6, 12, 64, 48], 32),
        'unit_test': ([2, 2, 2, 2], 8),
    }
    selected = configs.get(layers)
    if selected is None:
        raise NotImplementedError("Only support DenseNet121, DenseNet169, DenseNet201, DenseNet264 currently, please change layers")
    block_list, growth_rate = selected
    return Densenet(BottleNeckBlock, block_list, num_classes, growth_rate=growth_rate)
| 5,629 | 41.651515 | 147 | py |
XFL | XFL-master/python/algorithm/model/linear_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
class LinearRegression(nn.Module):
    """Single-output linear model: ``y = Wx`` (plus ``b`` when ``bias`` is True)."""

    def __init__(self, input_dim: int, bias: bool = False):
        super().__init__()
        # One fully connected layer mapping input_dim features to a scalar.
        self.linear = nn.Linear(input_dim, 1, bias=bias)

    def forward(self, x):
        """Return the linear projection of ``x`` with shape (N, 1)."""
        out = self.linear(x)
        return out
| 907 | 32.62963 | 74 | py |
XFL | XFL-master/python/algorithm/model/poisson_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
class PoissonRegression(nn.Module):
    """Poisson regression: predicted rate is ``exp(Wx (+ b))``, always positive."""

    def __init__(self, input_dim: int, bias: bool = False):
        super().__init__()
        # Linear predictor for the log-rate.
        self.linear = nn.Linear(input_dim, 1, bias=bias)

    def forward(self, x):
        """Return ``exp`` of the linear predictor, shape (N, 1)."""
        log_rate = self.linear(x)
        return torch.exp(log_rate)
| 917 | 33 | 74 | py |
XFL | XFL-master/python/algorithm/model/logistic_regression.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
class LogisticRegression(nn.Module):
    """Binary logistic regression: ``sigmoid(Wx (+ b))`` in (0, 1)."""

    def __init__(self, input_dim: int, bias: bool = False):
        super().__init__()
        self.linear = nn.Linear(input_dim, 1, bias=bias)
        # Kept as a module attribute (no parameters) to preserve the interface.
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return class-1 probabilities with shape (N, 1)."""
        return self.sigmoid(self.linear(x))
| 954 | 31.931034 | 74 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/poisson_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from algorithm.core.horizontal.template.agg_type import \
register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
class HorizontalPoissonRegressionLabelTrainer(Common):
    """Label-trainer side of horizontal federated Poisson regression."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # The aggregation "method" dict holds a single entry; its key names
        # the aggregation type.
        agg_method = next(iter(self.common_config.aggregation["method"]))
        self.register_hook(
            place="after_train_loop",
            rank=1,
            func=partial(self.val_loop, "train"),
            desc="validation on trainset",
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_method)

    def train_loop(self):
        """Run one local epoch and record the mean batch loss in the context."""
        self.model.train()
        criterion = next(iter(self.lossfunc.values()))
        optimizer = next(iter(self.optimizer.values()))
        scheduler = next(iter(self.lr_scheduler.values())) if self.lr_scheduler.values() else None
        running_loss = 0.0
        for feature, label in self.train_dataloader:
            optimizer.zero_grad()
            batch_loss = criterion(self.model(feature), label)
            batch_loss.backward()
            optimizer.step()
            running_loss += batch_loss.item()
        running_loss /= len(self.train_dataloader)
        # Scheduler steps once per local epoch, not per batch.
        if scheduler is not None:
            scheduler.step()
        self.context["train_loss"] = running_loss
        logger.info(f"Train loss: {running_loss}")
| 2,035 | 36.018182 | 98 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/poisson_regression/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalPoissonRegressionAssistTrainer(Common):
    """Assist-trainer (aggregator) side: no local optimisation, only
    hook-driven checkpointing and validation around aggregation rounds."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # Exactly one aggregation method is configured; its key is the type.
        agg_method = next(iter(self.common_config.aggregation["method"]))
        register_agg_type_for_assist_trainer(self, 'torch', agg_method)
        hook_specs = (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        )
        for place, rank, func, desc in hook_specs:
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # The assist trainer holds no training data; work happens via hooks.
        pass
| 1,539 | 43 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/poisson_regression/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.core.data_io import CsvReader
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from algorithm.model.poisson_regression import PoissonRegression
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
from common.utils.config_sync import ConfigSynchronizer
from common.utils.logger import logger
class Common(BaseTrainer):
    """Shared base for the horizontal Poisson-regression label/assist trainers.

    Provides config synchronisation, model construction, CSV-backed
    dataloaders and the common validation loop with early stopping.
    """

    def __init__(self, train_conf: dict):
        # Only the fields named in sync_rule are synchronised across parties,
        # so every participant trains with identical hyper-parameters.
        sync_rule = {
            "model_info": {
                "config": All()
            },
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the Poisson regression model from model_info["config"]."""
        model_config = self.common_config.model_info.get("config")
        model = PoissonRegression(input_dim=model_config["input_dim"],
                                  bias=model_config["bias"])
        return model

    def _read_data(self, input_dataset):
        """Return a CsvReader for the first dataset entry, or None.

        Only the "csv" dataset type is supported here; an empty dataset list
        or any other type yields None.
        """
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        else:
            return None

    def _set_train_dataloader(self):
        """Build the training DataLoader; returns None when no trainset is configured."""
        train_data = self._read_data(self.common_config.input_trainset)
        trainset = None
        train_dataloader = None
        if train_data:
            # Labels get a trailing singleton dim to match the model's (N, 1) output.
            trainset = TensorDataset(
                torch.tensor(train_data.features(), dtype=torch.float32).to(self.device),
                torch.tensor(train_data.label(), dtype=torch.float32).unsqueeze(dim=-1).to(self.device)
            )
        batch_size = self.common_config.train_params.get("train_batch_size")
        if trainset:
            train_dataloader = DataLoader(trainset, batch_size, shuffle=True)
        return train_dataloader

    def _set_val_dataloader(self):
        """Build the validation DataLoader; returns None when no valset is configured."""
        val_data = self._read_data(self.common_config.input_valset)
        valset = None
        val_dataloader = None
        if val_data:
            valset = TensorDataset(
                torch.tensor(val_data.features(), dtype=torch.float32).to(self.device),
                torch.tensor(val_data.label(), dtype=torch.float32).unsqueeze(dim=-1).to(self.device)
            )
        batch_size = self.common_config.train_params.get("val_batch_size")
        if valset:
            # NOTE(review): validation batches are shuffled; the metrics below
            # aggregate over the full epoch, so ordering does not change them.
            val_dataloader = DataLoader(valset, batch_size, shuffle=True)
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Evaluate on the train or val split, log metrics, drive early stopping.

        Args:
            dataset_type: "val" or "train"; anything else raises ValueError.
            context: unused in this body; the mutable default is never
                mutated here, so it is harmless.
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        # Exactly one loss function is configured; take its name and callable.
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        for batch, (feature, label) in enumerate(dataloader):
            pred = self.model(feature)
            loss = lossfunc(pred, label)
            val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
            val_loss += loss.item()
            labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # The earlier raise guarantees dataset_type is one of these two, so
        # local_epoch is always bound before use.
        if dataset_type == "val":
            local_epoch = None
        elif dataset_type == "train":
            local_epoch = self.context["l_epoch"]
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        # NOTE(review): bitwise `&` on parenthesised booleans — evaluates
        # correctly but, unlike `and`, does not short-circuit.
        if (self.common_config.save_frequency > 0) & \
                (dataset_type == "val") & (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # find the saved epoch closest to the best epoch
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 6,322 | 36.414201 | 103 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/vgg/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.agg_type import \
register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalVggLabelTrainer(Common):
    """Label-trainer side of horizontal federated VGG training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # The aggregation "method" dict holds a single entry; its key names
        # the aggregation type.
        agg_method = next(iter(self.common_config.aggregation["method"]))
        self.register_hook(
            place="after_train_loop",
            rank=1,
            func=partial(self.val_loop, "train"),
            desc="validation on trainset",
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_method)

    def train_loop(self):
        """Run one local epoch and record the mean batch loss in the context."""
        self.model.train()
        criterion = next(iter(self.lossfunc.values()))
        optimizer = next(iter(self.optimizer.values()))
        scheduler = next(iter(self.lr_scheduler.values())) if self.lr_scheduler.values() else None
        running_loss = 0.0
        for feature, label in self.train_dataloader:
            optimizer.zero_grad()
            batch_loss = criterion(self.model(feature), label)
            batch_loss.backward()
            optimizer.step()
            running_loss += batch_loss.item()
        running_loss /= len(self.train_dataloader)
        # Scheduler steps once per local epoch, not per batch.
        if scheduler is not None:
            scheduler.step()
        self.context["train_loss"] = running_loss
        logger.info(f"Train loss: {running_loss}")
| 2,018 | 37.09434 | 98 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/vgg/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import \
register_agg_type_for_assist_trainer
class HorizontalVggAssistTrainer(Common):
    """Assist-trainer (aggregator) side for horizontal VGG: no local
    optimisation, only hook-driven checkpointing and validation."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # Exactly one aggregation method is configured; its key is the type.
        agg_method = next(iter(self.common_config.aggregation["method"]))
        register_agg_type_for_assist_trainer(self, 'torch', agg_method)
        hook_specs = (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        )
        for place, rank, func, desc in hook_specs:
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # The assist trainer holds no training data; work happens via hooks.
        pass
| 1,538 | 41.75 | 91 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/vgg/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from PIL import Image
import torchvision.transforms as transforms
from algorithm.core.data_io import CsvReader, NpzReader
from algorithm.model.vgg import VGG
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class Common(BaseTrainer):
    """Shared base for the horizontal VGG label/assist trainers.

    Handles config synchronisation, VGG model construction, CIFAR-style
    image dataloaders (with augmentation on the train side) and the common
    validation loop with early stopping.
    """

    def __init__(self, train_conf: dict) -> None:
        # Only the fields named in sync_rule are synchronised across parties,
        # so every participant trains with identical hyper-parameters.
        sync_rule = {
            "model_info": All(),
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the VGG model from model_info["config"] and move it to the device."""
        model_config = self.common_config.model_info.get("config")
        model = VGG(num_classes=model_config["num_classes"], layers=model_config["layers"])
        model = model.to(self.device)
        return model

    def _read_data(self, input_dataset):
        """Return a reader for the first dataset entry ("csv" or "npz"), or None."""
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        elif conf["type"] == "npz":
            path = os.path.join(conf['path'], conf['name'])
            return NpzReader(path)
        else:
            return None

    def _set_train_dataloader(self):
        """Build the augmented training DataLoader; None when no trainset is configured."""
        def img_collate_fn(batch):
            # Converts raw HxWxC uint8 arrays to augmented, normalised tensors.
            labels = []
            imgs = []
            transform_train = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                imgs.append(transform_train(img))
                labels.append(label)
            return torch.stack(imgs,0).to(self.device), torch.stack(labels, 0).long().to(self.device)
        train_data = self._read_data(self.common_config.input_trainset)
        trainset = None
        train_dataloader = None
        if train_data:
            # NOTE(review): only the first 100 samples are used — looks like a
            # debug/demo cap; confirm before production use.
            trainset = TensorDataset(
                torch.tensor(train_data.features()[:100]), torch.tensor(train_data.label()[:100])
            )
        batch_size = self.common_config.train_params.get("train_batch_size")
        if trainset:
            train_dataloader = DataLoader(
                trainset, batch_size, shuffle=True, collate_fn=img_collate_fn
            )
        return train_dataloader

    def _set_val_dataloader(self):
        """Build the (non-augmented) validation DataLoader; None when no valset is configured."""
        def img_collate_fn(batch):
            # Validation path: only tensor conversion + normalisation.
            labels = []
            imgs = []
            transform_test = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                imgs.append(transform_test(img))
                labels.append(label)
            return torch.stack(imgs,0).to(self.device), torch.stack(labels, 0).long().to(self.device)
        val_data = self._read_data(self.common_config.input_valset)
        valset = None
        val_dataloader = None
        if val_data:
            # NOTE(review): same first-100-samples cap as the train side.
            valset = TensorDataset(
                torch.tensor(val_data.features()[:100]), torch.tensor(val_data.label()[:100])
            )
        batch_size = self.common_config.train_params.get("val_batch_size")
        if valset:
            val_dataloader = DataLoader(
                valset, batch_size, shuffle=False, collate_fn=img_collate_fn
            )
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Evaluate on the train or val split, log metrics, drive early stopping.

        Args:
            dataset_type: "val" or "train"; anything else raises ValueError.
            context: unused in this body; the mutable default is never
                mutated here, so it is harmless.
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        # Exactly one loss function is configured; take its name and callable.
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        for batch, (feature, label) in enumerate(dataloader):
            pred = self.model(feature)
            loss = lossfunc(pred, label)
            val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
            val_loss += loss.item()
            labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # Map raw model outputs to class predictions: 1-D outputs are
        # thresholded at 0.5 (binary), 2-D outputs take the argmax class.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # The earlier raise guarantees dataset_type is one of these two, so
        # local_epoch is always bound before use.
        if dataset_type == "val":
            local_epoch = None
        elif dataset_type == "train":
            local_epoch = self.context["l_epoch"]
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        # NOTE(review): bitwise `&` on parenthesised booleans — evaluates
        # correctly but, unlike `and`, does not short-circuit.
        if (self.common_config.save_frequency > 0) & \
                (dataset_type == "val") & (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # find the saved epoch closest to the best epoch
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 7,957 | 37.819512 | 101 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/vgg_jax/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.jax.fedtype import _get_label_trainer
from common.utils.logger import logger
from .common import Common
from jax import jit, value_and_grad
class HorizontalVggJaxLabelTrainer(Common, _get_label_trainer()):
    """Label-trainer side of the JAX VGG pipeline: owns the jitted
    train/val steps and the local-epoch loop."""

    def __init__(self, train_conf: dict):
        _get_label_trainer().__init__(self, train_conf)
        self._set_jit_train_step()
        self._set_jit_val_step()

    def _set_jit_train_step(self):
        def train_step(batch, state):
            def loss_fn(params):
                return self.calculate_loss(params, state.batch_stats, batch, train=True)
            # has_aux=True: loss_fn returns (loss, (preds, new_model_state)).
            (loss, (_, new_model_state)), grads = value_and_grad(loss_fn, has_aux=True)(state.params)
            # Apply gradients and carry over the updated BatchNorm statistics.
            state = state.apply_gradients(grads=grads, batch_stats=new_model_state['batch_stats'])
            return loss, state
        self.jit_train_step = jit(train_step)

    def train_loop(self):
        """Run one local epoch and record the mean batch loss in the context."""
        running_loss = 0.0
        for batch in self.train_dataloader:
            loss, self.state = self.jit_train_step(batch, self.state)
            running_loss += loss.item()
        running_loss /= len(self.train_dataloader)
        self.context["train_loss"] = running_loss
        logger.info(f"Train loss: {running_loss}")
| 1,891 | 40.130435 | 102 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/vgg_jax/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.jax.fedtype import _get_assist_trainer
from functools import partial
from .common import Common
class HorizontalVggJaxAssistTrainer(Common, _get_assist_trainer()):
    """Aggregator-side trainer for the JAX VGG pipeline: no local training,
    only hook-driven validation and checkpointing."""

    def __init__(self, train_conf: dict):
        _get_assist_trainer().__init__(self, train_conf)
        self._set_jit_val_step()
        hook_specs = (
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_local_epoch", 3, self._save_model, "save model"),
        )
        for place, rank, func, desc in hook_specs:
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # No local training on the assist side.
        pass
| 1,286 | 38 | 91 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/vgg_jax/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from jax import jit, random
import jax.numpy as jnp
import flax.linen as nn
import torch
from torch.utils.data import DataLoader, TensorDataset
import torchvision.transforms as transforms
from PIL import Image
from flax.core.frozen_dict import FrozenDict
from collections import OrderedDict
from algorithm.core.data_io import NpzReader
from algorithm.model.vgg_jax import vggjax
from common.utils.logger import logger
class Common():
    """Shared helpers for the JAX VGG label/assist trainers.

    Bridges Flax FrozenDict model state to plain numpy dicts for federated
    aggregation, builds NPZ-backed image dataloaders and provides the shared
    loss computation and validation loop.
    """

    def _set_model(self) -> nn.Module:
        """Instantiate the Flax VGG and initialise params/batch_stats.

        Also builds ``self.state_dict`` (flat name -> numpy array) and
        ``self.keys_dict`` (nesting structure) used to round-trip model state
        through the aggregation layer.
        """
        model = None
        self.init_params = None
        self.init_batch_stats = None
        self.state = None
        # Use whichever example batch this party holds to trace init shapes.
        exmp_features = self.exmp_label if self.exmp_label is not None else self.exmp_assist
        model_config = self.model_info.get("config")
        model = vggjax(num_classes=model_config["num_classes"], layers=model_config["layers"])
        init_rng = random.PRNGKey(0)
        variables = model.init(init_rng, exmp_features, train=True)
        self.init_params, self.init_batch_stats = variables["params"], variables["batch_stats"]
        # init the state_dict and keys_dict used for aggregation
        self.state_dict = OrderedDict()
        self.keys_dict = OrderedDict()
        for key in ["params", "batch_stats"]:
            self.keys_dict[key] = OrderedDict()
            for i, j in variables[key].unfreeze().items():
                self.keys_dict[key][i] = []
                for k, v in j.items():
                    self.keys_dict[key][i].append(k)
                    # Flat key is layer-name + param-name concatenated.
                    self.state_dict[i+k] = np.asarray(v, dtype=np.float32)
        return model

    def state_to_state_dict(self):
        """Copy current train-state params and batch_stats into the flat numpy dict."""
        for i, j in self.state.params.unfreeze().items():
            for k, v in j.items():
                self.state_dict[i+k] = np.asarray(v, dtype=np.float32)
        for i, j in self.state.batch_stats.unfreeze().items():
            for k, v in j.items():
                self.state_dict[i+k] = np.asarray(v, dtype=np.float32)

    def state_dict_to_state(self):
        """Rebuild the nested FrozenDict from the flat numpy dict and install it.

        Relies on ``self.keys_dict`` (captured at init) to restore the
        original nesting of params/batch_stats.
        """
        new_state = dict()
        for key in ["params", "batch_stats"]:
            new_state[key] = dict()
            for i, j in self.keys_dict[key].items():
                value_dict = dict()
                for k in j:
                    value_dict[k] = jnp.asarray(self.state_dict[i+k], dtype=np.float32)
                new_state[key][i] = value_dict
        new_state = FrozenDict(new_state)
        self.state = self.state.replace(params=new_state["params"], batch_stats=new_state["batch_stats"])

    def _read_data(self, input_dataset):
        """Return an NpzReader for the first dataset entry, or None.

        Only the "npz" dataset type is supported in the JAX pipeline.
        """
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "npz":
            path = os.path.join(conf['path'], conf['name'])
            return NpzReader(path)
        else:
            return None

    def _set_train_dataloader(self):
        """Build the augmented training DataLoader and an example batch.

        Returns:
            (train_dataloader, exmp_features): both None when no trainset is
            configured; exmp_features is a ones-like 2-sample batch used for
            model initialisation.
        """
        def img_collate_fn(batch):
            # torch-side augmentation, then conversion to channels-last JAX arrays.
            labels = []
            imgs = []
            transform_train = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                np.array
            ])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                img = transform_train(img)
                img = np.transpose(img, (1, 2, 0))
                imgs.append(img)  # [N, C, H, W] -> [N, H, W, C]
                labels.append(label.numpy())
            return jnp.stack(imgs, 0).astype(jnp.float32), jnp.stack(labels, 0).astype(jnp.int32)
        train_data = self._read_data(self.input_trainset)
        exmp_features = None
        trainset = None
        train_dataloader = None
        if train_data:
            # NOTE(review): only the first 100 samples are used — looks like a
            # debug/demo cap; confirm before production use.
            trainset = TensorDataset(torch.tensor(train_data.features()[0:100]), torch.tensor(train_data.label()[0:100]))
            exmp_features = jnp.ones_like(jnp.stack(train_data.features()[0:2], 0))
        batch_size = self.train_params.get("batch_size", 64)
        if trainset:
            train_dataloader = DataLoader(trainset, batch_size, shuffle=True, collate_fn=img_collate_fn)
        return train_dataloader, exmp_features

    def _set_val_dataloader(self):
        """Build the validation DataLoader and an example batch (see train variant)."""
        def img_collate_fn(batch):
            # Validation path: normalisation only, no augmentation.
            labels = []
            imgs = []
            transform_test = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                np.array
            ])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                img = transform_test(img)
                img = np.transpose(img, (1, 2, 0))
                imgs.append(img)  # [N, C, H, W] -> [N, H, W, C]
                labels.append(label.numpy())
            return jnp.stack(imgs, 0).astype(jnp.float32), jnp.stack(labels, 0).astype(jnp.int32)
        val_data = self._read_data(self.input_valset)
        exmp_features = None
        valset = None
        val_dataloader = None
        if val_data:
            # NOTE(review): same first-100-samples cap as the train side.
            valset = TensorDataset(torch.tensor(val_data.features()[0:100]), torch.tensor(val_data.label()[0:100]))
            exmp_features = jnp.ones_like(jnp.stack(val_data.features()[0:2], 0))
        batch_size = self.train_params.get("batch_size", 64)
        if valset:
            val_dataloader = DataLoader(valset, batch_size, shuffle=True, collate_fn=img_collate_fn)
        return val_dataloader, exmp_features

    def calculate_loss(self, params, batch_stats, batch, train):
        """Forward pass returning (loss, (preds, new_model_state)).

        new_model_state carries updated BatchNorm statistics when train=True
        and is None otherwise.
        """
        features, labels = batch
        # Run model. During training, we need to update the BatchNorm statistics.
        outputs = self.model.apply(
            {'params': params, 'batch_stats': batch_stats},
            features,
            train=train,
            mutable=['batch_stats'] if train else False
        )
        logits, new_model_state = outputs if train else (outputs, None)
        loss = self.loss_func(logits, labels).mean()
        preds = logits.argmax(axis=-1)
        return loss, (preds, new_model_state)

    def _set_jit_val_step(self):
        """Compile the evaluation step (no gradient, no BatchNorm update)."""
        def val_step(batch, state):
            loss, (preds, _) = self.calculate_loss(state.params, state.batch_stats, batch, train=False)
            return loss, preds
        self.jit_val_step = jit(val_step)

    def val_loop(self, dataset_type: str = "validation", context: dict = {}):
        """Evaluate on the chosen split and log loss plus configured metrics.

        Args:
            dataset_type: "validation"/"val" or "train"; anything else raises.
            context: unused in this body; the mutable default is never
                mutated here, so it is harmless.
        """
        val_loss = 0
        val_predicts = []
        labels = []
        metric_output = {}
        if dataset_type in ["validation", "val"]:
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        for batch_id, (feature, label) in enumerate(dataloader):
            loss, preds = self.jit_val_step((feature, label), self.state)
            val_predicts.append(preds)
            val_loss += loss.item()
            labels.append(label)
        val_loss /= len(dataloader)
        metric_output[self.loss_func_name] = val_loss
        val_predicts = jnp.concatenate(val_predicts, axis=0)
        labels = jnp.concatenate(labels, axis=0)
        # Each metric gets its own keyword arguments from the config.
        metrics_conf: dict = self.train_params["metric_config"]
        for method in self.metrics:
            metric_output[method] = self.metrics[method](labels, val_predicts, **metrics_conf[method])
        logger.info(f"Metrics on {dataset_type} set: {metric_output}")
| 8,308 | 39.531707 | 121 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/linear_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalLinearRegressionLabelTrainer(Common):
    """Label-trainer side of horizontal federated linear regression."""

    def __init__(self, train_conf: dict):
        """Register the trainset-validation hook and the aggregation type.

        Args:
            train_conf: full training configuration (synchronized by Common).
        """
        super().__init__(train_conf)
        # The aggregation method name is the single key under
        # aggregation["method"] in the synchronized config.
        agg_type = list(self.common_config.aggregation["method"].keys())[0]
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_type)

    def train_loop(self):
        """Run one local training epoch and record the mean train loss."""
        self.model.train()
        train_loss = 0
        lossfunc = list(self.lossfunc.values())[0]
        optimizer = list(self.optimizer.values())[0]
        lr_scheduler = list(self.lr_scheduler.values())[0] if self.lr_scheduler.values() else None
        # Idiom fix: the batch index produced by enumerate() was unused,
        # so iterate the dataloader directly.
        for feature, label in self.train_dataloader:
            pred = self.model(feature)
            loss = lossfunc(pred, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        train_loss /= len(self.train_dataloader)
        # Step the LR scheduler once per epoch, after all batches.
        if lr_scheduler:
            lr_scheduler.step()
        self.context["train_loss"] = train_loss
        logger.info(f"Train loss: {train_loss}")
| 2,036 | 36.722222 | 98 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/linear_regression/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalLinearRegressionAssistTrainer(Common):
    """Assist-trainer (aggregator) side of horizontal linear regression."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        agg_type = list(self.common_config.aggregation["method"].keys())[0]
        register_agg_type_for_assist_trainer(self, 'torch', agg_type)
        # Hook schedule: (place, rank, callback, description).
        hook_plan = (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        )
        for place, rank, func, desc in hook_plan:
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        """The aggregator performs no local training."""
        pass
| 1,538 | 42.971429 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/linear_regression/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.core.data_io import CsvReader
from algorithm.model.linear_regression import LinearRegression
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class Common(BaseTrainer):
    """Shared logic for horizontal federated linear regression trainers.

    Synchronizes the training configuration across parties, builds the
    model and dataloaders, and provides the evaluation loop used by the
    label-trainer and assist-trainer subclasses.
    """

    def __init__(self, train_conf: dict):
        # Only the listed config sections are synchronized across parties.
        sync_rule = {
            "model_info": {
                "config": All()
            },
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the LinearRegression model from model_info["config"]."""
        model_config = self.common_config.model_info.get("config")
        model = LinearRegression(input_dim=model_config["input_dim"],
                                 bias=model_config["bias"])
        return model

    def _read_data(self, input_dataset):
        """Return a CsvReader for the first csv dataset config, else None."""
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        else:
            return None

    def _set_train_dataloader(self):
        """Build a shuffled DataLoader over the training csv, or None."""
        train_data = self._read_data(self.common_config.input_trainset)
        trainset = None
        train_dataloader = None
        if train_data:
            trainset = TensorDataset(
                torch.tensor(train_data.features(), dtype=torch.float32).to(self.device),
                torch.tensor(train_data.label(), dtype=torch.float32).unsqueeze(dim=-1).to(self.device)
            )
        batch_size = self.common_config.train_params.get("train_batch_size")
        if trainset:
            train_dataloader = DataLoader(trainset, batch_size, shuffle=True)
        return train_dataloader

    def _set_val_dataloader(self):
        """Build a shuffled DataLoader over the validation csv, or None."""
        val_data = self._read_data(self.common_config.input_valset)
        valset = None
        val_dataloader = None
        if val_data:
            valset = TensorDataset(
                torch.tensor(val_data.features(), dtype=torch.float32).to(self.device),
                torch.tensor(val_data.label(), dtype=torch.float32).unsqueeze(dim=-1).to(self.device)
            )
        batch_size = self.common_config.train_params.get("val_batch_size")
        if valset:
            val_dataloader = DataLoader(valset, batch_size, shuffle=True)
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Evaluate the model, persist metrics, and update early stopping.

        Args:
            dataset_type: "val" or "train"; selects the dataloader.
            context: unused here; kept for the hook-callback signature.

        Raises:
            ValueError: if ``dataset_type`` is not recognised.
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        # Fix: evaluation does not need autograd. no_grad() avoids building
        # the graph (saving memory/time) and matches the nbafl Common trainer.
        with torch.no_grad():
            for feature, label in dataloader:
                pred = self.model(feature)
                loss = lossfunc(pred, label)
                val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
                val_loss += loss.item()
                labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        if dataset_type == "val":
            local_epoch = None
        elif dataset_type == "train":
            local_epoch = self.context["l_epoch"]
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        if (self.common_config.save_frequency > 0) & \
                (dataset_type == "val") & (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # find the saved epoch closest to the best epoch
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 6,316 | 35.94152 | 103 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/kmeans/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from sklearn.cluster import KMeans
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalKmeansLabelTrainer(Common):
    """Label-trainer side of horizontal federated K-Means."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, "torch", "fedavg")

    def train_loop(self):
        """Refine the aggregated centroids on the local trainset.

        Seeds sklearn's KMeans with the current (federated) centroids, runs
        up to 10 Lloyd iterations on the local features, and writes the
        refined centers back into the model so they can be aggregated.
        """
        # Current centroids come from the aggregated model state.
        centroids = self.model.state_dict()['centroids'].numpy()
        kmeans_model = KMeans(
            n_clusters=centroids.shape[0],
            init=centroids,
            n_init=1,
            max_iter=10
        )
        train_features, _ = self.train_dataloader.dataset.tensors
        train_features = train_features.numpy()
        kmeans_model.fit(train_features)
        logger.info(f"K-Means score: {kmeans_model.score(train_features)}")
        # Fix: the old code also mutated a throwaway state_dict copy that was
        # never loaded back (load_state_dict was commented out); assigning the
        # parameter directly is the effective update, so keep only that.
        self.model.centroids = nn.Parameter(
            torch.tensor(kmeans_model.cluster_centers_))
| 2,144 | 35.355932 | 91 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/kmeans/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from common.utils.logger import logger
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalKmeansAssistTrainer(Common):
    """Assist-trainer (aggregator) side of horizontal federated K-Means."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        register_agg_type_for_assist_trainer(self, 'torch', "fedavg")
        # Hook schedule: (place, rank, callback, description).
        schedule = (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        )
        for place, rank, func, desc in schedule:
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        """The aggregator performs no local training."""
        pass
| 1,491 | 41.628571 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/kmeans/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from torch import nn
from torch.utils.data.dataset import TensorDataset
from torch.utils.data.dataloader import DataLoader
from sklearn.metrics import davies_bouldin_score
from sklearn.cluster import KMeans
from algorithm.core.data_io import CsvReader
from algorithm.model.horizontal_k_means import HorizontalKMeans
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class Common(BaseTrainer):
    """Shared logic for horizontal federated K-Means trainers.

    Synchronizes config across parties, builds the centroid model and
    dataloaders, and scores the federated centroids during validation.
    """

    def __init__(self, train_conf: dict) -> None:
        # Only the listed config sections are synchronized across parties.
        sync_rule = {
            "model_info": All(),
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the K-Means centroid model from model_info["config"]."""
        model_config = self.common_config.model_info.get("config")
        input_dim = model_config["input_dim"]
        num_clusters = model_config["num_clusters"]
        model = HorizontalKMeans(
            input_dim=input_dim, num_clusters=num_clusters)
        return model

    def _read_data(self, input_dataset):
        """Return a CsvReader for the first csv dataset config, else None."""
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            logger.info(f"Data path: {os.path.abspath(path)}")
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        else:
            return None

    def _set_train_dataloader(self):
        """Wrap the training csv (features, labels) in a DataLoader."""
        train_data = self._read_data(self.common_config.input_trainset)
        train_dataloader = None
        if train_data:
            train_dataset = TensorDataset(
                torch.Tensor(train_data.features()),
                torch.Tensor(train_data.label())
            )
            train_dataloader = DataLoader(train_dataset)
        return train_dataloader

    def _set_val_dataloader(self):
        """Wrap the validation csv (features, labels) in a DataLoader."""
        val_data = self._read_data(self.common_config.input_valset)
        val_dataloader = None
        if val_data:
            val_dataset = TensorDataset(
                torch.Tensor(val_data.features()),
                torch.Tensor(val_data.label())
            )
            val_dataloader = DataLoader(val_dataset)
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Score the federated centroids on a dataset and persist the metric.

        Assigns every sample to its nearest centroid and reports the
        Davies-Bouldin index of the resulting clustering.

        Args:
            dataset_type: "val" or "train"; selects the dataloader.
            context: unused here; kept for the hook-callback signature.

        Raises:
            ValueError: if ``dataset_type`` is not recognised.
        """
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        val_features, val_label = dataloader.dataset.tensors
        val_features = val_features.numpy()
        centroids = self.model.state_dict()['centroids'].numpy()
        # fit() is only needed so sklearn marks the estimator as fitted;
        # max_iter=1 keeps it cheap.
        kmeans = KMeans(
            n_clusters=centroids.shape[0], init=centroids, n_init=1, max_iter=1)
        kmeans.fit(val_features)
        # Bug fix: the sklearn attribute is `cluster_centers_` (trailing
        # underscore). The old code assigned `cluster_centers`, a no-op
        # attribute, so predict() silently used the locally refitted centers
        # instead of the federated centroids being evaluated.
        kmeans.cluster_centers_ = centroids
        pred_labels = kmeans.predict(val_features)
        score = davies_bouldin_score(val_features, pred_labels)
        metrics_output = CommonMetrics._calc_metrics(
            metrics={},
            labels=val_label,
            val_predicts=pred_labels,
            lossfunc_name="davies_bouldin_score",
            loss=score,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        if dataset_type == "val":
            local_epoch = None
        elif dataset_type == "train":
            local_epoch = self.context["l_epoch"]
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
| 4,892 | 34.456522 | 80 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/nbafl/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from common.communication.gRPC.python.channel import DualChannel
from service.fed_config import FedConfig
from functools import partial
from algorithm.core.horizontal.template.agg_type import \
register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
class HorizontalNbaflLabelTrainer(Common):
    """Label trainer for NbAFL (noising-before-aggregation FL).

    Adds a proximal regularization term towards the previous parameters,
    clips weights into [-C, C], and perturbs them with Gaussian noise of
    scale sigma_u before each upload.
    """

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        register_agg_type_for_label_trainer(self, "torch", "fedavg")
        # Channel used to report the local sample count to the aggregator.
        self.sample_size_channel = DualChannel(
            name="sample_size_" + FedConfig.node_id,
            ids=[FedConfig.get_assist_trainer(), FedConfig.node_id]
        )
        # initialize prev params
        self.prev_params = [
            param.data.detach().clone() for param in self.model.parameters()
        ]
        # Update sample size
        self.register_hook(
            place="before_global_epoch", rank=1,
            func=self._update_sample_size, desc="Update local sample size"
        )
        # Calculate update sigma
        self.register_hook(
            place="before_global_epoch", rank=2,
            func=self._calc_uplink_sigma, desc="Calculate uplink sigma"
        )
        # Update prev param
        self.register_hook(
            place="after_local_epoch", rank=1,
            func=self._update_prev_param, desc="Update prev param"
        )
        # Clip norm
        self.register_hook(
            place="after_local_epoch", rank=2,
            func=self._clip_params, desc="Clip param norms"
        )
        # Add noise
        self.register_hook(
            place="after_local_epoch", rank=3,
            func=self._add_noise, desc="Add uplink noise"
        )
        # Validation
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )

    def _update_prev_param(self, context):
        """Snapshot the current parameters as the proximal anchor."""
        self.prev_params = [
            param.data.detach().clone() for param in self.model.parameters()
        ]

    def _cal_regularization(self, p=2):
        """Return the proximal term mu/p * sum ||w - w_prev||_p^p."""
        reg = 0.0
        for w_prev, w in zip(self.prev_params, self.model.parameters()):
            reg += torch.pow(torch.norm(w - w_prev, p), p)
        return self.mu * reg / p

    def _clip_params(self, context):
        """Clip every weight elementwise into [-C, C].

        Dividing by max(1, |w|/C) leaves small weights untouched and maps
        larger ones onto the clipping boundary.
        """
        for param in self.model.parameters():
            # Fix: ones_like keeps the comparison tensor on the same
            # device/dtype as the parameter; torch.ones(shape) is created on
            # the CPU and would fail for CUDA models.
            norm_ratio = torch.maximum(
                torch.ones_like(param.data),
                torch.abs(param.data) / self.common_config.train_params['C']
            )
            param.data = param.data / norm_ratio
        return

    def _calc_uplink_sigma(self, context):
        """Derive the uplink noise scale from C, epsilon and the local |D|."""
        delta_S_u = 2 * self.common_config.train_params['C'] / \
            len(self.train_dataloader.dataset)
        sigma_u = self.c * delta_S_u / self.epsilon
        logger.info("Uplink sigma: {}".format(sigma_u))
        self.sigma_u = sigma_u
        return

    def train_loop(self):
        """Run one local epoch with the proximal term; return the mean loss."""
        self.model.train()
        train_loss = 0
        lossfunc = list(self.lossfunc.values())[0]
        optimizer = list(self.optimizer.values())[0]
        for batch_idx, (feature, label) in enumerate(self.train_dataloader):
            pred = self.model(feature)
            loss = lossfunc(pred, label)
            reg = self._cal_regularization()
            loss += reg
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        train_loss /= len(self.train_dataloader)
        # retain current params (the after_local_epoch hook repeats this;
        # the duplication is harmless and keeps train_loop self-contained)
        self.prev_params = [
            param.data.detach().clone() for param in self.model.parameters()
        ]
        return train_loss

    def _add_noise(self, context):
        """Perturb every parameter with N(0, sigma_u) noise before upload."""
        for param in self.model.parameters():
            param.data += torch.distributions.Normal(
                loc=0, scale=self.sigma_u).sample(param.size()).to(self.device)
        return

    def _update_sample_size(self, context):
        """Send the local trainset size to the assist trainer."""
        logger.info("trainset length: {}".format(
            len(self.train_dataloader.dataset)))
        self.sample_size_channel.send(len(self.train_dataloader.dataset))
        return
| 4,761 | 33.014286 | 79 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/nbafl/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from common.communication.gRPC.python.channel import DualChannel
from service.fed_config import FedConfig
from functools import partial
from common.utils.logger import logger
from .common import Common
from algorithm.core.horizontal.template.agg_type import \
register_agg_type_for_assist_trainer
class HorizontalNbaflAssistTrainer(Common):
    """Aggregator for NbAFL: collects sample sizes, derives the downlink
    noise scale, and perturbs the aggregated model before broadcast."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        register_agg_type_for_assist_trainer(self, "torch", "fedavg")
        self.load_model()
        # One DualChannel per label trainer, used to receive sample counts.
        self.sample_size_channel = {}
        # Init size channel
        for party_id in FedConfig.get_label_trainer():
            self.sample_size_channel[party_id] = DualChannel(
                name="sample_size_" + party_id,
                ids=[FedConfig.node_id, party_id]
            )
        # Get sample size
        self.register_hook(
            place="before_global_epoch", rank=1,
            func=self._get_sample_size, desc="Get sample size"
        )
        # Calculate downlink noise
        self.register_hook(
            place="before_global_epoch", rank=2,
            func=self._calc_downlink_sigma, desc="Calculate downlink noise"
        )
        # Add noise
        self.register_hook(
            place="after_local_epoch", rank=1,
            func=self._add_noise, desc="Add downlink noise"
        )
        # Validation
        self.register_hook(
            place="after_local_epoch", rank=2,
            func=partial(self.val_loop, "val"), desc="validation on valset"
        )
        self.register_hook(
            place="after_global_epoch", rank=1,
            func=partial(self._save_model, True), desc="save final model"
        )

    def _calc_downlink_sigma(self, context):
        """Compute the downlink noise scale sigma_d from the config.

        Noise is only added when global_epoch > num_client^(3/2); otherwise
        sigma_d is 0. NOTE(review): the formula below presumably follows the
        NbAFL paper's downlink bound -- verify coefficients against it.
        """
        logger.info("Calculating downlink sigma")
        if self.common_config.train_params['global_epoch'] > \
                self.common_config.train_params['num_client'] * \
                np.sqrt(self.common_config.train_params['num_client']):
            sigma_d = (
                2 * self.common_config.train_params['C'] * self.c * np.sqrt(
                    self.common_config.train_params['global_epoch'] ** 2 - \
                    np.power(self.common_config.train_params['num_client'], 3)) / \
                (self.min_sample_num * \
                 self.common_config.train_params['num_client'] * \
                 self.common_config.train_params['epsilon'])
            )
        else:
            sigma_d = 0.0
        logger.info("Downlink sigma: {}".format(sigma_d))
        self.sigma_d = sigma_d
        return

    def _add_noise(self, context):
        """Perturb every aggregated parameter with N(0, sigma_d) noise.

        Skipped entirely when sigma_d == 0 (no downlink noise required).
        """
        if self.sigma_d > 0:
            noise_generator = torch.distributions.Normal(
                loc=0, scale=self.sigma_d)
            for param_data in self.model.parameters():
                param_data.data += noise_generator.sample(param_data.size())
        return

    def _get_sample_size(self, context):
        """Receive each party's trainset size and keep the minimum.

        The minimum sample count feeds into the downlink sigma formula.
        """
        sample_nums = []
        for party_id in FedConfig.get_label_trainer():
            single_sample_size = self.sample_size_channel[party_id].recv()
            sample_nums.append(single_sample_size)
        sample_num_array = np.array(sample_nums)
        logger.info("Sample num array: {}".format(sample_num_array))
        self.min_sample_num = np.min(sample_num_array)
        return

    def train_loop(self):
        """The aggregator performs no local training."""
        pass
| 4,053 | 36.192661 | 87 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/nbafl/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.core.data_io import CsvReader
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
from common.utils.config_sync import ConfigSynchronizer
from common.utils.logger import logger
class Common(BaseTrainer):
    """Shared logic for the NbAFL horizontal trainers.

    Synchronizes the config across parties, builds an MLP from the layer
    spec in model_info, and provides the evaluation loop with early
    stopping shared by label and assist trainers.
    """

    def __init__(self, train_conf: dict):
        # Only the listed config sections are synchronized across parties.
        sync_rule = {
            "model_info": {
                "config": All()
            },
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "mu": All(),
                    "epsilon": All(),
                    "delta": All(),
                    "C": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)
        # NbAFL differential-privacy hyperparameters.
        self.mu = self.common_config.train_params['mu']
        self.delta = self.common_config.train_params['delta']
        # Gaussian-mechanism constant: c = sqrt(2 ln(1.25/delta)).
        self.c = np.sqrt(2 * np.log(1.25 / self.delta))
        self.epsilon = self.common_config.train_params['epsilon']

    def _set_model(self):
        """Build an MLP: Linear+activation per entry in layer_dim/activation."""
        logger.info("Model info: {}".format(self.common_config.model_info))
        model_config = self.common_config.model_info["config"]
        assert len(model_config['layer_dim']) == len(
            model_config['activation']), "Hidden layer nums must match activation nums"
        layer_dims = [model_config['input_dim']] + model_config['layer_dim']
        layer_act = model_config['activation']
        module_list = []
        for input_dim, output_dim, activation_str in zip(layer_dims, layer_dims[1:], layer_act):
            module_list.append(
                nn.Linear(input_dim, output_dim, bias=model_config['bias']))
            # Activation name is looked up on torch.nn (e.g. "ReLU").
            activation = getattr(nn, activation_str)()
            module_list.append(activation)
        model = nn.Sequential(*module_list)
        return model

    def load_model(self):
        """Load model weights via the base-class loader (no extra context)."""
        self._load_model({})

    def _read_data(self, input_dataset):
        """Return a CsvReader for the first csv dataset config, else None."""
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        else:
            return None

    def _set_train_dataloader(self):
        """Build a shuffled DataLoader over the training csv, or None."""
        train_data = self._read_data(self.common_config.input_trainset)
        trainset = None
        train_dataloader = None
        if train_data:
            trainset = TensorDataset(
                torch.tensor(train_data.features(), dtype=torch.float32).to(self.device),
                torch.tensor(train_data.label(), dtype=torch.float32).unsqueeze(dim=-1).to(self.device)
            )
        batch_size = self.common_config.train_params.get("train_batch_size")
        if trainset:
            train_dataloader = DataLoader(trainset, batch_size, shuffle=True)
        return train_dataloader

    def _set_val_dataloader(self):
        """Build a shuffled DataLoader over the validation csv, or None."""
        val_data = self._read_data(self.common_config.input_valset)
        valset = None
        val_dataloader = None
        if val_data:
            valset = TensorDataset(
                torch.tensor(val_data.features(), dtype=torch.float32).to(self.device),
                torch.tensor(val_data.label(), dtype=torch.float32).unsqueeze(dim=-1).to(self.device)
            )
        batch_size = self.common_config.train_params.get("val_batch_size")
        if valset:
            val_dataloader = DataLoader(valset, batch_size, shuffle=True)
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Evaluate the model, persist metrics, and update early stopping.

        Args:
            dataset_type: "val" or "train"; selects the dataloader.
            context: unused here; kept for the hook-callback signature.

        Raises:
            ValueError: if ``dataset_type`` is not recognised.
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        with torch.no_grad():
            for batch_idx, (feature, label) in enumerate(dataloader):
                pred = self.model(feature)
                loss = lossfunc(pred, label)
                val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
                val_loss += loss.item()
                labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # Convert raw outputs to class predictions: 1-D outputs are
        # thresholded at 0.5, 2-D outputs use argmax.
        # NOTE(review): assumes a classification head -- confirm for
        # regression-style losses.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        if dataset_type == "val":
            local_epoch = None
        elif dataset_type == "train":
            local_epoch = self.context["l_epoch"]
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        # Early stopping only runs on the valset when checkpointing and a
        # positive patience are configured.
        if (self.common_config.save_frequency > 0) & \
                (dataset_type == "val") & (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # find the saved epoch closest to the best epoch
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 7,492 | 37.229592 | 103 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/chatglm/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import torch
from peft import (
get_peft_model, PeftModel, PEFT_TYPE_TO_CONFIG_MAPPING
)
from transformers import (
AutoTokenizer,
AutoModel,
AutoConfig,
DataCollatorForSeq2Seq,
TrainingArguments
)
from algorithm.core.data_io import QADataset
from service.fed_config import FedConfig
from common.utils.config_parser import CommonConfigParser
from common.utils.logger import logger
from common.checker.x_types import All
from common.utils.config_sync import ConfigSynchronizer
def alternateSVDLinear():
    """Monkey-patch peft's AdaLoRA ``SVDLinear.forward``.

    The replacement mirrors the upstream forward except that the LoRA
    branch casts the dropout input to float (``x.float()``), avoiding a
    dtype mismatch when the base model runs in reduced precision.
    """
    import torch.nn.functional as F
    from peft.tuners.adalora import transpose
    def forward(self, x: torch.Tensor):
        # No adapter registered under the active name: plain linear layer.
        if self.active_adapter not in self.lora_A.keys():
            return F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
        if self.disable_adapters:
            # Adapters disabled: undo a previous merge, then run the base layer.
            if self.r[self.active_adapter] > 0 and self.merged:
                self.unmerge()
            result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
        elif self.r[self.active_adapter] > 0 and not self.merged:
            # Unmerged adapter: base output plus the scaled SVD low-rank term.
            result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
            result += (
                (
                    self.lora_dropout[self.active_adapter](x.float())  # to float()
                    @ (self.lora_A[self.active_adapter] * self.lora_E[self.active_adapter]).T
                    @ self.lora_B[self.active_adapter].T
                )
                * self.scaling[self.active_adapter]
                / (self.ranknum[self.active_adapter] + 1e-5)
            )
        else:
            # Adapter already merged into the weight: base layer suffices.
            result = F.linear(x, transpose(self.weight, self.fan_in_fan_out), bias=self.bias)
        return result
    from peft.tuners.adalora import SVDLinear
    SVDLinear.forward = forward
class Common:
    """Shared setup for the horizontal ChatGLM trainers (label/assist).

    Responsible for: synchronizing hyper-parameters across parties, resolving
    relative paths, loading tokenizer/base model, wrapping the model with the
    configured PEFT adapter, building datasets and TrainingArguments.
    """
    def __init__(self, train_conf: dict):
        # Only when an assist trainer exists do we pull the shared hyper-params
        # from it; IO paths remain party-local and are never synchronized.
        if FedConfig.get_assist_trainer():
            sync_rule = {
                "train_info": {
                    "train_params": {
                        "aggregation": All(),
                        "encryption": All(),
                        "peft": All(),
                        "trainer": {
                            "learning_rate": All(),
                            "weight_decay": All(),
                            "adam_beta1": All(),
                            "adam_beta2": All(),
                            "adam_epsilon": All(),
                            "max_grad_norm": All(),
                            "max_steps": All(),
                            "num_train_epochs": All(),
                            "seed": All()
                        },
                        "dataset": All()
                    }
                }
            }
            train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        # Resolve relative model/dataset paths against the repository root
        # (four directories above this file).
        root_path = Path(__file__).parents[4]
        path = train_conf.get('input', {}).get("pretrained_model", {}).get("path")
        if path and not os.path.isabs(path):
            train_conf["input"]["pretrained_model"]['path'] = os.path.abspath(os.path.join(root_path, path))
        path = train_conf.get('input', {}).get("adapter_model", {}).get("path")
        if path and not os.path.isabs(path):
            train_conf["input"]["adapter_model"]['path'] = os.path.abspath(os.path.join(root_path, path))
        trainset_conf = train_conf.get('input', {}).get("trainset")
        if trainset_conf:
            path = trainset_conf[0].get("path")
            if path and not os.path.isabs(path):
                train_conf["input"]["trainset"][0]['path'] = os.path.abspath(os.path.join(root_path, path))
        self.common_config = CommonConfigParser(train_conf)
        # for adalora: patch peft's SVDLinear.forward (see alternateSVDLinear)
        alternateSVDLinear()
        # CPU-only mode: hide all CUDA devices from torch
        if self.common_config.train_params["trainer"].get("no_cuda"):
            os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
        pretrained_model_conf = self.common_config.input.get("pretrained_model", {})
        path = pretrained_model_conf.get("path")
        model_name_or_path = path
        # Tokenizer always comes from the pretrained model directory.
        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
        logger.info(self.tokenizer)
        # Detect whether actual weights ("pytorch_model*") exist in the model dir;
        # otherwise the model is built from config with random init.
        self.load_from_pretrained = False
        for name in os.listdir(model_name_or_path):
            if 'pytorch_model' in name:
                self.load_from_pretrained = True
                break
        # Exactly one PEFT method is expected in the config (unpacking enforces it).
        (peft_type, peft_config_dict), = self.common_config.train_params["peft"].items()
        if self.load_from_pretrained:
            if peft_type == "PREFIX_TUNING":
                # P-tuning v2 is configured through the base model config itself.
                config = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True)
                config.pre_seq_len = self.common_config.train_params["peft"][peft_type]["pre_seq_len"]
                config.prefix_projection = self.common_config.train_params["peft"][peft_type]["prefix_projection"]
                model = AutoModel.from_pretrained(model_name_or_path, config=config, trust_remote_code=True)
            else:
                model = AutoModel.from_pretrained(model_name_or_path, trust_remote_code=True, device_map="auto")
        else:
            if peft_type == "PREFIX_TUNING":
                config = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True)
                config.pre_seq_len = self.common_config.train_params["peft"][peft_type]["pre_seq_len"]
                config.prefix_projection = self.common_config.train_params["peft"][peft_type]["prefix_projection"]
                model = AutoModel.from_config(config, trust_remote_code=True)
            else:  # if peft_type == "LORA":
                config = AutoConfig.from_pretrained(model_name_or_path, trust_remote_code=True)
                model = AutoModel.from_config(config, trust_remote_code=True)
            logger.warning("No pretrained model founded, load from config")
        # fp32 on CPU, fp16 otherwise.
        if self.common_config.train_params["trainer"].get("no_cuda"):
            model = model.float()
        else:
            model = model.half()
        if peft_type == "PREFIX_TUNING":
            # Prefix tuning needs no peft wrapper: the prefix encoder lives in the model.
            self.model = model
        else:
            peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type](inference_mode=False, **peft_config_dict)
            # model = prepare_model_for_int8_training(model)
            self.model = get_peft_model(model, peft_config)
        # Optionally warm-start the adapter from a previously trained checkpoint.
        if self.common_config.input.get("adapter_model", {}):
            adapter_path = self.common_config.input.get("adapter_model")["path"]
            if peft_type == "PREFIX_TUNING":
                # P-tuning v2: only the prefix-encoder weights are restored.
                prefix_state_dict = torch.load(os.path.join(adapter_path, "pytorch_model.bin"))
                new_prefix_state_dict = {}
                for k, v in prefix_state_dict.items():
                    if k.startswith("transformer.prefix_encoder."):
                        new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
                self.model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
            else:
                self.model = PeftModel.from_pretrained(self.model,
                                                       adapter_path,
                                                       adapter_name="default",
                                                       is_trainable=True)
            logger.info("Load adapter model.")
        if peft_type == "PREFIX_TUNING":
            # Keep the trainable prefix encoder in fp32 even when the base model is fp16.
            if not self.common_config.train_params["trainer"].get("no_cuda"):
                self.model.transformer.prefix_encoder.float()
        logger.info(self.model)
        self.train_dataset, self.val_dataset = self._set_dataset()
        self.data_collator = self._set_data_collator()
        trainer_conf = self.common_config.train_params["trainer"]
        trainer_conf["max_steps"] = -1
        # trainer_conf["save_strategy"] = 'steps'
        # NOTE(review): agg_steps is a fraction in (0, 1] consumed by the
        # aggregation callbacks; they override logging/save steps at train
        # begin, so the value passed to TrainingArguments here is transient.
        trainer_conf["save_steps"] = self.common_config.train_params["aggregation"]["agg_steps"]
        trainer_conf["output_dir"] = self.common_config.output_dir
        self.training_args = TrainingArguments(**trainer_conf)
        self.trainer_conf = trainer_conf
        if peft_type == "PREFIX_TUNING":
            self.training_args.local_rank = -1
    def _set_data_collator(self):
        """Build a seq2seq collator that pads inputs and masks label padding with -100."""
        data_collator = DataCollatorForSeq2Seq(
            self.tokenizer,
            model=None,
            label_pad_token_id=-100,
            pad_to_multiple_of=None,
            padding=True
        )
        return data_collator
    def _set_dataset(self):
        """Build (train_dataset, val_dataset) QADatasets; either may be None."""
        dataset_conf = self.common_config.train_params["dataset"]
        train_dataset, val_dataset = None, None
        if self.common_config.input_trainset:
            file_name_or_path = os.path.join(
                self.common_config.input_trainset[0].get("path")
            )
            train_dataset = QADataset(
                file_name_or_path=file_name_or_path,
                tokenizer=self.tokenizer,
                max_src_length=dataset_conf["max_src_length"],
                max_dst_length=dataset_conf["max_dst_length"],
                prompt_pattern=dataset_conf["prompt_pattern"],
                key_query=dataset_conf.get("key_query", "input"),
                key_answer=dataset_conf.get("key_answer", "output")
            )
        if self.common_config.input_valset:
            file_name_or_path = os.path.join(
                self.common_config.input_valset[0].get("path")
            )
            val_dataset = QADataset(
                file_name_or_path=file_name_or_path,
                tokenizer=self.tokenizer,
                max_src_length=dataset_conf["max_src_length"],
                max_dst_length=dataset_conf["max_dst_length"],
                prompt_pattern=dataset_conf["prompt_pattern"],
                key_query=dataset_conf.get("key_query", "input"),
                key_answer=dataset_conf.get("key_answer", "output")
            )
        return train_dataset, val_dataset
| 10,798 | 42.898374 | 114 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/chatglm/callback.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import inspect
from pathlib import Path
from typing import Union
from copy import deepcopy
import torch
import torch.nn as nn
import transformers
from accelerate import (
dispatch_model, infer_auto_device_map
)
from accelerate.utils import get_balanced_memory
from accelerate.hooks import (
AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules
)
from transformers import (
TrainerCallback, TrainingArguments, TrainerState, TrainerControl
)
from peft import PeftModel
from peft.utils import (
get_peft_model_state_dict, set_peft_model_state_dict, PromptLearningConfig
)
from algorithm.core.horizontal.aggregation.api import (
get_aggregation_root_inst, get_aggregation_leaf_inst
)
from common.utils.logger import logger
from service.fed_config import FedConfig
# def is_nan_exists(state_dict):
# flag = False
# for k, v in state_dict.items():
# if torch.isnan(v).any():
# flag = True
# logger.warning(f"Parameter {k} contains nan")
# break
# return flag
class AssistTrainerCallback(TrainerCallback):
    """Trainer callback for the aggregation root (assist trainer).

    Broadcasts the initial adapter weights to all leaf trainers before
    training starts and, at every aggregation boundary (a fraction
    ``agg_steps`` of the total optimizer steps), gathers the leaves'
    weights, aggregates them, installs the result locally and broadcasts
    it back, then checkpoints.
    """

    def __init__(self,
                 agg_steps: int,
                 sec_conf: dict,
                 root_id: str,
                 leaf_ids: list[str],
                 init_params: bool = False,
                 peft_type: str = "LORA"):
        """
        Args:
            agg_steps: fraction of total training steps between aggregations,
                must lie in (0, 1].
            sec_conf: encryption/security configuration for aggregation.
            root_id: node id of the aggregation root (this party).
            leaf_ids: node ids of the participating leaf trainers.
            init_params: if True, re-initialize model weights at train begin.
            peft_type: peft adapter type, e.g. "LORA" or "PREFIX_TUNING".

        Raises:
            ValueError: if ``agg_steps`` is outside (0, 1].
        """
        super().__init__()
        self.agg_steps = agg_steps
        self.agg_steps_list = []
        # Use an explicit exception instead of assert so the check survives
        # running under ``python -O``.
        if not 0 < agg_steps <= 1:
            raise ValueError("agg_steps must be in (0, 1].")
        self.agg_inst = get_aggregation_root_inst(sec_conf, root_id, leaf_ids)
        self.init_params = init_params
        self.peft_type = peft_type

    def _init_model_params(self, model):
        """Re-initialize conv/batchnorm/linear/embedding weights in-place."""
        for m in model.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.xavier_normal_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                torch.nn.init.normal_(m.weight.data, 0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Embedding):
                torch.nn.init.uniform_(m.weight.data)

    def on_train_begin(self,
                       args: TrainingArguments,
                       state: TrainerState,
                       control: TrainerControl,
                       model: Union[transformers.PreTrainedModel, torch.nn.Module],
                       **kwargs):
        """Optionally re-init weights, disable the Trainer's own periodic
        logging/saving, and broadcast the starting adapter weights."""
        if self.init_params:
            self._init_model_params(model)
        # Push the Trainer's periodic logging/saving beyond max_steps:
        # this callback drives logging and checkpointing itself.
        args.logging_steps = state.max_steps + 1
        args.save_steps = state.max_steps + 1
        adapters_weights = get_adapter_state_dict(model, self.peft_type)
        self.agg_inst.broadcast(adapters_weights)

    def on_step_end(self,
                    args: TrainingArguments,
                    state: TrainerState,
                    control: TrainerControl,
                    model: Union[transformers.PreTrainedModel, torch.nn.Module],
                    tokenizer,
                    **kwargs):
        """At each aggregation boundary: gather, aggregate, install,
        broadcast and checkpoint. Runs after the Trainer's own step logic."""
        # Lazily build the absolute step numbers at which to aggregate;
        # state.max_steps is only known once training has started.
        if not self.agg_steps_list:
            i = self.agg_steps
            while i < 1:
                self.agg_steps_list.append(round(i, 4))
                i += self.agg_steps
            self.agg_steps_list.append(1)
            self.steps_list = [math.ceil(i * state.max_steps)
                               for i in self.agg_steps_list]
            # Two fractions mapping to the same step would double-aggregate;
            # raise (consistently with LabelTrainerCallback) instead of assert.
            if len(self.steps_list) != len(set(self.steps_list)):
                raise ValueError("agg_steps is too small, try a larger one.")
            logger.info(f"Aggregate model by steps: {self.agg_steps_list}")
        if state.global_step in self.steps_list:
            idx = self.steps_list.index(state.global_step)
            # Uniform weighting across parties; sample-count based weighting
            # is intentionally disabled.
            factor = 1
            adapters_weights = get_adapter_state_dict(model, self.peft_type)
            logger.info(
                f"gather and aggregating..., global_step={state.global_step}")
            new_adapters_weights = self.agg_inst.aggregate(
                parameters=adapters_weights, parameters_weight=factor)
            set_adapter_state_dict(model, self.peft_type, new_adapters_weights)
            logger.info(f"broadcasting..., global_step={state.global_step}")
            self.agg_inst.broadcast(new_adapters_weights)
            if args.output_dir and args.save_strategy != 'no':
                if self.peft_type != "PREFIX_TUNING":
                    model.save_pretrained(save_directory=Path(args.output_dir) / f"checkpoint-{str(self.agg_steps_list[idx])}")
                else:
                    # Prefix tuning: persist only the trainable prefix weights.
                    model.save_pretrained(save_directory=Path(args.output_dir) / f"checkpoint-{str(self.agg_steps_list[idx])}",
                                          state_dict=get_adapter_state_dict(model, self.peft_type))
            control.should_log = True
class LabelTrainerCallback(TrainerCallback):
    """Trainer callback for an aggregation leaf (label trainer).

    Downloads the starting adapter weights from the assist trainer before
    training and, at every aggregation boundary (a fraction ``agg_steps``
    of total optimizer steps), uploads local adapter weights and replaces
    them with the aggregated result. In standalone mode (no assist trainer
    configured) it only handles checkpointing.
    """

    def __init__(self,
                 agg_steps: Union[float, int],
                 sec_conf: dict,
                 root_id: str,
                 leaf_ids: list[str],
                 init_params: bool = False,
                 peft_type: str = "LORA"):
        """
        Args:
            agg_steps: fraction of total training steps between aggregations,
                must lie in (0, 1].
            sec_conf: encryption/security configuration for aggregation.
            root_id: node id of the aggregation root (assist trainer).
            leaf_ids: node ids of the participating leaf trainers.
            init_params: if True, re-initialize model weights at train begin.
            peft_type: peft adapter type, e.g. "LORA" or "PREFIX_TUNING".

        Raises:
            ValueError: if ``agg_steps`` is outside (0, 1].
        """
        super().__init__()
        self.agg_steps = agg_steps
        self.agg_steps_list = []
        # Explicit exception instead of assert: survives ``python -O``.
        if not 0 < agg_steps <= 1:
            raise ValueError("agg_steps must be in (0, 1].")
        self.is_standalone = False if FedConfig.get_assist_trainer() else True
        if not self.is_standalone:
            self.agg_inst = get_aggregation_leaf_inst(sec_conf, root_id, leaf_ids)
        self.init_params = init_params
        self.peft_type = peft_type

    def _init_model_params(self, model):
        """Re-initialize conv/batchnorm/linear/embedding weights in-place."""
        for m in model.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.xavier_normal_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                torch.nn.init.normal_(m.weight.data, 0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.Embedding):
                torch.nn.init.uniform_(m.weight.data)

    def on_train_begin(self,
                       args: TrainingArguments,
                       state: TrainerState,
                       control: TrainerControl,
                       model: Union[transformers.PreTrainedModel, torch.nn.Module],
                       train_dataloader,
                       **kwargs):
        """Optionally re-init weights, disable the Trainer's own periodic
        logging/saving, and pull the starting weights from the root."""
        if self.init_params:
            self._init_model_params(model)
        # Push the Trainer's periodic logging/saving beyond max_steps:
        # this callback drives logging and checkpointing itself.
        args.logging_steps = state.max_steps + 1
        args.save_steps = state.max_steps + 1
        if not self.is_standalone:
            new_adapters_weights = self.agg_inst.download()
            set_adapter_state_dict(model, self.peft_type, new_adapters_weights)

    def on_step_end(self,
                    args: TrainingArguments,
                    state: TrainerState,
                    control: TrainerControl,
                    model: Union[transformers.PreTrainedModel, torch.nn.Module],
                    **kwargs):
        """At each aggregation boundary: upload local weights, download the
        aggregate, install it and checkpoint."""
        # Lazily build the absolute step numbers at which to aggregate;
        # state.max_steps is only known once training has started.
        if not self.agg_steps_list:
            i = self.agg_steps
            while i < 1:
                self.agg_steps_list.append(round(i, 4))
                i += self.agg_steps
            self.agg_steps_list.append(1)
            self.steps_list = [math.ceil(i * state.max_steps)
                               for i in self.agg_steps_list]
            if len(self.steps_list) != len(set(self.steps_list)):
                raise ValueError("agg_steps is too small, try a larger one.")
            logger.info(f"Aggregate model by steps: {self.agg_steps_list}")
        if state.global_step in self.steps_list:
            idx = self.steps_list.index(state.global_step)
            if not self.is_standalone:
                # Uniform weighting across parties; sample-count based
                # weighting is intentionally disabled.
                factor = 1
                adapters_weights = get_adapter_state_dict(model, self.peft_type)
                logger.info(f"uploading..., global_step={state.global_step}")
                self.agg_inst.upload(adapters_weights, factor)
                logger.info(f"downloading..., global_step={state.global_step}")
                new_adapters_weights = self.agg_inst.download()
                set_adapter_state_dict(model, self.peft_type, new_adapters_weights)
            if args.output_dir and args.save_strategy != 'no':
                if self.peft_type != "PREFIX_TUNING":
                    model.save_pretrained(save_directory=Path(args.output_dir) / f"checkpoint-{str(self.agg_steps_list[idx])}")
                else:
                    # Prefix tuning: persist only the trainable prefix weights.
                    model.save_pretrained(save_directory=Path(args.output_dir) / f"checkpoint-{str(self.agg_steps_list[idx])}",
                                          state_dict=get_adapter_state_dict(model, self.peft_type))
            control.should_log = True
def get_adapter_state_dict(model: PeftModel, peft_type: str, **kwargs):
    """Extract the trainable adapter weights of ``model`` as a CPU state dict.

    For PREFIX_TUNING, every parameter with ``requires_grad`` is collected
    from the model's state dict; for other peft types (e.g. LORA) extraction
    is delegated to peft's helper for the active adapter. All tensors are
    deep-copied onto the CPU so they can be transmitted/aggregated safely.
    """
    if peft_type == "PREFIX_TUNING":
        full_state = model.state_dict()
        return {
            name: deepcopy(full_state[name]).to('cpu')
            for name, param in model.named_parameters()
            if param.requires_grad
        }
    # LORA and other adapter types: use peft's own extraction helper.
    weights = get_peft_model_state_dict(
        model,
        state_dict=kwargs.get("state_dict", None),
        adapter_name=model.active_adapter,
    )
    for name in weights:
        weights[name] = deepcopy(weights[name]).to('cpu')
    return weights
def set_adapter_state_dict(model: PeftModel, peft_type: str, adapters_weights: dict, **kwargs):
    """Load ``adapters_weights`` (as produced by ``get_adapter_state_dict``)
    into ``model``.

    For PREFIX_TUNING the weights are merged into the full state dict and
    loaded directly. For other peft types the weights are installed via
    peft's helper and, if the model is dispatched across devices with
    cpu/disk offload, the accelerate hooks are re-applied (logic adapted
    from peft's own loading path).
    """
    if peft_type == "PREFIX_TUNING":
        state_dict = model.state_dict()
        state_dict.update(adapters_weights)
        model.load_state_dict(state_dict)
    else:
        adapter_name = model.active_adapter
        # load the weights into the model
        set_peft_model_state_dict(model, adapters_weights, adapter_name=adapter_name)
        # Re-dispatch only when the model is sharded with cpu/disk offload
        # and exactly one adapter is configured.
        if (
            (getattr(model, "hf_device_map", None) is not None)
            and (len(set(model.hf_device_map.values()).intersection({"cpu", "disk"})) > 0)
            and len(model.peft_config) == 1
        ):
            device_map = kwargs.get("device_map", "auto")
            max_memory = kwargs.get("max_memory", None)
            offload_dir = kwargs.get("offload_folder", None)
            offload_index = kwargs.get("offload_index", None)
            dispatch_model_kwargs = {}
            # Safety checker for previous `accelerate` versions
            # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/
            if "offload_index" in inspect.signature(dispatch_model).parameters:
                dispatch_model_kwargs["offload_index"] = offload_index
            no_split_module_classes = model._no_split_modules
            if device_map != "sequential":
                # Balance memory across devices unless explicitly sequential.
                max_memory = get_balanced_memory(
                    model,
                    max_memory=max_memory,
                    no_split_module_classes=no_split_module_classes,
                    low_zero=(device_map == "balanced_low_0"),
                )
            if isinstance(device_map, str):
                device_map = infer_auto_device_map(
                    model, max_memory=max_memory, no_split_module_classes=no_split_module_classes
                )
            dispatch_model(
                model,
                device_map=device_map,
                offload_dir=offload_dir,
                **dispatch_model_kwargs,
            )
            # Ensure inputs/outputs land on the same device as the base model.
            hook = AlignDevicesHook(io_same_device=True)
            if isinstance(model.peft_config[adapter_name], PromptLearningConfig):
                remove_hook_from_submodules(model.prompt_encoder)
            add_hook_to_module(model.get_base_model(), hook)
        # Set model in evaluation mode to deactivate Dropout modules by default
        # model.eval()
| 14,501 | 41.527859 | 127 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/bert/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.agg_type import \
register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalBertLabelTrainer(Common):
    """Label-trainer side of horizontal BERT training: runs local epochs
    and participates in the configured aggregation scheme."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        agg_type = next(iter(self.common_config.aggregation["method"]))
        # Evaluate on the training set right after each local training loop.
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_type)

    def train_loop(self):
        """Run one local training epoch and record the mean batch loss."""
        self.model.train()
        loss_fn = next(iter(self.lossfunc.values()))
        opt = next(iter(self.optimizer.values()))
        scheduler = next(iter(self.lr_scheduler.values()), None)
        epoch_loss = 0.0
        for input_ids, token_type_ids, attention_masks, labels in self.train_dataloader:
            _, _, logits = self.model(input_ids, token_type_ids, attention_masks, labels)
            batch_loss = loss_fn(logits, labels)
            opt.zero_grad()
            batch_loss.backward()
            opt.step()
            epoch_loss += batch_loss.item()
        epoch_loss /= len(self.train_dataloader)
        # Scheduler is stepped per epoch, not per batch.
        if scheduler is not None:
            scheduler.step()
        self.context["train_loss"] = epoch_loss
        logger.info(f"Train loss: {epoch_loss}")
| 2,106 | 38.754717 | 111 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/bert/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalBertAssistTrainer(Common):
    """Assist-trainer side of horizontal BERT training: aggregates model
    parameters, validates and checkpoints, but does no local training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        agg_type = next(iter(self.common_config.aggregation["method"].keys()))
        register_agg_type_for_assist_trainer(self, 'torch', agg_type)
        # Per-epoch hooks: checkpoint first, then validate on the val set.
        epoch_hooks = [
            (1, partial(self._save_model, False), "save model "),
            (2, partial(self.val_loop, "val"), "validation on valset"),
        ]
        for rank, func, desc in epoch_hooks:
            self.register_hook(place="after_local_epoch", rank=rank,
                               func=func, desc=desc)
        self.register_hook(place="after_global_epoch", rank=1,
                           func=partial(self._save_model, True), desc="save final model")

    def train_loop(self):
        """No-op: the assist trainer only aggregates, it does not train."""
| 1,529 | 42.714286 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/bert/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import pandas as pd
import numpy as np
from transformers import BertTokenizer
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from python.algorithm.model.bert import BertForSst2Torch
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class Common(BaseTrainer):
    """Shared logic for the horizontal BERT (SST-2) trainers.

    Synchronizes the shared training config across parties, builds the
    model, tokenizes the tsv datasets and provides the common validation
    loop (metrics + early stopping).
    """

    def __init__(self, train_conf: dict) -> None:
        # Only the shared hyper-parameter sections are synchronized from the
        # assist trainer; input/output paths stay party-local.
        sync_rule = {
            "model_info": All(),
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Instantiate the BERT SST-2 model from model_info["config"]."""
        model_config = self.common_config.model_info.get("config")
        model = BertForSst2Torch(**model_config)
        return model

    def _read_data(self, input_dataset):
        """Read the first dataset config entry (a tsv file).

        Returns:
            (sentences, labels) as numpy arrays, or None when the config
            list is empty.
        """
        if not input_dataset:
            return None
        conf = input_dataset[0]
        path = os.path.join(conf['path'], conf['name'])
        raw_data = pd.read_csv(path, sep='\t')
        return raw_data["sentence"].values, raw_data["label"].values

    def _encode_examples(self, data, tokenizer, max_length=512):
        """Tokenize (sentences, labels) into a TensorDataset of
        (input_ids, token_type_ids, attention_masks, labels)."""
        input_ids, token_type_ids, attention_masks, labels = [], [], [], []
        for feature, label in zip(*data):
            bert_input = tokenizer.encode_plus(feature,
                                               add_special_tokens=True,
                                               max_length=max_length,
                                               padding='max_length',
                                               return_token_type_ids=True,
                                               return_attention_mask=True)
            input_ids.append(bert_input['input_ids'])
            token_type_ids.append(bert_input['token_type_ids'])
            attention_masks.append(bert_input['attention_mask'])
            labels.append(label)
        return TensorDataset(torch.tensor(input_ids), torch.tensor(token_type_ids),
                             torch.tensor(attention_masks), torch.tensor(labels))

    def _set_train_dataloader(self):
        """Build the shuffled training DataLoader, or None without a trainset."""
        train_dataloader = None
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        train_data = self._read_data(self.common_config.input_trainset)
        if train_data is not None:
            train_dataset = self._encode_examples(train_data, tokenizer)
            batch_size = self.common_config.train_params.get(
                "train_batch_size")
            train_dataloader = DataLoader(
                train_dataset, batch_size, shuffle=True)
        return train_dataloader

    def _set_val_dataloader(self):
        """Build the (unshuffled) validation DataLoader, or None without a valset."""
        val_dataloader = None
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        val_data = self._read_data(self.common_config.input_valset)
        if val_data is not None:
            val_dataset = self._encode_examples(val_data, tokenizer)
            batch_size = self.common_config.train_params.get("val_batch_size")
            val_dataloader = DataLoader(val_dataset, batch_size, shuffle=False)
        return val_dataloader

    def val_loop(self, dataset_type: str = "val", context: dict = None):
        """Evaluate the model, persist metrics, and update early stopping.

        Args:
            dataset_type: "val" or "train" — which dataloader to evaluate.
            context: unused; kept for hook-interface compatibility.
                (Default changed from a shared mutable ``{}`` to ``None``.)

        Raises:
            ValueError: if ``dataset_type`` is neither "val" nor "train".
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        # Inference only: disable autograd to save memory/time.
        with torch.no_grad():
            for input_ids, token_type_ids, attention_masks, label in dataloader:
                _, _, pred = self.model(
                    input_ids, token_type_ids, attention_masks, label)
                loss = lossfunc(pred, label)
                val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
                val_loss += loss.item()
                labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # 1-D outputs are binary probabilities; 2-D outputs are class logits.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # dataset_type was validated above, so this covers all cases.
        local_epoch = self.context["l_epoch"] if dataset_type == "train" else None
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        # Early stopping is evaluated only on the validation set; use logical
        # `and` (the original bitwise `&` happened to work on bools only).
        if (self.common_config.save_frequency > 0) and \
                (dataset_type == "val") and (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # find the saved epoch closest to the best epoch
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 7,291 | 39.966292 | 99 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/gcn_mol/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalGcnMolLabelTrainer(Common):
    """Label-trainer side of horizontal GCN molecular-property training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        agg_type = list(self.common_config.aggregation["method"])[0]
        # Evaluate on the training set right after each local training loop.
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_type)

    def train_loop(self):
        """Run one local training epoch over the molecule graphs and record
        the mean batch loss."""
        self.model.train()
        loss_fn = next(iter(self.lossfunc.values()))
        optim = next(iter(self.optimizer.values()))
        sched = next(iter(self.lr_scheduler.values()), None)
        running_loss = 0.0
        for smiles, bg, labels, masks in self.train_dataloader:
            # Node features were stashed under 'h' by the graph featurizer.
            node_feats = bg.ndata.pop('h')
            logits = self.model(bg, node_feats)
            labels = labels.reshape((-1, 1))
            loss = loss_fn(logits, labels)
            optim.zero_grad()
            loss.backward()
            optim.step()
            running_loss += loss.item()
        running_loss /= len(self.train_dataloader)
        # Scheduler is stepped per epoch, not per batch.
        if sched is not None:
            sched.step()
        self.context["train_loss"] = running_loss
        logger.info(f"Train loss: {running_loss}")
| 2,116 | 36.803571 | 91 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/gcn_mol/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalGcnMolAssistTrainer(Common):
    """Assist-trainer side of horizontal GCN molecule training: aggregates
    parameters, validates and checkpoints, but performs no local training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        method_conf = self.common_config.aggregation["method"]
        agg_type = next(iter(method_conf.keys()))
        register_agg_type_for_assist_trainer(self, 'torch', agg_type)
        for hook_kwargs in (
            dict(place="after_local_epoch", rank=1,
                 func=partial(self._save_model, False), desc="save model "),
            dict(place="after_local_epoch", rank=2,
                 func=partial(self.val_loop, "val"), desc="validation on valset"),
            dict(place="after_global_epoch", rank=1,
                 func=partial(self._save_model, True), desc="save final model"),
        ):
            self.register_hook(**hook_kwargs)

    def train_loop(self):
        """No-op: the assist trainer only aggregates, it does not train."""
| 1,528 | 42.685714 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/gcn_mol/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import dgl
from dgllife.model import GCNPredictor
from dgllife.utils import SMILESToBigraph, CanonicalAtomFeaturizer
from algorithm.core.data_io import CsvReader, NpzReader
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class SmilesDataset(Dataset):
    """In-memory dataset pairing SMILES strings with molecular graphs and labels.

    Args:
        smiles: sequence of SMILES strings.
        graphs: sequence of pre-built molecular graphs, aligned with ``smiles``.
        labels: sequence of labels, aligned with ``graphs``.

    Raises:
        ValueError: if the three sequences differ in length.
    """

    def __init__(self, smiles, graphs, labels):
        # Use explicit exceptions instead of asserts so the length checks
        # survive running under ``python -O`` (which strips asserts).
        if len(smiles) != len(graphs):
            raise ValueError("Inconsistent lengths of smiles and graphs")
        if len(graphs) != len(labels):
            raise ValueError("Inconsistent lengths of graphs and labels")
        self.smiles = smiles
        self.graphs = graphs
        self.labels = labels

    def __len__(self):
        """Number of datapoints."""
        return len(self.smiles)

    def __getitem__(self, index):
        """Return the ``(smiles, graph, label)`` triple at ``index``."""
        return self.smiles[index], self.graphs[index], self.labels[index]
def collate_molgraphs(data):
    """Batch a list of datapoints for a DataLoader.

    (Adapted from dgllife's MoleculeNet property-prediction example.)

    Args:
        data: list of 3-tuples ``(smiles, graph, labels)`` or 4-tuples
            ``(smiles, graph, labels, masks)``, where ``masks`` is a binary
            tensor marking which task labels exist.

    Returns:
        tuple: ``(smiles, bg, labels, masks)`` — a list of SMILES, the
        batched DGLGraph, a stacked label tensor of shape (B, T), and a
        same-shape mask tensor (all ones when the input carried no masks).
    """
    has_masks = len(data[0]) != 3
    if has_masks:
        smiles, graphs, labels, masks = map(list, zip(*data))
    else:
        smiles, graphs, labels = map(list, zip(*data))
        masks = None
    bg = dgl.batch(graphs)
    # Zero-initialize any missing node/edge features on the batched graph.
    bg.set_n_initializer(dgl.init.zero_initializer)
    bg.set_e_initializer(dgl.init.zero_initializer)
    labels = torch.stack(labels, dim=0)
    masks = torch.stack(masks, dim=0) if has_masks else torch.ones(labels.shape)
    return smiles, bg, labels, masks
class Common(BaseTrainer):
    """Shared logic for the horizontal GCN molecular-property trainer.

    Responsibilities: synchronize the cross-party training config, build the
    dgllife ``GCNPredictor``, convert SMILES datasets to batched DGL-graph
    dataloaders, and run the evaluation loop (metrics, CSV export, early
    stopping).
    """

    def __init__(self, train_conf: dict) -> None:
        # Only the listed config sections are synchronized between
        # participants; everything else stays local to each party.
        sync_rule = {
            "model_info": All(),
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Instantiate the GCNPredictor from ``model_info.config``."""
        model_config = self.common_config.model_info.get("config")
        model_params = self._prepare_model_params(model_config)
        model = GCNPredictor(**model_params)
        return model

    def _prepare_model_params(self, model_config):
        """Translate the flat model config into GCNPredictor keyword args.

        Per-layer options (hidden feats, activation, dropout, batchnorm,
        residual) are replicated ``num_gnn_layers`` times, as dgllife expects
        one entry per GNN layer.
        """
        config = {}
        config['in_feats'] = model_config.get("input_dim", 100)
        num_gnn_layers = model_config.get('num_gnn_layers', 1)
        config['hidden_feats'] = [model_config.get(
            'gnn_hidden_feats', 64)] * num_gnn_layers
        # Fix: use .get() instead of [] so a missing 'activation' key falls
        # through to the relu default instead of raising KeyError (the relu
        # fallback branch below was unreachable for a missing key).
        activation = model_config.get('activation')
        if activation == 'relu':
            config['activation'] = [F.relu] * num_gnn_layers
        elif activation == 'tanh':
            config['activation'] = [F.tanh] * num_gnn_layers
        else:
            logger.info("Setting gnn activation to relu")
            config['activation'] = [F.relu] * num_gnn_layers
        config['dropout'] = [model_config.get("dropout", 0.5)] * num_gnn_layers
        config['batchnorm'] = [model_config.get(
            'batchnorm', False)] * num_gnn_layers
        config['residual'] = [model_config.get(
            "residual", False)] * num_gnn_layers
        config['predictor_hidden_feats'] = model_config.get(
            'predictor_hidden_dim', 64)
        config['n_tasks'] = model_config.get('n_tasks', 1)
        return config

    def _read_data(self, input_dataset):
        """Return a reader for the first dataset config, or None.

        Supports 'csv' (CsvReader) and 'npz' (NpzReader) dataset types;
        any other type yields None.
        """
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        elif conf["type"] == "npz":
            path = os.path.join(conf['path'], conf['name'])
            return NpzReader(path)
        else:
            return None

    def _build_graph_dataloader(self, data, batch_size):
        """Turn a reader's SMILES/labels into a shuffled DataLoader of graphs.

        Shared by the train/val builders (they previously duplicated this
        code). Molecules that fail graph featurization are dropped — with a
        warning — so one bad SMILES does not abort training.
        """
        smiles = data.features(type="series").values.reshape((-1))
        labels = data.label().astype(np.int32)
        smiles_to_g = SMILESToBigraph(
            add_self_loop=True,
            node_featurizer=CanonicalAtomFeaturizer()
        )
        clean_smiles, clean_graphs, clean_labels = [], [], []
        num_failed = 0
        for i, smile in enumerate(smiles):
            g = smiles_to_g(smile)
            if g is not None:
                clean_smiles.append(smile)
                clean_graphs.append(g)
                clean_labels.append(labels[i])
            else:
                num_failed += 1
        if num_failed:
            logger.warning(
                f"Failed to featurize {num_failed} molecules; they are skipped.")
        dataset = SmilesDataset(
            clean_smiles, clean_graphs, torch.Tensor(clean_labels))
        if not dataset:
            # Every molecule failed featurization: nothing to iterate over.
            return None
        return DataLoader(
            dataset, batch_size=batch_size, shuffle=True,
            collate_fn=collate_molgraphs
        )

    def _set_train_dataloader(self):
        """Build the training DataLoader; None when no trainset configured."""
        train_data = self._read_data(self.common_config.input_trainset)
        if not train_data:
            return None
        batch_size = self.common_config.train_params.get("train_batch_size")
        return self._build_graph_dataloader(train_data, batch_size)

    def _set_val_dataloader(self):
        """Build the validation DataLoader; None when no valset configured."""
        val_data = self._read_data(self.common_config.input_valset)
        if not val_data:
            return None
        batch_size = self.common_config.train_params.get("val_batch_size")
        return self._build_graph_dataloader(val_data, batch_size)

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Evaluate on the train or val set and drive metrics/early stopping.

        Args:
            dataset_type: "val" or "train" — which dataloader to evaluate.
            context: unused here; kept for hook-interface compatibility.

        Raises:
            ValueError: if ``dataset_type`` is neither "val" nor "train".
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        # Evaluation never backpropagates: skip autograd graph construction.
        with torch.no_grad():
            for smiles, bg, label, masks in dataloader:
                node_feats = bg.ndata.pop('h')
                logits = self.model(bg, node_feats)
                label = label.reshape((-1, 1))
                loss = lossfunc(logits, label)
                val_predicts.append(logits.detach().cpu().squeeze(-1).numpy())
                val_loss += loss.item()
                labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # 1-D outputs are thresholded at 0.5; 2-D outputs take the argmax class.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # Local epoch only applies to the per-party training pass.
        local_epoch = self.context["l_epoch"] if dataset_type == "train" else None
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        if (self.common_config.save_frequency > 0) and \
                (dataset_type == "val") and (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # Map the best epoch onto the nearest epoch that was actually
            # saved (models are checkpointed every save_frequency epochs).
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 11,572 | 34.069697 | 89 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/densenet/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalDensenetLabelTrainer(Common):
    """Label-trainer side of horizontal DenseNet training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # The first configured aggregation method selects the template.
        agg_type = list(self.common_config.aggregation["method"].keys())[0]
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_type)

    def train_loop(self):
        """Run one local epoch and record the mean batch loss in the context."""
        self.model.train()
        lossfunc = next(iter(self.lossfunc.values()))
        optimizer = next(iter(self.optimizer.values()))
        scheduler = next(iter(self.lr_scheduler.values())) if self.lr_scheduler.values() else None
        running_loss = 0.0
        for feature, label in self.train_dataloader:
            optimizer.zero_grad()
            loss = lossfunc(self.model(feature), label)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        running_loss /= len(self.train_dataloader)
        # Schedulers step once per local epoch, not per batch.
        if scheduler is not None:
            scheduler.step()
        self.context["train_loss"] = running_loss
        logger.info(f"Train loss: {running_loss}")
| 2,017 | 37.807692 | 98 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/densenet/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalDensenetAssistTrainer(Common):
    """Assist-trainer (aggregator) side of horizontal DenseNet training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        agg_type = list(self.common_config.aggregation["method"].keys())[0]
        register_agg_type_for_assist_trainer(self, 'torch', agg_type)
        # Checkpoint + validate each local epoch; persist the final model.
        for place, rank, func, desc in (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        ):
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # The aggregator performs no local training; its work runs in hooks.
        pass
| 1,537 | 42.942857 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/densenet/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from PIL import Image
import torchvision.transforms as transforms
from algorithm.core.data_io import CsvReader, NpzReader
from algorithm.model.densenet import DenseNet
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class Common(BaseTrainer):
    """Shared logic for the horizontal DenseNet image-classification trainer.

    Synchronizes the cross-party training config, builds the DenseNet model,
    provides image dataloaders with train/test-time transforms, and runs the
    shared validation loop (metrics, CSV export, early stopping).
    """
    def __init__(self, train_conf: dict) -> None:
        """Synchronize shared config sections across parties, then init the base trainer."""
        # Only the listed sections are synchronized between participants.
        sync_rule = {
            "model_info": All(),
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)
    def _set_model(self) -> nn.Module:
        """Build the DenseNet described by ``model_info.config`` and move it to the device."""
        model_config = self.common_config.model_info.get("config")
        model = DenseNet(num_classes=model_config["num_classes"], layers=model_config["layers"])
        model = model.to(self.device)
        return model
    def _read_data(self, input_dataset):
        """Return a CsvReader/NpzReader for the first dataset config, or None.

        Unrecognized dataset types (and an empty config list) yield None.
        """
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        elif conf["type"] == "npz":
            path = os.path.join(conf['path'], conf['name'])
            return NpzReader(path)
        else:
            return None
    def _set_train_dataloader(self):
        """Build the training DataLoader with augmentation; None when no trainset."""
        def img_collate_fn(batch):
            # Per-batch collate: convert raw arrays to PIL images, apply
            # train-time augmentation + normalization (the constants look
            # like the standard CIFAR-10 mean/std — confirm against the
            # dataset actually used), then stack and move to the device.
            labels = []
            imgs = []
            transform_train = transforms.Compose([
                transforms.RandomCrop(32, padding=4),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                imgs.append(transform_train(img))
                labels.append(label)
            return torch.stack(imgs,0).to(self.device), torch.stack(labels, 0).long().to(self.device)
        train_data = self._read_data(self.common_config.input_trainset)
        trainset = None
        train_dataloader = None
        if train_data:
            trainset = TensorDataset(
                torch.tensor(train_data.features()), torch.tensor(train_data.label())
            )
        batch_size = self.common_config.train_params.get("train_batch_size")
        if trainset:
            train_dataloader = DataLoader(
                trainset, batch_size, shuffle=True, collate_fn=img_collate_fn
            )
        return train_dataloader
    def _set_val_dataloader(self):
        """Build the validation DataLoader (no augmentation); None when no valset."""
        def img_collate_fn(batch):
            # Test-time collate: normalization only, no random augmentation.
            labels = []
            imgs = []
            transform_test = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
            for feature, label in batch:
                img = Image.fromarray(feature.numpy().astype(np.uint8))
                imgs.append(transform_test(img))
                labels.append(label)
            return torch.stack(imgs,0).to(self.device), torch.stack(labels, 0).long().to(self.device)
        val_data = self._read_data(self.common_config.input_valset)
        valset = None
        val_dataloader = None
        if val_data:
            valset = TensorDataset(
                torch.tensor(val_data.features()), torch.tensor(val_data.label())
            )
        batch_size = self.common_config.train_params.get("val_batch_size")
        if valset:
            val_dataloader = DataLoader(
                valset, batch_size, shuffle=True, collate_fn=img_collate_fn
            )
        return val_dataloader
    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Evaluate on the train or val set; record metrics and early stopping.

        Args:
            dataset_type: "val" or "train" — selects the dataloader.
            context: unused here; kept for hook-interface compatibility.

        Raises:
            ValueError: if ``dataset_type`` is neither "val" nor "train".
        """
        # NOTE(review): evaluation runs without torch.no_grad(); predictions
        # are detached, but autograd state is still built — consider no_grad.
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        for batch, (feature, label) in enumerate(dataloader):
            pred = self.model(feature)
            loss = lossfunc(pred, label)
            val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
            val_loss += loss.item()
            labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # 1-D outputs: threshold at 0.5; 2-D outputs: argmax over classes.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # Local epoch only applies when evaluating on the training pass.
        if dataset_type == "val":
            local_epoch = None
        elif dataset_type == "train":
            local_epoch = self.context["l_epoch"]
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        if (self.common_config.save_frequency > 0) & \
                (dataset_type == "val") & (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # find the saved epoch closest to the best epoch
            # (models are checkpointed every save_frequency epochs)
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 7,943 | 37.75122 | 101 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/logistic_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
class HorizontalLogisticRegressionLabelTrainer(Common):
    """Label-trainer side of horizontal logistic regression."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # The first configured aggregation method selects the template.
        agg_type = list(self.common_config.aggregation["method"].keys())[0]
        register_agg_type_for_label_trainer(self, 'torch', agg_type)
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )

    def train_loop(self):
        """Run one local epoch and record the mean batch loss in the context."""
        self.model.train()
        lossfunc = next(iter(self.lossfunc.values()))
        optimizer = next(iter(self.optimizer.values()))
        scheduler = next(iter(self.lr_scheduler.values())) if self.lr_scheduler.values() else None
        running_loss = 0.0
        for feature, label in self.train_dataloader:
            optimizer.zero_grad()
            loss = lossfunc(self.model(feature), label)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        running_loss /= len(self.train_dataloader)
        # Schedulers step once per local epoch, not per batch.
        if scheduler is not None:
            scheduler.step()
        self.context["train_loss"] = running_loss
        logger.info(f"Train loss: {running_loss}")
| 2,042 | 36.145455 | 98 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/logistic_regression/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalLogisticRegressionAssistTrainer(Common):
    """Assist-trainer (aggregator) side of horizontal logistic regression."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        agg_type = list(self.common_config.aggregation["method"].keys())[0]
        register_agg_type_for_assist_trainer(self, 'torch', agg_type)
        # Checkpoint + validate each local epoch; persist the final model.
        for place, rank, func, desc in (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        ):
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # The aggregator performs no local training; its work runs in hooks.
        pass
| 1,543 | 43.114286 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/logistic_regression/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.core.data_io import CsvReader
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from algorithm.model.logistic_regression import LogisticRegression
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
from common.utils.config_sync import ConfigSynchronizer
class Common(BaseTrainer):
    """Shared logic for the horizontal logistic-regression trainer.

    Synchronizes the cross-party training config, builds the model, wraps the
    CSV datasets in DataLoaders on the configured device, and runs the shared
    validation loop (metrics, CSV export, early stopping).
    """

    def __init__(self, train_conf: dict):
        # Only the listed config sections are synchronized between parties.
        sync_rule = {
            "model_info": {
                "config": All()
            },
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "aggregation": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "lr_scheduler": All(),
                    "lossfunc": All(),
                    "metric": All(),
                    "early_stopping": All()
                }
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        super().__init__(train_conf)

    def _set_model(self) -> nn.Module:
        """Build the logistic-regression module from ``model_info.config``."""
        model_config = self.common_config.model_info.get("config")
        model = LogisticRegression(input_dim=model_config["input_dim"],
                                   bias=model_config["bias"])
        return model

    def _read_data(self, input_dataset):
        """Return a CsvReader for the first dataset config, or None.

        Only the 'csv' dataset type is supported here.
        """
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_label = conf["has_label"]
            has_id = conf['has_id']
            return CsvReader(path, has_id, has_label)
        else:
            return None

    def _make_dataloader(self, reader, batch_size):
        """Wrap a reader's features/labels in a shuffled device DataLoader.

        Shared by the train/val builders (they previously duplicated this
        code). Labels gain a trailing dim to match the model's (N, 1) output.
        Returns None for an empty dataset, matching the original behavior.
        """
        dataset = TensorDataset(
            torch.tensor(reader.features(),
                         dtype=torch.float32).to(self.device),
            torch.tensor(reader.label(), dtype=torch.float32).unsqueeze(
                dim=-1).to(self.device)
        )
        if not dataset:
            return None
        return DataLoader(dataset, batch_size, shuffle=True)

    def _set_train_dataloader(self):
        """Build the training DataLoader; None when no trainset configured."""
        train_data = self._read_data(self.common_config.input_trainset)
        if not train_data:
            return None
        batch_size = self.common_config.train_params.get("train_batch_size")
        return self._make_dataloader(train_data, batch_size)

    def _set_val_dataloader(self):
        """Build the validation DataLoader; None when no valset configured."""
        val_data = self._read_data(self.common_config.input_valset)
        if not val_data:
            return None
        batch_size = self.common_config.train_params.get("val_batch_size")
        return self._make_dataloader(val_data, batch_size)

    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Evaluate on the train or val set and drive metrics/early stopping.

        Args:
            dataset_type: "val" or "train" — which dataloader to evaluate.
            context: unused here; kept for hook-interface compatibility.

        Raises:
            ValueError: if ``dataset_type`` is neither "val" nor "train".
        """
        self.model.eval()
        val_loss = 0
        val_predicts = []
        labels = []
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        # Evaluation never backpropagates: skip autograd graph construction.
        with torch.no_grad():
            for feature, label in dataloader:
                pred = self.model(feature)
                loss = lossfunc(pred, label)
                val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
                val_loss += loss.item()
                labels.append(label.cpu().squeeze(-1).numpy())
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # 1-D outputs are thresholded at 0.5; 2-D outputs take the argmax class.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # Local epoch only applies to the per-party training pass.
        local_epoch = self.context["l_epoch"] if dataset_type == "train" else None
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        if (self.common_config.save_frequency > 0) and \
                (dataset_type == "val") and (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # Map the best epoch onto the nearest epoch that was actually
            # saved (models are checkpointed every save_frequency epochs).
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 6,601 | 36.089888 | 89 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/resnet/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_label_trainer
from common.utils.logger import logger
from .common import Common
from functools import partial
class HorizontalResnetLabelTrainer(Common):
    """Label-trainer side of horizontal ResNet training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        # The first configured aggregation method selects the template.
        agg_type = list(self.common_config.aggregation["method"].keys())[0]
        self.register_hook(
            place="after_train_loop", rank=1,
            func=partial(self.val_loop, "train"), desc="validation on trainset"
        )
        register_agg_type_for_label_trainer(self, 'torch', agg_type)

    def train_loop(self):
        """Run one local epoch and record the mean batch loss in the context."""
        self.model.train()
        lossfunc = next(iter(self.lossfunc.values()))
        optimizer = next(iter(self.optimizer.values()))
        scheduler = next(iter(self.lr_scheduler.values())) if self.lr_scheduler.values() else None
        running_loss = 0.0
        for feature, label in self.train_dataloader:
            optimizer.zero_grad()
            loss = lossfunc(self.model(feature), label)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        running_loss /= len(self.train_dataloader)
        # Schedulers step once per local epoch, not per batch.
        if scheduler is not None:
            scheduler.step()
        self.context["train_loss"] = running_loss
        logger.info(f"Train loss: {running_loss}")
| 2,015 | 37.769231 | 98 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/resnet/assist_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from .common import Common
from algorithm.core.horizontal.template.agg_type import register_agg_type_for_assist_trainer
class HorizontalResnetAssistTrainer(Common):
    """Assist-trainer (aggregator) side of horizontal ResNet training."""

    def __init__(self, train_conf: dict):
        super().__init__(train_conf)
        agg_type = list(self.common_config.aggregation["method"].keys())[0]
        register_agg_type_for_assist_trainer(self, 'torch', agg_type)
        # Checkpoint + validate each local epoch; persist the final model.
        for place, rank, func, desc in (
            ("after_local_epoch", 1, partial(self._save_model, False), "save model "),
            ("after_local_epoch", 2, partial(self.val_loop, "val"), "validation on valset"),
            ("after_global_epoch", 1, partial(self._save_model, True), "save final model"),
        ):
            self.register_hook(place=place, rank=rank, func=func, desc=desc)

    def train_loop(self):
        # The aggregator performs no local training; its work runs in hooks.
        pass
| 1,531 | 42.771429 | 92 | py |
XFL | XFL-master/python/algorithm/framework/horizontal/resnet/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from PIL import Image
import torchvision.transforms as transforms
from algorithm.core.data_io import CsvReader, NpzReader
from algorithm.model.resnet import ResNet
from common.utils.logger import logger
from algorithm.core.horizontal.template.torch.base import BaseTrainer
from common.utils.config_sync import ConfigSynchronizer
from common.checker.x_types import All
from common.evaluation.metrics import CommonMetrics
class Common(BaseTrainer):
def __init__(self, train_conf: dict) -> None:
sync_rule = {
"model_info": All(),
"train_info": {
"interaction_params": All(),
"train_params": {
"global_epoch": All(),
"aggregation": All(),
"encryption": All(),
"optimizer": All(),
"lr_scheduler": All(),
"lossfunc": All(),
"metric": All(),
"early_stopping": All()
}
}
}
train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
super().__init__(train_conf)
def _set_model(self) -> nn.Module:
model_config = self.common_config.model_info.get("config")
model = ResNet(num_classes=model_config["num_classes"], layers=model_config["layers"])
model = model.to(self.device)
# model = torch.nn.DataParallel(model)
# torch.backends.cudnn.benchmark = True
return model
def _read_data(self, input_dataset):
if len(input_dataset) == 0:
return None
conf = input_dataset[0]
if conf["type"] == "csv":
path = os.path.join(conf['path'], conf['name'])
has_label = conf["has_label"]
has_id = conf['has_id']
return CsvReader(path, has_id, has_label)
elif conf["type"] == "npz":
path = os.path.join(conf['path'], conf['name'])
return NpzReader(path)
else:
return None
def _set_train_dataloader(self):
def img_collate_fn(batch):
labels = []
imgs = []
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
for feature, label in batch:
img = Image.fromarray(feature.numpy().astype(np.uint8))
imgs.append(transform_train(img))
labels.append(label)
return torch.stack(imgs,0).to(self.device), torch.stack(labels, 0).long().to(self.device)
train_data = self._read_data(self.common_config.input_trainset)
trainset = None
train_dataloader = None
if train_data:
trainset = TensorDataset(
torch.tensor(train_data.features()), torch.tensor(train_data.label())
)
batch_size = self.common_config.train_params.get("train_batch_size")
if trainset:
train_dataloader = DataLoader(
trainset, batch_size, shuffle=True, collate_fn=img_collate_fn
)
return train_dataloader
def _set_val_dataloader(self):
def img_collate_fn(batch):
labels = []
imgs = []
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
for feature, label in batch:
img = Image.fromarray(feature.numpy().astype(np.uint8))
imgs.append(transform_test(img))
labels.append(label)
return torch.stack(imgs,0).to(self.device), torch.stack(labels, 0).long().to(self.device)
val_data = self._read_data(self.common_config.input_valset)
valset = None
val_dataloader = None
if val_data:
valset = TensorDataset(
torch.tensor(val_data.features()), torch.tensor(val_data.label())
)
batch_size = self.common_config.train_params.get("val_batch_size")
if valset:
val_dataloader = DataLoader(
valset, batch_size, shuffle=True, collate_fn=img_collate_fn
)
return val_dataloader
    def val_loop(self, dataset_type: str = "val", context: dict = {}):
        """Evaluate the model on one dataset, save metrics, and update
        early-stopping state in ``self.context``.

        Args:
            dataset_type: "val" evaluates ``self.val_dataloader``; "train"
                re-evaluates ``self.train_dataloader``; anything else raises
                ValueError.
            context: unused in this body. NOTE(review): mutable default
                argument — harmless while unused, but confirm before extending.
        """
        self.model.eval()
        # NOTE(review): no torch.no_grad() wrapper here, so autograd state is
        # tracked during evaluation — confirm whether that is intentional.
        val_loss = 0
        val_predicts = []
        labels = []
        # Exactly one loss function is configured; take its name and callable.
        lossfunc_name = list(self.lossfunc.keys())[0]
        lossfunc = list(self.lossfunc.values())[0]
        if dataset_type == "val":
            dataloader = self.val_dataloader
        elif dataset_type == "train":
            dataloader = self.train_dataloader
        else:
            raise ValueError(f"dataset type {dataset_type} is not valid.")
        for batch, (feature, label) in enumerate(dataloader):
            pred = self.model(feature)
            loss = lossfunc(pred, label)
            val_predicts.append(pred.detach().cpu().squeeze(-1).numpy())
            val_loss += loss.item()
            labels.append(label.cpu().squeeze(-1).numpy())
        # Mean loss over batches (not samples).
        val_loss /= len(dataloader)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts: np.ndarray = np.concatenate(val_predicts, axis=0)
        # 1-D outputs: binary threshold at 0.5; 2-D outputs: argmax over classes.
        if len(val_predicts.shape) == 1:
            val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        elif len(val_predicts.shape) == 2:
            val_predicts = val_predicts.argmax(axis=-1)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=lossfunc_name,
            loss=val_loss,
            dataset_type=dataset_type
        )
        global_epoch = self.context["g_epoch"]
        # "val" metrics are reported per global epoch; "train" metrics also
        # record the local epoch.
        if dataset_type == "val":
            local_epoch = None
        elif dataset_type == "train":
            local_epoch = self.context["l_epoch"]
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=local_epoch,
            dataset_type=dataset_type,
        )
        early_stop_flag = self.context["early_stop_flag"]
        # Bitwise & works here because every operand is a bool; early stopping
        # is only consulted on the validation pass when checkpointing and a
        # positive patience are both configured.
        if (self.common_config.save_frequency > 0) & \
            (dataset_type == "val") & (self.earlystopping.patience > 0):
            early_stop_flag = self.earlystopping(metrics_output, global_epoch)
        if early_stop_flag:
            # find the saved epoch closest to the best epoch
            best_epoch = self.earlystopping.best_epoch
            closest_epoch = round(best_epoch / self.common_config.save_frequency) * \
                self.common_config.save_frequency
            # Rounding up may point past the current epoch (not saved yet);
            # step back one save interval in that case.
            closest_epoch -= self.common_config.save_frequency \
                if closest_epoch > global_epoch else 0
            self.context["early_stop_flag"] = True
            self.context["early_stop_epoch"] = closest_epoch
| 8,036 | 37.826087 | 101 | py |
XFL | XFL-master/python/algorithm/framework/transfer/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from typing import OrderedDict
from functools import partial
from common.utils.config_parser import CommonConfigParser
from algorithm.core.loss.torch_loss import get_lossfunc
from algorithm.core.metrics import get_metric
from algorithm.core.optimizer.torch_optimizer import get_optimizer
from algorithm.core.lr_scheduler.torch_lr_scheduler import get_lr_scheduler
class BaseTrainer:
    """Common scaffolding for transfer-learning trainers.

    Parses the shared config, seeds RNGs, and builds — in order — the model,
    dataloaders, optimizers, LR schedulers, loss functions, and metrics.
    Subclasses must implement ``_parse_config``, ``_set_model``,
    ``_set_train_dataloader`` and ``_set_val_dataloader``.
    """

    def __init__(self, train_conf: dict):
        self.common_config = CommonConfigParser(train_conf)
        if self.common_config.random_seed is not None:
            self.set_seed(self.common_config.random_seed)
        self.device = self.common_config.device
        self._parse_config()
        self.model = self._set_model()
        self._set_train_dataloader()
        self._set_val_dataloader()
        # Fix: optimizer and lr_scheduler were each constructed twice (the
        # first pair was immediately discarded); build each exactly once.
        self.optimizer = self._set_optimizer()
        self.lr_scheduler = self._set_lr_scheduler(self.optimizer)
        self.lossfunc = self._set_lossfunc()
        self.metrics = self._set_metrics()

    @staticmethod
    def set_seed(seed):
        # Seed both the CPU and CUDA generators for reproducibility.
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

    def _parse_config(self) -> None:
        """Parse subclass-specific configuration. Must be overridden.

        (Fix: the original annotation claimed ``-> nn.Module``; this method
        configures state and returns nothing.)
        """
        raise NotImplementedError("The _parse_config method is not implemented.")

    def _set_optimizer(self):
        """Build one optimizer per entry of the ``optimizer`` config section,
        each wrapping the full set of model parameters."""
        optimizer_conf = OrderedDict(self.common_config.optimizer)
        optimizer = OrderedDict()
        for k, v in optimizer_conf.items():
            optimizer[k] = get_optimizer(k)(self.model.parameters(), **v)
        return optimizer

    def _set_lossfunc(self):
        """Build one loss function per entry of the ``lossfunc`` config section."""
        lossfunc_conf = OrderedDict(self.common_config.lossfunc)
        lossfunc = OrderedDict()
        for k, v in lossfunc_conf.items():
            lossfunc[k] = get_lossfunc(k)(**v)
        return lossfunc

    def _set_lr_scheduler(self, optimizer: OrderedDict):
        """Pair each configured scheduler with the optimizer at the same
        position (schedulers and optimizers are zipped by insertion order)."""
        lr_scheduler_conf = OrderedDict(self.common_config.lr_scheduler)
        lr_scheduler = OrderedDict()
        for (k, v), o in zip(lr_scheduler_conf.items(), optimizer.values()):
            lr_scheduler[k] = get_lr_scheduler(k)(o, **v)
        return lr_scheduler

    def _set_metrics(self):
        """Return a dict of metric callables with config kwargs pre-bound."""
        metrics = {}
        metrics_conf: dict = self.common_config.metric
        for k, v in metrics_conf.items():
            metric = get_metric(k)
            metrics[k] = partial(metric, **v)
        return metrics

    def _set_model(self) -> nn.Module:
        """Create and return the model. Must be overridden."""
        raise NotImplementedError("The _set_model method is not implemented.")

    def _set_train_dataloader(self):
        """Create the training dataloader. Must be overridden."""
        raise NotImplementedError(
            "The _set_train_dataloader method is not implemented.")

    def _set_val_dataloader(self):
        """Create the validation dataloader. Must be overridden."""
        raise NotImplementedError(
            "The _set_val_dataloader method is not implemented.")
| 3,594 | 34.95 | 81 | py |
XFL | XFL-master/python/algorithm/framework/transfer/transfer_model_base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from functools import partial
from common.utils.config_parser import TrainConfigParser
from algorithm.core.metrics import get_metric
class TransferModelBase(TrainConfigParser):
    """Configuration holder shared by transfer-learning model trainers."""

    def __init__(self, train_conf: dict, label: bool = False):
        super().__init__(train_conf)
        self.train_conf = train_conf
        self.model_conf = train_conf["model_info"].get("config")
        self.label = label

    def _parse_config(self) -> None:
        """Cache frequently used configuration entries as attributes."""
        self.save_dir = Path(self.output.get("path", ""))
        self.metric_dir = self.save_dir
        # interaction_params
        self.model_name = self.model_info.get("name")
        self.save_model_name = self.output.get("model", {}).get("name", {})
        self.pretrain_model_path = self.input.get("pretrained_model", {}).get("path")
        conf = self.model_conf
        self.num_features = conf.get("num_features")
        self.hidden_features = conf.get("hidden_features")
        # Scaling constant used throughout the transfer protocol.
        self.constant_k = 1 / self.hidden_features
        self.alpha = conf.get("alpha")
        params = self.train_params
        self.global_epoch = params.get("global_epoch")
        self.local_epoch = params.get("local_epoch")
        self.batch_size = params.get("batch_size")
        self.shuffle_seed = params.get("shuffle_seed")
        self.random_seed = params.get("random_seed")

    @staticmethod
    def set_seed(seed):
        # Seed the CPU and CUDA generators alike.
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

    def _set_metrics(self):
        """Return a dict of metric callables with config kwargs pre-bound."""
        metrics_conf: dict = self.train_params.get("metric", {})
        return {name: partial(get_metric(name), **kwargs)
                for name, kwargs in metrics_conf.items()}
| 2,407 | 36.046154 | 85 | py |
XFL | XFL-master/python/algorithm/framework/transfer/logistic_regression/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.framework.transfer.transfer_model_base import TransferModelBase
from common.utils.logger import logger
from common.utils.model_preserver import ModelPreserver
class TransferLogisticRegression(nn.Module):
    """Single linear projection from raw features to the hidden representation."""

    def __init__(self, input_dim: int, output_dim: int, bias: bool = False):
        super().__init__()
        self.linear = nn.Linear(input_dim, output_dim, bias=bias)

    def forward(self, x):
        # Delegate straight to the wrapped linear layer.
        return self.linear(x)
class TransferLogisticRegressionBase(TransferModelBase):
    """Shared setup for the transfer logistic-regression parties.

    Loads the partially overlapping train/validation data, builds the
    dataloaders, and initializes the linear projection model.
    """

    def __init__(self, train_conf: dict, label: bool = False, *args, **kwargs):
        """Initialize state and build the dataloaders.

        Args:
            train_conf (dict): full training configuration.
            label (bool, optional): whether this party holds labels.
                Defaults to False.
        """
        super().__init__(train_conf)
        self._parse_config()
        self.label = label
        self.model = None
        self.phi = None  # phi will be saved in the checkpoint of label_trainer
        self.overlap_y, self.non_overlap_y = None, None
        self.overlap_train_dataloader, self.non_overlap_train_dataloader = None, None
        self.eval_dataloader = None
        self.metric_functions = {}
        self._set_train_dataloader()
        self._set_val_dataloader()

    def _init_model(self, bias: bool = False) -> None:
        """Init the logistic regression model, optionally restoring a
        pretrained checkpoint (which may also carry ``phi``)."""
        logger.info("Init model start.")
        self.model = TransferLogisticRegression(
            input_dim=self.num_features, output_dim=self.hidden_features, bias=bias
        )
        # Load pretrained model if needed.
        if self.pretrain_model_path is not None and self.pretrain_model_path != "":
            checkpoint = ModelPreserver.load(
                os.path.join(self.pretrain_model_path, self.input.get("pretrained_model").get("name")))
            state_dict = checkpoint["state_dict"]
            # phi is stored alongside the weights; pop it so load_state_dict
            # only sees real parameters.
            if "phi" in state_dict.keys():
                self.phi = state_dict.pop("phi")
            self.model.load_state_dict(state_dict)
        self.model = self.model.to(self.device)
        logger.info("Init model completed.")

    def _read_data(self, input_dataset, is_train=True):
        """Read a csv dataset; for trainsets also load the sibling
        ``overlap_index.npy`` identifying the shared samples."""
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_id = conf['has_id']
            index_col = 0 if has_id else False
            train_data = pd.read_csv(path, index_col=index_col)
            if is_train:
                index_name = "overlap_index.npy"
                index_path = os.path.join(conf['path'], index_name)
                overlap_index = np.load(index_path)
                return train_data, overlap_index
            else:
                return train_data
        else:
            raise NotImplementedError(
                "Dataset load method {} does not Implemented.".format(conf["type"])
            )

    def _set_train_dataloader(self):
        """Split the trainset into overlap / non-overlap parts and build
        their dataloaders (labels only exist on the label_trainer side)."""
        train_data, overlap_index = self._read_data(self.input_trainset)
        self.sample_num = train_data.shape[0]
        overlap_train_data = train_data.loc[overlap_index]
        if self.label:
            # Fix: the non-overlap rows were collected with a per-row
            # `np.append` + linear membership scan (O(n*m)), which also
            # coerced integer ids to float64 via np.array([]). A vectorized
            # boolean mask preserves order, dtype, and is O(n).
            non_overlap_mask = ~train_data.index.isin(overlap_index)
            non_overlap_train_data = train_data.loc[non_overlap_mask]
            # init overlap_y and non_overlap_y (first column is the label)
            self.overlap_y = torch.tensor(overlap_train_data.iloc[:, 0].to_numpy(), dtype=torch.float32).unsqueeze(1)
            self.non_overlap_y = torch.tensor(non_overlap_train_data.iloc[:, 0].to_numpy(), dtype=torch.float32).unsqueeze(1)
            # init train_dataloader
            overlap_x = torch.tensor(overlap_train_data.iloc[:, 1:].to_numpy(), dtype=torch.float32)
            overlap_trainset = TensorDataset(overlap_x, self.overlap_y)
            self.overlap_train_dataloader = DataLoader(overlap_trainset, batch_size=self.batch_size, shuffle=False)
            non_overlap_x = torch.tensor(non_overlap_train_data.iloc[:, 1:].to_numpy(), dtype=torch.float32)
            non_overlap_trainset = TensorDataset(non_overlap_x, self.non_overlap_y)
            self.non_overlap_train_dataloader = DataLoader(non_overlap_trainset, batch_size=self.batch_size, shuffle=False)
        else:
            # init train_dataloader (trainer side has no label column)
            overlap_x = torch.tensor(overlap_train_data.to_numpy(), dtype=torch.float32)
            overlap_trainset = TensorDataset(overlap_x)
            self.overlap_train_dataloader = DataLoader(overlap_trainset, batch_size=self.batch_size, shuffle=False)

    def _set_val_dataloader(self):
        """Build the validation dataloader: labels only on the label side,
        features only on the trainer side."""
        val_data = self._read_data(self.input_valset, is_train=False)
        if self.label:
            # init val_dataloader
            labels = torch.tensor(val_data.iloc[:, 0].to_numpy(), dtype=torch.float32).unsqueeze(dim=-1)
            valset = TensorDataset(labels)
            self.val_dataloader = DataLoader(valset, batch_size=self.batch_size, shuffle=False)
        else:
            # init val_dataloader
            features = torch.tensor(val_data.to_numpy(), dtype=torch.float32)
            valset = TensorDataset(features)
            self.val_dataloader = DataLoader(valset, batch_size=self.batch_size, shuffle=False)
XFL | XFL-master/python/algorithm/framework/transfer/logistic_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from common.communication.gRPC.python.channel import DualChannel
from common.utils.logger import logger
from service.fed_config import FedConfig
from .common import Common
from common.utils.model_io import ModelIO
from common.evaluation.metrics import CommonMetrics
class TransferLogisticRegressionLabelTrainer(Common):
    """Label-holding party of the transfer logistic-regression protocol.

    Exchanges intermediate components with the unlabeled trainer over a
    DualChannel and drives the joint optimization.
    """

    def __init__(self, train_conf: dict):
        super().__init__(train_conf, label=True)

    def cal_phi_and_ua(self, dataloader):
        """Forward every batch of *dataloader* and return ``(phi, ua)``.

        ``ua`` is the concatenated model output for all samples; ``phi``
        accumulates sum(y * ua) over batches (normalization happens later).
        """
        phi = None  # [1, hidden_features] Φ_A
        ua = []
        for batch_idx, (x_batch, y_batch) in enumerate(dataloader):
            x_batch = x_batch.to(self.device)
            ua_batch = self.model(x_batch)  # [batch_size, hidden_features]
            ua.append(ua_batch)
            # Per-batch contribution to Φ_A: label-weighted sum of outputs.
            phi_batch = torch.sum(y_batch * ua_batch, axis=0).unsqueeze(0)
            if phi is None:
                phi = phi_batch
            else:
                phi += phi_batch
        ua = torch.concat(ua, axis=0)
        return phi, ua

    def cal_parameters(self, dual_channel):
        """Run one exchange round with the trainer.

        Sends the label-side components, receives the trainer's, and returns
        ``(loss, ua, ua_grad)`` where ``ua_grad`` is backpropagated through
        ``ua`` by the caller. Also refreshes ``self.phi``.
        """
        overlap_phi, overlap_ua = self.cal_phi_and_ua(self.overlap_train_dataloader)
        non_overlap_phi, non_overlap_ua = self.cal_phi_and_ua(self.non_overlap_train_dataloader)
        phi = (overlap_phi + non_overlap_phi) / self.sample_num
        phi_2 = torch.matmul(phi.T, phi)  # (Φ_A)'(Φ_A) [hidden_features, hidden_features]
        overlap_y = self.overlap_y  # {C(y)=y} [overlap_size, 1]
        overlap_y_2 = overlap_y * overlap_y  # {D(y)=y^2} [overlap_size, 1]
        # calculate the 3 components that will be sent to the trainer
        overlap_y_2_phi_2 = 0.25 * overlap_y_2.unsqueeze(2) * phi_2  # [overlap_size, hidden_features, hidden_features]
        overlap_y_phi = -0.5 * overlap_y * phi  # [overlap_size, hidden_features]
        comp_ua = -self.constant_k * overlap_ua  # [overlap_size, hidden_features]
        # exchange components (send first, then receive — the trainer does the
        # mirror-image recv/send, so ordering must not change)
        dual_channel.send((overlap_y_2_phi_2, overlap_y_phi, comp_ua))
        ub, ub_2, comp_ub = dual_channel.recv()
        # compute gradients to execute backward
        overlap_y_2_phi = (overlap_y_2 * phi).unsqueeze(1)
        loss_grads_const_part1 = 0.25 * torch.matmul(overlap_y_2_phi, ub_2).squeeze(1)
        loss_grads_const_part2 = overlap_y * ub
        const = torch.sum(loss_grads_const_part1, axis=0) - 0.5 * torch.sum(loss_grads_const_part2, axis=0)
        non_overlap_y = self.non_overlap_y
        non_overlap_ua_grad = self.alpha * const * non_overlap_y / self.sample_num
        overlap_ua_grad = self.alpha * const * overlap_y / self.sample_num + comp_ub
        # compute loss
        overlap_num = overlap_y.shape[0]
        overlap_loss = torch.sum(comp_ua * ub)
        ub_phi = torch.matmul(ub, phi.T)
        part1 = -0.5 * torch.sum(overlap_y * ub_phi)
        part2 = 1.0 / 8 * torch.sum(ub_phi * ub_phi)
        part3 = len(overlap_y) * np.log(2)  # constant offset per overlap sample
        loss_y = part1 + part2 + part3
        loss = self.alpha * (loss_y / overlap_num) + overlap_loss / overlap_num
        ua = torch.concat((overlap_ua, non_overlap_ua), axis=0)
        ua_grad = torch.concat((overlap_ua_grad, non_overlap_ua_grad), axis=0)
        # update phi (consumed by val_loop and saved with the final model)
        self.phi = phi
        return loss, ua, ua_grad

    def train_loop(self, optimizer, lr_scheduler, dual_channel):
        """Run ``local_epoch`` exchange/update rounds; log the mean loss."""
        loss_sum = 0
        for lepoch in range(1, self.local_epoch + 1):
            loss, ua, ua_grad = self.cal_parameters(dual_channel)
            optimizer.zero_grad()
            # Backprop through ua using the externally computed gradient.
            ua.backward(ua_grad)
            optimizer.step()
            loss_sum += loss
            if lr_scheduler:
                lr_scheduler.step()
        loss_sum /= self.local_epoch
        logger.info(f"loss: {loss_sum}")

    def val_loop(self, dual_channel, global_epoch: int = 0):
        """Receive the trainer's validation representations, predict via
        sigmoid(ub · phi^T) thresholded at 0.5, and persist metrics."""
        logger.info("val_loop start")
        self.model.eval()
        labels = []
        for batch_idx, [y_batch] in enumerate(self.val_dataloader):
            labels.append(y_batch.numpy())
        ub = dual_channel.recv()
        predict_score = torch.matmul(ub, self.phi.T)
        val_predicts = torch.sigmoid(predict_score)
        labels: np.ndarray = np.concatenate(labels, axis=0)
        val_predicts = np.array(val_predicts > 0.5, dtype=np.int32)
        metrics_output = CommonMetrics._calc_metrics(
            metrics=self.metrics,
            labels=labels,
            val_predicts=val_predicts,
            lossfunc_name=None,
            loss=None,
            dataset_type="val",
        )
        CommonMetrics.save_metric_csv(
            metrics_output=metrics_output,
            output_config=self.common_config.output,
            global_epoch=global_epoch,
            local_epoch=None,
            dataset_type="val",
        )

    def fit(self):
        """Entry point: run global epochs of train/val, then save the model
        together with phi as metadata."""
        logger.info("Transfer logistic regression training start")
        dual_channel = DualChannel(
            name="transfer_logistic_regression_channel",
            ids=FedConfig.get_trainer()+[FedConfig.node_id]
        )
        # Use the first configured optimizer / scheduler.
        optimizer = list(self.optimizer.values())[0]
        lr_scheduler = list(self.lr_scheduler.values())[0] if self.lr_scheduler.values() else None
        for epoch in range(1, self.global_epoch + 1):
            self.model.train()
            logger.info(f"trainer's global epoch {epoch}/{self.global_epoch} start...")
            self.train_loop(optimizer, lr_scheduler, dual_channel)
            self.val_loop(dual_channel, global_epoch=epoch)
        ModelIO.save_torch_model(
            state_dict=self.model.state_dict(),
            save_dir=self.save_dir,
            model_name=self.save_model_name,
            meta_dict={"phi": self.phi}
        )
| 6,285 | 38.534591 | 118 | py |
XFL | XFL-master/python/algorithm/framework/transfer/logistic_regression/common.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from pathlib import Path
from ..base import BaseTrainer
from common.utils.logger import logger
from common.utils.model_io import ModelIO
from common.checker.x_types import All
from common.utils.config_sync import ConfigSynchronizer
class TransferLogisticRegression(nn.Module):
    """Linear feature projection used by both transfer parties."""

    def __init__(self, input_dim: int, output_dim: int, bias: bool = False):
        super().__init__()
        layer = torch.nn.Linear(input_dim, output_dim, bias=bias)
        self.linear = layer

    def forward(self, x):
        projected = self.linear(x)
        return projected
class Common(BaseTrainer):
    """Shared logic for the transfer logistic-regression label_trainer and
    trainer: config sync/parsing, model construction, and dataloaders."""

    def __init__(self, train_conf: dict, label: bool = False):
        # Pull the authoritative model/train config from the peer before
        # normal BaseTrainer setup.
        sync_rule = {
            "model_info": {
                "config": All()
            },
            "train_info": {
                "interaction_params": All(),
                "train_params": All()
            }
        }
        train_conf = ConfigSynchronizer(train_conf).sync(sync_rule)
        self.label = label
        super().__init__(train_conf)
        self._parse_config()

    def _parse_config(self) -> None:
        """Cache frequently used configuration entries as attributes."""
        self.save_dir = Path(self.common_config.output.get("path", ""))
        # interaction_params
        self.model_name = self.common_config.model_info.get("name")
        self.save_model_name = self.common_config.output.get("model", {}).get("name", {})
        self.pretrain_model_path = self.common_config.input.get("pretrained_model", {}).get("path")
        self.pretrain_model_name = self.common_config.input.get("pretrained_model", {}).get("name")
        self.model_conf = self.common_config.model_info.get("config", {})
        self.num_features = self.model_conf.get("num_features")
        self.hidden_features = self.model_conf.get("hidden_features")
        # Scaling constant used throughout the transfer protocol.
        self.constant_k = 1 / self.hidden_features
        self.alpha = self.model_conf.get("alpha")
        self.bias = self.model_conf.get("bias", False)
        self.global_epoch = self.common_config.train_params.get("global_epoch")
        self.local_epoch = self.common_config.train_params.get("local_epoch")
        self.train_batch_size = self.common_config.train_params.get("train_batch_size")
        self.val_batch_size = self.common_config.train_params.get("val_batch_size")

    def _set_model(self) -> nn.Module:
        """Init the logistic regression model, optionally restoring a
        pretrained checkpoint (and phi from its metadata).

        Returns:
            The initialized model on ``self.device``.
        (Fix: the annotation said ``-> None`` although a model is returned.)
        """
        logger.info("Init model start.")
        self.phi = None  # phi will be saved in the model_info of label_trainer
        model = TransferLogisticRegression(
            input_dim=self.num_features, output_dim=self.hidden_features, bias=self.bias
        )
        # Load pretrained model if needed.
        if self.pretrain_model_path is not None and self.pretrain_model_path != "":
            model_info = ModelIO.load_torch_model(
                os.path.join(self.pretrain_model_path, self.pretrain_model_name))
            state_dict = model_info["state_dict"]
            # Fix: phi is saved via meta_dict at the top level of model_info
            # (see label_trainer's save call), not inside state_dict; the old
            # check `"phi" in state_dict` could never restore it.
            if "phi" in model_info:
                self.phi = model_info["phi"]
            model.load_state_dict(state_dict)
        model = model.to(self.device)
        logger.info("Init model completed.")
        return model

    def _read_data(self, input_dataset, is_train=True):
        """Read a csv dataset; for trainsets also load the sibling
        ``overlap_index.npy`` identifying the shared samples."""
        if len(input_dataset) == 0:
            return None
        conf = input_dataset[0]
        if conf["type"] == "csv":
            path = os.path.join(conf['path'], conf['name'])
            has_id = conf['has_id']
            index_col = 0 if has_id else False
            train_data = pd.read_csv(path, index_col=index_col)
            if is_train:
                index_name = "overlap_index.npy"
                index_path = os.path.join(conf['path'], index_name)
                overlap_index = np.load(index_path)
                return train_data, overlap_index
            else:
                return train_data
        else:
            raise NotImplementedError(
                "Dataset load method {} does not Implemented.".format(conf["type"])
            )

    def _set_train_dataloader(self):
        """Split the trainset into overlap / non-overlap parts and build
        their dataloaders (labels only exist on the label_trainer side)."""
        self.overlap_y, self.non_overlap_y = None, None
        self.overlap_train_dataloader, self.non_overlap_train_dataloader = None, None
        train_data, overlap_index = self._read_data(self.common_config.input_trainset)
        self.sample_num = train_data.shape[0]
        overlap_train_data = train_data.loc[overlap_index]
        if self.label:
            # Fix: the non-overlap rows were collected with a per-row
            # `np.append` + linear membership scan (O(n*m)), which also
            # coerced integer ids to float64 via np.array([]). A vectorized
            # boolean mask preserves order, dtype, and is O(n).
            non_overlap_mask = ~train_data.index.isin(overlap_index)
            non_overlap_train_data = train_data.loc[non_overlap_mask]
            if len(non_overlap_train_data) == 0:
                raise ValueError("There is no non-overlap data in the trainset. If non_overlap_index is empty, there is no need to use transfer learning")
            # init overlap_y and non_overlap_y (first column is the label)
            self.overlap_y = torch.tensor(
                overlap_train_data.iloc[:, 0].to_numpy(), dtype=torch.float32).unsqueeze(1)
            self.non_overlap_y = torch.tensor(
                non_overlap_train_data.iloc[:, 0].to_numpy(), dtype=torch.float32).unsqueeze(1)
            # init train_dataloader
            overlap_x = torch.tensor(
                overlap_train_data.iloc[:, 1:].to_numpy(), dtype=torch.float32)
            overlap_trainset = TensorDataset(overlap_x, self.overlap_y)
            self.overlap_train_dataloader = DataLoader(
                overlap_trainset, batch_size=self.train_batch_size, shuffle=False)
            non_overlap_x = torch.tensor(
                non_overlap_train_data.iloc[:, 1:].to_numpy(), dtype=torch.float32)
            non_overlap_trainset = TensorDataset(non_overlap_x, self.non_overlap_y)
            self.non_overlap_train_dataloader = DataLoader(
                non_overlap_trainset, batch_size=self.train_batch_size, shuffle=False)
        else:
            # init train_dataloader (trainer side has no label column)
            overlap_x = torch.tensor(overlap_train_data.to_numpy(), dtype=torch.float32)
            overlap_trainset = TensorDataset(overlap_x)
            self.overlap_train_dataloader = DataLoader(
                overlap_trainset, batch_size=self.train_batch_size, shuffle=False)

    def _set_val_dataloader(self):
        """Build the validation dataloader: labels only on the label side,
        features only on the trainer side."""
        self.val_dataloader = None
        val_data = self._read_data(self.common_config.input_valset, is_train=False)
        if self.label:
            # init val_dataloader
            labels = torch.tensor(val_data.iloc[:, 0].to_numpy(), dtype=torch.float32).unsqueeze(dim=-1)
            valset = TensorDataset(labels)
            self.val_dataloader = DataLoader(valset, batch_size=self.val_batch_size, shuffle=False)
        else:
            # init val_dataloader
            features = torch.tensor(val_data.to_numpy(), dtype=torch.float32)
            valset = TensorDataset(features)
            self.val_dataloader = DataLoader(valset, batch_size=self.val_batch_size, shuffle=False)
| 7,724 | 42.644068 | 154 | py |
XFL | XFL-master/python/algorithm/framework/transfer/logistic_regression/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from common.communication.gRPC.python.channel import DualChannel
from common.utils.logger import logger
from service.fed_config import FedConfig
from .common import Common
from common.utils.model_io import ModelIO
class TransferLogisticRegressionTrainer(Common):
    """Unlabeled party of the transfer logistic-regression protocol.

    Computes its representations ub, exchanges components with the
    label_trainer over a DualChannel, and updates its model with the
    gradients derived from the received components.
    """

    def __init__(self, train_conf: dict):
        super().__init__(train_conf, label=False)

    def cal_ub(self, dataloader):
        """Forward every batch of *dataloader*; return the concatenated
        model outputs ub."""
        ub = []
        for batch_idx, [x_batch] in enumerate(dataloader):
            x_batch = x_batch.to(self.device)
            ub_batch = self.model(x_batch)  # [batch_size, hidden_features]
            ub.append(ub_batch)
        ub = torch.concat(ub, axis=0)
        return ub

    def cal_parameters(self, dual_channel):
        """Run one exchange round with the label_trainer.

        Receives the label-side components first, then sends its own (the
        mirror image of the label_trainer's send/recv order — must not be
        reordered), and returns ``(ub, ub_grad)`` for backprop.
        """
        ub = self.cal_ub(self.overlap_train_dataloader)  # [overlap_size, hidden_features]
        # calculate the 3 components that will be sent to the label_trainer
        ub_ex = ub.unsqueeze(1)
        ub_2 = torch.matmul(ub.unsqueeze(2), ub_ex)  # [overlap_size, hidden_features, hidden_features]
        comp_ub = -self.constant_k * ub  # [overlap_size, hidden_features]
        # exchange components
        overlap_y_2_phi_2, overlap_y_phi, comp_ua = dual_channel.recv()
        dual_channel.send((ub, ub_2, comp_ub))
        # compute gradients to execute backward
        ub_overlap_y_2_phi_2 = torch.matmul(ub_ex, overlap_y_2_phi_2)
        l1_grad_b = ub_overlap_y_2_phi_2.squeeze(1) + overlap_y_phi
        ub_grad = self.alpha * l1_grad_b + comp_ua
        return ub, ub_grad

    def fit(self):
        """Entry point: run global epochs of train/val, then save the model."""
        logger.info("Transfer logistic regression training start")
        dual_channel = DualChannel(
            name="transfer_logistic_regression_channel",
            ids=FedConfig.get_label_trainer()+[FedConfig.node_id]
        )
        # Use the first configured optimizer / scheduler.
        optimizer = list(self.optimizer.values())[0]
        lr_scheduler = list(self.lr_scheduler.values())[0] if self.lr_scheduler.values() else None
        for epoch in range(1, self.global_epoch + 1):
            self.model.train()
            logger.info(f"trainer's global epoch {epoch}/{self.global_epoch} start...")
            self.train_loop(optimizer, lr_scheduler, dual_channel)
            self.val_loop(dual_channel)
        ModelIO.save_torch_model(
            state_dict=self.model.state_dict(),
            save_dir=self.save_dir,
            model_name=self.save_model_name
        )

    def train_loop(self, optimizer, lr_scheduler, dual_channel):
        """Run ``local_epoch`` exchange/update rounds."""
        for lepoch in range(1, self.local_epoch + 1):
            ub, ub_grad = self.cal_parameters(dual_channel)
            optimizer.zero_grad()
            # Backprop through ub using the externally computed gradient.
            ub.backward(ub_grad)
            optimizer.step()
            if lr_scheduler:
                lr_scheduler.step()

    def val_loop(self, dual_channel):
        """Send validation representations to the label_trainer, which owns
        the labels and computes the metrics."""
        self.model.eval()
        ub = self.cal_ub(self.val_dataloader)
        dual_channel.send(ub)
| 3,462 | 36.236559 | 102 | py |
XFL | XFL-master/python/algorithm/framework/vertical/poisson_regression/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.core.data_io import CsvReader
from algorithm.framework.vertical.vertical_model_base import VerticalModelBase
from common.utils.logger import logger
from common.utils.model_preserver import ModelPreserver
from service.fed_config import FedConfig
class VerticalPoissonRegression(nn.Module):
    """Single-output linear layer for vertical Poisson regression.

    ``requires_grad`` is turned off for the layer's parameters, so autograd
    does not track them.
    """

    def __init__(self, input_dim: int, bias: bool = False):
        super().__init__()
        layer = nn.Linear(input_dim, 1, bias=bias)
        layer.requires_grad_(False)
        self.linear = layer

    def forward(self, x):
        return self.linear(x)
class VerticalPoissonRegressionBase(VerticalModelBase):
def __init__(self, train_conf: dict, label: bool = False, *args, **kwargs):
"""_summary_
Args:
train_conf (dict): _description_
label (bool, optional): _description_. Defaults to False.
"""
super().__init__(train_conf)
self._parse_config()
self.train_conf = train_conf
self.label = label
self.data_dim = None
self.model = None
self.train_dataloader, self.eval_dataloader = None, None
if FedConfig.node_id != "assist_trainer":
self._init_dataloader()
def _parse_config(self) -> None:
super()._parse_config()
self.model_name = self.model_info.get("name")
self.save_model_name = self.output.get("model", {}).get("name")
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
self.global_epoch = self.train_params.get("global_epoch")
self.batch_size = self.train_params.get("batch_size")
self.encryption_config = self.train_params.get("encryption")
self.optimizer_config = self.train_params.get("optimizer")
self.pretrain_model_path = self.input.get("pretrained_model", {}).get("path")
self.random_seed = self.train_params.get("random_seed", None)
self.early_stopping_config = self.train_params.get("early_stopping")
self.save_frequency = self.interaction_params.get("save_frequency")
@staticmethod
def set_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def _init_model(self, bias: bool = False) -> None:
"""
Init poisson regression model.
Returns: None
"""
logger.info("Init model start.")
self.model = VerticalPoissonRegression(input_dim=self.data_dim, bias=bias)
# Load pretrained model if needed.
if self.pretrain_model_path is not None and self.pretrain_model_path != "":
checkpoint = ModelPreserver.load(os.path.join(self.pretrain_model_path, self.input.get(
"pretrained_model").get("name", None)))
self.model.load_state_dict(checkpoint["state_dict"])
logger.info("Init model completed.")
def __load_data(self, config) -> CsvReader:
config = config[0]
if config["type"] == "csv":
data_reader = CsvReader(path=os.path.join(config["path"], config["name"]), has_id=config["has_id"],
has_label=config["has_label"])
else:
raise NotImplementedError("Dataset type {} is not supported.".format(config["type"]))
return data_reader
def _init_data(self) -> None:
if len(self.input_trainset) > 0:
data: CsvReader = self.__load_data(self.input_trainset)
self.train = data.features()
self.train_label = data.label()
self.train_ids = list(range(len(data.ids)))
else:
raise NotImplementedError("Trainset was not configured.")
if self.label:
assert len(self.train) == len(self.train_label)
if len(self.input_valset) > 0:
data: CsvReader = self.__load_data(self.input_valset)
self.val = data.features()
self.val_label = data.label()
self.val_ids = list(range(len(data.ids)))
if self.label:
assert len(self.val) == len(self.val_label)
def _init_dataloader(self) -> None:
    """Load raw data and wrap it in train/val DataLoaders.

    Label holders get (features, label, id) tuples per batch; parties without
    labels get (features, id) tuples. Train batches are shuffled, val batches
    are not.
    """
    logger.info("Dataloader initiation start.")
    self._init_data()
    train_columns = [torch.tensor(self.train, dtype=torch.float32)]
    val_columns = [torch.tensor(self.val, dtype=torch.float32)]
    if self.label:
        train_columns.append(torch.unsqueeze(torch.tensor(self.train_label), dim=-1))
        val_columns.append(torch.unsqueeze(torch.tensor(self.val_label), dim=-1))
    train_columns.append(torch.unsqueeze(torch.tensor(self.train_ids), dim=-1))
    val_columns.append(torch.unsqueeze(torch.tensor(self.val_ids), dim=-1))
    self.train_dataloader = DataLoader(
        dataset=TensorDataset(*train_columns),
        batch_size=self.batch_size, shuffle=True
    )
    self.val_dataloader = DataLoader(
        dataset=TensorDataset(*val_columns),
        batch_size=self.batch_size, shuffle=False
    )
    self.data_dim = torch.tensor(self.train).shape[-1]
    logger.info("Train data shape: {}.".format(list(torch.tensor(self.train).shape)))
    logger.info("Dataloader initiation completed.")
| 6,716 | 41.783439 | 111 | py |
XFL | XFL-master/python/algorithm/framework/vertical/poisson_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import secrets
import random
from functools import reduce
from pathlib import Path
from common.checker.x_types import All
import numpy as np
import pandas as pd
import tenseal as ts
import torch
from common.checker.matcher import get_matched_config
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.algo_utils import earlyStopping
from common.utils.logger import logger
from common.utils.model_preserver import ModelPreserver
from common.utils.utils import save_model_config
from service.fed_config import FedConfig
from service.fed_node import FedNode
from service.fed_control import ProgressCalculator
from .base import VerticalPoissonRegressionBase
class VerticalPoissonRegressionLabelTrainer(VerticalPoissonRegressionBase):
    """Label-holding party of the vertical Poisson regression protocol.

    Coordinates the trainers and the assist trainer: it aggregates encrypted
    intermediate results, asks the assist trainer to decrypt losses/gradients,
    and applies the (manual) gradient updates to its local linear model.
    """

    def __init__(self, train_conf: dict, *args, **kwargs):
        """
        Vertical Poisson Regression label trainer.

        Args:
            train_conf: training parameters.
            *args:
            **kwargs:
        """
        self.sync_channel = BroadcastChannel(name="sync")
        self._sync_config(train_conf)
        super().__init__(train_conf, label=True, *args, **kwargs)
        # The label trainer owns the seed and broadcasts it so every party
        # shuffles/initializes consistently.
        if self.random_seed is None:
            self.random_seed = random.randint(-(1 << 32), 1 << 32)
        self.sync_channel.broadcast(self.random_seed)
        self.set_seed(self.random_seed)
        self.progress_calculator = ProgressCalculator(self.global_epoch, len(self.train_dataloader))
        self._init_model(bias=True)
        self.export_conf = [{
            "class_name": "VerticalPoissonRegression",
            "identity": self.identity,
            "filename": self.save_model_name,
            "input_dim": self.data_dim,
            "bias": True,
            "version": "1.4.0"
        }]
        self.es = earlyStopping(key=self.early_stopping_config["key"],
                                patience=self.early_stopping_config["patience"],
                                delta=self.early_stopping_config["delta"])
        self.best_model = None
        self.broadcast_channel = BroadcastChannel(name="Public keys", root_id=FedConfig.get_assist_trainer())
        # One dual channel per trainer for intermediate results, plus a single
        # channel to the assist trainer for gradients/losses.
        self.dual_channels = {"intermediate_label_trainer": {}, "gradients_loss": None}
        self.trainers = FedConfig.get_trainer()
        for party_id in self.trainers:
            self.dual_channels["intermediate_label_trainer"][party_id] = DualChannel(
                name="intermediate_label_trainer_" + party_id, ids=[FedConfig.node_id, party_id])
        self.dual_channels["gradients_loss"] = DualChannel(name="gradients_loss_" + FedConfig.node_id,
                                                           ids=[FedConfig.get_assist_trainer()] + [FedConfig.node_id])
        self.train_result = None
        self.val_result = None
        # Hand the assist trainer the loop dimensions and encryption setup so it
        # can mirror the training schedule.
        self.dual_channels["gradients_loss"].send(len(self.train_dataloader))
        self.dual_channels["gradients_loss"].send(self.global_epoch)
        self.dual_channels["gradients_loss"].send(self.batch_size)
        self.encryption_method = list(self.encryption_config.keys())[0].lower()
        self.dual_channels["gradients_loss"].send(self.encryption_config)
        self.dual_channels["gradients_loss"].send(self.encryption_method)

    def _sync_config(self, config):
        """Broadcast the train_info section of the config to the other parties."""
        sync_rule = {
            "train_info": All()
        }
        config_to_sync = get_matched_config(config, sync_rule)
        self.sync_channel.broadcast(config_to_sync)

    def predict(self, input_data):
        """Predict over a dataloader by combining local and trainer partial scores.

        Returns:
            (labels, exp-predictions, pre-exp linear predictions) as flat lists.
        """
        pred_prob_epoch, y_epoch, pred_tmp_epoch = [], [], []
        for batch_idx, (x_batch, y_batch, _) in enumerate(input_data):
            pred_trainer_list = []
            pre_tmp = self.model(x_batch)
            # receive intermediate results from trainers
            for party_id in FedConfig.get_trainer():
                pred_trainer_list.append(self.dual_channels["intermediate_label_trainer"][party_id].recv(
                    use_pickle=True))
            # calculate prediction of batch and tmp_pred of batch
            pred_tmp_total = pre_tmp.numpy().astype(np.float32).flatten() + reduce(
                lambda x, y: x + y, pred_trainer_list)
            pred_total = np.exp(pred_tmp_total)
            # calculate prediction of epoch
            pred_prob_epoch += pred_total.tolist()
            pred_tmp_epoch += pred_tmp_total.tolist()
            y_epoch += y_batch.numpy().astype(np.float32).flatten().tolist()
        return y_epoch, pred_prob_epoch, pred_tmp_epoch

    def fit(self):
        """Run the full federated training loop.

        Per batch: aggregate encrypted exp/linear partial predictions from the
        trainers, have the assist trainer decrypt the loss, distribute the
        residual d, then exchange noised gradients for decryption and update
        the local weights. Per epoch: compute metrics, apply early stopping
        and checkpointing.
        """
        self.check_data()
        public_context = None
        num_cores = -1
        rng = secrets.SystemRandom()
        logger.info("Vertical poisson regression training start")
        # receive encryption key from assist trainer
        if self.encryption_method == "ckks":
            logger.info("Receive ckks public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = ts.context_from(public_context)
            logger.info("Public key received.")
        elif self.encryption_method == "paillier":
            logger.info("Receive paillier public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = Paillier.context_from(public_context)
            logger.info("Public key received.")
        elif self.encryption_method == "plain":
            pass
        else:
            raise ValueError(f"Encryption method {self.encryption_method} not supported! Valid methods are "
                             f"'paillier', 'ckks', 'plain'.")
        # train
        for epoch in range(1, self.global_epoch + 1):
            loss_epoch = 0
            for batch_idx, (x_batch, y_batch, _) in enumerate(self.train_dataloader):
                regular_loss_tmp = 0
                regular_gradient_tmp = 0
                enc_regular_gradient_tmp = 0
                trainer_exp = None
                # calculate regular results (L1 or L2 regularization terms)
                if self.optimizer_config['p'] == 1:
                    regular_loss_tmp = torch.abs(self.model.linear.weight).sum() * self.optimizer_config['alpha']
                    regular_gradient_tmp = self.optimizer_config['alpha'] * (torch.abs(self.model.linear.weight)
                                                                             / self.model.linear.weight)
                elif self.optimizer_config['p'] == 2:
                    regular_loss_tmp = (self.model.linear.weight ** 2).sum() * self.optimizer_config['alpha'] / 2
                    regular_gradient_tmp = self.optimizer_config['alpha'] * self.model.linear.weight
                elif self.optimizer_config['p'] == 0:
                    # NOTE(review): with p == 0, regular_loss_tmp/regular_gradient_tmp stay the
                    # int 0, but .numpy() is called on them below — p=0 looks like it would
                    # crash here; confirm whether p=0 is a supported configuration.
                    pass
                # receive intermediate exp results from trainers and compute total_exp
                logger.info("Calculate predicted exp result of all trainers.")
                pred_label_tmp = self.model(x_batch)
                pred_label_trainer = torch.exp(pred_label_tmp)
                # only the last trainer in the ring sends the accumulated exp product
                if self.encryption_method == "ckks":
                    trainer_exp = self.dual_channels["intermediate_label_trainer"][self.trainers[-1]].recv(
                        use_pickle=False)
                    trainer_exp = ts.ckks_vector_from(public_context, trainer_exp)
                elif self.encryption_method == "paillier":
                    trainer_exp = self.dual_channels["intermediate_label_trainer"][self.trainers[-1]].recv(
                        use_pickle=False)
                    trainer_exp = Paillier.ciphertext_from(public_context, trainer_exp)
                elif self.encryption_method == "plain":
                    trainer_exp = self.dual_channels["intermediate_label_trainer"][self.trainers[-1]].recv()
                total_exp = trainer_exp * pred_label_trainer.numpy().astype(np.float32).flatten()
                # receive immediate results from trainers
                logger.info("Calculate predicted result of all trainers.")
                if self.encryption_method == "ckks":
                    total_sum = ts.ckks_vector(public_context, pred_label_tmp.numpy().astype(np.float32).flatten())
                else:
                    total_sum = pred_label_tmp.numpy().astype(np.float32).flatten()
                for party_id in self.trainers:
                    if self.encryption_method == "ckks":
                        total_sum = total_sum + ts.ckks_vector_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False))
                        trainer_regular_loss = ts.ckks_vector_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False))
                    elif self.encryption_method == "paillier":
                        total_sum = total_sum + Paillier.ciphertext_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False))
                        trainer_regular_loss = Paillier.ciphertext_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False))
                    elif self.encryption_method == "plain":
                        total_sum = total_sum + self.dual_channels["intermediate_label_trainer"][party_id].recv()
                        trainer_regular_loss = self.dual_channels["intermediate_label_trainer"][party_id].recv()
                # NOTE(review): trainer_regular_loss is overwritten on every loop turn, so only
                # the last trainer's regularization loss reaches enc_loss_batch — with more than
                # one trainer earlier parties' terms are dropped; confirm whether intended.
                # calculate total loss
                logger.info("Calculate total loss.")
                enc_loss = total_exp - total_sum * y_batch.numpy().astype(np.float32).flatten()
                if self.encryption_method == "ckks":
                    regular_loss_tmp = ts.ckks_vector(public_context,
                                                      regular_loss_tmp.numpy().astype(np.float32).flatten())
                else:
                    regular_loss_tmp = regular_loss_tmp.numpy().astype(np.float32).flatten()
                enc_loss_batch = enc_loss + regular_loss_tmp + trainer_regular_loss
                # send total loss to assist_trainer
                logger.info("Send encrypted total loss to assist_trainer.")
                if self.encryption_method == "ckks":
                    self.dual_channels["gradients_loss"].send(enc_loss_batch.serialize(), use_pickle=False)
                elif self.encryption_method == "paillier":
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(enc_loss_batch), use_pickle=False)
                elif self.encryption_method == "plain":
                    self.dual_channels["gradients_loss"].send(enc_loss_batch)
                # receive decrypted loss from assist_trainer
                logger.info("Receive decrypted total loss from assist_trainer.")
                loss_batch = self.dual_channels["gradients_loss"].recv()
                loss_batch = loss_batch / x_batch.shape[0]
                logger.info("Loss of {} batch is {}".format(batch_idx, loss_batch))
                loss_epoch += loss_batch * x_batch.shape[0]
                # calculate intermediate result d (the residual exp(wx) - y)
                logger.info("Calculate intermediate result d.")
                enc_y = None
                if self.encryption_method == "ckks":
                    enc_y = ts.ckks_vector(public_context, y_batch.numpy().astype(np.float32).flatten())
                elif self.encryption_method == "paillier":
                    enc_y = Paillier.encrypt(public_context, y_batch.numpy().astype(np.float32).flatten(),
                                             precision=self.encryption_config[self.encryption_method][
                                                 "precision"], obfuscation=True, num_cores=num_cores)
                elif self.encryption_method == "plain":
                    enc_y = y_batch.numpy().astype(np.float32).flatten()
                enc_d = total_exp - enc_y
                # send intermediate result d to trainers
                logger.info("Send intermediate result d to trainers.")
                for party_id in self.trainers:
                    if self.encryption_method == "ckks":
                        self.dual_channels["intermediate_label_trainer"][party_id].send(enc_d.serialize(),
                                                                                        use_pickle=False)
                    elif self.encryption_method == "paillier":
                        self.dual_channels["intermediate_label_trainer"][party_id].send(Paillier.serialize(enc_d),
                                                                                        use_pickle=False)
                    elif self.encryption_method == "plain":
                        self.dual_channels["intermediate_label_trainer"][party_id].send(enc_d)
                # calculate gradient for label_trainer
                logger.info("Calculate gradients for label_trainer.")
                if self.encryption_method == "ckks":
                    enc_regular_gradient_tmp = ts.ckks_vector(public_context,
                                                              regular_gradient_tmp.numpy().astype(np.float32).flatten())
                elif self.encryption_method == "paillier":
                    enc_regular_gradient_tmp = Paillier.encrypt(
                        public_context, regular_gradient_tmp.numpy().astype(np.float32).flatten(),
                        precision=self.encryption_config[self.encryption_method]["precision"],
                        obfuscation=True, num_cores=num_cores)
                elif self.encryption_method == "plain":
                    enc_regular_gradient_tmp = regular_gradient_tmp.numpy().astype(np.float32).flatten()
                if self.encryption_method == "ckks":
                    gradient_label_trainer_w = enc_d.matmul(x_batch.numpy()) + enc_regular_gradient_tmp
                else:
                    gradient_label_trainer_w = np.matmul(enc_d.reshape(1, len(enc_d)), x_batch.numpy()
                                                         ) + enc_regular_gradient_tmp
                gradient_label_trainer_b = enc_d
                if self.encryption_method == "ckks":
                    # add noise to encrypted gradients and send to assist_trainer
                    # (noise masks the true gradient from the decrypting party)
                    logger.info("Calculate noised gradients for label_trainer.")
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noise_b = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[0])],
                                       dtype=np.float32)
                    noise_b /= 100000
                    noised_gradient_label_trainer_w = gradient_label_trainer_w + noise
                    noised_gradient_label_trainer_b = gradient_label_trainer_b + noise_b
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(noised_gradient_label_trainer_w.serialize(),
                                                              use_pickle=False)
                    self.dual_channels["gradients_loss"].send(noised_gradient_label_trainer_b.serialize(),
                                                              use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_decrypt_gradient = self.dual_channels["gradients_loss"].recv()
                    noised_decrypt_gradient_label_trainer_w = noised_decrypt_gradient["noised_gradient_label_trainer_w"]
                    noised_decrypt_gradient_label_trainer_b = noised_decrypt_gradient["noised_gradient_label_trainer_b"]
                    gradient_label_trainer_w = noised_decrypt_gradient_label_trainer_w - noise
                    gradient_label_trainer_b = noised_decrypt_gradient_label_trainer_b - np.sum(noise_b)
                elif self.encryption_method == "paillier":
                    # add noise to encrypted gradients and send to assist_trainer
                    logger.info("Calculate noised gradients for label_trainer.")
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noise_b = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[0])],
                                       dtype=np.float32)
                    noise_b /= 100000
                    noised_gradient_label_trainer_w = gradient_label_trainer_w + noise
                    noised_gradient_label_trainer_b = gradient_label_trainer_b + noise_b
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(noised_gradient_label_trainer_w),
                                                              use_pickle=False)
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(noised_gradient_label_trainer_b),
                                                              use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_decrypt_gradient = self.dual_channels["gradients_loss"].recv()
                    noised_decrypt_gradient_label_trainer_w = noised_decrypt_gradient["noised_gradient_label_trainer_w"]
                    noised_decrypt_gradient_label_trainer_b = noised_decrypt_gradient["noised_gradient_label_trainer_b"]
                    gradient_label_trainer_w = noised_decrypt_gradient_label_trainer_w - noise
                    gradient_label_trainer_b = noised_decrypt_gradient_label_trainer_b - np.sum(noise_b)
                elif self.encryption_method == "plain":
                    gradient_label_trainer_b = gradient_label_trainer_b.sum()
                # update w and b of label_trainer (manual SGD step; autograd is disabled)
                gradient_label_trainer_w = gradient_label_trainer_w / x_batch.shape[0]
                gradient_label_trainer_b = gradient_label_trainer_b / x_batch.shape[0]
                logger.info("Update weights of label trainer.")
                self.model.linear.weight -= (torch.FloatTensor(gradient_label_trainer_w) * self.optimizer_config["lr"])
                self.model.linear.bias -= (gradient_label_trainer_b * self.optimizer_config["lr"])
                # calculate and update the progress of the training
                self.progress_calculator.cal_custom_progress(epoch, batch_idx+1)
            loss_epoch = loss_epoch * (1 / len(self.train))
            logger.info("Loss of {} epoch is {}".format(epoch, loss_epoch))
            # predict train and val results for metrics
            logger.info("Predict train weights of label trainer.")
            self.train_result = self.predict(self.train_dataloader)
            loss_train_met = {"loss": loss_epoch}
            self._calc_metrics(np.array(self.train_result[1], dtype=float), np.array(self.train_result[0]),
                               epoch, stage="train", loss=loss_train_met)
            logger.info("Predict val weights of label trainer.")
            self.val_result = self.predict(self.val_dataloader)
            loss_val = np.mean(
                np.array(self.val_result[1]) - np.array(self.val_result[0]) * np.array(self.val_result[2]))
            loss_val_met = {"loss": loss_val}  # no regular
            val_metrics = self._calc_metrics(np.array(self.val_result[1], dtype=float), np.array(self.val_result[0]),
                                             epoch, stage="val", loss=loss_val_met)
            # early stopping (loss is negated because earlyStopping maximizes its key)
            val_metrics["loss"] = - val_metrics["loss"]
            if self.early_stopping_config["patience"] > 0:
                early_stop_flag, best_model_flag = self.es(val_metrics)
            else:
                early_stop_flag, best_model_flag = False, True
            # update best model
            if best_model_flag:
                self.best_model = copy.deepcopy(self.model)
            # send flags to trainers
            for party_id in FedConfig.get_trainer():
                self.dual_channels["intermediate_label_trainer"][party_id].send(
                    [early_stop_flag, best_model_flag, self.early_stopping_config["patience"]], use_pickle=True)
            # if need to save results by epoch
            if self.save_frequency > 0 and epoch % self.save_frequency == 0:
                ModelPreserver.save(save_dir=self.save_dir, model_name=self.save_model_name,
                                    state_dict=self.model.state_dict(), epoch=epoch)
            # if early stopping, break
            if early_stop_flag:
                # update the progress of 100 to show the training is finished
                ProgressCalculator.finish_progress()
                break
        # save model for infer
        save_model_config(stage_model_config=self.export_conf, save_path=Path(self.save_dir))
        # if not early stopping, save probabilities and model
        self._save_prob()
        ModelPreserver.save(save_dir=self.save_dir, model_name=self.save_model_name,
                            state_dict=self.best_model.state_dict(), final=True)
        # calculate feature importance
        self._save_feature_importance(self.dual_channels)

    def _save_prob(self):
        """Write train/val predictions to disk when configured to do so."""
        if self.interaction_params.get("write_training_prediction"):
            self._write_prediction(self.train_result[1], self.train_result[0], self.train_ids,
                                   stage="train", final=True)
        if self.interaction_params.get("write_validation_prediction"):
            self._write_prediction(self.val_result[1], self.val_result[0], self.val_ids,
                                   stage="val", final=True)

    def check_data(self):
        """Abort training when the total feature count across all parties is zero."""
        dim_channel = BroadcastChannel(name="check_data_com", ids=[FedConfig.node_id] + FedConfig.get_trainer())
        n = self.data_dim
        dims = dim_channel.collect()
        for dim in dims:
            n += dim
        if n <= 0:
            raise ValueError("Number of the feature is zero. Stop training.")

    def _save_feature_importance(self, channel):
        """Collect per-feature weights from all parties and write them as a csv,
        sorted by absolute importance."""
        res = {"owner_id": [], "fid": [], "importance": []}
        other_weight_list = []
        for party_id in FedConfig.get_trainer():
            other_weight_list.append(channel["intermediate_label_trainer"][party_id].recv(use_pickle=True))
        for (owner_id, weights) in other_weight_list:
            for fid, weight in enumerate(weights):
                res["owner_id"].append(owner_id)
                res["fid"].append(fid)
                res["importance"].append(float(weight))
        for fid, weight in enumerate(self.best_model.state_dict()["linear.weight"][0]):
            res["owner_id"].append(FedNode.node_id)
            res["fid"].append(fid)
            res["importance"].append(float(weight))
        res = pd.DataFrame(res).sort_values(by="importance", key=lambda col: np.abs(col), ascending=False)
        res.to_csv(Path(self.save_dir, self.output["feature_importance"]["name"]), header=True, index=False,
                   float_format="%.6g")
| 24,198 | 58.166259 | 120 | py |
XFL | XFL-master/python/algorithm/framework/vertical/poisson_regression/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import secrets
from pathlib import Path
import numpy as np
import tenseal as ts
import torch
from common.utils.utils import update_dict
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.logger import logger
from service.fed_config import FedConfig
from service.fed_node import FedNode
from common.utils.model_preserver import ModelPreserver
from common.utils.utils import save_model_config
from .base import VerticalPoissonRegressionBase
class VerticalPoissonRegressionTrainer(VerticalPoissonRegressionBase):
    """Non-label party of the vertical Poisson regression protocol.

    Holds a slice of the features, exchanges encrypted partial predictions
    with the label trainer (and peer trainers when there are several), and
    updates its local weights from gradients decrypted by the assist trainer.
    """

    def __init__(self, train_conf: dict, *args, **kwargs):
        """Initialize the trainer: sync config from the label trainer, build the
        local model and open all communication channels.

        Args:
            train_conf (dict): training parameters (merged with the synced config).
        """
        self.sync_channel = BroadcastChannel(name="sync")
        conf = self._sync_config()
        update_dict(train_conf, conf)
        super().__init__(train_conf, label=False, *args, **kwargs)
        self._init_model()
        self.export_conf = [{
            "class_name": "VerticalPoissonRegression",
            "identity": self.identity,
            "filename": self.save_model_name,
            "input_dim": self.data_dim,
            "bias": False
        }]
        # Seed comes from the label trainer so all parties stay aligned.
        if self.random_seed is None:
            self.random_seed = self.sync_channel.recv()
        self.set_seed(self.random_seed)
        self.best_model = None
        self.node_id = FedConfig.node_id
        self.broadcast_channel = BroadcastChannel(name="Public keys", root_id=FedConfig.get_assist_trainer())
        self.trainers = FedConfig.get_trainer()
        # Peer-to-peer channels are only needed for the multi-trainer ring.
        self.dual_trainers = {}
        if len(FedConfig.get_trainer()) > 1:
            for trainer in self.trainers:
                if trainer != self.node_id:
                    self.dual_trainers[trainer] = DualChannel(name="Trainer exchange",
                                                              ids=[trainer, self.node_id])
        self.dual_channels = {
            "intermediate_label_trainer": DualChannel(name="intermediate_label_trainer_" + self.node_id,
                                                      ids=FedConfig.get_label_trainer() + [self.node_id]),
            "gradients_loss": DualChannel(name="gradients_loss_" + self.node_id,
                                          ids=[FedConfig.get_assist_trainer()] + [self.node_id])
        }

    def _sync_config(self):
        """Receive the config section broadcast by the label trainer."""
        config = self.sync_channel.recv()
        return config

    def predict(self, input_data):
        """Send this party's linear partial prediction for every batch to the
        label trainer (plaintext); no value is returned."""
        for batch_idx, x_batch in enumerate(input_data):
            # calculate prediction of batch
            pred_trainer = self.model(x_batch[0])
            # send to label_trainer
            self.dual_channels["intermediate_label_trainer"].send(pred_trainer.numpy().astype(np.float32).flatten(),
                                                                  use_pickle=True)

    def fit(self):
        """ train model
        Model parameters need to be updated before fitting.

        Per batch: contribute the encrypted exp/linear partial predictions
        (relayed through the trainer ring when several trainers exist),
        receive the residual d from the label trainer, then exchange noised
        gradients with the assist trainer for decryption and update weights.
        """
        self.check_data()
        num_cores = -1
        encryption_config = self.encryption_config
        encryption_method = list(self.encryption_config.keys())[0].lower()
        logger.info("Vertical poisson regression training start")
        # receive encryption key from assist trainer
        public_context = None
        if encryption_method == "ckks":
            logger.info("Receive ckks public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = ts.context_from(public_context)
            logger.info("Public key received.")
        elif encryption_method == "paillier":
            logger.info("Receive paillier public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = Paillier.context_from(public_context)
            logger.info("Public key received.")
        elif encryption_method == "plain":
            pass
        else:
            raise ValueError(
                f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', 'ckks', 'plain'.")
        rng = secrets.SystemRandom()
        # train
        for epoch in range(1, self.global_epoch + 1):
            for batch_idx, x_batch in enumerate(self.train_dataloader):
                regular_loss_tmp = 0
                regular_gradient_tmp = 0
                enc_regular_gradient_tmp = 0
                # calculate regular results (L1 or L2 regularization terms)
                if self.optimizer_config['p'] == 1:
                    regular_loss_tmp = torch.abs(self.model.linear.weight).sum() * self.optimizer_config['alpha']
                    regular_gradient_tmp = self.optimizer_config['alpha'] * (torch.abs(self.model.linear.weight)
                                                                             / self.model.linear.weight)
                elif self.optimizer_config['p'] == 2:
                    regular_loss_tmp = (self.model.linear.weight ** 2).sum() * self.optimizer_config['alpha'] / 2
                    regular_gradient_tmp = self.optimizer_config['alpha'] * self.model.linear.weight
                elif self.optimizer_config['p'] == 0:
                    pass
                # NOTE(review): when p == 0, regular_loss_tmp is still the int 0 and the
                # .numpy() call below would fail — confirm whether p=0 is supported.
                regular_loss_tmp = regular_loss_tmp.numpy().astype(np.float32).flatten()
                # compute multiplication of exp of all trainers
                pred_tmp = self.model(x_batch[0])
                pred_trainer = torch.exp(pred_tmp).numpy().astype(np.float32).flatten()
                # if node_id is the first trainer of trainers, encrypt the result
                if self.node_id == self.trainers[0]:
                    if encryption_method == "ckks":
                        enc_pred_trainer = ts.ckks_vector(public_context, pred_trainer)
                    elif encryption_method == "paillier":
                        enc_pred_trainer = Paillier.encrypt(public_context, pred_trainer,
                                                            precision=encryption_config[encryption_method]["precision"],
                                                            obfuscation=True,
                                                            num_cores=num_cores)
                    elif encryption_method == "plain":
                        enc_pred_trainer = pred_trainer
                else:
                    pass
                # encrypt regular loss of trainers
                if encryption_method == "ckks":
                    enc_regular_loss = ts.ckks_vector(public_context, regular_loss_tmp)
                elif encryption_method == "paillier":
                    enc_regular_loss = Paillier.encrypt(public_context, regular_loss_tmp,
                                                        precision=encryption_config[encryption_method]["precision"],
                                                        obfuscation=True,
                                                        num_cores=num_cores)
                elif encryption_method == "plain":
                    enc_regular_loss = regular_loss_tmp
                # communicate and calculate multiplication of trainers:
                # first trainer starts the chain, middle trainers multiply and forward,
                # the last trainer sends the accumulated product to the label trainer.
                logger.info("Calculate predicted exp result of all trainers.")
                if self.node_id == self.trainers[0]:
                    if len(self.trainers) > 1:
                        if encryption_method == "ckks":
                            self.dual_trainers[self.trainers[1]].send(enc_pred_trainer.serialize(), use_pickle=False)
                        elif encryption_method == "paillier":
                            self.dual_trainers[self.trainers[1]].send(Paillier.serialize(enc_pred_trainer),
                                                                      use_pickle=False)
                        elif encryption_method == "plain":
                            self.dual_trainers[self.trainers[1]].send(enc_pred_trainer)
                    elif len(self.trainers) == 1:
                        if encryption_method == "ckks":
                            self.dual_channels["intermediate_label_trainer"].send(enc_pred_trainer.serialize(),
                                                                                  use_pickle=False)
                        elif encryption_method == "paillier":
                            self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(enc_pred_trainer),
                                                                                  use_pickle=False)
                        elif encryption_method == "plain":
                            self.dual_channels["intermediate_label_trainer"].send(enc_pred_trainer)
                elif len(self.trainers) > 1:
                    train_ind = self.trainers.index(self.node_id)
                    pred_recv = self.dual_trainers[self.trainers[train_ind - 1]]
                    if train_ind != len(self.trainers) - 1:
                        pred_send = self.dual_trainers[self.trainers[train_ind + 1]]
                        if encryption_method == "ckks":
                            pre_pred = ts.ckks_vector_from(public_context, pred_recv.recv(use_pickle=False))
                            multi_pred = pre_pred * pred_trainer
                            pred_send.send(multi_pred.serialize(), use_pickle=False)
                        elif encryption_method == "paillier":
                            pre_pred = Paillier.ciphertext_from(public_context, pred_recv.recv(use_pickle=False))
                            multi_pred = pre_pred * pred_trainer
                            pred_send.send(Paillier.serialize(multi_pred), use_pickle=False)
                        elif encryption_method == "plain":
                            pre_pred = pred_recv.recv()
                            multi_pred = pre_pred * pred_trainer
                            pred_send.send(multi_pred)
                    elif train_ind == len(self.trainers) - 1:
                        if encryption_method == "ckks":
                            pre_pred = ts.ckks_vector_from(public_context, pred_recv.recv(use_pickle=False))
                            multi_pred = pre_pred * pred_trainer
                            self.dual_channels["intermediate_label_trainer"].send(multi_pred.serialize(),
                                                                                  use_pickle=False)
                        elif encryption_method == "paillier":
                            pre_pred = Paillier.ciphertext_from(public_context, pred_recv.recv(use_pickle=False))
                            multi_pred = pre_pred * pred_trainer
                            self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(multi_pred),
                                                                                  use_pickle=False)
                        elif encryption_method == "plain":
                            pre_pred = pred_recv.recv()
                            multi_pred = pre_pred * pred_trainer
                            self.dual_channels["intermediate_label_trainer"].send(multi_pred)
                # send intermediate results to label trainer.
                logger.info("Send intermediate result to label trainer.")
                if encryption_method == "ckks":
                    enc_pred_tmp = ts.ckks_vector(public_context, pred_tmp.numpy().astype(np.float32).flatten())
                    self.dual_channels["intermediate_label_trainer"].send(enc_pred_tmp.serialize(),
                                                                          use_pickle=False)
                    self.dual_channels["intermediate_label_trainer"].send(enc_regular_loss.serialize(),
                                                                          use_pickle=False)
                elif encryption_method == "paillier":
                    enc_pred_tmp = Paillier.encrypt(public_context,
                                                    pred_tmp.numpy().astype(np.float32).flatten(),
                                                    precision=encryption_config[encryption_method]["precision"],
                                                    obfuscation=True,
                                                    num_cores=num_cores)
                    self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(enc_pred_tmp),
                                                                          use_pickle=False)
                    self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(enc_regular_loss),
                                                                          use_pickle=False)
                elif encryption_method == "plain":
                    enc_pred_tmp = pred_tmp.numpy().astype(np.float32).flatten()
                    self.dual_channels["intermediate_label_trainer"].send(enc_pred_tmp, use_pickle=True)
                    self.dual_channels["intermediate_label_trainer"].send(enc_regular_loss, use_pickle=True)
                # receive intermediate result d from label_trainer
                logger.info("Receive intermediate result d from label_trainer.")
                if encryption_method == "ckks":
                    enc_d = self.dual_channels["intermediate_label_trainer"].recv(use_pickle=False)
                    enc_d = ts.ckks_vector_from(public_context, enc_d)
                elif encryption_method == "paillier":
                    enc_d = self.dual_channels["intermediate_label_trainer"].recv(use_pickle=False)
                    enc_d = Paillier.ciphertext_from(public_context, enc_d)
                elif encryption_method == "plain":
                    enc_d = self.dual_channels["intermediate_label_trainer"].recv()
                # calculate gradient for trainer and send to assist_trainer
                logger.info("Calculate gradients for trainer.")
                if encryption_method == "ckks":
                    enc_regular_gradient_tmp = ts.ckks_vector(public_context,
                                                              regular_gradient_tmp.numpy().astype(np.float32).flatten())
                elif encryption_method == "paillier":
                    enc_regular_gradient_tmp = Paillier.encrypt(
                        public_context, regular_gradient_tmp.numpy().astype(np.float32).flatten(),
                        precision=encryption_config[encryption_method]["precision"],
                        obfuscation=True, num_cores=num_cores)
                elif encryption_method == "plain":
                    enc_regular_gradient_tmp = regular_gradient_tmp.numpy().astype(np.float32).flatten()
                if encryption_method == "ckks":
                    gradient_trainer_w = enc_d.matmul(x_batch[0].numpy()) + enc_regular_gradient_tmp
                else:
                    gradient_trainer_w = np.matmul(enc_d.reshape(1, len(enc_d)), x_batch[0].numpy()
                                                   ) + enc_regular_gradient_tmp
                # add noise to encrypted gradients and send to assist_trainer
                # (noise masks the true gradient from the decrypting party)
                if encryption_method == "ckks":
                    logger.info("Calculate noised gradient for trainer.")
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch[0].shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noised_gradient_trainer_w = gradient_trainer_w + noise
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(noised_gradient_trainer_w.serialize(), use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_gradient_trainer_w = self.dual_channels["gradients_loss"].recv()
                    gradient_trainer_w = noised_gradient_trainer_w - noise
                elif encryption_method == "paillier":
                    logger.info("Calculate noised gradient for trainer.")
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch[0].shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noised_gradient_trainer_w = gradient_trainer_w + noise
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(noised_gradient_trainer_w),
                                                              use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_gradient_trainer_w = self.dual_channels["gradients_loss"].recv()
                    gradient_trainer_w = noised_gradient_trainer_w - noise
                # gradient_trainer_w = torch.FloatTensor(gradient_trainer_w).unsqueeze(-1)
                # update w and b of trainer (manual SGD step; autograd is disabled)
                gradient_trainer_w = gradient_trainer_w / x_batch[0].shape[0]
                logger.info("Update weights of trainer.")
                self.model.linear.weight -= (torch.FloatTensor(gradient_trainer_w) * self.optimizer_config["lr"])
            # predict train and val for metrics
            logger.info("Predict train weights of trainer.")
            self.predict(self.train_dataloader)
            logger.info("Predict val weights of trainer.")
            self.predict(self.val_dataloader)
            # receive flags (patient is received to keep the message protocol in
            # sync with the label trainer; it is not used here)
            early_stop_flag, best_model_flag, patient = self.dual_channels["intermediate_label_trainer"].recv(
                use_pickle=True)
            # update best model
            if best_model_flag:
                self.best_model = copy.deepcopy(self.model)
            # if need to save results by epoch
            if self.save_frequency > 0 and epoch % self.save_frequency == 0:
                ModelPreserver.save(save_dir=self.save_dir,
                                    model_name=self.save_model_name,
                                    state_dict=self.model.state_dict(),
                                    epoch=epoch)
            # if early stopping, break
            if early_stop_flag:
                break
        # save model for infer
        save_model_config(stage_model_config=self.export_conf, save_path=Path(self.save_dir))
        # if not early stopping, save model
        ModelPreserver.save(save_dir=self.save_dir, model_name=self.save_model_name,
                            state_dict=self.best_model.state_dict(), final=True)
        # send w to label trainer
        self._save_feature_importance(self.dual_channels["intermediate_label_trainer"])

    def _save_feature_importance(self, channel):
        """Send this party's weights to the label trainer for the importance report."""
        channel.send((FedNode.node_id, self.best_model.state_dict()["linear.weight"][0]))

    def check_data(self):
        """Report this party's feature dimension for the global feature-count check."""
        dim_channel = BroadcastChannel(name="check_data_com", ids=[self.node_id] + FedConfig.get_trainer())
        dim_channel.send(self.data_dim)
| 19,818 | 57.463127 | 120 | py |
XFL | XFL-master/python/algorithm/framework/vertical/linear_regression/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from algorithm.core.data_io import CsvReader
from algorithm.framework.vertical.vertical_model_base import VerticalModelBase
from common.utils.logger import logger
from common.utils.model_preserver import ModelPreserver
from service.fed_config import FedConfig
class VerticalLinearRegression(nn.Module):
    """Single linear layer used as the per-party model in vertical linear regression.

    Gradient updates are computed manually by the federated protocol, so
    autograd is disabled for the layer's parameters.
    """

    def __init__(self, input_dim: int, bias: bool = False):
        super(VerticalLinearRegression, self).__init__()
        layer = nn.Linear(input_dim, 1, bias=bias)
        layer.requires_grad_(False)
        self.linear = layer

    def forward(self, x):
        """Return the linear projection of ``x`` with shape ``(batch, 1)``."""
        return self.linear(x)
class VerticalLinearRegressionBase(VerticalModelBase):
    """Shared base for all parties of vertical (feature-split) linear regression.

    Handles config parsing, model construction and dataloader setup; the
    label-trainer / trainer subclasses implement the federated protocol.
    """

    def __init__(self, train_conf: dict, label: bool = False, *args, **kwargs):
        """
        Args:
            train_conf (dict): full training configuration of this stage.
            label (bool, optional): True if this party holds the labels.
                Defaults to False.
        """
        super().__init__(train_conf)
        self._parse_config()
        self.train_conf = train_conf
        self.label = label
        self.data_dim = None  # number of local features; set in _init_dataloader
        self.model = None
        self.train_dataloader, self.eval_dataloader = None, None
        # The assist trainer holds no data of its own, so it builds no dataloaders.
        if FedConfig.node_id != "assist_trainer":
            self._init_dataloader()

    def _parse_config(self) -> None:
        """Extract frequently used fields from the parsed configuration and
        make sure the output directory exists."""
        super()._parse_config()
        self.model_name = self.model_info.get("name")
        self.save_model_name = self.output.get("model", {}).get("name")
        self.save_onnx_model_name = self.output.get("onnx_model", {}).get("name", "")
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
        self.global_epoch = self.train_params.get("global_epoch")
        self.batch_size = self.train_params.get("batch_size")
        self.encryption_config = self.train_params.get("encryption")
        self.optimizer_config = self.train_params.get("optimizer")
        self.pretrain_model_path = self.input.get("pretrained_model", {}).get("path")
        self.random_seed = self.train_params.get("random_seed", None)
        self.early_stopping_config = self.train_params.get("early_stopping")
        self.save_frequency = self.interaction_params.get("save_frequency")

    @staticmethod
    def set_seed(seed):
        """Seed the torch CPU and CUDA RNGs for reproducibility."""
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)

    def _init_model(self, bias: bool = False) -> None:
        """
        Init linear regression model, optionally restoring pretrained weights.

        Args:
            bias (bool): whether the linear layer carries a bias term
                (True on the label trainer, False elsewhere).

        Returns: None
        """
        logger.info("Init model start.")
        self.model = VerticalLinearRegression(input_dim=self.data_dim, bias=bias)
        # Load pretrained model if needed.
        if self.pretrain_model_path is not None and self.pretrain_model_path != "":
            checkpoint = ModelPreserver.load(os.path.join(self.pretrain_model_path, self.input.get(
                "pretrained_model").get("name", None)))
            self.model.load_state_dict(checkpoint["state_dict"])
        logger.info("Init model completed.")

    def __load_data(self, config) -> CsvReader:
        """Create a reader for the first dataset entry of the given config list.

        Raises:
            NotImplementedError: if the dataset type is not ``csv``.
        """
        config = config[0]
        if config["type"] == "csv":
            data_reader = CsvReader(path=os.path.join(config["path"], config["name"]), has_id=config["has_id"],
                                    has_label=config["has_label"])
        else:
            raise NotImplementedError("Dataset type {} is not supported.".format(config["type"]))
        return data_reader

    def _init_data(self) -> None:
        """Load train (mandatory) and validation (optional) features, labels and ids.

        Raises:
            NotImplementedError: if no trainset is configured.
        """
        if len(self.input_trainset) > 0:
            data: CsvReader = self.__load_data(self.input_trainset)
            self.train = data.features()
            self.train_label = data.label()
            self.train_ids = list(range(len(data.ids)))
        else:
            raise NotImplementedError("Trainset was not configured.")
        if self.label:
            assert len(self.train) == len(self.train_label)
        if len(self.input_valset) > 0:
            data: CsvReader = self.__load_data(self.input_valset)
            self.val = data.features()
            self.val_label = data.label()
            self.val_ids = list(range(len(data.ids)))
            if self.label:
                assert len(self.val) == len(self.val_label)

    def _init_dataloader(self) -> None:
        """Build train/val dataloaders from the raw data loaded by ``_init_data``.

        Labels are only included in the batches on the label-holding party.
        Both branches previously duplicated the whole construction and
        converted the full training matrix to a tensor three times; the
        shared builder below does it once.
        """
        logger.info("Dataloader initiation start.")
        self._init_data()
        train_labels = self.train_label if self.label else None
        val_labels = self.val_label if self.label else None
        self.train_dataloader = self.__build_dataloader(self.train, train_labels, self.train_ids, shuffle=True)
        self.val_dataloader = self.__build_dataloader(self.val, val_labels, self.val_ids, shuffle=False)
        train_tensor = torch.tensor(self.train)
        self.data_dim = train_tensor.shape[-1]
        logger.info("Train data shape: {}.".format(list(train_tensor.shape)))
        logger.info("Dataloader initiation completed.")

    def __build_dataloader(self, features, labels, ids, shuffle: bool) -> DataLoader:
        """Assemble a DataLoader over (features[, labels], ids) tensor columns."""
        tensors = [torch.tensor(features, dtype=torch.float32)]
        if labels is not None:
            tensors.append(torch.unsqueeze(torch.tensor(labels), dim=-1))
        tensors.append(torch.unsqueeze(torch.tensor(ids), dim=-1))
        return DataLoader(dataset=TensorDataset(*tensors), batch_size=self.batch_size, shuffle=shuffle)
| 6,807 | 41.55 | 111 | py |
XFL | XFL-master/python/algorithm/framework/vertical/linear_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import secrets
from functools import reduce
from pathlib import Path
from typing import Optional
from common.checker.x_types import All
import numpy as np
import pandas as pd
import tenseal as ts
import torch
from common.checker.matcher import get_matched_config
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.algo_utils import earlyStopping
from common.utils.logger import logger
from common.utils.utils import save_model_config
from common.utils.model_io import ModelIO
from service.fed_config import FedConfig
from service.fed_node import FedNode
from service.fed_control import ProgressCalculator
from .base import VerticalLinearRegressionBase
class VerticalLinearRegressionLabelTrainer(VerticalLinearRegressionBase):
    # Label-holding party of vertical (feature-split) linear regression.
    # Coordinates encrypted loss/gradient exchange with the trainers and
    # relies on the assist trainer (private-key holder) for decryption.
    def __init__(self, train_conf: dict, *args, **kwargs):
        """
        Vertical Linear Regression

        Args:
            train_conf: training parameters
            *args:
            **kwargs:
        """
        self.sync_channel = BroadcastChannel(name="sync")
        self._sync_config(train_conf)
        super().__init__(train_conf, label=True, *args, **kwargs)
        if self.random_seed:
            self.set_seed(self.random_seed)
        self.progress_calculator = ProgressCalculator(self.global_epoch, len(self.train_dataloader))
        self._init_model(bias=True)
        self.export_conf = [{
            "class_name": "VerticalLinearRegression",
            "identity": self.identity,
            "filename": self.save_onnx_model_name,
            "input_dim": self.data_dim,
            "bias": True,
            "version": "1.4.0"
        }]
        self.es = earlyStopping(key=self.early_stopping_config["key"],
                                patience=self.early_stopping_config["patience"],
                                delta=self.early_stopping_config["delta"])
        self.best_model = None
        self.broadcast_channel = BroadcastChannel(name="Public keys", root_id=FedConfig.get_assist_trainer())
        # One dual channel per trainer plus a single channel to the assist trainer.
        self.dual_channels = {"intermediate_label_trainer": {}, "gradients_loss": None}
        for party_id in FedConfig.get_trainer():
            self.dual_channels["intermediate_label_trainer"][party_id] = DualChannel(
                name="intermediate_label_trainer_" + party_id, ids=[FedConfig.node_id, party_id])
        self.dual_channels["gradients_loss"] = DualChannel(name="gradients_loss_" + FedConfig.node_id,
                                                           ids=[FedConfig.get_assist_trainer()] + [FedConfig.node_id])
        self.train_result = None
        self.val_result = None
        # Tell the assist trainer the training schedule and encryption setup;
        # the receive order on the assist trainer side must match this order.
        self.dual_channels["gradients_loss"].send(len(self.train_dataloader))
        self.dual_channels["gradients_loss"].send(self.global_epoch)
        self.dual_channels["gradients_loss"].send(self.batch_size)
        self.encryption_method = list(self.encryption_config.keys())[0].lower()
        self.dual_channels["gradients_loss"].send(self.encryption_config)
        self.dual_channels["gradients_loss"].send(self.encryption_method)

    def _sync_config(self, config):
        """Broadcast the train_info section so every party trains with
        identical hyper-parameters."""
        sync_rule = {
            "train_info": All()
        }
        config_to_sync = get_matched_config(config, sync_rule)
        self.sync_channel.broadcast(config_to_sync)

    def predict(self, input_data):
        """Return ``(labels, predictions)`` over the dataloader.

        The per-sample prediction is the sum of the local partial prediction
        and the partial predictions received from every trainer.
        """
        pred_prob_epoch, y_epoch = [], []
        for batch_idx, (x_batch, y_batch, _) in enumerate(input_data):
            pred_trainer_list = []
            pred_label_trainer = self.model(x_batch).numpy().astype(np.float32).flatten()
            for party_id in FedConfig.get_trainer():
                pred_trainer_list.append(self.dual_channels["intermediate_label_trainer"][party_id].recv(
                    use_pickle=True))
            # calculate prediction of batch
            pred_total = pred_label_trainer + reduce(lambda x, y: x + y, pred_trainer_list)
            # calculate prediction of epoch
            pred_prob_epoch += pred_total.tolist()
            y_epoch += y_batch.numpy().astype(np.float32).flatten().tolist()
        return y_epoch, pred_prob_epoch

    def fit(self):
        """Run the federated training loop.

        Per batch: exchange encrypted partial predictions/losses with the
        trainers, have the assist trainer decrypt the (noise-masked) loss and
        gradients, then update the local weights and bias.  The send/recv
        statement order is a wire protocol shared with the trainer and
        assist-trainer implementations — do not reorder.
        """
        self.check_data()
        public_context = None
        num_cores = -1
        rng = secrets.SystemRandom()
        logger.info("Vertical linear regression training start")
        # receive encryption key from assist trainer
        if self.encryption_method == "ckks":
            logger.info("Receive ckks public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = ts.context_from(public_context)
            logger.info("Public key received.")
        elif self.encryption_method == "paillier":
            logger.info("Receive paillier public key.")
            public_context = self.broadcast_channel.recv(use_pickle=False)
            public_context = Paillier.context_from(public_context)
            logger.info("Public key received.")
        elif self.encryption_method == "plain":
            pass
        else:
            raise ValueError(f"Encryption method {self.encryption_method} not supported! Valid methods are "
                             f"'paillier', 'ckks', 'plain'.")
        # train
        for epoch in range(1, self.global_epoch + 1):
            loss_epoch = 0
            for batch_idx, (x_batch, y_batch, _) in enumerate(self.train_dataloader):
                pred_trainer = []
                loss_trainer = []
                loss_between_trainer = 0
                enc_pred_residual = None
                enc_loss_label_trainer = None
                regular_loss_tmp = 0
                regular_gradient_tmp = 0
                enc_regular_gradient_tmp = 0
                # calculate regular results (p selects L1 / L2 / no regularization)
                # NOTE(review): with p == 0, regular_gradient_tmp stays the int 0
                # and the later .numpy() calls would raise — confirm p == 0 is
                # actually a supported configuration.
                if self.optimizer_config['p'] == 1:
                    regular_loss_tmp = torch.abs(self.model.linear.weight).sum() * self.optimizer_config['alpha']
                    regular_gradient_tmp = self.optimizer_config['alpha'] * (torch.abs(self.model.linear.weight)
                                                                             / self.model.linear.weight)
                elif self.optimizer_config['p'] == 2:
                    regular_loss_tmp = (self.model.linear.weight ** 2).sum() * self.optimizer_config['alpha'] / 2
                    regular_gradient_tmp = self.optimizer_config['alpha'] * self.model.linear.weight
                elif self.optimizer_config['p'] == 0:
                    pass
                # compute theta_scheduler * label_trainer and loss of label_trainer
                logger.info("Calculate intermediate result of label trainer.")
                pred_label_trainer = self.model(x_batch)
                pred_residual = pred_label_trainer - y_batch
                # receive intermediate results from trainers
                for party_id in FedConfig.get_trainer():
                    if self.encryption_method == "ckks":
                        pred_trainer.append(ts.ckks_vector_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False)))
                        loss_trainer.append(ts.ckks_vector_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False)))
                    elif self.encryption_method == "paillier":
                        pred_trainer.append(Paillier.ciphertext_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False)))
                        loss_trainer.append(Paillier.ciphertext_from(public_context, self.dual_channels[
                            "intermediate_label_trainer"][party_id].recv(use_pickle=False)))
                    elif self.encryption_method == "plain":
                        pred_trainer.append(self.dual_channels["intermediate_label_trainer"][party_id].recv())
                        loss_trainer.append(self.dual_channels["intermediate_label_trainer"][party_id].recv())
                logger.info("Received predictions from trainers, length of collect list is {}."
                            .format(len(pred_trainer)))
                # calculate total loss
                logger.info("Calculate total loss.")
                square_tmp = (pred_residual ** 2).sum() / 2
                loss_label_trainer = square_tmp + regular_loss_tmp
                if self.encryption_method == "ckks":
                    loss_between_label_trainer = np.sum([pred_t.matmul(pred_residual.numpy()) for pred_t in pred_trainer
                                                         ])
                else:
                    loss_between_label_trainer = np.sum(pred_residual.numpy().flatten() * pred_trainer
                                                        )
                # calculate total loss_between_trainer when there are more than one trainer
                if len(pred_trainer) > 1:
                    if self.encryption_method == "plain":
                        loss_between_trainer = np.sum([np.sum(i * j) if ind_i != ind_j else 0
                                                       for ind_i, i in enumerate(pred_trainer)
                                                       for ind_j, j in enumerate(pred_trainer)]) / 2
                    elif self.encryption_method == "ckks":
                        loss_between_trainer = np.sum([i.dot(j) if ind_i != ind_j else 0
                                                       for ind_i, i in enumerate(pred_trainer)
                                                       for ind_j, j in enumerate(pred_trainer)]) * 0.5
                    elif self.encryption_method == "paillier":
                        # Paillier cannot multiply two ciphertexts, so the cross
                        # terms are computed by the trainers and received here.
                        loss_between_trainer = []
                        for party_id in FedConfig.get_trainer():
                            tmp = self.dual_channels["intermediate_label_trainer"][party_id].recv(use_pickle=False)
                            tmp = Paillier.ciphertext_from(public_context, tmp)
                            loss_between_trainer.append(tmp)
                        loss_between_trainer = np.sum(loss_between_trainer) / 2
                if self.encryption_method == "ckks":
                    enc_loss_label_trainer = ts.ckks_vector(public_context,
                                                            loss_label_trainer.numpy().astype(np.float32).flatten())
                elif self.encryption_method == "paillier":
                    enc_loss_label_trainer = Paillier.encrypt(public_context,
                                                              float(loss_label_trainer),
                                                              precision=self.encryption_config[self.encryption_method][
                                                                  "precision"],
                                                              obfuscation=True,
                                                              num_cores=num_cores)
                elif self.encryption_method == "plain":
                    enc_loss_label_trainer = loss_label_trainer
                enc_loss_batch = loss_between_trainer + loss_between_label_trainer + enc_loss_label_trainer + np.sum(
                    loss_trainer)
                # send total loss to assist_trainer
                logger.info("Send total loss to assist_trainer.")
                if self.encryption_method == "ckks":
                    self.dual_channels["gradients_loss"].send(enc_loss_batch.serialize(), use_pickle=False)
                elif self.encryption_method == "paillier":
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(enc_loss_batch), use_pickle=False)
                elif self.encryption_method == "plain":
                    self.dual_channels["gradients_loss"].send(enc_loss_batch)
                # receive decrypted loss from assist_trainer
                logger.info("Receive total loss from assist_trainer.")
                loss_batch = self.dual_channels["gradients_loss"].recv()
                loss_batch = loss_batch / x_batch.shape[0]
                logger.info("Loss of {} batch is {}".format(batch_idx, loss_batch))
                loss_epoch += loss_batch * x_batch.shape[0]
                # calculate intermediate result d (total residual across parties)
                logger.info("Calculate intermediate result d.")
                pred_rest_trainer = reduce(lambda x, y: x + y, pred_trainer)
                if self.encryption_method == "ckks":
                    enc_pred_residual = ts.ckks_vector(public_context,
                                                       pred_residual.numpy().astype(np.float32).flatten())
                elif self.encryption_method == "paillier":
                    enc_pred_residual = Paillier.encrypt(public_context,
                                                         pred_residual.numpy().astype(np.float32).flatten(),
                                                         precision=self.encryption_config[self.encryption_method][
                                                             "precision"], obfuscation=True, num_cores=num_cores)
                elif self.encryption_method == "plain":
                    enc_pred_residual = pred_residual.numpy().astype(np.float32).flatten()
                enc_d = enc_pred_residual + pred_rest_trainer
                # send intermediate result d to trainer
                logger.info("Send intermediate result d to trainer.")
                for party_id in FedConfig.get_trainer():
                    if self.encryption_method == "ckks":
                        self.dual_channels["intermediate_label_trainer"][party_id].send(enc_d.serialize(),
                                                                                        use_pickle=False)
                    elif self.encryption_method == "paillier":
                        self.dual_channels["intermediate_label_trainer"][party_id].send(Paillier.serialize(enc_d),
                                                                                        use_pickle=False)
                    elif self.encryption_method == "plain":
                        self.dual_channels["intermediate_label_trainer"][party_id].send(enc_d)
                # calculate gradient for label_trainer
                logger.info("Calculate gradients for label_trainer.")
                if self.encryption_method == "ckks":
                    enc_regular_gradient_tmp = ts.ckks_vector(public_context,
                                                              regular_gradient_tmp.numpy().astype(np.float32).flatten())
                elif self.encryption_method == "paillier":
                    enc_regular_gradient_tmp = Paillier.encrypt(
                        public_context, regular_gradient_tmp.numpy().astype(np.float32).flatten(),
                        precision=self.encryption_config[self.encryption_method]["precision"],
                        obfuscation=True, num_cores=num_cores)
                elif self.encryption_method == "plain":
                    enc_regular_gradient_tmp = regular_gradient_tmp.numpy().astype(np.float32).flatten()
                if self.encryption_method == "ckks":
                    gradient_label_trainer_w = enc_d.matmul(x_batch.numpy()) + enc_regular_gradient_tmp
                else:
                    gradient_label_trainer_w = np.matmul(enc_d.reshape(1, len(enc_d)), x_batch.numpy()
                                                         ) + enc_regular_gradient_tmp
                gradient_label_trainer_b = enc_d
                if self.encryption_method == "ckks":
                    # add noise to encrypted gradients and send to assist_trainer
                    # (masking keeps the true gradient hidden from the key holder)
                    logger.info("Calculate noised gradients for label_trainer.")
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noise_b = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[0])],
                                       dtype=np.float32)
                    noise_b /= 100000
                    noised_gradient_label_trainer_w = gradient_label_trainer_w + noise
                    noised_gradient_label_trainer_b = gradient_label_trainer_b + noise_b
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(noised_gradient_label_trainer_w.serialize(),
                                                              use_pickle=False)
                    self.dual_channels["gradients_loss"].send(noised_gradient_label_trainer_b.serialize(),
                                                              use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_decrypt_gradient = self.dual_channels["gradients_loss"].recv()
                    noised_decrypt_gradient_label_trainer_w = noised_decrypt_gradient["noised_gradient_label_trainer_w"]
                    noised_decrypt_gradient_label_trainer_b = noised_decrypt_gradient["noised_gradient_label_trainer_b"]
                    gradient_label_trainer_w = noised_decrypt_gradient_label_trainer_w - noise
                    gradient_label_trainer_b = noised_decrypt_gradient_label_trainer_b - np.sum(noise_b)
                elif self.encryption_method == "paillier":
                    # add noise to encrypted gradients and send to assist_trainer
                    logger.info("Calculate noised gradients for label_trainer.")
                    noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[1])],
                                     dtype=np.float32)
                    noise /= 100000
                    noise_b = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[0])],
                                       dtype=np.float32)
                    noise_b /= 100000
                    noised_gradient_label_trainer_w = gradient_label_trainer_w + noise
                    noised_gradient_label_trainer_b = gradient_label_trainer_b + noise_b
                    logger.info("Send noised gradient to assist_trainer.")
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(noised_gradient_label_trainer_w),
                                                              use_pickle=False)
                    self.dual_channels["gradients_loss"].send(Paillier.serialize(noised_gradient_label_trainer_b),
                                                              use_pickle=False)
                    # receive decrypted gradient from assist_trainer
                    logger.info("Receive decrypted gradient from assist_trainer.")
                    noised_decrypt_gradient = self.dual_channels["gradients_loss"].recv()
                    noised_decrypt_gradient_label_trainer_w = noised_decrypt_gradient["noised_gradient_label_trainer_w"]
                    noised_decrypt_gradient_label_trainer_b = noised_decrypt_gradient["noised_gradient_label_trainer_b"]
                    gradient_label_trainer_w = noised_decrypt_gradient_label_trainer_w - noise
                    gradient_label_trainer_b = noised_decrypt_gradient_label_trainer_b - np.sum(noise_b)
                elif self.encryption_method == "plain":
                    gradient_label_trainer_b = gradient_label_trainer_b.sum()
                # update w and b of label_trainer
                gradient_label_trainer_w = gradient_label_trainer_w / x_batch.shape[0]
                gradient_label_trainer_b = gradient_label_trainer_b / x_batch.shape[0]
                logger.info("Update weights of label trainer.")
                self.model.linear.weight -= (torch.FloatTensor(gradient_label_trainer_w) * self.optimizer_config["lr"])
                self.model.linear.bias -= (gradient_label_trainer_b * self.optimizer_config["lr"])
                # calculate and update the progress of the training
                self.progress_calculator.cal_custom_progress(epoch, batch_idx+1)
            loss_epoch = loss_epoch * (1 / len(self.train))
            logger.info("Loss of {} epoch is {}".format(epoch, loss_epoch))
            # predict train and val results for metrics
            logger.info("Predict train weights of label trainer.")
            self.train_result = self.predict(self.train_dataloader)
            loss_train_met = {"loss": loss_epoch}
            self._calc_metrics(np.array(self.train_result[1], dtype=float), np.array(self.train_result[0]),
                               epoch, stage="train", loss=loss_train_met)
            logger.info("Predict val weights of label trainer.")
            self.val_result = self.predict(self.val_dataloader)
            val_residual = np.array(self.val_result[1]) - np.array(self.val_result[0])
            loss_val_met = {"loss": np.mean((val_residual ** 2) / 2)}  # no regular
            val_metrics = self._calc_metrics(np.array(self.val_result[1], dtype=float), np.array(self.val_result[0]),
                                             epoch, stage="val", loss=loss_val_met)
            # early stopping (es minimizes, so negate the loss)
            val_metrics["loss"] = - val_metrics["loss"]
            if self.early_stopping_config["patience"] > 0:
                early_stop_flag, best_model_flag = self.es(val_metrics)
            else:
                early_stop_flag, best_model_flag = False, True
            # update best model
            if best_model_flag:
                self.best_model = copy.deepcopy(self.model)
            # send flags to trainers
            for party_id in FedConfig.get_trainer():
                self.dual_channels["intermediate_label_trainer"][party_id].send(
                    [early_stop_flag, best_model_flag, self.early_stopping_config["patience"]], use_pickle=True)
            # if need to save results by epoch
            if self.save_frequency > 0 and epoch % self.save_frequency == 0:
                # ModelPreserver.save(save_dir=self.save_dir, model_name=self.save_model_name,
                #                     state_dict=self.model.state_dict(), epoch=epoch)
                self.save_model(epoch=epoch)
            # if early stopping, break
            if early_stop_flag:
                # update the progress of 100 to show the training is finished
                ProgressCalculator.finish_progress()
                break
        # save model for infer
        self.save_model(epoch=None)
        # if not early stopping, save probabilities and model
        self._save_prob()
        # calculate feature importance
        self._save_feature_importance(self.dual_channels)

    def save_model(self, epoch: Optional[int] = None):
        """Persist the best model (torch state dict and/or ONNX export).

        When ``epoch`` is None this is the final save, which also writes the
        inference stage config.
        """
        if not epoch:
            save_model_config(stage_model_config=self.export_conf,
                              save_path=Path(self.save_dir))
        if self.save_model_name:
            ModelIO.save_torch_model(
                state_dict=self.best_model.state_dict(),
                save_dir=self.save_dir,
                model_name=self.save_model_name,
                meta_dict={},
                epoch=epoch
            )
        if self.save_onnx_model_name:
            ModelIO.save_torch_onnx(
                model=self.best_model,
                input_dim=(self.data_dim,),
                save_dir=self.save_dir,
                model_name=self.save_onnx_model_name,
                epoch=epoch
            )

    def _save_prob(self):
        """Write train/val predictions to disk if enabled in interaction_params."""
        if self.interaction_params.get("write_training_prediction"):
            self._write_prediction(self.train_result[1], self.train_result[0], self.train_ids,
                                   stage="train", final=True)
        if self.interaction_params.get("write_validation_prediction"):
            self._write_prediction(self.val_result[1], self.val_result[0], self.val_ids,
                                   stage="val", final=True)

    def check_data(self):
        """Collect feature dimensions from all trainers and abort if the total
        feature count is zero."""
        dim_channel = BroadcastChannel(name="check_data_com", ids=[FedConfig.node_id] + FedConfig.get_trainer())
        n = self.data_dim
        dims = dim_channel.collect()
        for dim in dims:
            n += dim
        if n <= 0:
            raise ValueError("Number of the feature is zero. Stop training.")

    def _save_feature_importance(self, channel):
        """Gather per-party weights and write a global feature-importance CSV,
        sorted by absolute weight (descending)."""
        res = {"owner_id": [], "fid": [], "importance": []}
        other_weight_list = []
        for party_id in FedConfig.get_trainer():
            other_weight_list.append(channel["intermediate_label_trainer"][party_id].recv(use_pickle=True))
        for (owner_id, weights) in other_weight_list:
            for fid, weight in enumerate(weights):
                res["owner_id"].append(owner_id)
                res["fid"].append(fid)
                res["importance"].append(float(weight))
        for fid, weight in enumerate(self.best_model.state_dict()["linear.weight"][0]):
            res["owner_id"].append(FedNode.node_id)
            res["fid"].append(fid)
            res["importance"].append(float(weight))
        res = pd.DataFrame(res).sort_values(by="importance", key=lambda col: np.abs(col), ascending=False)
        res.to_csv(Path(self.save_dir, self.output["feature_importance"]["name"]), header=True, index=False,
                   float_format="%.6g")
| 26,235 | 57.825112 | 120 | py |
XFL | XFL-master/python/algorithm/framework/vertical/linear_regression/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import secrets
from pathlib import Path
from typing import Optional
import numpy as np
import tenseal as ts
import torch
from common.utils.utils import update_dict
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.logger import logger
from common.utils.model_io import ModelIO
from service.fed_config import FedConfig
from service.fed_node import FedNode
from common.utils.utils import save_model_config
from .base import VerticalLinearRegressionBase
class VerticalLinearRegressionTrainer(VerticalLinearRegressionBase):
    def __init__(self, train_conf: dict, *args, **kwargs):
        """Trainer-side (feature-only, no label) party of vertical linear regression.

        Args:
            train_conf (dict): local training configuration; its train_info
                section is overwritten by the config broadcast from the
                label trainer so all parties share hyper-parameters.
        """
        self.sync_channel = BroadcastChannel(name="sync")
        # Receive the shared config before building the base class state.
        conf = self._sync_config()
        update_dict(train_conf, conf)
        super().__init__(train_conf, label=False, *args, **kwargs)
        self._init_model()
        # Inference-stage export metadata (trainer side has no bias term).
        self.export_conf = [{
            "class_name": "VerticalLinearRegression",
            "identity": self.identity,
            "filename": self.save_onnx_model_name,
            "input_dim": self.data_dim,
            "bias": False
        }]
        if self.random_seed:
            self.set_seed(self.random_seed)
        self.best_model = None
        # Channel names/ids must mirror those created by the label trainer and
        # assist trainer; keep construction order unchanged.
        self.broadcast_channel = BroadcastChannel(name="Public keys", root_id=FedConfig.get_assist_trainer())
        if len(FedConfig.get_trainer()) > 1:
            self.broadcast_trainer = BroadcastChannel(name="Trainer exchange", root_id=FedConfig.node_id,
                                                      ids=FedConfig.get_trainer())
        self.dual_channels = {
            "intermediate_label_trainer": DualChannel(name="intermediate_label_trainer_" + FedConfig.node_id,
                                                      ids=FedConfig.get_label_trainer() + [FedConfig.node_id]),
            "gradients_loss": DualChannel(name="gradients_loss_" + FedConfig.node_id,
                                          ids=[FedConfig.get_assist_trainer()] + [FedConfig.node_id])
        }
def predict(self, input_data):
for batch_idx, x_batch in enumerate(input_data):
# calculate prediction of batch
pred_trainer = self.model(x_batch[0])
# send to label_trainer
self.dual_channels["intermediate_label_trainer"].send(pred_trainer.numpy().astype(np.float32).flatten(),
use_pickle=True)
def _sync_config(self):
config = self.sync_channel.recv()
return config
def fit(self):
""" train model
Model parameters need to be updated before fitting.
"""
self.check_data()
num_cores = -1
encryption_config = self.encryption_config
encryption_method = list(self.encryption_config.keys())[0].lower()
logger.info("Vertical linear regression training start")
# receive encryption key from assist trainer
public_context = None
if encryption_method == "ckks":
logger.info("Receive ckks public key.")
public_context = self.broadcast_channel.recv(use_pickle=False)
public_context = ts.context_from(public_context)
logger.info("Public key received.")
elif encryption_method == "paillier":
logger.info("Receive paillier public key.")
public_context = self.broadcast_channel.recv(use_pickle=False)
public_context = Paillier.context_from(public_context)
logger.info("Public key received.")
elif encryption_method == "plain":
pass
else:
raise ValueError(
f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', 'ckks', 'plain'.")
rng = secrets.SystemRandom()
# train
for epoch in range(1, self.global_epoch + 1):
for batch_idx, x_batch in enumerate(self.train_dataloader):
regular_loss_tmp = 0
regular_gradient_tmp = 0
enc_regular_gradient_tmp = 0
# calculate regular results
if self.optimizer_config['p'] == 1:
regular_loss_tmp = torch.abs(self.model.linear.weight).sum() * self.optimizer_config['alpha']
regular_gradient_tmp = self.optimizer_config['alpha'] * (torch.abs(self.model.linear.weight)
/ self.model.linear.weight)
elif self.optimizer_config['p'] == 2:
regular_loss_tmp = (self.model.linear.weight ** 2).sum() * self.optimizer_config['alpha'] / 2
regular_gradient_tmp = self.optimizer_config['alpha'] * self.model.linear.weight
elif self.optimizer_config['p'] == 0:
pass
# compute theta_trainer * x_trainer and loss of x_trainer
pred_trainer = self.model(x_batch[0])
square_tmp = (pred_trainer ** 2).sum() / 2
loss_trainer = square_tmp + regular_loss_tmp
# send intermediate results to label trainer.
logger.info("Send intermediate result to label trainer.")
enc_pred_trainer = None
if encryption_method == "ckks":
enc_pred_trainer = ts.ckks_vector(public_context, pred_trainer.numpy().astype(np.float32).flatten())
enc_loss_trainer = ts.ckks_vector(public_context, loss_trainer.numpy().astype(np.float32).flatten())
self.dual_channels["intermediate_label_trainer"].send(enc_pred_trainer.serialize(),
use_pickle=False)
self.dual_channels["intermediate_label_trainer"].send(enc_loss_trainer.serialize(),
use_pickle=False)
elif encryption_method == "paillier":
enc_pred_trainer = Paillier.encrypt(public_context,
pred_trainer.numpy().astype(np.float32).flatten(),
precision=encryption_config[encryption_method]["precision"],
obfuscation=True,
num_cores=num_cores)
enc_loss_trainer = Paillier.encrypt(public_context,
loss_trainer.numpy().astype(np.float32).flatten(),
precision=encryption_config[encryption_method]["precision"],
obfuscation=True,
num_cores=num_cores)
self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(enc_pred_trainer),
use_pickle=False)
self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(enc_loss_trainer),
use_pickle=False)
elif encryption_method == "plain":
enc_pred_trainer = pred_trainer.numpy().astype(np.float32).flatten()
enc_loss_trainer = loss_trainer.numpy().astype(np.float32).flatten()
self.dual_channels["intermediate_label_trainer"].send(enc_pred_trainer, use_pickle=True)
self.dual_channels["intermediate_label_trainer"].send(enc_loss_trainer, use_pickle=True)
# exchange theta_trainer * x_trainer to calculate loss_between_trainer when encryption is paillier
logger.info("Calculate trainer_sum to label trainer when encryption is paillier.")
if encryption_method == "paillier" and len(FedConfig.get_trainer()) > 1:
trainer_sum = 0
logger.info("Send intermediate result to other trainers when encryption is paillier.")
self.broadcast_trainer.broadcast(Paillier.serialize(enc_pred_trainer), use_pickle=False)
logger.info("Receive intermediate result from other trainers when encryption is paillier.")
trainer_tmp = self.broadcast_trainer.collect(use_pickle=False)
for trainer_u in trainer_tmp:
trainer_u = Paillier.ciphertext_from(public_context, trainer_u)
trainer_sum += np.sum(trainer_u * pred_trainer.numpy().astype(np.float32).flatten())
logger.info("Send trainer_sum to label trainer when encryption is paillier.")
self.dual_channels["intermediate_label_trainer"].send(Paillier.serialize(trainer_sum),
use_pickle=False)
# receive intermediate result d from label_trainer
logger.info("Receive intermediate result d from label_trainer.")
if encryption_method == "ckks":
enc_d = self.dual_channels["intermediate_label_trainer"].recv(use_pickle=False)
enc_d = ts.ckks_vector_from(public_context, enc_d)
elif encryption_method == "paillier":
enc_d = self.dual_channels["intermediate_label_trainer"].recv(use_pickle=False)
enc_d = Paillier.ciphertext_from(public_context, enc_d)
elif encryption_method == "plain":
enc_d = self.dual_channels["intermediate_label_trainer"].recv()
# calculate gradient for trainer and send to assist_trainer
logger.info("Calculate gradients for trainer.")
if encryption_method == "ckks":
enc_regular_gradient_tmp = ts.ckks_vector(public_context,
regular_gradient_tmp.numpy().astype(np.float32).flatten())
elif encryption_method == "paillier":
enc_regular_gradient_tmp = Paillier.encrypt(
public_context, regular_gradient_tmp.numpy().astype(np.float32).flatten(),
precision=encryption_config[encryption_method]["precision"],
obfuscation=True, num_cores=num_cores)
elif encryption_method == "plain":
enc_regular_gradient_tmp = regular_gradient_tmp.numpy().astype(np.float32).flatten()
if encryption_method == "ckks":
gradient_trainer_w = enc_d.matmul(x_batch[0].numpy()) + enc_regular_gradient_tmp
else:
gradient_trainer_w = np.matmul(enc_d.reshape(1, len(enc_d)), x_batch[0].numpy()
) + enc_regular_gradient_tmp
# add noise to encrypted gradients and send to assist_trainer
if encryption_method == "ckks":
logger.info("Calculate noised gradient for trainer.")
noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch[0].shape[1])],
dtype=np.float32)
noise /= 100000
noised_gradient_trainer_w = gradient_trainer_w + noise
logger.info("Send noised gradient to assist_trainer.")
self.dual_channels["gradients_loss"].send(noised_gradient_trainer_w.serialize(), use_pickle=False)
# receive decrypted gradient from assist_trainer
logger.info("Receive decrypted gradient from assist_trainer.")
noised_gradient_trainer_w = self.dual_channels["gradients_loss"].recv()
gradient_trainer_w = noised_gradient_trainer_w - noise
elif encryption_method == "paillier":
logger.info("Calculate noised gradient for trainer.")
noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch[0].shape[1])],
dtype=np.float32)
noise /= 100000
noised_gradient_trainer_w = gradient_trainer_w + noise
logger.info("Send noised gradient to assist_trainer.")
self.dual_channels["gradients_loss"].send(Paillier.serialize(noised_gradient_trainer_w),
use_pickle=False)
# receive decrypted gradient from assist_trainer
logger.info("Receive decrypted gradient from assist_trainer.")
noised_gradient_trainer_w = self.dual_channels["gradients_loss"].recv()
gradient_trainer_w = noised_gradient_trainer_w - noise
# gradient_trainer_w = torch.FloatTensor(gradient_trainer_w).unsqueeze(-1)
# update w and b of trainer
gradient_trainer_w = gradient_trainer_w / x_batch[0].shape[0]
logger.info("Update weights of trainer.")
self.model.linear.weight -= (torch.FloatTensor(gradient_trainer_w) * self.optimizer_config["lr"])
# predict train and val for metrics
logger.info("Predict train weights of trainer.")
self.predict(self.train_dataloader)
logger.info("Predict val weights of trainer.")
self.predict(self.val_dataloader)
# receive flags
early_stop_flag, best_model_flag, patient = self.dual_channels["intermediate_label_trainer"].recv(
use_pickle=True)
# update best model
if best_model_flag:
self.best_model = copy.deepcopy(self.model)
# if need to save results by epoch
if self.save_frequency > 0 and epoch % self.save_frequency == 0:
# ModelPreserver.save(save_dir=self.save_dir,
# model_name=self.save_model_name,
# state_dict=self.model.state_dict(),
# epoch=epoch)
self.save_model(epoch=epoch)
# if early stopping, break
if early_stop_flag:
break
# save model for infer
# if not early stopping, save model
self.save_model(epoch=None)
# send w to label trainer
self._save_feature_importance(self.dual_channels["intermediate_label_trainer"])
def save_model(self, epoch: Optional[int] = None):
if not epoch:
save_model_config(stage_model_config=self.export_conf,
save_path=Path(self.save_dir))
if self.save_model_name:
ModelIO.save_torch_model(
state_dict=self.best_model.state_dict(),
save_dir=self.save_dir,
model_name=self.save_model_name,
meta_dict={},
epoch=epoch
)
if self.save_onnx_model_name:
ModelIO.save_torch_onnx(
model=self.best_model,
input_dim=(self.data_dim,),
save_dir=self.save_dir,
model_name=self.save_onnx_model_name,
epoch=epoch
)
def _save_feature_importance(self, channel):
channel.send((FedNode.node_id, self.best_model.state_dict()["linear.weight"][0]))
def check_data(self):
dim_channel = BroadcastChannel(name="check_data_com", ids=[FedConfig.node_id] + FedConfig.get_trainer())
dim_channel.send(self.data_dim)
| 16,611 | 54.373333 | 120 | py |
XFL | XFL-master/python/algorithm/framework/vertical/kmeans/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
from itertools import chain
from pathlib import Path
import warnings
import numpy as np
import pandas as pd
import pyspark.pandas as ps
import torch
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
warnings.filterwarnings("ignore")
class VerticalKmeansBase(TrainConfigParser):
    """Base class for vertical federated k-means trainers.

    Loads this party's (vertical) slice of the features, parses the
    k-means hyper-parameters from the train config, and provides the
    shared numeric primitives: sample-to-centroid distance tables,
    centroid / cluster-count recomputation and the convergence tolerance.
    """

    def __init__(self, train_conf: dict, label: bool = False, *args, **kwargs):
        """Parse the config, then load data and hyper-parameters.

        Args:
            train_conf: full training configuration dict.
            label: whether this party holds the label column (may be
                overridden by the trainset's ``has_label`` flag).
        """
        super().__init__(train_conf)
        # Defaults; real values are filled in by _init_config().
        self.k = 0
        self.max_iter = 0
        self.tol = 0.0
        self.is_converged = False
        self.init = "random"
        self.encryption = "plain"
        self.label = label
        self.cluster_centers = []
        self.cluster_count_list = []
        self.train_features, self.train_label, self.train_ids = None, None, None
        self._init_data()
        self._init_config()

    def _init_data(self):
        """Load the local trainset (csv only) into a dataframe.

        Sets ``self.train_features``, ``self.train_label`` (when this party
        has the label) and ``self.train_ids``. Does nothing when no
        trainset is configured.

        Raises:
            NotImplementedError: for unsupported dataset types or
                computing engines.
        """
        logger.info("init data loader.")
        if not self.input_trainset:
            return None
        input_info = self.input_trainset[0]
        file_path = str(Path(input_info.get("path"), input_info.get("name")))
        type_ = input_info.get("type", "None")
        # Use the configured id column as the frame index when present.
        if input_info.get("has_id", True):
            index_col = input_info.get("index_col", 'id')
        else:
            index_col = None
        # The trainset config overrides the constructor's `label` flag.
        if input_info.get("has_label", True):
            label_name = input_info.get("label_name", 'y')
            self.label = True
        else:
            label_name = None
            self.label = False
        if type_ == "csv":
            if self.computing_engine == "local":
                df = pd.read_csv(file_path, index_col=index_col)
            elif self.computing_engine == "spark":
                df = ps.read_csv(file_path, index_col=index_col)
            else:
                raise NotImplementedError("Computing engine {} is not supported.".format(self.computing_engine))
        else:
            raise NotImplementedError("Dataset type {} is not supported.".format(type_))
        if self.label:
            # Everything except the label column is a feature.
            feature_cols = [col for col in df.columns if col != label_name]
            self.train_features = df[feature_cols]
            self.train_label = df[label_name] if label_name else None
        else:
            self.train_features = df
        self.train_ids = df.index

    def _init_config(self):
        """Read k-means hyper-parameters from ``train_params``.

        Raises:
            ValueError: via ``_check()`` when the data / k combination is
                invalid (checked only on data-holding, non-assist trainers).
        """
        params = self.train_info.get("train_params", {})
        self.k = params.get("k", 5)
        self.init = params.get("init", "random")
        self.max_iter = params.get("max_iter", 20)
        self.tol = params.get("tol", 1e-5)
        self.random_seed = params.get("extra_config", {}).get("random_seed", 2022)
        if self.identity != "assist_trainer":
            self._check()
        # Bug fix: fall back to the current default ("plain") instead of
        # silently setting None when the config omits "encryption".
        self.encryption = params.get("encryption", self.encryption)

    def _check(self):
        """Validate the dataset size against k.

        Raises:
            ValueError: on an empty dataset or an out-of-range k.
        """
        if len(self.train_features) <= 0:
            raise ValueError("error: empty dataset.")
        if self.k < 2:
            raise ValueError("k must be an integer value larger than 1.")
        elif self.k > len(self.train_features):
            raise ValueError("k is larger than the size of current data.")

    @staticmethod
    def euclid_distance(u, center_list):
        """Return squared euclidean distances from sample u to each center."""
        return [sum(np.square(center - u)) for center in center_list]

    def distance_table(self, centers):
        """Compute the squared-distance table of all samples to all centers.

        Args:
            centers: cluster centroids (dataframe, pandas-on-spark frame or
                list of vectors).

        Returns:
            (n * k) tensor whose [i, j] element is the squared distance of
            sample i to centroid j; None when there are no local features.
        """
        if isinstance(centers, (ps.DataFrame, pd.DataFrame)):
            centers = centers.to_numpy()
        elif isinstance(centers, list):
            centers = np.array(centers)
        n = len(self.train_features)
        if self.train_features.empty:
            return
        dist_fn = functools.partial(self.euclid_distance, center_list=centers)
        per_sample = self.train_features.apply(dist_fn, axis=1)
        # Flatten the per-row lists, then reshape back to (n, k).
        return torch.Tensor(list(chain.from_iterable(per_sample.to_numpy()))).reshape(n, len(centers))

    @staticmethod
    def distance_between_centers(center_list):
        """Return squared distances between every ordered pair of centers."""
        pair_dists = [
            np.sum((np.array(center_list[i]) - np.array(center_list[j])) ** 2)
            for i in range(len(center_list))
            for j in range(len(center_list))
            if j != i
        ]
        return torch.Tensor(pair_dists)

    def calc_centers(self, centers, cluster_result):
        """Recompute cluster centers from the current assignment.

        Args:
            centers: current centers; reused for clusters that received no
                samples this round.
            cluster_result: per-sample cluster labels, aligned with
                ``self.train_features``.

        Returns:
            List of k center vectors.
        """
        feature_sum = {}
        feature_count = {}
        # Accumulate per-cluster feature sums and member counts.
        for feature, label in zip(self.train_features.values, cluster_result):
            if label not in feature_sum:
                feature_sum[label] = copy.deepcopy(feature)
            else:
                feature_sum[label] += feature
            feature_count[label] = feature_count.get(label, 0) + 1
        center_list = []
        for idx in range(self.k):
            if idx not in feature_sum:
                # Empty cluster: keep its previous center unchanged.
                if isinstance(centers, (ps.DataFrame, pd.DataFrame)):
                    center_list.append(centers.iloc[idx])
                elif isinstance(centers, list):
                    center_list.append(centers[idx])
                else:
                    raise NotImplementedError
            else:
                center_list.append(feature_sum[idx] / feature_count[idx])
        return center_list

    def calc_cluster_count(self, cluster_result):
        """Count cluster memberships.

        Args:
            cluster_result: per-sample cluster labels.

        Returns:
            List of [cluster_id, count, fraction] triples for all k clusters.
        """
        feature_count = {}
        for label in cluster_result:
            feature_count[label] = feature_count.get(label, 0) + 1
        total = len(cluster_result)
        counts = []
        for idx in range(self.k):
            member_num = feature_count.get(idx, 0)
            ratio = member_num / total if member_num else 0
            counts.append([idx, member_num, ratio])
        return counts

    @staticmethod
    def calc_tolerance(centers, centers_new):
        """Return the summed squared movement of all centers (convergence metric)."""
        if isinstance(centers, (ps.DataFrame, pd.DataFrame)):
            centers = centers.to_numpy()
        elif isinstance(centers, list):
            centers = np.array(centers)
        return np.sum(np.sum((centers - np.array(centers_new)) ** 2, axis=1))
XFL | XFL-master/python/algorithm/framework/vertical/xgboost/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any
import numpy as np
import pandas as pd
from algorithm.core.data_io import CsvReader, NdarrayIterator
from algorithm.core.encryption_param import get_encryption_param
from algorithm.core.tree.cat_param_parser import parse_category_param
from algorithm.core.tree.tree_param import XGBTreeParam
from algorithm.framework.vertical.vertical_model_base import VerticalModelBase
from common.utils.logger import logger
class VerticalXgboostBase(VerticalModelBase):
    """Base class for vertical federated XGBoost trainers.

    Parses the tree hyper-parameters, loads the train/val/test csv
    datasets, and converts the training features into binned (histogram)
    form for split finding.
    """

    def __init__(self, train_conf: dict, is_label_trainer: bool = False, *args, **kwargs):
        """Initialize config, data and binned features.

        Args:
            train_conf: full training configuration dict.
            is_label_trainer: True when this party holds the labels.
        """
        super().__init__(train_conf)
        self.train_conf = train_conf
        self.train_features, self.train_label, self.train_ids = None, None, None
        self.val_features, self.val_label, self.val_ids = None, None, None
        self.test_features, self.test_label, self.test_ids = None, None, None
        self.xgb_config = None
        self.is_label_trainer = is_label_trainer
        self.feature_importances_ = {}
        # Config must be parsed first: data loading reads xgb_config.
        self.__init_xgb_config()
        self.__init_data()
        self.__convert_to_binned_data()

    def __init_data(self) -> None:
        """Load train/val/test datasets and wrap features into iterators.

        Returns: None
        """
        self.bs = self.train_params.get("batch_size_val")
        if self.input_trainset:
            _ = self.__load_data(self.input_trainset)
            self.train_features, self.train_label, self.train_ids, self.train_names = _
            self.train_dataset = NdarrayIterator(self.train_features.to_numpy(), self.bs)
        else:
            self.train_dataset = None
        if self.input_valset:
            _ = self.__load_data(self.input_valset)
            self.val_features, self.val_label, self.val_ids, self.val_names = _
            self.val_dataset = NdarrayIterator(self.val_features.to_numpy(), self.bs)
        else:
            self.val_dataset = None
        if self.input_testset:
            _ = self.__load_data(self.input_testset)
            self.test_features, self.test_label, self.test_ids, self.test_names = _
            self.test_dataset = NdarrayIterator(self.test_features.to_numpy(), self.bs)
        else:
            self.test_dataset = None

    def __convert_to_binned_data(self):
        """Bin the training features in place.

        Note: self.train_features is converted to binned features.
        Continuous columns are cut into ``num_bins`` equal-width bins;
        categorical columns are label-encoded, with the rarest categories
        merged into the last bin when there are more than ``num_bins``
        distinct values. ``self.split_points`` keeps, per feature, either
        the inner bin edges (continuous) or the original category values
        (categorical).
        """
        cat_columns = parse_category_param(self.train_features,
                                           col_index=self.xgb_config.cat_col_index,
                                           col_names=self.xgb_config.cat_col_names,
                                           col_index_type=self.xgb_config.cat_col_index_type,
                                           col_names_type=self.xgb_config.cat_col_names_type,
                                           max_num_value=self.xgb_config.cat_max_num_value,
                                           max_num_value_type=self.xgb_config.cat_max_num_value_type)
        self.cat_columns = cat_columns
        self.cat_feature_names = []
        if len(cat_columns) > 0:
            self.cat_feature_names = self.train_features.columns[cat_columns].to_list()
            self.train_features[self.cat_feature_names] = self.train_features[self.cat_feature_names].astype('category')

        def f(x):
            # Bin a single column (by name); returns (binned series, split points).
            if self.train_features[x].dtypes == "category":
                value_counts = self.train_features[x].value_counts()  # descending order
                if value_counts.shape[0] > self.xgb_config.num_bins:
                    # Keep the (num_bins - 1) most frequent values; merge the
                    # tail into one shared bin.
                    values = value_counts.index.to_list()
                    list_unique = values[:self.xgb_config.num_bins - 1]
                    list_group = values[self.xgb_config.num_bins - 1:]
                    uniques = np.array(list_unique + [list_group], dtype=object)
                    value_map = {v: i for i, v in enumerate(list_unique)}
                    value_map.update({v: len(list_unique) for v in list_group})
                    codes = self.train_features[x].map(value_map)
                else:
                    # NOTE: na_sentinel is deprecated/removed in pandas >= 2.0;
                    # it will not be activated here anyway.
                    codes, uniques = pd.factorize(self.train_features[x], na_sentinel=0)
                    uniques = uniques.to_numpy()
                # uniques: array of values that belongs to the same category
                # codes: binned values
                return pd.Series(codes, name=x), uniques.tolist()
            else:
                binned_values, split_points = pd.cut(self.train_features[x], bins=self.xgb_config.num_bins,
                                                     retbins=True, labels=range(self.xgb_config.num_bins))
                return binned_values, split_points

        if self.input_trainset:
            out = pd.Series(self.train_features.columns).apply(f)
            # Use the smallest unsigned dtype that can hold all bin codes.
            if self.xgb_config.num_bins <= 256:
                dtype = np.uint8
            elif self.xgb_config.num_bins <= 2 ** 16:
                dtype = np.uint16
            else:
                dtype = np.uint32
            self.train_features = pd.DataFrame([out[i][0] for i in range(len(out))], dtype=dtype).T
            # For continuous features, self.split_points stores the split points between bins,
            # for example, 15 split points for 16 bins.
            # For categorial features, self.split_points stores original values correspond to
            # the bin values, for example, 16 values for 16 bins.
            self.split_points = [out[i][1][1:-1] if i not in self.cat_columns else out[i][1][:]
                                 for i in range(len(out))]

    def __load_data(self, config):
        """Load one dataset from its config.

        Args:
            config: list of dataset configs; only the first entry is used.

        Returns:
            Tuple (features, labels, ids, names); labels is None on
            non-label trainers, and all four are None when config is empty.

        Raises:
            NotImplementedError: for non-csv dataset types.
        """
        if len(config) > 1:
            logger.warning("More than one dataset is not supported.")
        if not config:
            return None, None, None, None
        config = config[0]
        if config["type"] == "csv":
            path = os.path.join(config["path"], config["name"])
            if not path:
                # Bug fix: callers unpack four values; this branch used to
                # return only three Nones.
                return None, None, None, None
            data_reader = CsvReader(path, has_id=config["has_id"], has_label=config["has_label"])
            features = data_reader.features(type="pandas.dataframe")
            # Treat NaN and the configured missing-value marker as 0.
            features.replace({np.nan: 0, self.xgb_config.missing_value: 0}, inplace=True)
            ids = data_reader.ids
            names = data_reader.feature_names()
            if self.is_label_trainer:
                labels = data_reader.label()
            else:
                labels = None
        else:
            raise NotImplementedError("Dataset type {} is not supported.".format(config["type"]))
        return features, labels, ids, names

    def col_sample(self) -> tuple[Any, dict]:
        """Randomly sample feature columns for one tree.

        Returns:
            (sampled feature frame, mapping of sampled index -> original
            column index). Samples all columns when the configured rate is
            outside (0, 1].
        """
        col_size = self.train_features.shape[1]
        if 0 < self.xgb_config.subsample_feature_rate <= 1:
            sample_num = int(col_size * self.xgb_config.subsample_feature_rate)
        else:
            sample_num = col_size
        sampled_idx = np.sort(np.random.choice(col_size, sample_num, replace=False))
        feature_id_mapping = {a: b for a, b in enumerate(sampled_idx)}
        sampled_features = self.train_features.iloc[:, sampled_idx]
        return sampled_features, feature_id_mapping

    def __init_xgb_config(self) -> None:
        """Build the XGBTreeParam from train_params.

        Returns: None
        """
        default_config = self.train_info.get("train_params")
        cat_params = default_config.get("category", {}).get("cat_features", {})
        encryption_methods = list(default_config.get("encryption", {}).keys())
        if len(encryption_methods) > 0:
            encryption_method = encryption_methods[0]
        else:
            encryption_method = "plain"
        encryption_params = default_config.get("encryption", {"plain": {}})[encryption_method]
        downsampling_params = default_config.get("downsampling", {})
        self.xgb_config = XGBTreeParam(loss_param=default_config.get("lossfunc"),  # ("BCEWithLogitsLoss"),
                                       num_trees=default_config.get("num_trees"),
                                       learning_rate=default_config.get("learning_rate"),
                                       gamma=default_config.get("gamma"),
                                       lambda_=default_config.get("lambda_"),
                                       max_depth=default_config.get("max_depth"),
                                       num_bins=default_config.get("num_bins", 16),
                                       min_split_gain=default_config.get("min_split_gain"),
                                       min_sample_split=default_config.get("min_sample_split"),
                                       min_leaf_node=default_config.get("min_leaf_node"),
                                       feature_importance_type=default_config.get("feature_importance_type"),
                                       run_goss=downsampling_params.get("row", {}).get("run_goss", False),
                                       top_rate=downsampling_params.get("row", {}).get("top_rate"),
                                       other_rate=downsampling_params.get("row", {}).get("other_rate"),
                                       metrics=default_config.get("metric"),
                                       early_stopping_param=default_config.get("early_stopping",
                                                                               {"patience": -1,
                                                                                "key": "ks",
                                                                                "delta": 0.001}),
                                       encryption_param=get_encryption_param(encryption_method, encryption_params),
                                       subsample_feature_rate=downsampling_params.get("column", {}).get("rate", 1.0),
                                       missing_value=float('inf'),
                                       max_num_cores=default_config.get("max_num_cores", 999),
                                       col_batch=default_config.get("advanced", {}).get("col_batch", 64),
                                       row_batch=default_config.get("advanced", {}).get("row_batch", 40000),
                                       cat_col_index=cat_params.get("col_index", ""),
                                       cat_col_names=cat_params.get("col_names", []),
                                       cat_max_num_value=cat_params.get("max_num_value", 0),
                                       cat_col_index_type=cat_params.get("col_index_type", "inclusive"),
                                       cat_col_names_type=cat_params.get("col_names_type", "inclusive"),
                                       cat_max_num_value_type=cat_params.get("max_num_value_type", "union"),
                                       cat_smooth=default_config.get("category", {}).get("cat_smooth", 1.0))
| 11,631 | 51.633484 | 161 | py |
XFL | XFL-master/python/algorithm/framework/vertical/xgboost/decision_tree_label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
from pathos.pools import ThreadPool
from algorithm.core.encryption_param import PaillierParam, PlainParam
from algorithm.core.paillier_acceleration import embed, umbed
from algorithm.core.tree.big_feature import Feature
from algorithm.core.tree.feature_importance import FeatureImportance
from algorithm.core.tree.gain_calc import BestSplitInfo, cal_cat_rank, cal_gain, cal_weight
from algorithm.core.tree.goss import Goss
from algorithm.core.tree.tree_param import XGBTreeParam
from algorithm.core.tree.tree_structure import Node, SplitInfo, Tree
from algorithm.core.tree.xgboost_loss import get_xgb_loss_inst
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier, PaillierContext
from common.crypto.paillier.utils import get_core_num
from common.utils.constants import PAILLIER, PLAIN
from common.utils.logger import logger
from service.fed_config import FedConfig
from service.fed_control import ProgressCalculator
from .debug_params import EMBEDING
from service.fed_node import FedNode
class VerticalDecisionTreeLabelTrainer(object):
    def __init__(self,
                 tree_param: XGBTreeParam,
                 y: np.ndarray,
                 y_pred: np.ndarray,
                 features: pd.DataFrame,
                 cat_columns: list,
                 split_points: np.ndarray,
                 channels: Dict[str, Union[BroadcastChannel, DualChannel]],
                 encryption_context: Optional[PaillierContext] = None,
                 feature_id_mapping: Optional[Dict[int, int]] = None,
                 tree_index: Optional[int] = None):
        """Prepare one tree-building round on the label trainer.

        Computes per-sample gradients/hessians from (y, y_pred), optionally
        applies GOSS row sampling, broadcasts the (plain or Paillier
        encrypted) gradients/hessians to the other parties, and builds the
        local `Feature` table used for split finding.

        Args:
            tree_param: XGBoost tree hyper-parameters; only PLAIN and
                PAILLIER encryption is accepted.
            y: ground-truth labels.
            y_pred: current ensemble predictions (post-activation).
            features: this party's (binned) feature frame; may have 0 columns.
            cat_columns: indices of categorical feature columns.
            split_points: per-feature split points / category values.
            channels: named channels to the other parties.
            encryption_context: Paillier private context (PAILLIER only).
            feature_id_mapping: column-sampling map, local idx -> original idx.
            tree_index: index of the tree being built (logging only).

        Raises:
            ValueError: on an unsupported encryption method.
        """
        logger.info(
            f"Label trainer decision tree {tree_index} initialize start.")
        if tree_param.encryption_param.method not in [PAILLIER, PLAIN]:
            raise ValueError(
                f"Encryption method {tree_param.encryption_param.method} not supported.")
        self.tree_param = tree_param
        self.progress_calculator = ProgressCalculator(self.tree_param.num_trees, self.tree_param.max_depth)
        self.y = y
        self.y_pred = y_pred
        self.cat_columns = cat_columns
        self.split_points = split_points
        self.party_id = FedConfig.node_id
        self.max_num_cores = get_core_num(tree_param.max_num_cores)
        self.tree_index = tree_index
        loss_inst = get_xgb_loss_inst(list(self.tree_param.loss_param.keys())[0])
        self.grad = loss_inst.cal_grad(
            self.y, self.y_pred, after_prediction=True)
        if tree_param.run_goss:
            # GOSS: keep top-gradient rows plus a random sample of the rest;
            # hessians are only computed for the selected rows, the rest
            # stay zero, then gradients/hessians are reweighted.
            goss = Goss(tree_param.top_rate, tree_param.other_rate)
            self.goss_selected_idx = goss.sampling(self.grad)
            hess = loss_inst.cal_hess(
                self.y[self.goss_selected_idx], self.y_pred[self.goss_selected_idx], after_prediction=True)
            self.hess = np.zeros_like(self.grad)
            self.hess[self.goss_selected_idx] = hess
            goss.update_gradients(self.grad, self.hess)
        else:
            self.hess = loss_inst.cal_hess(self.y, self.y_pred, after_prediction=True)
            self.goss_selected_idx = range(self.y.shape[0])
        sample_index = self.goss_selected_idx
        # Channels: broadcast of per-sample grad/hess, tree-node broadcast,
        # and per-party dual channels for histograms / split decisions.
        self.individual_grad_hess: BroadcastChannel = channels["individual_grad_hess"]
        self.tree_node_chann: BroadcastChannel = channels["tree_node"]
        self.summed_grad_hess_channs: Dict[str,
                                           DualChannel] = channels["summed_grad_hess"]
        self.min_split_info_channs: Dict[str,
                                         DualChannel] = channels["min_split_info"]
        self.sample_index_after_split_channs: Dict[str,
                                                   DualChannel] = channels["sample_index_after_split"]
        encryption_param = self.tree_param.encryption_param
        self.pri_context = encryption_context
        self.feature_importance = {}
        self.feature_importance_type = tree_param.feature_importance_type
        self.feature_id_mapping = feature_id_mapping
        if isinstance(encryption_param, PlainParam):
            # Plain mode: ship raw grad/hess of the (sampled) rows.
            self.individual_grad_hess.broadcast(
                [self.grad[sample_index], self.hess[sample_index]], use_pickle=True)
        elif isinstance(encryption_param, PaillierParam):
            num_cores = self.max_num_cores if encryption_param.parallelize_on else 1
            if EMBEDING:
                # Pack grad and hess into one ciphertext per sample to halve
                # the number of Paillier encryptions.
                grad_hess = embed([self.grad[sample_index], self.hess[sample_index]], interval=(1 << 128), precision=64)
                enc_grad_hess = Paillier.encrypt(context=self.pri_context,
                                                 data=grad_hess,
                                                 precision=0,  # must be 0
                                                 obfuscation=True,
                                                 num_cores=num_cores)
                self.individual_grad_hess.broadcast(Paillier.serialize(enc_grad_hess, compression=False),
                                                    use_pickle=True)
            else:
                enc_grad = Paillier.encrypt(context=self.pri_context,
                                            data=self.grad[sample_index],
                                            precision=encryption_param.precision,
                                            obfuscation=True,
                                            num_cores=num_cores)
                enc_hess = Paillier.encrypt(context=self.pri_context,
                                            data=self.hess[sample_index],
                                            precision=encryption_param.precision,
                                            obfuscation=True,
                                            num_cores=num_cores)
                self.individual_grad_hess.broadcast(
                    [Paillier.serialize(enc_grad, compression=False), Paillier.serialize(enc_hess, compression=False)],
                    use_pickle=True)
        else:
            raise ValueError("Encryption param not supported.")
        if features.shape[1] == 0:
            # This party contributed no columns after sampling.
            self.big_feature = None
        else:
            self.big_feature = Feature.create(values=features.iloc[sample_index, :],
                                              sample_index=sample_index,
                                              grad=self.grad[sample_index],
                                              hess=self.hess[sample_index])
        logger.info(
            f"Label trainer decision tree {tree_index} initialize finished.")
    def _cal_local_best_split(self, node: Node):
        """Find the best split over this party's own features for `node`.

        Builds per-feature grad/hess histograms from the node's samples,
        scans cumulative sums for the max-gain split, and records split
        point / left categories, child sample indices and leaf weights.

        Args:
            node: tree node to split; ``node.sample_index`` selects its rows
                (None means all rows).

        Returns:
            BestSplitInfo for the best local split (gain stays -inf-like
            default when no valid split exists).
        """
        # NOTE(review): 'feature_ower' mirrors the (apparently misspelled)
        # BestSplitInfo parameter name — confirm against its definition.
        best_split_info = BestSplitInfo(feature_ower=self.party_id)
        if node.sample_index is None or len(node.sample_index) == self.big_feature.data.shape[0]:
            big_feature = self.big_feature
        else:
            big_feature = self.big_feature.slice_by_sample_index(node.sample_index)
        # One histogram per feature: grad/hess sums grouped by bin value.
        res_hist_list = []
        for col_name in big_feature.feature_columns:
            res_hist_list.append(big_feature.data.groupby([col_name])[['xfl_grad', 'xfl_hess']].agg({'sum'}))  # ({'count', 'sum'})
        # for categorial features, resort
        # cat column is count from the first col of cat feature
        for feature_idx in self.cat_columns:
            # Order categories by grad/hess ratio so a single threshold scan
            # covers categorical splits too.
            cat_rank = cal_cat_rank(res_hist_list[feature_idx][('xfl_grad', 'sum')],
                                    res_hist_list[feature_idx][('xfl_hess', 'sum')],
                                    self.tree_param.cat_smooth)
            cat_rank.sort_values(inplace=True)
            # index is saved in the Series's index
            res_hist_list[feature_idx] = res_hist_list[feature_idx].loc[cat_rank.index.to_list()]
        for feature_idx in range(len(res_hist_list)):
            # Prefix sums over bins; gain at i = gain of splitting after bin i.
            res_hist_list[feature_idx] = res_hist_list[feature_idx].cumsum(axis=0)
            res_hist_list[feature_idx].rename(columns={"sum": "cum_sum"}, inplace=True)
            cum_grad = res_hist_list[feature_idx][('xfl_grad', 'cum_sum')].to_numpy()
            cum_hess = res_hist_list[feature_idx][('xfl_hess', 'cum_sum')].to_numpy()
            gains = cal_gain(cum_grad, cum_hess, self.tree_param.lambda_)
            if len(gains) == 1 and gains[0] == -np.inf:
                # Single bin: nothing to split on for this feature.
                continue
            max_gain_index = np.argmax(gains)
            max_gain = gains[max_gain_index].item()
            if max_gain > best_split_info.gain:
                best_split_info.gain = max_gain
                best_split_info.feature_owner = self.party_id
                # Map the sampled column index back to the original one.
                best_split_info.feature_idx = self.feature_id_mapping[feature_idx].item()
                # For categorial feature, split_point stores categories in left child branch
                if feature_idx in self.cat_columns:
                    # It is not much precise if some categorial values are not be sampled
                    left_cat = res_hist_list[feature_idx].index.to_list()[:max_gain_index + 1]
                    best_split_info.left_cat = []
                    # Translate bin codes back to original category values;
                    # merged tail bins expand to a list of values.
                    for cat in left_cat:
                        ori_cat = self.split_points[feature_idx][cat]
                        if isinstance(ori_cat, list):
                            best_split_info.left_cat += ori_cat
                        else:
                            best_split_info.left_cat.append(ori_cat)
                    best_split_info.split_point = None
                    best_split_info.is_category = True
                    # +3 skips the bookkeeping columns (xfl_id, xfl_grad,
                    # xfl_hess) before the feature columns — TODO confirm.
                    filter = big_feature.data.iloc[:, feature_idx + 3].isin(left_cat)
                else:
                    # Because of sampling
                    max_split_index = int(res_hist_list[feature_idx][('xfl_grad', 'cum_sum')].index[max_gain_index])
                    max_split_index = min(max_split_index, len(self.split_points[feature_idx]) - 1)
                    best_split_info.split_point = self.split_points[feature_idx][max_split_index]
                    best_split_info.left_cat = None
                    best_split_info.is_category = False
                    filter = big_feature.data.iloc[:, feature_idx + 3] <= max_split_index
                best_split_info.left_sample_index = big_feature.data[filter]['xfl_id'].tolist()
                best_split_info.right_sample_index = big_feature.data[~filter]['xfl_id'].tolist()
                # Leaf weights from the left prefix and its complement.
                left_weight = cal_weight(cum_grad[max_gain_index],
                                         cum_hess[max_gain_index],
                                         self.tree_param.lambda_).item()
                right_weight = cal_weight(cum_grad[-1] - cum_grad[max_gain_index],
                                          cum_hess[-1] - cum_hess[max_gain_index],
                                          self.tree_param.lambda_).item()
                best_split_info.left_bin_weight = left_weight
                best_split_info.right_bin_weight = right_weight
                best_split_info.num_left_bin = len(best_split_info.left_sample_index)
                best_split_info.num_right_bin = len(best_split_info.right_sample_index)
                best_split_info.max_gain_index = max_gain_index  # only valid for continuous feature
        return best_split_info
    def _cal_remote_best_split(self) -> Dict[str, BestSplitInfo]:
        """Compute, for every remote (non-label) trainer, the best split found
        over that party's features.

        Repeatedly polls each party's channel for (possibly Paillier-encrypted)
        cumulative gradient/hessian histograms, decrypts them locally, scores
        every candidate split by gain and keeps the best one per party. A party
        signals completion via the ``is_continue`` flag in its last message.

        Returns:
            Dict[str, BestSplitInfo]: best split candidate keyed by party id.
        """
        # NOTE(review): "feature_ower" looks like a typo of "feature_owner" —
        # confirm against BestSplitInfo's constructor signature.
        best_split_info_dict: Dict[str, BestSplitInfo] = {
            party_id: BestSplitInfo(feature_ower=party_id) for party_id in self.summed_grad_hess_channs
        }
        # Per-party list of gain statistics, one entry per received feature.
        gain_infos: Dict[str, list] = {
            party_id: [] for party_id in self.summed_grad_hess_channs
        }
        # One flag per party: True while that party may still send histograms.
        is_continue_flags = np.array([True for party_id in self.summed_grad_hess_channs], dtype=bool)
        def decrypt_hist(hist_list: List[np.ndarray], num_cores: int, out_origin: bool = True) -> list:
            # Concatenate all per-feature histograms, decrypt them in one
            # parallel batch, then split the plaintexts back per feature.
            len_list = [len(item) for item in hist_list]
            cum_len = np.cumsum([0] + len_list)
            hist = np.concatenate(hist_list)
            hist = Paillier.decrypt(self.pri_context, hist, num_cores=num_cores, out_origin=out_origin)
            res = []
            for i in range(len(cum_len) - 1):
                res.append(hist[cum_len[i]: cum_len[i + 1]])
            return res
        while True:
            for i, party_id in enumerate(self.summed_grad_hess_channs):
                if not is_continue_flags[i]:
                    continue
                # Non-blocking receive: None means nothing has arrived yet.
                data = self.summed_grad_hess_channs[party_id].recv(use_pickle=True, wait=False)
                if data is None:
                    # Data has not been send, try it next round.
                    continue
                is_continue, grad_hess_hist_list, remote_cat_index = data
                if self.tree_param.encryption_param.method == PAILLIER:
                    if EMBEDING:
                        # Grad and hess are packed into one ciphertext:
                        # decrypt once, then unpack the two components.
                        grad_hess_hist = []
                        count_hist_list = []
                        for item in grad_hess_hist_list:
                            grad_hess_hist.append(item[0])
                            count_hist_list.append(item[1])
                        grad_hess_hist = decrypt_hist(grad_hess_hist, num_cores=self.max_num_cores, out_origin=True)
                        grad_hist_list = []
                        hess_hist_list = []
                        for hist in grad_hess_hist:
                            a, b = umbed(hist, num=2, interval=(1 << 128), precison=64)
                            grad_hist_list.append(a)
                            hess_hist_list.append(b)
                    else:
                        # Separate ciphertexts for grad and hess histograms.
                        grad_hist_list = []
                        hess_hist_list = []
                        count_hist_list = []
                        for item in grad_hess_hist_list:
                            grad_hist_list.append(item[0])
                            hess_hist_list.append(item[1])
                            count_hist_list.append(item[2])
                        grad_hist_list = decrypt_hist(grad_hist_list, num_cores=self.max_num_cores, out_origin=False)
                        hess_hist_list = decrypt_hist(hess_hist_list, num_cores=self.max_num_cores, out_origin=False)
                else:
                    # Plain (unencrypted) histograms.
                    grad_hist_list = []
                    hess_hist_list = []
                    count_hist_list = []
                    for item in grad_hess_hist_list:
                        grad_hist_list.append(item[0])
                        hess_hist_list.append(item[1])
                        count_hist_list.append(item[2])
                for idx in range(len(grad_hess_hist_list)):
                    grad_hist, hess_hist, count_hist = \
                        np.array(grad_hist_list[idx], dtype=np.float32), np.array(hess_hist_list[idx], dtype=np.float32), np.array(count_hist_list[idx])
                    # for categorial feature, resort
                    if idx in remote_cat_index:
                        cat_rank = cal_cat_rank(grad_hist, hess_hist, self.tree_param.cat_smooth)
                        cat_rank = np.argsort(cat_rank).tolist()
                        grad_hist = grad_hist[cat_rank]
                        hess_hist = hess_hist[cat_rank]
                        count_hist = count_hist[cat_rank]
                    else:
                        cat_rank = []
                    # Prefix sums turn per-bin stats into per-split-candidate stats.
                    cum_grad_hist = np.cumsum(grad_hist)
                    cum_hess_hist = np.cumsum(hess_hist)
                    gains = cal_gain(cum_grad_hist, cum_hess_hist, self.tree_param.lambda_)
                    max_gain_index = np.argmax(gains)
                    max_gain = gains[max_gain_index].item()
                    num_left_sample = np.sum(count_hist[:max_gain_index + 1])
                    num_right_sample = np.sum(count_hist[max_gain_index + 1:])
                    info = {
                        'max_gain': max_gain,
                        'cum_grad': cum_grad_hist,
                        'cum_hess': cum_hess_hist,
                        'max_gain_index': max_gain_index,
                        "is_category": idx in remote_cat_index,
                        'cat_rank': cat_rank,
                        'num_left_sample': num_left_sample,
                        'num_right_sample': num_right_sample
                    }
                    gain_infos[party_id].append(info)
                if not is_continue:
                    is_continue_flags[i] = is_continue
                    # No data will be send later, cal best_split_info
                    best_split_info: BestSplitInfo = best_split_info_dict[party_id]
                    for feature_idx, gain_info in enumerate(gain_infos[party_id]):
                        max_gain = gain_info["max_gain"]
                        cum_grad = gain_info["cum_grad"]
                        cum_hess = gain_info["cum_hess"]
                        max_gain_index = gain_info["max_gain_index"]
                        is_category = gain_info["is_category"]
                        cat_rank = gain_info["cat_rank"]
                        if max_gain > best_split_info.gain:
                            if len(cum_grad) == 1:
                                max_gain_split_index = 0
                            else:
                                max_gain_split_index = max_gain_index
                            # NOTE(review): this inner check duplicates the outer
                            # condition and is always true at this point.
                            if max_gain > best_split_info.gain:
                                best_split_info.gain = max_gain
                                best_split_info.feature_owner = party_id
                                best_split_info.feature_idx = feature_idx
                                best_split_info.split_point = None  # should not know
                                best_split_info.missing_value_on_left = None  # need not know
                                best_split_info.left_sample_index = None  # get it later
                                best_split_info.right_sample_index = None  # get it later
                                best_split_info.num_left_bin = gain_info["num_left_sample"]
                                best_split_info.num_right_bin = gain_info["num_right_sample"]
                                left_weight = cal_weight(cum_grad[max_gain_split_index],
                                                         cum_hess[max_gain_split_index],
                                                         self.tree_param.lambda_).item()
                                right_weight = cal_weight(cum_grad[-1] - cum_grad[max_gain_split_index],
                                                          cum_hess[-1] -
                                                          cum_hess[max_gain_split_index],
                                                          self.tree_param.lambda_).item()
                                best_split_info.left_bin_weight = left_weight
                                best_split_info.right_bin_weight = right_weight
                                best_split_info.max_gain_index = max_gain_index
                                # note this is not the final result of the left category
                                best_split_info.left_cat = [] if not cat_rank else cat_rank[:max_gain_index + 1]
                                best_split_info.is_category = is_category
            # Stop once every party has sent its final message.
            flag = np.any(is_continue_flags)
            if not flag:
                break
        gc.collect()
        return best_split_info_dict
def get_feature_importance(self):
return self.feature_importance
def update_feature_importance(self, split_info: SplitInfo):
inc_split, inc_gain = 1, split_info.gain
owner_id = split_info.owner_id
fid = split_info.feature_idx
owner_name = owner_id
for node_id in FedNode.config["trainer"]:
if owner_id == node_id:
owner_name = FedNode.config["trainer"][owner_id]["name"]
break
if (owner_name, fid) not in self.feature_importance:
self.feature_importance[(owner_name, fid)] = FeatureImportance(
0, 0, self.feature_importance_type)
self.feature_importance[(owner_name, fid)].add_split(inc_split)
if inc_gain is not None:
self.feature_importance[(owner_name, fid)].add_gain(inc_gain)
    def fit(self) -> Tree:
        """Train one decision tree level by level together with the remote trainers.

        For each node: broadcast the node, compute the local best split and
        collect remote best splits in parallel, pick the overall winner, tell
        every party whether it owns the winning split, then grow the tree.

        Returns:
            Tree: the trained tree (training info still attached).
        """
        tree = Tree(self.party_id, self.tree_index)
        # Two workers: one for the local split search, one for remote collection.
        thread_pool = ThreadPool(2)
        logger.info(f"Decision tree {self.tree_index} training start..")
        if self.tree_param.run_goss:
            # GOSS: the root only sees the sampled subset of rows.
            tree.root_node.sample_index = self.goss_selected_idx
        for depth in range(self.tree_param.max_depth):
            logger.info(f"Decision tree depth {depth} training start..")
            this_depth_nodes = tree.search_nodes(depth)
            for node in this_depth_nodes:
                logger.info(f"Depth {depth} - node {node.id} start training.")
                self.tree_node_chann.broadcast(node, use_pickle=True)
                best_split_info_dict: Dict[str, BestSplitInfo] = {}
                logger.info("Calculating local best split..")
                # big_feature is None when this party holds no features.
                if self.big_feature is not None:
                    res1 = thread_pool.apipe(self._cal_local_best_split, node)
                logger.info("Calculating remote best split..")
                res2 = thread_pool.apipe(self._cal_remote_best_split)
                if self.big_feature is not None:
                    best_split_info_dict[self.party_id] = res1.get()
                    logger.info("Calculating local best split done.")
                best_split_info_dict_remote = res2.get()
                logger.info("Calculating remote best split done.")
                best_split_info_dict.update(best_split_info_dict_remote)
                # Winner: party whose candidate split has the highest gain.
                party_ids = list(best_split_info_dict.keys())
                best_split_party_id = party_ids[
                    np.argmax(
                        [best_split_info_dict[party_id].gain for party_id in party_ids])
                ]
                best_split_info = best_split_info_dict[best_split_party_id]
                # Leave the node as a leaf if gain or child size is too small;
                # [-1, -1, -1] tells every party "no split here".
                if best_split_info.gain < self.tree_param.min_split_gain or \
                    min(best_split_info.num_left_bin,
                        best_split_info.num_right_bin) < self.tree_param.min_sample_split:
                    for party_id in self.min_split_info_channs:
                        self.min_split_info_channs[party_id].send([-1, -1, -1], use_pickle=True)
                    continue
                if best_split_info.feature_owner == self.party_id:
                    # Local split: nothing for remote parties to do.
                    for party_id in self.min_split_info_channs:
                        self.min_split_info_channs[party_id].send([-1, -1, -1], use_pickle=True)
                    split_info = SplitInfo(owner_id=best_split_info.feature_owner,
                                           feature_idx=best_split_info.feature_idx,
                                           is_category=best_split_info.is_category,
                                           split_point=best_split_info.split_point,
                                           left_cat=best_split_info.left_cat,
                                           gain=best_split_info.gain)
                else:
                    # Remote split: only the owner receives the split details
                    # and answers with the resulting left/right sample ids.
                    for party_id in self.min_split_info_channs:
                        if best_split_info.feature_owner == party_id:
                            self.min_split_info_channs[party_id].send(
                                [best_split_info.feature_idx, best_split_info.max_gain_index, best_split_info.left_cat],
                                use_pickle=True
                            )
                        else:
                            self.min_split_info_channs[party_id].send([-1, -1, -1], use_pickle=True)
                    split_info = SplitInfo(owner_id=best_split_info.feature_owner,
                                           feature_idx=best_split_info.feature_idx,
                                           gain=best_split_info.gain)
                    best_split_info.left_sample_index, best_split_info.right_sample_index = \
                        self.sample_index_after_split_channs[best_split_info.feature_owner].recv(
                            use_pickle=True)
                left_node_id, right_node_id = tree.split(node_id=node.id,
                                                         split_info=split_info,
                                                         left_sample_index=best_split_info.left_sample_index,
                                                         right_sample_index=best_split_info.right_sample_index,
                                                         left_sample_weight=best_split_info.left_bin_weight,
                                                         right_sample_weight=best_split_info.right_bin_weight)
                node.update_as_non_leaf(split_info=split_info,
                                        left_node_id=left_node_id,
                                        right_node_id=right_node_id)
                self.update_feature_importance(split_info)
                logger.info(f"Depth {depth} - node {node.id} finish training.")
            gc.collect()
            # calculate and update the progress of the training
            self.progress_calculator.cal_custom_progress(self.tree_index, depth+1)
        # A trailing None tells remote parties the tree is done.
        self.tree_node_chann.broadcast(None, use_pickle=True)
        logger.info(f"Decision tree {self.tree_index} training finished")
        return tree
| 26,246 | 50.974257 | 152 | py |
XFL | XFL-master/python/algorithm/framework/vertical/xgboost/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from pathlib import Path
from typing import Dict, Optional
import numpy as np
import pandas as pd
from pathos.pools import ThreadPool
from algorithm.core.data_io import NdarrayIterator
from algorithm.core.encryption_param import PaillierParam, PlainParam
from algorithm.core.tree.tree_structure import BoostingTree, Tree
from algorithm.core.tree.xgboost_loss import get_xgb_loss_inst
from common.checker.matcher import get_matched_config
from common.checker.x_types import All
from common.communication.gRPC.python.channel import BroadcastChannel, DualChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.algo_utils import earlyStopping
from common.utils.logger import logger
from common.utils.utils import save_model_config
from service.fed_config import FedConfig
from service.fed_node import FedNode
from .base import VerticalXgboostBase
from .decision_tree_label_trainer import VerticalDecisionTreeLabelTrainer
from service.fed_control import ProgressCalculator
from common.utils.model_io import ModelIO
class VerticalXgboostLabelTrainer(VerticalXgboostBase):
    def __init__(self, train_conf: dict, *args, **kwargs):
        """Set up the label-trainer side of vertical XGBoost.

        Synchronizes the shared part of the config to all trainers, opens every
        communication channel (broadcast and one dual channel per trainer),
        prepares the encryption context, the early-stopping helper and the
        model-export metadata.

        Args:
            train_conf (dict): full stage configuration for this trainer.
        """
        self.channels = dict()
        self.channels["sync"] = BroadcastChannel(name="sync")
        # Push shared hyperparameters to the other parties before base init.
        self._sync_config(train_conf)
        super().__init__(train_conf, is_label_trainer=True, *args, **kwargs)
        self.party_id = FedConfig.node_id
        # Broadcast channels shared with every trainer.
        self.channels["encryption_context"] = BroadcastChannel(
            name="encryption_context")
        self.channels["individual_grad_hess"] = BroadcastChannel(
            name="individual_grad_hess")
        self.channels["tree_node"] = BroadcastChannel(name="tree_node")
        self.channels["check_dataset_com"] = BroadcastChannel(
            name="check_dataset_com")
        # Point-to-point channels, one per remote trainer.
        summed_grad_hess_channs: Dict[str, DualChannel] = {}
        min_split_info_channs: Dict[str, DualChannel] = {}
        sample_index_after_split_channs: Dict[str, DualChannel] = {}
        val_com: Dict[str, DualChannel] = {}
        restart_com: Dict[str, DualChannel] = {}
        early_stop_com: Dict[str, DualChannel] = {}
        for party_id in FedConfig.get_trainer():
            summed_grad_hess_channs[party_id] = \
                DualChannel(name="summed_grad_hess_" + party_id,
                            ids=[FedConfig.node_id, party_id])
            min_split_info_channs[party_id] = \
                DualChannel(name="min_split_info_" + party_id,
                            ids=[FedConfig.node_id, party_id])
            sample_index_after_split_channs[party_id] = \
                DualChannel(name="sample_index_after_split_" +
                            party_id, ids=[FedConfig.node_id, party_id])
            val_com[party_id] = \
                DualChannel(name="val_com_" + party_id,
                            ids=[FedConfig.node_id, party_id])
            restart_com[party_id] = \
                DualChannel(name="restart_com_" + party_id,
                            ids=[FedConfig.node_id, party_id])
            early_stop_com[party_id] = \
                DualChannel(name="early_stop_com_" + party_id,
                            ids=[FedConfig.node_id, party_id])
        self.channels["summed_grad_hess"] = summed_grad_hess_channs
        self.channels["min_split_info"] = min_split_info_channs
        self.channels["sample_index_after_split"] = sample_index_after_split_channs
        self.channels["val_com"] = val_com
        self.channels["restart_com"] = restart_com
        self.channels["early_stop_com"] = early_stop_com
        # Encryption: plaintext needs no context; Paillier keeps the private
        # key local and broadcasts only the serialized public context.
        if isinstance(self.xgb_config.encryption_param, (PlainParam, type(None))):
            self.private_context = None
        elif isinstance(self.xgb_config.encryption_param, PaillierParam):
            self.private_context = Paillier.context(self.xgb_config.encryption_param.key_bit_size,
                                                    self.xgb_config.encryption_param.djn_on)
            self.public_context = self.private_context.to_public()
            self.channels["encryption_context"].broadcast(
                self.public_context.serialize(), use_pickle=False)
        else:
            raise TypeError(
                f"Encryption param type {type(self.xgb_config.encryption_param)} not valid.")
        self.es = earlyStopping(key=self.xgb_config.early_stopping_param["key"],
                                patience=self.xgb_config.early_stopping_param["patience"],
                                delta=self.xgb_config.early_stopping_param["delta"])
        # Best validation round seen so far (-1 until the first save).
        self.best_round = -1
        self.best_prediction_val = None
        self.best_prediction_train = None
        # Input schema excludes the label and id columns.
        if self.train_features is not None:
            input_schema = ','.join([_ for _ in self.train_features.columns if _ not in set(["y", "id"])])
        else:
            input_schema = ""
        self.export_conf = [{
            "class_name": "VerticalXGBooster",
            "identity": self.identity,
            "filename": self.output.get("proto_model", {}).get("name", ''),
            # "filename": self.output.get("proto_model", {"name": "vertical_xgboost_guest.pmodel"})["name"],
            "input_schema": input_schema,
            "version": '1.4.0'
        }]
def _sync_config(self, config):
sync_rule = {
"train_info": {
"interaction_params": All(),
"train_params": {
"lossfunc": All(),
"num_trees": All(),
"num_bins": All(),
"batch_size_val": All(),
"downsampling": {
"row": {
"run_goss": All()
}
},
"encryption": All()
}
}
}
config_to_sync = get_matched_config(config, sync_rule)
self.channels["sync"].broadcast(config_to_sync)
def fit(self):
f_names = self.channels["sync"].collect()
self.remote_f_names = {}
for name_dict in f_names:
self.remote_f_names.update(name_dict)
self.check_dataset()
boosting_tree = BoostingTree()
# train_y_pred_primitive, tree_list = np.zeros_like(self.train_label), []
train_y_pred_primitive = np.zeros_like(self.train_label)
val_y_pred_primitive = np.zeros_like(self.val_label)
loss_inst = get_xgb_loss_inst(
list(self.xgb_config.loss_param.keys())[0])
train_y_pred, val_y_pred = loss_inst.predict(
train_y_pred_primitive), loss_inst.predict(val_y_pred_primitive)
for tree_idx in range(1, self.xgb_config.num_trees+1):
logger.info("Tree {} start training.".format(tree_idx))
# 0: no need to restart, 1: restart, 2: max number of try reached
restart_status = 1
while True:
# train section
sampled_features, feature_id_mapping = self.col_sample()
cat_columns_after_sampling = list(filter(
lambda x: feature_id_mapping[x] in self.cat_columns, list(feature_id_mapping.keys())))
split_points_after_sampling = [
self.split_points[feature_id_mapping[k]] for k in feature_id_mapping.keys()]
trainer = VerticalDecisionTreeLabelTrainer(tree_param=self.xgb_config,
y=self.train_label,
y_pred=train_y_pred,
features=sampled_features,
cat_columns=cat_columns_after_sampling,
split_points=split_points_after_sampling,
channels=self.channels,
encryption_context=self.private_context,
feature_id_mapping=feature_id_mapping,
tree_index=tree_idx)
tree = trainer.fit()
if not tree.root_node.is_leaf:
restart_status = 0
else:
if self.xgb_config.early_stopping_param["patience"] <= 0:
# if not set patience, terminate immediately
restart_status = 2
else:
self.es.counter += 1
if self.es.counter == self.xgb_config.early_stopping_param["patience"]:
restart_status = 2
for party_id in FedConfig.get_trainer():
self.channels["restart_com"][party_id].send(restart_status)
if restart_status != 1:
break
logger.info(f"label trainer tree {tree_idx} training restart.")
if restart_status == 2:
logger.info("label trainer early stopped because a tree's root is leaf, best round: {}.".format(
self.best_round))
break
self.update_feature_importance(trainer.get_feature_importance())
if self.xgb_config.run_goss:
train_y_pred_primitive += self.predict_on_tree(
tree, self.train_dataset) * self.xgb_config.learning_rate
else:
for _, node in tree.nodes.items():
if node.is_leaf:
train_y_pred_primitive[node.sample_index] += node.weight * \
self.xgb_config.learning_rate
train_y_pred = loss_inst.predict(train_y_pred_primitive)
if self.interaction_params.get("echo_training_metrics"):
train_loss = loss_inst.cal_loss(
self.train_label, train_y_pred_primitive, after_prediction=False)
self._calc_metrics(self.train_label, train_y_pred, tree_idx, stage="train", loss={
loss_inst.name: train_loss})
tree.clear_training_info()
boosting_tree.append(tree=tree,
lr=self.xgb_config.learning_rate,
max_depth=self.xgb_config.max_depth)
logger.info("Tree {} training done.".format(tree_idx))
# validation section
logger.info("Validation on tree {} start.".format(tree_idx))
val_y_pred_primitive += self.predict_on_tree(
tree, self.val_dataset) * self.xgb_config.learning_rate
val_y_pred = loss_inst.predict(val_y_pred_primitive)
val_loss = loss_inst.cal_loss(
self.val_label, val_y_pred_primitive, after_prediction=False)
metric = self._calc_metrics(self.val_label, val_y_pred, tree_idx, stage="val",
loss={loss_inst.name: val_loss})
logger.info("Validation on tree {} done.".format(tree_idx))
if self.xgb_config.early_stopping_param["patience"] > 0:
early_stop_flag, save_flag = self.es(metric)
else:
early_stop_flag, save_flag = False, True
if save_flag:
# self.best_round = tree_idx + 1
self.best_round = tree_idx
self.best_prediction_train = copy.deepcopy(train_y_pred)
self.best_prediction_val = copy.deepcopy(val_y_pred)
for party_id in FedConfig.get_trainer():
self.channels["early_stop_com"][party_id].send(early_stop_flag)
if early_stop_flag:
logger.info(
"label trainer early stopped. best round: {}.".format(self.best_round))
break
# if self.interaction_params.get("save_frequency") > 0 and (tree_idx + 1) % self.interaction_params.get("save_frequency") == 0:
if self.interaction_params.get("save_frequency") > 0 and tree_idx % self.interaction_params.get("save_frequency") == 0:
# self.save(boosting_tree, epoch=tree_idx+1)
self.save(boosting_tree, epoch=tree_idx)
self._write_prediction(
# self.train_label, train_y_pred, self.train_ids, epoch=tree_idx + 1)
self.train_label, train_y_pred, self.train_ids, epoch=tree_idx)
self._write_prediction(
# self.val_label, val_y_pred, self.val_ids, epoch=tree_idx + 1, stage='val')
self.val_label, val_y_pred, self.val_ids, epoch=tree_idx, stage='val')
# add metrics during training for plot
self._write_loss(train_loss, val_loss, tree_idx)
# update the progress of 100 to show the training is finished
ProgressCalculator.finish_progress()
# model preserve
if self.xgb_config.early_stopping_param["patience"] <= 0:
self.best_round = len(boosting_tree)
self.best_prediction_train = copy.deepcopy(train_y_pred)
self.best_prediction_val = copy.deepcopy(val_y_pred)
else:
logger.info("num trees: %d, best: %d" % (len(boosting_tree), self.best_round))
if boosting_tree.trees:
logger.info("save")
# self.save(boosting_tree, final=True)
if self.best_round <= 0:
self.best_round = len(boosting_tree)
self.save(boosting_tree[:self.best_round], final=True)
logger.info('_write_prediction train')
self._write_prediction(
self.train_label, train_y_pred, self.train_ids, final=True)
logger.info('_write_prediction val')
self._write_prediction(
self.val_label, val_y_pred, self.val_ids, final=True, stage='val')
logger.info("Writing roc data...")
self._write_roc_data(
self.train_label, train_y_pred, self.val_label, val_y_pred)
logger.info("Writing ks data...")
self._write_ks_data(self.train_label, train_y_pred,
self.val_label, val_y_pred)
logger.info("Writing lift and gain data...")
self._write_lift_gain_data(
self.train_label, train_y_pred, self.val_label, val_y_pred
)
logger.info("Writing pr curve data...")
self._write_pr_data(
self.train_label, train_y_pred, self.val_label, val_y_pred)
self._write_feature_importance()
else:
logger.error("Model is none, ture off run_goss (false) and downsampling (1) please.")
raise SystemError(
"Model is none, ture off run_goss (false) and downsampling (1) please.")
def save(self, boosting_tree: BoostingTree, epoch: Optional[int] = None, final: bool = False):
if final:
save_model_config(stage_model_config=self.export_conf,
save_path=self.output.get("path"))
save_dir = self.output.get("path")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# dump out ks plot
suggest_threshold = 0.5
# if "ks" in self.xgb_config.metrics or "auc_ks" in self.xgb_config.metrics:
# # tc = ThresholdCutter(os.path.join(save_dir, "ks_plot_valid.csv"))
# tc = ThresholdCutter(os.path.join(
# save_dir, self.output.get("ks_plot_val")["name"]))
# # tc.cut_by_value(self.val_label, self.best_prediction_val)
# # suggest_threshold = float(tc.bst_threshold)
# # tc.save()
# if final:
# self.val_ks_metrics = tc.metrics
# self.val_ks_bst_threshold = tc.bst_threshold
# self.val_ks_bst_score = tc.bst_score
# if self.interaction_params.get("echo_training_metrics"):
# tc = ThresholdCutter(os.path.join(
# save_dir, self.output.get("ks_plot_train")["name"]))
# # tc.cut_by_value(self.train_label, self.best_prediction_train)
# # tc.save()
# if final:
# self.train_ks_metrics = tc.metrics
# self.train_ks_bst_threshold = tc.bst_threshold
# self.train_ks_bst_score = tc.bst_score
model_name = self.output.get("model", {}).get("name")
proto_name = self.output.get("proto_model", {}).get("name")
if model_name:
# model_dict = boosting_tree[:self.best_round].to_dict(
# suggest_threshold, compute_group=True)
model_dict = boosting_tree.to_dict(suggest_threshold, compute_group=True)
ModelIO.save_json_model(model_dict, save_dir, model_name, epoch=epoch, version='1.4.0')
if proto_name:
# TODO: temp
model_name_list = self.output.get("proto_model")["name"].split(".")
name_prefix, name_postfix = ".".join(
model_name_list[:-1]), model_name_list[-1]
if not final and epoch:
new_model_name = name_prefix + \
"_epoch_{}".format(epoch) + "." + name_postfix
else:
new_model_name = name_prefix + "." + name_postfix
model_path = os.path.join(save_dir, new_model_name)
# xgb_output = boosting_tree[:self.best_round].to_proto(
# suggest_threshold, compute_group=True)
xgb_output = boosting_tree.to_proto(suggest_threshold, compute_group=True)
with open(model_path, 'wb') as f:
f.write(xgb_output)
logger.info("model saved as: {}.".format(model_path))
self.make_readable_feature_importance(
os.path.join(save_dir, self.output.get("feature_importance")["name"]))
def make_readable_feature_importance(self, file_name):
with open(file_name, "w") as f:
f.write("owner_id,fid,importance\n")
normalizer = np.sum([_.get()
for _ in self.feature_importances_.values()])
for k, v in sorted(self.feature_importances_.items(), key=lambda d: d[1], reverse=True):
f.write("%s,%s,%.6g\n" % (k[0], k[1], v.get() / normalizer))
def _make_indicator_for_prediction(self, tree: Tree, feature: np.ndarray):
indicator = {}
for node_id, node in tree.nodes.items():
if not node.is_leaf and node.split_info.owner_id == self.party_id:
feature_idx = node.split_info.feature_idx
data = feature[:, feature_idx]
if node.split_info.is_category:
indicator[node_id] = np.isin(
data, node.split_info.left_cat)
else:
indicator[node_id] = (data <= node.split_info.split_point)
return indicator
def _gen_prediction(self, tree: Tree, indicator: Dict[str, np.ndarray], feature: np.ndarray):
prediction = np.zeros((len(feature),), dtype=np.float32)
depth = 0
sample_in_node = {}
while True:
node_list = tree.search_nodes(depth)
if not node_list:
break
for node in node_list:
if node.is_leaf:
prediction[sample_in_node[node.id]] = node.weight
else:
if depth == 0:
sample_in_node[node.left_node_id] = np.where(
indicator[node.id] == 1)[0]
sample_in_node[node.right_node_id] = np.where(
indicator[node.id] == 0)[0]
else:
sample_in_node[node.left_node_id] = np.intersect1d(
sample_in_node[node.id], np.where(indicator[node.id] == 1)[0])
sample_in_node[node.right_node_id] = np.intersect1d(
sample_in_node[node.id], np.where(indicator[node.id] == 0)[0])
depth += 1
return prediction
    def predict_on_tree(self, tree: Tree, data_iterator: NdarrayIterator) -> np.ndarray:
        """Predict leaf weights of one tree over an iterator of data batches.

        For each batch, local branch indicators are computed while remote
        parties' bit-packed indicators are received in parallel; the merged
        indicators are then used to route the batch through the tree.

        Returns:
            np.ndarray: per-sample leaf weights (float32).
        """
        prediction = np.zeros((len(data_iterator.data),), dtype=np.float32)
        indicator = {}
        def _update_local(tree, data):
            # Indicators for splits owned by this party.
            res = self._make_indicator_for_prediction(tree, data)
            return res
        def _update_remote(channel, len_data):
            # Remote indicators arrive bit-packed; unpack and trim padding bits.
            remote_indicator = channel.recv()
            res = {k: np.unpackbits(v)[:len_data]
                   for k, v in remote_indicator.items()}
            return res
        # One worker per remote party plus one for the local computation.
        thread_pool = ThreadPool(len(self.channels["val_com"]) + 1)
        for i, data in enumerate(data_iterator):
            indicator = {}
            threads = []
            threads.append(thread_pool.apipe(_update_local, tree, data))
            for party_id in self.channels["val_com"]:
                threads.append(thread_pool.apipe(
                    _update_remote, self.channels["val_com"][party_id], len(data)))
            for t in threads:
                indicator.update(t.get())
            prediction[i * data_iterator.bs: (
                i + 1) * data_iterator.bs] = self._gen_prediction(tree, indicator, data)
        return prediction
# Non-parallelized version
# def predict_on_tree(self, tree: Tree, data_iterator: NdarrayIterator) -> np.ndarray:
# prediction = np.zeros((len(data_iterator.data),), dtype=np.float32)
# for i, data in enumerate(data_iterator):
# indicator = {}
# indicator.update(self._make_indicator_for_prediction(tree, data))
# for party_id in self.channels["val_com"]:
# remote_indicator = self.channels["val_com"][party_id].recv()
# indicator.update({k: np.unpackbits(v)[:len(data)] for k, v in remote_indicator.items()})
# prediction[i * data_iterator.bs: (i + 1) * data_iterator.bs] = self._gen_prediction(tree, indicator, data)
# return prediction
    def predict_on_boosting_tree(self, boosting_tree: BoostingTree, data_iterator: NdarrayIterator) -> np.ndarray:
        """Predict the raw (pre-link) score of the whole boosting model.

        Like predict_on_tree, but indicators for all trees are gathered per
        batch, and each tree's contribution is scaled by its learning rate and
        accumulated.

        Returns:
            np.ndarray: per-sample raw scores (float32).
        """
        prediction = np.zeros((len(data_iterator.data),), dtype=np.float32)
        def _update_local(trees, data):
            # Indicators for splits owned by this party, over every tree.
            res = {}
            for tree in trees:
                res.update(self._make_indicator_for_prediction(tree, data))
            return res
        def _update_remote(channel, len_data):
            # Remote indicators arrive bit-packed; unpack and trim padding bits.
            remote_indicator = channel.recv()
            res = {k: np.unpackbits(v)[:len_data]
                   for k, v in remote_indicator.items()}
            return res
        # One worker per remote party plus one for the local computation.
        thread_pool = ThreadPool(len(self.channels["val_com"]) + 1)
        for i, data in enumerate(data_iterator):
            indicator = {}
            threads = []
            threads.append(thread_pool.apipe(
                _update_local, boosting_tree.trees, data))
            for party_id in self.channels["val_com"]:
                threads.append(thread_pool.apipe(
                    _update_remote, self.channels["val_com"][party_id], len(data)))
            for t in threads:
                indicator.update(t.get())
            for j, tree in enumerate(boosting_tree.trees):
                prediction[i * data_iterator.bs: (i + 1) * data_iterator.bs] += \
                    self._gen_prediction(
                        tree, indicator, data) * boosting_tree.lr[j]
        return prediction
# Non-parallelized version
# def predict_on_boosting_tree(self, boosting_tree: BoostingTree, data_iterator: NdarrayIterator) -> np.ndarray:
# prediction = np.zeros((len(data_iterator.data),), dtype=np.float32)
# for i, data in enumerate(data_iterator):
# indicator = {}
# for tree in boosting_tree.trees:
# indicator.update(self._make_indicator_for_prediction(tree, data))
# for party_id in self.channels["val_com"]:
# remote_indicator = self.channels["val_com"][party_id].recv()
# indicator.update({k: np.unpackbits(v)[:len(data)] for k, v in remote_indicator.items()})
# for j, tree in enumerate(boosting_tree.trees):
# prediction[i * data_iterator.bs: (i + 1) * data_iterator.bs] += \
# self._gen_prediction(tree, indicator, data) * boosting_tree.lr[j]
# return prediction
def update_feature_importance(self, tree_feature_importance):
for (owner_name, fid) in tree_feature_importance:
if owner_name == FedConfig.node_name:
f_name = self.train_names[fid]
else:
f_name = self.remote_f_names[owner_name][fid]
if (owner_name, f_name) not in self.feature_importances_:
self.feature_importances_[
(owner_name, f_name)] = tree_feature_importance[(owner_name, fid)]
else:
self.feature_importances_[
(owner_name, f_name)] += tree_feature_importance[(owner_name, fid)]
# if (owner_id, fid) not in self.feature_importances_:
# self.feature_importances_[
# (owner_id, fid)] = tree_feature_importance[(owner_id, fid)]
# else:
# self.feature_importances_[
# (owner_id, fid)] += tree_feature_importance[(owner_id, fid)]
logger.debug("cur feature importance {}".format(
self.feature_importances_))
def load_model(self):
pretrain_path = self.input.get("pretrained_model", {}).get("path", '')
model_name = self.input.get("pretrained_model", {}).get("name", '')
model_path = Path(
pretrain_path, model_name
)
suffix = model_name.split(".")[-1]
if suffix != "pmodel":
model_dict = ModelIO.load_json_model(model_path)
boosting_tree = BoostingTree.from_dict(model_dict)
else:
with open(model_path, 'rb') as f:
byte_str = f.read()
boosting_tree = BoostingTree.from_proto(byte_str)
return boosting_tree
def check_dataset(self):
shapes = self.channels["check_dataset_com"].collect()
if self.train_dataset is not None:
m = len(self.train_ids)
n = len(self.train_features.columns)
for d in shapes:
if d["train"][0] != m:
raise ValueError(
"Lengths of the train set mismatched: %d, %d." % (d["train"][0], m))
n += d["train"][1]
if n <= 0:
raise ValueError(
"Number of the feature is zero. Stop training.")
if self.val_dataset is not None:
m = len(self.val_ids)
n = len(self.val_features.columns)
for d in shapes:
if d["valid"][0] != m:
raise ValueError(
"Lengths of the valid set mismatched: %d, %d." % (d["valid"][0], m))
n += d["valid"][1]
if n <= 0:
raise ValueError(
"Number of the feature is zero. Stop training.")
if self.test_dataset is not None:
m = len(self.test_ids)
n = len(self.test_features.columns)
for d in shapes:
if d["test"][0] != m:
raise ValueError(
"Lengths of the test set mismatched: %d, %d." % (d["test"][0], m))
n += d["test"][1]
if n <= 0:
raise ValueError(
"Number of the feature is zero. Stop predicting.")
else:
if len(shapes) > 0 and "test" in shapes[0]:
m = shapes[0]["test"][0]
n = 0
for d in shapes:
if d["test"][0] != m:
raise ValueError("Lengths of the test set mismatched.")
n += d["test"][1]
if n <= 0:
raise ValueError(
"Number of the feature is zero. Stop predicting.")
else:
self.test_dataset = NdarrayIterator(
np.zeros((m, 0)), self.bs)
self.test_ids = np.arange(m)
    def predict(self):
        """Run batch inference with a pretrained model and distribute results.

        Loads the model, predicts over the test iterator (jointly with remote
        trainers), fills each trainer's requested output keys with the
        predictions, scatters them back, and optionally writes an id/pred CSV
        to the configured output path.
        """
        # Each trainer sends the dict of output keys it wants filled.
        out_dict_list = self.channels["sync"].collect()
        self.check_dataset()
        boosting_tree = self.load_model()
        test_y_pred_primitive = self.predict_on_boosting_tree(boosting_tree=boosting_tree,
                                                              data_iterator=self.test_dataset)
        # Apply the loss's link function to the raw scores.
        loss_inst = get_xgb_loss_inst(boosting_tree.loss_method)
        test_y_pred = loss_inst.predict(test_y_pred_primitive)
        output = {
            "testset": test_y_pred
        }
        for out_keys_dict in out_dict_list:
            for key in out_keys_dict:
                if key in output:
                    out_keys_dict[key] = output["testset"]
        self.channels["sync"].scatter(out_dict_list)
        save_path = self.output.get("path", '')
        if save_path:
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            file_path = Path(save_path, self.output.get("testset", {}).get("name", ''))
            if file_path:
                logger.info("predicted results saved at {}".format(file_path))
                pd.DataFrame({"id": self.test_ids, "pred": test_y_pred}).to_csv(
                    file_path, float_format="%.6g", index=False, header=True
                )
| 30,579 | 44.57377 | 139 | py |
XFL | XFL-master/python/algorithm/framework/vertical/logistic_regression/base.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
from pathlib import Path
from collections import OrderedDict
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from google.protobuf import json_format
from algorithm.framework.vertical.vertical_model_base import VerticalModelBase
from common.utils.model_io import ModelIO
from common.utils.logger import logger
from common.model.python.linear_model_pb2 import LinearModel
BLOCKCHAIN = False
class VerticalLogisticRegression(nn.Module):
    """Linear scoring head used by the vertical logistic-regression parties.

    A single ``Linear(input_dim, 1)`` layer whose parameters have autograd
    disabled — weight updates are applied manually by the trainers, not by
    an optimizer. The forward pass returns the raw linear score; the caller
    applies the sigmoid after summing partial scores across parties.
    """

    def __init__(self, input_dim: int, bias: bool = False):
        super().__init__()
        # Gradients are computed and applied by hand, so freeze the layer.
        self.linear = torch.nn.Linear(input_dim, 1, bias=bias)
        self.linear.requires_grad_(False)

    def forward(self, x):
        """Return the raw (pre-sigmoid) linear score for *x*."""
        return self.linear(x)
class VerticalLogisticRegressionBase(VerticalModelBase):
    """Common base for both roles of vertical logistic regression.

    Handles config parsing, CSV dataset loading into torch DataLoaders and
    construction of the autograd-disabled linear model, plus (de)serialization
    of the model as a LinearModel protobuf.
    """
    def __init__(self, train_conf: dict, label: bool = False, *args, **kwargs):
        """Parse the stage config and build the train/val dataloaders.

        Args:
            train_conf (dict): full training configuration for this stage.
            label (bool, optional): True when this party holds the label
                column "y" in its datasets. Defaults to False.
        """
        super().__init__(train_conf)
        self._parse_config()
        self.train_conf = train_conf
        self.model_conf = train_conf["model_info"].get("config")
        self.label = label
        self.schema = None
        self.data_dim = None
        self.model = None
        # NOTE(review): _init_dataloader assigns self.val_dataloader, not
        # eval_dataloader — this attribute appears unused; confirm.
        self.train_dataloader, self.eval_dataloader = None, None
        self.loss_function = None
        self.metric_functions = {}
        self._init_dataloader()
    def _parse_config(self) -> None:
        # Extract the algorithm-specific fields on top of the parent parsing.
        super()._parse_config()
        self.model_name = self.model_info.get("name")
        self.save_model_name = self.output.get("model", {}).get("name", "")
        self.save_onnx_model_name = self.output.get("onnx_model", {}).get("name", "")
        self.evaluation_path = self.save_dir
        self.global_epoch = self.train_params.get("global_epoch")
        self.batch_size = self.train_params.get("batch_size")
        self.encryption_config = self.train_params.get("encryption")
        self.optimizer_config = self.train_params.get("optimizer")
        self.pretrain_model_path = self.input.get("pretrained_model", {}).get("path")
        self.pretrain_model_name = self.input.get("pretrained_model", {}).get("name")
        self.random_seed = self.train_params.get("random_seed")
        self.early_stopping_config = self.train_params.get("early_stopping")
        self.save_frequency = self.interaction_params.get("save_frequency")
        self.save_probabilities = self.interaction_params.get("save_probabilities")
    @staticmethod
    def set_seed(seed):
        # Seed both the CPU and CUDA torch RNGs for reproducibility.
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
    def _init_model(self, bias: bool = False) -> None:
        """
        Init logistic regression model and optionally load pretrained weights.

        Args:
            bias (bool, optional): whether the linear layer has a bias term
                (the label trainer passes True, trainers use the default).

        Returns: None
        """
        logger.info("Init model start.")
        self.model = VerticalLogisticRegression(input_dim=self.data_dim, bias=bias)
        # Load pretrained model if needed.
        # Only the torch ".model" format is supported; the proto ".pmodel"
        # path is currently disabled (commented out below).
        if self.pretrain_model_path is not None and self.pretrain_model_path != "":
            if self.pretrain_model_name.split(".")[-1] == "model":
                model_dict = ModelIO.load_torch_model(os.path.join(self.pretrain_model_path, self.pretrain_model_name))
                self.model.load_state_dict(model_dict["state_dict"])
            # elif self.pretrain_model_name.split(".")[-1] == "pmodel":
            #     checkpoint = self.load_from_proto(os.path.join(self.pretrain_model_path, self.pretrain_model_name))
            #     self.model.load_state_dict(checkpoint)
            else:
                raise NotImplementedError(
                    "Pretrained model {} does not support.".format(self.pretrain_model_name)
                )
        logger.info("Init model completed.")
    def _init_dataloader(self) -> None:
        """
        Load train/val CSV files and build torch DataLoaders.

        Sets self.schema, self.data_dim, self.train_f_names,
        self.train_dataloader and self.val_dataloader. When self.label is
        True each batch yields (features, label, id); otherwise
        (features, id).
        """
        logger.info("Init validation dataloader start.")
        df_list = []
        # Check file exists.
        for ts in self.input_trainset:
            file_path = os.path.join(ts.get("path"), ts.get("name"))
            if not os.path.exists(file_path):
                raise FileNotFoundError("File {} cannot be found.".format(file_path))
            if ts.get("type") == "csv":
                if ts.get("has_id"):
                    df_list.append(pd.read_csv(file_path, index_col=0))
                else:
                    df_list.append(pd.read_csv(file_path))
            else:
                raise NotImplementedError(
                    "LDataset load method {} does not Implemented.".format(ts.get("type"))
                )
        node_train_df = pd.concat(df_list)
        df_list = []
        for vs in self.input_valset:
            file_path = os.path.join(vs.get("path"), vs.get("name"))
            if not os.path.exists(file_path):
                raise FileNotFoundError("File {} cannot be found.".format(file_path))
            if vs.get("type") == "csv":
                if vs.get("has_id"):
                    df_list.append(pd.read_csv(file_path, index_col=0))
                else:
                    df_list.append(pd.read_csv(file_path))
            else:
                raise NotImplementedError(
                    "Dataset load method {} does not Implemented.".format(vs.get("type"))
                )
        node_val_df = pd.concat(df_list)
        # Feature schema: all columns except the label and id.
        self.schema = ','.join([_ for _ in node_train_df.columns if _ not in set(["y", "id"])])
        # Non-numeric (object) indices are replaced by positional ids.
        if node_train_df.index.dtype == 'O':
            node_train_df = node_train_df.reset_index(drop=True)
        if node_val_df.index.dtype == 'O':
            node_val_df = node_val_df.reset_index(drop=True)
        if self.label:
            # Check column y exists.
            if "y" not in node_train_df.columns:
                raise KeyError("Cannot found column y in train set.")
            if "y" not in node_val_df.columns:
                raise KeyError("Cannot found column y in val set.")
            node_train_id = node_train_df.index.to_list()
            node_train_label = node_train_df["y"].values  # .tolist()
            node_train_data = node_train_df.drop(labels=["y"], axis=1).values  # .tolist()
            assert len(node_train_label) == len(node_train_data)
            node_val_id = node_val_df.index.to_list()
            node_val_label = node_val_df["y"].values  # .tolist()
            node_val_data = node_val_df.drop(labels=["y"], axis=1).values  # .tolist()
            assert len(node_val_label) == len(node_val_data)
            self.train_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(node_train_data, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(node_train_label), dim=-1),
                                      torch.unsqueeze(torch.tensor(node_train_id), dim=-1)),
                batch_size=self.batch_size, shuffle=True
            )
            # Skip the leading "y" column when recording feature names.
            self.train_f_names = node_val_df.columns.tolist()[1:]
            self.val_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(node_val_data, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(node_val_label), dim=-1),
                                      torch.unsqueeze(torch.tensor(node_val_id), dim=-1)),
                batch_size=self.batch_size, shuffle=False
            )
            self.data_dim = torch.tensor(node_train_data).shape[-1]
            logger.info("Data shape: {}.".format(list(torch.tensor(node_train_data).shape)))
        else:
            node_train_id = node_train_df.index.to_list()
            node_train_data = node_train_df.values.tolist()
            node_val_id = node_val_df.index.to_list()
            node_val_data = node_val_df.values.tolist()
            self.train_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(node_train_data, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(node_train_id), dim=-1)),
                batch_size=self.batch_size, shuffle=True
            )
            self.train_f_names = node_val_df.columns.tolist()
            self.val_dataloader = DataLoader(
                dataset=TensorDataset(torch.tensor(node_val_data, dtype=torch.float32),
                                      torch.unsqueeze(torch.tensor(node_val_id), dim=-1)),
                batch_size=self.batch_size, shuffle=False
            )
            self.data_dim = torch.tensor(node_train_data).shape[-1]
            logger.info("Data shape: {}.".format(list(torch.tensor(node_train_data).shape)))
        logger.info("Init dataloader completed.")
    # unused
    @staticmethod
    def load_from_proto(path: str):
        """Deserialize a LinearModel protobuf file into a torch state dict."""
        with open(path, 'rb') as f:
            b = f.read()
        lr = LinearModel()
        lr.ParseFromString(b)
        d = json_format.MessageToDict(lr,
                                      including_default_value_fields=True,
                                      preserving_proto_field_name=True)
        state_dict = OrderedDict()
        for k, v in d.items():
            state_dict[k] = torch.Tensor([v])
        return state_dict
    @staticmethod
    def dump_as_proto(save_dir: str,
                      model_name: str,
                      state_dict: OrderedDict,
                      epoch: int = None,
                      final: bool = False,
                      suggest_threshold: float = None
                      ):
        """Serialize a model state dict into a LinearModel protobuf file.

        Non-final epoch checkpoints get "_epoch_<n>" appended to the file
        name stem; the final model keeps the plain name.
        """
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        json_dict = dict()
        for k, v in state_dict.items():
            if isinstance(v, torch.Tensor):
                # Strip the "linear." prefix so keys match the proto fields.
                json_dict[k.replace("linear.", "")] = v.tolist()[0]
        model_info = {"state_dict": json_dict}
        if suggest_threshold:
            model_info["suggest_threshold"] = suggest_threshold
        model_name_list = model_name.split(".")
        name_prefix, name_postfix = ".".join(model_name_list[:-1]), model_name_list[-1]
        if not final and epoch:
            model_name = name_prefix + "_epoch_{}".format(epoch) + "." + name_postfix
        else:
            model_name = name_prefix + "." + name_postfix
        model_path = os.path.join(save_dir, model_name)
        lr = LinearModel()
        json_format.ParseDict(model_info, lr)
        with open(model_path, 'wb') as f:
            f.write(lr.SerializeToString())
        logger.info("model saved as: {}.".format(model_path))
        return
| 11,213 | 40.227941 | 119 | py |
XFL | XFL-master/python/algorithm/framework/vertical/logistic_regression/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import hashlib
import os
from pathlib import Path
import numpy as np
import pandas as pd
import tenseal as ts
import torch
from sklearn.metrics import confusion_matrix
import random
import pickle
from common.checker.matcher import get_matched_config
from common.checker.x_types import All
from common.communication.gRPC.python.channel import BroadcastChannel
from common.crypto.paillier.paillier import Paillier
from common.evaluation.metrics import ThresholdCutter
from common.utils.algo_utils import earlyStopping
from common.utils.logger import logger
from common.utils.model_io import ModelIO
from common.utils.utils import save_model_config
from service.fed_node import FedNode
from service.fed_control import ProgressCalculator
from .base import VerticalLogisticRegressionBase
from .base import BLOCKCHAIN
class VerticalLogisticRegressionLabelTrainer(VerticalLogisticRegressionBase):
    """Label-trainer (active party) of vertical logistic regression.

    Holds the label column, shares config and the random seed with trainers,
    distributes encryption keys, aggregates partial predictions, decrypts
    trainers' gradients and drives early stopping / model saving.
    """
    def __init__(self, train_conf: dict, *args, **kwargs):
        # The config subset must be shared with the trainers before the base
        # class parses it, so all parties train with consistent params.
        self.sync_channel = BroadcastChannel(name="sync")
        self._sync_config(train_conf)
        super().__init__(train_conf, label=True, *args, **kwargs)
        if self.random_seed is None:
            self.random_seed = random.randint(-(1 << 32), 1 << 32)
        # Broadcast the (possibly freshly drawn) seed to the trainers.
        self.sync_channel.broadcast(self.random_seed)
        if BLOCKCHAIN:
            logger.debug(
                f"Broadcast random seed, SHA256: {hashlib.sha256(pickle.dumps(self.random_seed)).hexdigest()}")
        self.set_seed(self.random_seed)
        self.progress_calculator = ProgressCalculator(self.global_epoch, len(self.train_dataloader))
        # Only the label trainer's linear layer carries the bias term.
        self._init_model(bias=True)
        self.export_conf = [{
            "class_name": "VerticalLogisticRegression",
            "identity": self.identity,
            "filename": self.save_onnx_model_name,
            "input_dim": self.data_dim,
            "bias": True,
            "version": "1.4.0",
            "input_schema": self.schema,
        }]
        self.es = earlyStopping(key=self.early_stopping_config["key"],
                                patience=self.early_stopping_config["patience"],
                                delta=self.early_stopping_config["delta"])
        self.best_model = None
        self.best_prediction_val = None
        self.best_prediction_train = None
    def _sync_config(self, config):
        """Broadcast the config subset the trainers must share (epochs,
        batch size, encryption, optimizer, early stopping, seed)."""
        sync_rule = {
            "train_info": {
                "interaction_params": All(),
                "train_params": {
                    "global_epoch": All(),
                    "batch_size": All(),
                    "encryption": All(),
                    "optimizer": All(),
                    "early_stopping": All(),
                    "random_seed": All()
                }
            }
        }
        config_to_sync = get_matched_config(config, sync_rule)
        self.sync_channel.broadcast(config_to_sync)
        if BLOCKCHAIN:
            logger.debug(
                f"Sync config, SHA256: {hashlib.sha256(pickle.dumps(config_to_sync)).hexdigest()}")
    def fit(self):
        """Run the encrypted vertical-LR training loop.

        Per batch: aggregate partial scores collected from trainers, compute
        the (optionally encrypted) residual, broadcast it, then decrypt and
        scatter back the trainers' gradients before updating local weights.
        After each epoch a validation pass, metric logging, early stopping
        and optional checkpointing are performed.
        """
        self.check_data()
        logger.debug("Vertical logistic regression training start")
        broadcast_channel = BroadcastChannel(
            name="vertical_logistic_regression_channel")
        encryption_config = self.encryption_config
        # encryption_method = encryption_config["method"].lower()
        encryption_method = list(self.encryption_config.keys())[0].lower()
        private_context = None
        num_cores = -1
        pred_prob_list, y_list = [], []
        if encryption_method == "ckks":
            private_context = ts.context(
                ts.SCHEME_TYPE.CKKS,
                poly_modulus_degree=encryption_config[encryption_method]["poly_modulus_degree"],
                coeff_mod_bit_sizes=encryption_config[encryption_method]["coeff_mod_bit_sizes"]
            )
            private_context.generate_galois_keys()
            private_context.generate_relin_keys()
            private_context.global_scale = 1 << encryption_config[
                encryption_method]["global_scale_bit_size"]
            # Serialize the public part only: trainers encrypt, never decrypt.
            serialized_public_context = private_context.serialize(
                save_public_key=True,
                save_secret_key=False,
                save_galois_keys=True,
                save_relin_keys=True
            )
            logger.debug("Broadcast ckks public keys.")
            broadcast_channel.broadcast(
                serialized_public_context, use_pickle=False)
            if BLOCKCHAIN:
                logger.debug(
                    f"SHA256: {hashlib.sha256(serialized_public_context).hexdigest()}")
            logger.debug("Broadcast completed.")
        elif encryption_method == "paillier":
            num_cores = - \
                1 if encryption_config[encryption_method]["parallelize_on"] else 1
            private_context = Paillier.context(encryption_config[encryption_method]["key_bit_size"],
                                               djn_on=encryption_config[encryption_method]["djn_on"])
            logger.debug("Broadcast paillier public keys.")
            serialized_public_context = private_context.to_public().serialize()
            broadcast_channel.broadcast(
                serialized_public_context, use_pickle=False)
            if BLOCKCHAIN:
                logger.debug(
                    f"SHA256: {hashlib.sha256(serialized_public_context).hexdigest()}")
            logger.debug("Broadcast completed.")
        elif encryption_method == "plain":
            pass
        else:
            raise ValueError(f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', "
                             f"'ckks', 'plain'.")
        loss_func = torch.nn.BCELoss()
        for epoch in range(1, self.global_epoch + 1):
            training_cm = np.zeros((2, 2))
            training_pred_prob_list, training_y_list, training_metric = [], [], {}
            for batch_idx, (x_batch, y_batch, _) in enumerate(self.train_dataloader):
                x_batch = x_batch.to(self.device)
                y_batch = y_batch.to(self.device)
                # compute theta_scheduler * x_scheduler
                pred_label_trainer = self.model(x_batch)
                # collect predict result from trainers.
                pred_trainer_list = broadcast_channel.collect()
                logger.debug("Received predictions from trainers, length of collect list is {}."
                             .format(len(pred_trainer_list)))
                if BLOCKCHAIN:
                    logger.debug(
                        f"SHA256: {hashlib.sha256(pickle.dumps(pred_trainer_list)).hexdigest()}")
                # Add predictions.
                pred_total = torch.clone(pred_label_trainer)
                for pred_trainer in pred_trainer_list:
                    pred_total += pred_trainer
                pred_total = torch.sigmoid(pred_total)
                logger.debug("Aggregated predictions.")
                # Calculate gradients.
                # Residual y - sigmoid(score) is what each party needs for its
                # gradient; it is sent encrypted unless method is "plain".
                pred_residual = y_batch - pred_total
                if encryption_method == "ckks":
                    enc_pred_residual = ts.ckks_vector(
                        private_context, pred_residual.numpy().flatten())
                    serialized_enc_pred_residual = enc_pred_residual.serialize()
                    broadcast_channel.broadcast(
                        serialized_enc_pred_residual, use_pickle=False)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Broadcast encrypted pred residual, SHA256: {hashlib.sha256(serialized_enc_pred_residual).hexdigest()}")
                elif encryption_method == "paillier":
                    enc_pred_residual = Paillier.encrypt(private_context,
                                                         pred_residual.numpy().astype(np.float32).flatten(),
                                                         precision=encryption_config[encryption_method]["precision"],
                                                         obfuscation=True,
                                                         num_cores=num_cores)
                    serialized_enc_pred_residual = Paillier.serialize(
                        enc_pred_residual)
                    broadcast_channel.broadcast(
                        serialized_enc_pred_residual, use_pickle=False)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Broadcast encrypted pred residual, SHA256: {hashlib.sha256(serialized_enc_pred_residual).hexdigest()}")
                elif encryption_method == "plain":
                    broadcast_channel.broadcast(pred_residual)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Broadcast pred residual, SHA256: {hashlib.sha256(pickle.dumps(pred_residual)).hexdigest()}")
                training_pred_prob_list += torch.squeeze(
                    pred_total, dim=-1).tolist()
                training_y_list += torch.squeeze(y_batch, dim=-1).tolist()
                if self.echo_training_metrics:
                    pred_total = (pred_total > 0.5).float()
                    training_cm += confusion_matrix(
                        y_true=y_batch.detach().numpy(), y_pred=pred_total.detach().numpy())
                # Gradients for label trainer.
                # p selects the regularizer: 1 = L1, 2 = L2, 0 = none.
                logger.debug("Calculate gradients for label trainer.")
                if self.optimizer_config['p'] == 1:
                    # NOTE(review): |w|/w equals sign(w) but divides by zero
                    # for exactly-zero weights — confirm intended.
                    gradient_label_trainer_linear = -torch.mm(pred_residual.t(), x_batch) / x_batch.shape[0] + (
                        self.optimizer_config['alpha'] * (torch.abs(self.model.linear.weight)
                                                          / self.model.linear.weight)
                    ) / x_batch.shape[0]
                elif self.optimizer_config['p'] == 2:
                    gradient_label_trainer_linear = -torch.mm(pred_residual.t(), x_batch) / x_batch.shape[0] + (
                        2 * self.optimizer_config['alpha'] * self.model.linear.weight) / x_batch.shape[0]
                elif self.optimizer_config['p'] == 0:
                    gradient_label_trainer_linear = - \
                        torch.mm(pred_residual.t(), x_batch) / x_batch.shape[0]
                else:
                    raise NotImplementedError(
                        "Regular P={} not implement.".format(self.optimizer_config['p']))
                gradient_label_trainer_bias = -torch.mean(pred_residual, dim=0)
                gradient_label_trainer_linear = gradient_label_trainer_linear.t()
                # Collect trainers noise gradients, decrypt and broadcast.
                if encryption_method == "ckks":
                    gradient_list_trainer_linear = broadcast_channel.collect(
                        use_pickle=False)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Collect gradient list, SHA256: {hashlib.sha256(pickle.dumps(gradient_list_trainer_linear)).hexdigest()}")
                    gradient_list_trainer_linear = [ts.ckks_vector_from(private_context, i).decrypt() for i in
                                                    gradient_list_trainer_linear]
                    broadcast_channel.scatter(gradient_list_trainer_linear)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Scatter gradient, SHA256: {hashlib.sha256(pickle.dumps(gradient_list_trainer_linear)).hexdigest()}")
                elif encryption_method == "paillier":
                    gradient_list_trainer_linear = broadcast_channel.collect(
                        use_pickle=False)
                    if BLOCKCHAIN:
                        # NOTE(review): log text "Collect random seed" looks
                        # copy-pasted; this collects encrypted gradients.
                        logger.debug(
                            f"Collect random seed, SHA256: {hashlib.sha256(pickle.dumps(gradient_list_trainer_linear)).hexdigest()}")
                    gradient_list_trainer_linear = [
                        Paillier.decrypt(private_context, Paillier.ciphertext_from(None, c), dtype='float',
                                         num_cores=num_cores) for c in gradient_list_trainer_linear]
                    broadcast_channel.scatter(gradient_list_trainer_linear)
                    if BLOCKCHAIN:
                        logger.debug(
                            f"Scatter gradient, SHA256: {hashlib.sha256(pickle.dumps(gradient_list_trainer_linear)).hexdigest()}")
                elif encryption_method == "plain":
                    pass
                # Manual SGD step (the model parameters have autograd off).
                self.model.linear.weight -= (gradient_label_trainer_linear *
                                             self.optimizer_config["lr"]).t()
                self.model.linear.bias -= (gradient_label_trainer_bias *
                                           self.optimizer_config["lr"]).t()
                logger.debug("Weights update completed.")
                # calculate and update the progress of the training
                self.progress_calculator.cal_custom_progress(epoch, batch_idx+1)
            train_loss = loss_func(
                torch.tensor(training_pred_prob_list, dtype=torch.float32),
                torch.tensor(training_y_list, dtype=torch.float32)
            ).detach().item()
            self._calc_metrics(np.array(training_y_list, dtype=float), np.array(
                training_pred_prob_list), epoch)
            # Validation step should be added here.
            cm = np.zeros((2, 2))
            pred_prob_list, y_list = [], []
            for batch_idx, (x_batch, y_batch, _) in enumerate(self.val_dataloader):
                x_batch = x_batch.to(self.device)
                y_batch = y_batch.to(self.device)
                pred_label_trainer = self.model(x_batch)
                pred_trainer_list = broadcast_channel.collect()
                if BLOCKCHAIN:
                    logger.debug(
                        f"Collect pred, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer_list)).hexdigest()}")
                # Add predictions.
                pred_total = torch.clone(pred_label_trainer)
                for pred_trainer in pred_trainer_list:
                    pred_total += pred_trainer
                pred_total = torch.sigmoid(pred_total)
                pred_prob_list += torch.squeeze(pred_total, dim=-1).tolist()
                y_list += torch.squeeze(y_batch, dim=-1).tolist()
                pred_total = (pred_total > 0.5).float()
                cm += confusion_matrix(y_true=y_batch.detach().numpy(),
                                       y_pred=pred_total.detach().numpy())
            metric = self._calc_metrics(np.array(y_list, dtype=float), np.array(pred_prob_list),
                                        epoch, stage="val")
            val_loss = loss_func(
                torch.tensor(pred_prob_list, dtype=torch.float32),
                torch.tensor(y_list, dtype=torch.float32)
            ).detach().item()
            # Best-effort loss reporting; failures must not stop training.
            try:
                # loss_file = self.train_conf['output']['plot_loss']['name']
                logger.info(f"Writing loss for epoch {epoch}")
                self._write_loss(train_loss, val_loss, epoch)
            except Exception:
                pass
            # Early stopping is disabled when patience <= 0 (always "save").
            if self.early_stopping_config["patience"] > 0:
                early_stop_flag, save_flag = self.es(metric)
            else:
                early_stop_flag, save_flag = False, True
            if save_flag:
                self.best_model = copy.deepcopy(self.model)
                self.best_prediction_train = copy.deepcopy(
                    training_pred_prob_list)
                self.best_prediction_val = copy.deepcopy(
                    np.array(pred_prob_list))
            # Trainers must learn the decision so they stop/save in lockstep.
            early_stop = [early_stop_flag, save_flag,
                          self.early_stopping_config["patience"]]
            broadcast_channel.broadcast(early_stop,
                                        use_pickle=True)
            if BLOCKCHAIN:
                logger.debug(
                    f"Broadcast early stop flag, SHA256: {hashlib.sha256(pickle.dumps(early_stop))}")
            if early_stop_flag:
                break
            # self.dump_as_proto(save_dir=self.save_dir, model_name=self.save_model_name,
            #                    state_dict=self.best_model.state_dict(), final=True)
            # # if self.save_probabilities:
            # self._save_prob(best_model=self.best_model, channel=broadcast_channel)
            # return None
            # Periodic checkpoint of the current (not best) model.
            if self.save_frequency > 0 and epoch % self.save_frequency == 0:
                if self.save_model_name.split(".")[-1] == "pmodel":
                    self.dump_as_proto(
                        save_dir=self.save_dir,
                        model_name=self.save_model_name,
                        state_dict=self.model.state_dict(),
                        epoch=epoch,
                    )
                else:
                    ModelIO.save_torch_model(
                        state_dict=self.model.state_dict(),
                        save_dir=self.save_dir,
                        model_name=self.save_model_name,
                        epoch=epoch,
                    )
                if self.save_onnx_model_name:
                    ModelIO.save_torch_onnx(
                        model=self.model,
                        input_dim=(self.data_dim,),
                        save_dir=self.save_dir,
                        model_name=self.save_onnx_model_name,
                        epoch=epoch,
                    )
        # Without early stopping, the final-epoch model is the "best" model.
        if self.early_stopping_config["patience"] <= 0:
            self.best_model = copy.deepcopy(self.model)
            self.best_prediction_train = copy.deepcopy(training_pred_prob_list)
            self.best_prediction_val = copy.deepcopy(np.array(pred_prob_list))
        self.save(y_list, training_y_list)
        # if self.save_probabilities:
        self._save_prob(best_model=self.best_model, channel=broadcast_channel)
        self._save_feature_importance(broadcast_channel)
        # prepare data for writing
        train_label = np.array(training_y_list, dtype=float)
        train_y_pred = np.array(training_pred_prob_list, dtype=float)
        val_label = np.array(y_list, dtype=float)
        val_y_pred = np.array(pred_prob_list, dtype=float)
        # write roc data
        logger.info("Writing roc data...")
        self._write_roc_data(train_label, train_y_pred,
                             val_label, val_y_pred)
        # write ks data
        logger.info("Writing ks data...")
        self._write_ks_data(train_label, train_y_pred,
                            val_label, val_y_pred)
        # write lift and gain
        logger.info("Writing lift and gain data...")
        self._write_lift_gain_data(
            train_label, train_y_pred, val_label, val_y_pred)
        # write pr curve
        logger.info("Writing pr curve data")
        self._write_pr_data(
            train_label, train_y_pred, val_label, val_y_pred)
        # write feature importance
        logger.info("Writing feature importance data")
        logger.info("Self importances: {}".format(self.feature_importances_))
        self._write_feature_importance()
        ProgressCalculator.finish_progress()
    def save(self, y_list, training_y_list=None):
        """Persist the stage model config, KS artifacts and the best model.

        Args:
            y_list: validation labels (used by the disabled KS cutting).
            training_y_list: training labels; only needed when
                echo_training_metrics is on. Defaults to None.
        """
        save_model_config(stage_model_config=self.export_conf,
                          save_path=Path(self.save_dir))
        if not os.path.exists(self.evaluation_path):
            os.makedirs(self.evaluation_path)
        # dump out ks plot
        # NOTE(review): threshold cutting is commented out, so the suggested
        # threshold is fixed at 0.5 — confirm this is intended.
        suggest_threshold = 0.5
        if "ks" in self.metric_config or "auc_ks" in self.metric_config:
            tc = ThresholdCutter(os.path.join(
                self.save_dir, self.output.get("ks_plot_val")["name"]))
            # tc.cut_by_value(np.array(y_list, dtype=float),
            #                 self.best_prediction_val)
            # suggest_threshold = tc.bst_threshold
            # tc.save()
        if self.interaction_params.get("echo_training_metrics"):
            # NOTE(review): the training-set cutter reuses the "ks_plot_val"
            # output name — confirm it should not be a train-specific file.
            tc = ThresholdCutter(os.path.join(
                self.save_dir, self.output.get("ks_plot_val")["name"]))
            # tc.cut_by_value(
            #     np.array(training_y_list, dtype=float), self.best_prediction_train)
            # tc.save()
        if self.save_model_name:
            if self.save_model_name.split(".")[-1] == "pmodel":
                self.dump_as_proto(
                    save_dir=self.save_dir,
                    model_name=self.save_model_name,
                    state_dict=self.best_model.state_dict(),
                    final=True,
                    suggest_threshold=suggest_threshold
                )
            else:
                ModelIO.save_torch_model(
                    state_dict=self.best_model.state_dict(),
                    save_dir=self.save_dir,
                    model_name=self.save_model_name,
                    meta_dict={"suggest_threshold": suggest_threshold}
                )
        if self.save_onnx_model_name:
            ModelIO.save_torch_onnx(
                model=self.best_model,
                input_dim=(self.data_dim,),
                save_dir=self.save_dir,
                model_name=self.save_onnx_model_name,
            )
    def _save_feature_importance(self, channel):
        """Collect per-feature weights from all parties and write them as a
        CSV sorted by absolute importance; also sets self.feature_importances_.

        Args:
            channel: broadcast channel over which trainers send
                (owner_id, weights, feature_names) tuples.
        """
        res = {"owner_id": [], "fid": [], "importance": []}
        other_weight_list = channel.collect()
        if BLOCKCHAIN:
            logger.debug(
                f"Collect weight list, SHA256: {hashlib.sha256(pickle.dumps(other_weight_list)).hexdigest()}")
        for (owner_id, weights, f_names) in other_weight_list:
            for fid, weight in enumerate(weights):
                res["owner_id"].append(owner_id)
                # res["fid"].append(fid)
                res["fid"].append(f_names[fid])
                res["importance"].append(float(weight))
        # Append the label trainer's own weights.
        for fid, weight in enumerate(self.best_model.state_dict()["linear.weight"][0]):
            # res["owner_id"].append(FedNode.node_id)
            res["owner_id"].append(FedNode.node_name)
            # res["fid"].append(fid)
            f_name = self.train_f_names[fid]
            res["fid"].append(f_name)
            res["importance"].append(float(weight))
        res = pd.DataFrame(res).sort_values(
            by="importance", key=lambda col: np.abs(col), ascending=False)
        res.to_csv(
            # Path(self.save_dir, "feature_importances.csv"), header=True, index=False, float_format="%.6g"
            Path(self.save_dir, self.output["feature_importance"]["name"]), header=True, index=False,
            float_format="%.6g"
        )
        # prepare feature_importances_ attribute
        feature_importances_ = {}
        for _, row in res.iterrows():
            feature_importances_[(row['owner_id'], row['fid'])] = row['importance']
        self.feature_importances_ = feature_importances_
    def _save_prob(self, best_model, channel):
        """Re-run prediction with the best model over train/val sets and
        write per-sample probabilities, if the interaction params request it.

        Args:
            best_model: the snapshotted best VerticalLogisticRegression.
            channel: broadcast channel to collect trainers' partial scores.
        """
        if self.interaction_params.get("write_training_prediction"):
            train_prob_list, train_label_list, train_id_list = [], [], []
            for batch_idx, (x_batch, y_batch, id_batch) in enumerate(self.train_dataloader):
                x_batch, y_batch, id_batch = x_batch.to(self.device), y_batch.to(
                    self.device), id_batch.to(self.device)
                pred_label_trainer = best_model(x_batch)
                pred_trainer_list = channel.collect()
                if BLOCKCHAIN:
                    logger.debug(
                        f"Collect pred list, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer_list)).hexdigest()}")
                pred_total = torch.clone(pred_label_trainer)
                for pred_trainer in pred_trainer_list:
                    pred_total += pred_trainer
                pred_total = torch.sigmoid(pred_total)
                train_id_list += torch.squeeze(id_batch, dim=-1).tolist()
                train_label_list += torch.squeeze(y_batch, dim=-1).tolist()
                train_prob_list += torch.squeeze(pred_total, dim=-1).tolist()
            self._write_prediction(
                train_label_list, train_prob_list, train_id_list, final=True)
        if self.interaction_params.get("write_validation_prediction"):
            val_prob_list, val_label_list, val_id_list = [], [], []
            for batch_idx, (x_batch, y_batch, id_batch) in enumerate(self.val_dataloader):
                x_batch, y_batch, id_batch = x_batch.to(self.device), y_batch.to(
                    self.device), id_batch.to(self.device)
                pred_label_trainer = best_model(x_batch)
                pred_trainer_list = channel.collect()
                if BLOCKCHAIN:
                    logger.debug(
                        f"Collect pred list, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer_list)).hexdigest()}")
                pred_total = torch.clone(pred_label_trainer)
                for pred_trainer in pred_trainer_list:
                    pred_total += pred_trainer
                pred_total = torch.sigmoid(pred_total)
                val_id_list += torch.squeeze(id_batch, dim=-1).tolist()
                val_label_list += torch.squeeze(y_batch, dim=-1).tolist()
                val_prob_list += torch.squeeze(pred_total, dim=-1).tolist()
            self._write_prediction(
                val_label_list, val_prob_list, val_id_list, stage="val", final=True)
    def check_data(self):
        """Sum feature counts across all parties and abort when zero."""
        dim_channel = BroadcastChannel(name="check_data_com")
        n = self.data_dim
        dims = dim_channel.collect()
        if BLOCKCHAIN:
            logger.debug(
                f"Collect dim, SHA256: {hashlib.sha256(pickle.dumps(dims)).hexdigest()}")
        for dim in dims:
            n += dim
        if n <= 0:
            raise ValueError("Number of the feature is zero. Stop training.")
| 26,570 | 47.13587 | 135 | py |
XFL | XFL-master/python/algorithm/framework/vertical/logistic_regression/trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import random
import hashlib
import pickle
import secrets
from pathlib import Path
import numpy as np
import tenseal as ts
import torch
from common.communication.gRPC.python.channel import BroadcastChannel
from common.crypto.paillier.paillier import Paillier
from common.utils.logger import logger
from common.utils.utils import update_dict
from service.fed_node import FedNode
from common.utils.model_io import ModelIO
from common.utils.utils import save_model_config
from .base import VerticalLogisticRegressionBase
from .base import BLOCKCHAIN
class VerticalLogisticRegressionTrainer(VerticalLogisticRegressionBase):
    def __init__(self, train_conf: dict, *args, **kwargs):
        """Receive the shared config from the label trainer, then initialize.

        Args:
            train_conf (dict): local training configuration; the fields
                broadcast by the label trainer are merged into it before the
                base class parses it.
        """
        # Config must be synced before the base class parses train_conf.
        self.sync_channel = BroadcastChannel(name="sync")
        conf = self._sync_config()
        update_dict(train_conf, conf)
        super().__init__(train_conf, label=False, *args, **kwargs)
        self._init_model()
        self.export_conf = [{
            "class_name": "VerticalLogisticRegression",
            "identity": self.identity,
            "filename": self.save_onnx_model_name,
            "input_dim": self.data_dim,
            "bias": False,
            "version": "1.4.0",
            "input_schema": self.schema,
        }]
        if self.random_seed is None:
            self.random_seed = self.sync_channel.recv()
            if BLOCKCHAIN:
                logger.debug(f"Sync random seed, SHA256: {hashlib.sha256(pickle.dumps(self.random_seed)).hexdigest()}")
            # NOTE(review): set_seed runs only when the seed was received
            # here; a locally configured random_seed is never applied and the
            # label trainer's broadcast is left unconsumed — confirm.
            self.set_seed(self.random_seed)
        self.best_model = None
def _sync_config(self):
config = self.sync_channel.recv()
if BLOCKCHAIN:
logger.debug(f"Sync config, SHA256: {hashlib.sha256(pickle.dumps(config)).hexdigest()}")
return config
def fit(self):
""" train model
Model parameters need to be updated before fitting.
"""
self.check_data()
patient = -1
# encryption_config = self.encryption_config
# encryption_method = encryption_config["method"].lower()
encryption_method = list(self.encryption_config.keys())[0].lower()
logger.info("Vertical logistic regression training start")
broadcast_channel = BroadcastChannel(name="vertical_logistic_regression_channel")
public_context = None
if encryption_method == "ckks":
logger.debug("Receive ckks public key.")
public_context = broadcast_channel.recv(use_pickle=False)
if BLOCKCHAIN:
logger.debug(f"SHA256: {hashlib.sha256(public_context).hexdigest()}")
public_context = ts.context_from(public_context)
logger.debug("Public key received.")
elif encryption_method == "paillier":
logger.debug("Receive paillier public key.")
public_context = broadcast_channel.recv(use_pickle=False)
if BLOCKCHAIN:
logger.debug(f"SHA256: {hashlib.sha256(public_context).hexdigest()}")
public_context = Paillier.context_from(public_context)
logger.debug("Public key received.")
elif encryption_method == "plain":
pass
else:
raise ValueError(
f"Encryption method {encryption_method} not supported! Valid methods are 'paillier', 'ckks', 'plain'.")
rng = secrets.SystemRandom()
for epoch in range(1, self.global_epoch + 1):
for batch_idx, (x_batch) in enumerate(self.train_dataloader):
x_batch = x_batch[0].to(self.device)
# compute theta_trainer * x_trainer
pred_trainer = self.model(x_batch)
# send predict result to label trainer.
logger.debug("Send predict result to label trainer.")
broadcast_channel.send(pred_trainer)
if BLOCKCHAIN:
logger.debug(f"Broadcast pred, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer)).hexdigest()}")
if encryption_method == "ckks":
pred_residual = broadcast_channel.recv(use_pickle=False)
if BLOCKCHAIN:
logger.debug(f"SHA256: {hashlib.sha256(pred_residual).hexdigest()}")
pred_residual = ts.ckks_vector_from(public_context, pred_residual)
elif encryption_method == "paillier":
pred_residual = broadcast_channel.recv(use_pickle=False)
if BLOCKCHAIN:
logger.debug(f"SHA256: {hashlib.sha256(pred_residual).hexdigest()}")
pred_residual = Paillier.ciphertext_from(public_context, pred_residual)
elif encryption_method == "plain":
pred_residual = broadcast_channel.recv()
if BLOCKCHAIN:
logger.debug(f"SHA256: {hashlib.sha256(pickle.dumps(pred_residual)).hexdigest()}")
logger.debug("Received prediction residual from label trainer.")
# Compute gradients for trainer.
logger.debug("Calculate gradients for trainer.")
if encryption_method == "ckks":
# Add noise
noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[1])],
dtype=np.float32)
noise /= 100000
x_batch_numpy = x_batch.numpy()
# avoid bug in seal ckks when a column is all zero
sign = 1 if random.randint(0, 1) == 1 else -1
x_batch_numpy[np.where(np.sum(x_batch_numpy, axis=0) == 0)] = 1e-7 * sign
ciphertext = pred_residual.matmul(x_batch_numpy)
noised_gradient_trainer_linear = ciphertext + noise
# Send to label trainer
serialized_gradient = noised_gradient_trainer_linear.serialize()
broadcast_channel.send(serialized_gradient, use_pickle=False)
if BLOCKCHAIN:
logger.debug(f"Send gradient, SHA256: {hashlib.sha256(serialized_gradient).hexdigest()}")
gradient_trainer_linear = broadcast_channel.recv()
if BLOCKCHAIN:
logger.debug(f"Recv gradient, SHA256: {hashlib.sha256(pickle.dumps(gradient_trainer_linear)).hexdigest()}")
gradient_trainer_linear = np.array(gradient_trainer_linear, dtype=np.float32)
gradient_trainer_linear -= noise
gradient_trainer_linear = - gradient_trainer_linear / x_batch.shape[0]
gradient_trainer_linear = torch.FloatTensor(gradient_trainer_linear).unsqueeze(-1)
elif encryption_method == "paillier":
noise = np.array([rng.randint(1 << 24, 1 << 26) - (1 << 25) for _ in range(x_batch.shape[1])],
dtype=np.float32)
noise /= 100000
# Add noise
ciphertext = np.matmul(pred_residual, x_batch.numpy())
noised_gradient_trainer_linear = ciphertext + noise
# Send to label trainer
serialized_gradient = Paillier.serialize(noised_gradient_trainer_linear)
broadcast_channel.send(serialized_gradient, use_pickle=False)
if BLOCKCHAIN:
logger.debug(f"Send gradient, SHA256: {hashlib.sha256(serialized_gradient).hexdigest()}")
gradient_trainer_linear = broadcast_channel.recv()
if BLOCKCHAIN:
logger.debug(f"Recv gradient, SHA256: {hashlib.sha256(pickle.dumps(gradient_trainer_linear)).hexdigest()}")
gradient_trainer_linear = np.array(gradient_trainer_linear, dtype=np.float32)
gradient_trainer_linear -= noise
gradient_trainer_linear = - gradient_trainer_linear / x_batch.shape[0]
gradient_trainer_linear = torch.FloatTensor(gradient_trainer_linear).unsqueeze(-1)
elif encryption_method == "plain":
gradient_trainer_linear = -torch.mm(pred_residual.t(), x_batch) / x_batch.shape[0]
gradient_trainer_linear = gradient_trainer_linear.t()
# Regular section
gradient_trainer_linear = gradient_trainer_linear.t()
if self.optimizer_config['p'] == 1:
gradient_trainer_linear += (self.optimizer_config['alpha'] * (
torch.abs(self.model.linear.weight) / self.model.linear.weight)) / x_batch.shape[0]
elif self.optimizer_config['p'] == 2:
gradient_trainer_linear += (2 * self.optimizer_config['alpha'] * self.model.linear.weight) / \
x_batch.shape[0]
elif self.optimizer_config['p'] == 0:
gradient_trainer_linear += 0
else:
raise NotImplementedError("Regular P={} not implement.".format(self.optimizer_config['p']))
gradient_trainer_linear = gradient_trainer_linear.t()
self.model.linear.weight -= (gradient_trainer_linear * self.optimizer_config["lr"]).t()
logger.debug("Weights update completed.")
for batch_idx, (x_batch) in enumerate(self.val_dataloader):
x_batch = x_batch[0].to(self.device)
pred_trainer = self.model(x_batch)
broadcast_channel.send(pred_trainer)
if BLOCKCHAIN:
logger.debug(f"Send pred, batch_idx {batch_idx}, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer)).hexdigest()}")
early_stop_flag, save_flag, patient = broadcast_channel.recv()
if BLOCKCHAIN:
logger.debug(f"Recv early stop flag, SHA256: {hashlib.sha256(pickle.dumps([early_stop_flag, save_flag, patient])).hexdigest()}")
if save_flag:
self.best_model = copy.deepcopy(self.model)
if early_stop_flag:
break
# self.dump_as_proto(save_dir=self.save_dir, model_name=self.save_model_name,
# state_dict=self.best_model.state_dict(), final=True)
# # if self.save_probabilities:
# self._save_prob(best_model=self.best_model, channel=broadcast_channel)
# return None
if self.save_frequency > 0 and epoch % self.save_frequency == 0:
if self.save_model_name.split(".")[-1] == "pmodel":
self.dump_as_proto(
save_dir=self.save_dir,
model_name=self.save_model_name,
state_dict=self.model.state_dict(),
epoch=epoch
)
else:
ModelIO.save_torch_model(
state_dict=self.model.state_dict(),
save_dir=self.save_dir,
model_name=self.save_model_name,
epoch=epoch
)
if self.save_onnx_model_name is not None and self.save_onnx_model_name != "":
ModelIO.save_torch_onnx(
model=self.model,
input_dim=(self.data_dim,),
save_dir=self.save_dir,
model_name=self.save_onnx_model_name,
epoch=epoch,
)
if patient <= 0:
self.best_model = copy.deepcopy(self.model)
save_model_config(stage_model_config=self.export_conf, save_path=Path(self.save_dir))
if self.save_model_name.split(".")[-1] == "pmodel":
self.dump_as_proto(
save_dir=self.save_dir,
model_name=self.save_model_name,
state_dict=self.best_model.state_dict(),
final=True,
)
else:
ModelIO.save_torch_model(
state_dict=self.best_model.state_dict(),
save_dir=self.save_dir,
model_name=self.save_model_name,
)
if self.save_onnx_model_name:
ModelIO.save_torch_onnx(
model=self.best_model,
input_dim=(self.data_dim,),
save_dir=self.save_dir,
model_name=self.save_onnx_model_name,
)
# if self.save_probabilities:
self._save_prob(best_model=self.best_model, channel=broadcast_channel)
self._save_feature_importance(broadcast_channel)
def _save_prob(self, best_model, channel):
if self.interaction_params.get("write_training_prediction"):
for batch_idx, (x_batch) in enumerate(self.train_dataloader):
x_batch = x_batch[0].to(self.device)
pred_trainer = best_model(x_batch)
channel.send(pred_trainer)
if BLOCKCHAIN:
logger.debug(f"Send pred, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer)).hexdigest()}")
if self.interaction_params.get("write_validation_prediction"):
for batch_idx, (x_batch) in enumerate(self.val_dataloader):
x_batch = x_batch[0].to(self.device)
pred_trainer = best_model(x_batch)
channel.send(pred_trainer)
if BLOCKCHAIN:
logger.debug(f"Send pred, SHA256: {hashlib.sha256(pickle.dumps(pred_trainer)).hexdigest()}")
def _save_feature_importance(self, channel):
# weight = (FedNode.node_id, self.best_model.state_dict()["linear.weight"][0])
weight = (FedNode.node_name, self.best_model.state_dict()["linear.weight"][0], self.train_f_names)
channel.send(weight)
if BLOCKCHAIN:
logger.debug(f"Send weight, SHA256: {hashlib.sha256(pickle.dumps(weight)).hexdigest()}")
def check_data(self):
dim_channel = BroadcastChannel(name="check_data_com")
dim_channel.send(self.data_dim)
if BLOCKCHAIN:
logger.debug(f"Send dim, SHA256: {hashlib.sha256(pickle.dumps(self.data_dim)).hexdigest()}")
| 15,010 | 47.579288 | 144 | py |
XFL | XFL-master/python/algorithm/framework/local/feature_preprocess/label_trainer.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
import torch
from algorithm.core.data_io import CsvReader
from common.utils.config_parser import TrainConfigParser
from common.utils.logger import logger
from sklearn.impute import SimpleImputer
from service.fed_control import ProgressCalculator
from common.utils.utils import save_model_config
def data_impute(form, strategy, fill=None):
    """Build a sklearn SimpleImputer for the given missing-value marker.

    `fill` is only meaningful when strategy == 'constant'.
    """
    imputer_kwargs = {"missing_values": form, "strategy": strategy, "copy": False}
    if strategy == 'constant':
        imputer_kwargs["fill_value"] = fill
    return SimpleImputer(**imputer_kwargs)
def config_combination(config_a, config_b):
    """Merge two config values (scalars or lists) into a de-duplicated result.

    Returns a list when the merged set has several items, the bare element when
    it has exactly one, and the empty set when both inputs were empty.
    """
    def _as_list(value):
        return value if isinstance(value, list) else [value]

    merged = set(_as_list(config_a) + _as_list(config_b))
    if len(merged) == 1:
        return list(merged)[0]
    if len(merged) > 1:
        return list(merged)
    return merged
class LocalFeaturePreprocessLabelTrainer(TrainConfigParser):
    def __init__(self, train_conf):
        """Initialize the local feature-preprocessing stage.

        Args:
            train_conf: stage configuration dict parsed by TrainConfigParser.
        """
        super().__init__(train_conf)
        self.train = None                    # training DataFrame (labels split off)
        self.val = None                      # optional validation DataFrame
        self.save_dir = None                 # output directory, set in _parse_config
        self.transform_switch = False
        # Per-feature imputation settings: {col: {missing_values, strategy, fill_value}}.
        # (Fixed: this attribute was redundantly re-initialized twice.)
        self.impute_dict = {}
        self.outlier_dict = {}
        self.onehot_dict = {}
        self.imputer_values_overall = []     # values treated as missing for ALL features
        self.imputer_strategy_overall = "mean"  # default
        self.imputer_fillvalue_overall = None  # default
        self.onehot_feat_conf = {}
        self.feature_flag = False  # whether to impute by features
        self.model_file = {}       # fitted transformers persisted via torch.save
        self._init_data()
        self._parse_config()
    def _parse_config(self) -> None:
        """
        Parse the algorithm configuration into imputation/onehot/output settings.

        missing_values: int, float, str or list, e.g. [-999, 999] or ["none", "null", "na", ""], default=null
        strategy: str, default="mean"
        fill_value: str or numerical value if strategy == "constant", default=None

        Overall (all-feature) settings are read first; per-feature overrides in
        "missing_features"/"outlier_features" then set self.feature_flag so
        impute() switches to column-wise mode.
        """
        self.save_dir = self.output.get("path")
        self.save_model = self.output.get("model", {})
        if len(self.save_model) > 0:
            self.save_model_name = self.save_model.get("name")
            if not os.path.exists(self.save_dir):
                os.makedirs(self.save_dir)
            self.save_model_path = Path(self.save_dir, self.save_model_name)
            self.export_conf = [{
                "class_name": "LocalFeaturePreprocess",
                "filename": self.save_model_name
            }]
        # missing config
        self.missing_conf = self.train_params.get("missing", {})
        if len(self.missing_conf) > 0:
            self.missing_values_overall = self.missing_conf.get(
                "missing_values", [np.NaN, '', None, ' ', 'nan', 'none', 'null', 'na', 'None'])
            # transform null: None to default missing_values config
            if self.missing_values_overall is None:
                self.missing_values_overall = [np.NaN, '', None, ' ', 'nan', 'none', 'null', 'na', 'None']
            self.missing_strategy_overall = self.missing_conf.get("strategy", "mean")
            self.missing_fillvalue_overall = self.missing_conf.get("fill_value", None)
            self.missing_feat_conf = self.missing_conf.get("missing_features", {})
            self.imputer_values_overall = self.missing_values_overall
            self.imputer_strategy_overall = self.missing_strategy_overall
            self.imputer_fillvalue_overall = self.missing_fillvalue_overall
            logger.info("Missing values need to be imputed")
        # outlier config: outliers are handled as additional "missing" markers
        self.outlier_conf = self.train_params.get("outlier", {})
        if len(self.outlier_conf) > 0:
            self.outlier_values_overall = self.outlier_conf.get("outlier_values", [])
            self.outlier_feat_conf = self.outlier_conf.get("outlier_features", {})
            self.imputer_values_overall = config_combination(self.imputer_values_overall, self.outlier_values_overall)
            logger.info("Outlier values need to be imputed")
        # initialize impute_dict: every column starts from the overall settings
        if self.imputer_values_overall:
            self.impute_dict = dict(zip(self.columns, [{"missing_values": self.imputer_values_overall,
                                                        "strategy": self.imputer_strategy_overall,
                                                        "fill_value": self.imputer_fillvalue_overall}
                                                       for i in self.columns]))
        # if different features have different missing_values
        if len(self.missing_conf) > 0:
            if len(self.missing_feat_conf) > 0:
                for key in self.missing_feat_conf.keys():
                    if len(self.missing_feat_conf[key]) > 0:
                        missing_values_feat = self.missing_feat_conf[key].get("missing_values", None)
                        if missing_values_feat is not None:
                            self.impute_dict[key]["missing_values"] = missing_values_feat
                            self.feature_flag = True
                        missing_strategy_feat = self.missing_feat_conf[key].get("strategy", None)
                        if missing_strategy_feat is not None:
                            self.impute_dict[key]["strategy"] = missing_strategy_feat
                            self.feature_flag = True
                        missing_fillvalue_feat = self.missing_feat_conf[key].get("fill_value", None)
                        if missing_fillvalue_feat is not None:
                            self.impute_dict[key]["fill_value"] = missing_fillvalue_feat
                            self.feature_flag = True
        # if different features have different outlier_values
        if len(self.outlier_conf) > 0:
            if len(self.outlier_feat_conf) > 0:
                for key in self.outlier_feat_conf.keys():
                    if len(self.outlier_feat_conf[key]) > 0:
                        outlier_values_feat = self.outlier_feat_conf[key].get("outlier_values", None)
                        if outlier_values_feat is not None:
                            if key in self.impute_dict.keys():
                                # merge with whatever values the column already has
                                self.impute_dict[key]["missing_values"] = config_combination(
                                    self.impute_dict[key]["missing_values"], outlier_values_feat)
                            else:
                                self.impute_dict[key] = {}
                                self.impute_dict[key]["missing_values"] = outlier_values_feat
                            self.feature_flag = True
        # check the three params: columns added via outlier overrides may still
        # lack strategy/fill_value; backfill them with the overall defaults
        if len(self.impute_dict) > 0:
            for key in self.impute_dict.keys():
                if "strategy" not in self.impute_dict[key].keys():
                    self.impute_dict[key]["strategy"] = self.imputer_strategy_overall
                    self.impute_dict[key]["fill_value"] = self.imputer_fillvalue_overall
        # onehot config
        self.onehot_conf = self.train_params.get("onehot", {})
        if len(self.onehot_conf) > 0:
            self.onehot_feat_conf = self.onehot_conf.get("onehot_features", {})
        # output config
        self.save_trainset_name = self.output.get("trainset", {})
        self.save_valset_name = self.output.get("valset", {})
def __load_data(self, config) -> CsvReader:
if len(config) > 1:
logger.warning("More than one dataset is not supported.")
config = config[0]
if config["type"] == "csv":
data_reader = CsvReader(path=os.path.join(config["path"], config["name"]), has_id=config["has_id"],
has_label=config["has_label"])
else:
raise NotImplementedError("Dataset type {} is not supported.".format(config["type"]))
return data_reader
    def _init_data(self) -> None:
        """Load train (required) and validation (optional) data; split off the label column."""
        if len(self.input["trainset"]) > 0:
            data: CsvReader = self.__load_data(self.input["trainset"])
            # index the frame by the id column parsed by CsvReader
            self.train = data.table.set_index(data.ids)
            self.label_name = data.label_name()
            if self.label_name is not None:
                # keep the label aside; fit() re-joins it after preprocessing
                self.train_label = self.train[[self.label_name]]
                self.train = self.train.drop(columns=self.label_name)
            self.columns = self.train.columns
            self.train_ids = data.ids
        else:
            raise NotImplementedError("Trainset was not configured.")
        if len(self.input["valset"]) > 0:
            data: CsvReader = self.__load_data(self.input["valset"])
            self.val = data.table.set_index(data.ids)
            # assumes valset has the same label column as the trainset — TODO confirm
            if self.label_name is not None:
                self.val_label = self.val[[self.label_name]]
                self.val = self.val.drop(columns=self.label_name)
            self.val_ids = data.ids
def impute(self):
# fill missing_values for different features
def imputer_series(data, col, flag):
if flag == "train":
missing_value_new = self.impute_dict[col]["missing_values"]
if isinstance(missing_value_new, list) and len(missing_value_new) > 0:
data[col] = data[[col]].replace(self.impute_dict[col]["missing_values"], np.NaN)
missing_value_new = np.NaN
imputer = data_impute(missing_value_new, self.impute_dict[col]["strategy"],
self.impute_dict[col]["fill_value"])
imputer.fit(data[[col]])
data[col] = imputer.transform(data[[col]])
imputer_list.update({col: imputer})
elif flag == "val":
if isinstance(self.impute_dict[col]["missing_values"], list) and \
len(self.impute_dict[col]["missing_values"]) > 0:
data[[col]] = data[[col]].replace(self.impute_dict[col]["missing_values"], np.NaN)
data[col] = imputer_list[col].transform(data[[col]])
if not self.feature_flag and len(self.imputer_values_overall) > 0:
# if all features are imputed as a whole
imputer_values_overall = self.imputer_values_overall
# deal with more than one missing_values: transform the missing_values to np.NaN
if isinstance(self.imputer_values_overall, list):
self.train = self.train.replace(self.imputer_values_overall, np.NaN)
if self.val is not None:
self.val = self.val.replace(self.imputer_values_overall, np.NaN)
imputer_values_overall = np.NaN
# initialization
imupter = data_impute(imputer_values_overall, self.imputer_strategy_overall, self.imputer_fillvalue_overall)
self.train = pd.DataFrame(imupter.fit_transform(self.train), columns=self.columns, index=self.train_ids)
if self.val is not None:
self.val = pd.DataFrame(imupter.transform(self.val), columns=self.columns, index=self.val_ids)
self.model_file.update({"imputer": imupter})
logger.info("Overall imputation done")
elif self.feature_flag:
# if different features have different missing_values
imputer_list = {}
pd.Series(self.impute_dict.keys()).apply(lambda x: imputer_series(self.train, x, "train"))
if self.val is not None:
pd.Series(self.impute_dict.keys()).apply(lambda x: imputer_series(self.val, x, "val"))
self.model_file.update({"imputer": imputer_list})
logger.info("Imputation for features done")
    def onehoter(self):
        """One-hot encode the configured features in train (and val) data.

        Each encoded column <col> is replaced by columns <col>_0..<col>_{k-1};
        fitted encoders are stored in self.model_file["onehot"].
        """
        def onehot_series(col, flag):
            if flag == "train":
                # unseen categories at transform time map to the all-zero vector
                onehot = OneHotEncoder(handle_unknown='ignore')
                onehot.fit(self.train[[col]])
                new_data = pd.DataFrame(onehot.transform(self.train[[col]]).toarray())
                onehot_list[col] = onehot
                col_len = len(onehot.categories_[0])
                col_name = ["{}_{}".format(col, i) for i in range(col_len)]
                new_data.columns = col_name
                # align on the original index before joining
                new_data.index = self.train.index
                self.train = self.train.join(new_data).drop(columns=col)
            elif flag == "val":
                # reuse the encoder fitted on the trainset
                new_data = pd.DataFrame(onehot_list[col].transform(self.val[[col]]).toarray())
                col_name = ["{}_{}".format(col, i) for i in range(len(onehot_list[col].categories_[0]))]
                new_data.columns = col_name
                new_data.index = self.val.index
                self.val = self.val.join(new_data).drop(columns=col)
        if len(self.onehot_feat_conf) > 0:
            onehot_list = {}
            pd.Series(self.onehot_feat_conf.keys()).apply(lambda x: onehot_series(x, "train"))
            if self.val is not None:
                pd.Series(self.onehot_feat_conf.keys()).apply(lambda x: onehot_series(x, "val"))
            self.model_file.update({"onehot": onehot_list})
            logger.info("Onehot for features done")
def fit(self) -> None:
"""
missing_values and outlier_values are combined to transform the data
"""
if len(self.missing_conf) == 0 and len(self.outlier_conf) == 0:
logger.info("No missing values and outlier values need to be imputed")
else:
logger.info("Missing values or outlier values will be imputed")
self.impute()
logger.info("Imputation done")
if len(self.onehot_conf) == 0:
logger.info("No onehot process")
else:
logger.info("Onehot will starts")
self.onehoter()
logger.info("Onehot done")
# recover label column
if self.label_name is not None:
self.train = self.train_label.join(self.train)
if self.val is not None:
self.val = self.val_label.join(self.val)
# save model file (optional)
if len(self.save_model) > 0:
save_model_config(stage_model_config=self.export_conf,
save_path=self.save_dir)
torch.save(self.model_file, self.save_model_path)
logger.info("Model file saved")
# save transformed data
if len(self.save_trainset_name) > 0:
save_train_path = self.save_dir / Path(self.save_trainset_name["name"])
if not os.path.exists(os.path.dirname(save_train_path)):
os.makedirs(os.path.dirname(save_train_path))
self.train.to_csv(save_train_path, index=self.input["trainset"][0]["has_id"])
logger.info("Preprocessed trainset done")
if self.val is not None:
save_val_path = self.save_dir / Path(self.save_valset_name["name"])
if not os.path.exists(os.path.dirname(save_val_path)):
os.makedirs(os.path.dirname(save_val_path))
self.val.to_csv(save_val_path, index=self.input["trainset"][0]["has_id"])
logger.info("Preprocessed valset done")
ProgressCalculator.finish_progress()
| 16,082 | 48.638889 | 120 | py |
XFL | XFL-master/test/service/test_service_scheduler.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from concurrent import futures
import grpc
import pytest
from google.protobuf import text_format
from google.protobuf import json_format
import service.scheduler
from common.communication.gRPC.python import (checker_pb2, commu_pb2,
control_pb2, scheduler_pb2,
scheduler_pb2_grpc, status_pb2)
from common.storage.redis.redis_conn import RedisConn
from common.utils.config_parser import replace_variable
from common.utils.grpc_channel_options import insecure_options
from common.utils.logger import get_node_log_path, get_stage_node_log_path
from service.fed_config import FedConfig
from service.fed_job import FedJob
from service.fed_node import FedNode
from service.scheduler import SchedulerService
# Endpoint of the in-process scheduler gRPC server the tests talk to.
host = 'localhost'
listening_port = 55001
@pytest.fixture(scope='module', autouse=True)
def start_scheduler():
    """Run a SchedulerService gRPC server for the lifetime of the test module."""
    grpc_server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10), options=insecure_options)
    scheduler_pb2_grpc.add_SchedulerServicer_to_server(
        SchedulerService(is_bar=True), grpc_server)
    grpc_server.add_insecure_port(f"[::]:{listening_port}")
    grpc_server.start()
    yield
    grpc_server.stop(None)
@pytest.fixture()
def start_client():
    """Provide a SchedulerStub connected to the module-level test server."""
    target = f"{host}:{listening_port}"
    grpc_channel = grpc.insecure_channel(target, options=insecure_options)
    return scheduler_pb2_grpc.SchedulerStub(grpc_channel)
def yield_post_request():
    """Generate three PostRequest messages on the same channel key.

    value=bytes(n) is n zero bytes, so the concatenated payload is bytes(6).
    """
    for payload_size in (1, 2, 3):
        yield commu_pb2.PostRequest(key='test~test_channel_1~1', value=bytes(payload_size))
class TestSchedulerService():
    def test_post(self, start_client, mocker):
        """Streamed post requests are concatenated and stored under one redis key."""
        mocker.patch.object(RedisConn, 'put')  # mock redis service
        resp = start_client.post(yield_post_request())
        assert resp == commu_pb2.PostResponse(code=0)
        # bytes(1)+bytes(2)+bytes(3) concatenate to six zero bytes
        RedisConn.put.assert_called_once_with('test~test_channel_1~1', bytes(6))
    @pytest.mark.parametrize('nodeId, config', [('node-1', {0: {'node-1': {'trainer': 'test'}, 'node-2': {'label_trainer': 'test'}}})])
    def test_getConfig(self, start_client, nodeId, config, mocker):
        """getConfig returns the current stage's config slice for the requesting node."""
        mocker.patch.object(FedConfig, 'trainer_config', config)
        mocker.patch.object(FedJob, 'current_stage', 0)
        mocker.patch.object(FedJob, 'job_id', 0)
        request = scheduler_pb2.GetConfigRequest(nodeId=nodeId)
        response = start_client.getConfig(request)
        # the response config is the JSON dump of config[current_stage][nodeId]
        assert response == scheduler_pb2.GetConfigResponse(
            config=json.dumps(config[0][nodeId]), code=0, jobId=0)
    def test_control(self, start_client, mocker):
        """Exercise STOP and the three START outcomes of the control RPC."""
        # STOP: scheduler stops itself, then forwards the control to trainers.
        mocker.patch('service.scheduler.trainer_control',
                     return_value=control_pb2.ControlResponse(code=1, message='test'))
        mocker.patch.object(FedJob, 'job_id', 1)
        request = control_pb2.ControlRequest(control=control_pb2.STOP)
        response = start_client.control(request)
        service.scheduler.trainer_control.assert_called_once_with(
            control_pb2.STOP)
        assert response == control_pb2.ControlResponse(
            code=1, message='Stop Scheduler Successful.\n'+'test', jobId=1, nodeLogPath={}, stageNodeLogPath={})
        # START while the scheduler itself is not idle -> rejected.
        mocker.patch.object(FedJob, 'job_id', 1)
        mocker.patch.object(FedJob, 'status', status_pb2.STOP_TRAIN)
        request = control_pb2.ControlRequest(control=control_pb2.START)
        response = start_client.control(request)
        assert response == control_pb2.ControlResponse(
            code=1, message="Scheduler not ready.", jobId=1, nodeLogPath={}, stageNodeLogPath={})
        # START while a trainer is still busy -> rejected with the trainer's id.
        mocker.patch.object(FedJob, 'status', status_pb2.IDLE)
        mocker.patch('service.scheduler.get_trainer_status', return_value={
                     'node-1': status_pb2.Status(code=2, status='TRAINING')})
        request = control_pb2.ControlRequest(control=control_pb2.START)
        response = start_client.control(request)
        service.scheduler.get_trainer_status.assert_called()
        assert response == control_pb2.ControlResponse(
            code=1, message="Trainer node-1 not ready..", jobId=1, nodeLogPath={}, stageNodeLogPath={})
        # START with all parties ready -> new job id allocated and marked TRAINING.
        mocker.patch('service.scheduler.get_trainer_status', return_value={
                     'node-1': status_pb2.Status(code=4, status='FAILED')})
        mocker.patch.object(RedisConn, 'incr', return_value=2)
        mocker.patch.object(RedisConn, 'set')
        request = control_pb2.ControlRequest(control=control_pb2.START)
        response = start_client.control(request)
        RedisConn.incr.assert_called_once_with('XFL_JOB_ID')
        RedisConn.set.assert_called_once_with(
            "XFL_JOB_STATUS_2", status_pb2.TRAINING)
        job_log_path = get_node_log_path(job_id=FedJob.job_id, node_ids=list(FedNode.trainers.keys()) + ['scheduler'])
        job_stages_log_path = get_stage_node_log_path(job_id=FedJob.job_id, train_conf=FedConfig.converted_trainer_config)
        # if not FedConfig.trainer_config:
        #     job_log_path = {}
        #     job_stages_log_path = {}
        # assert response == control_pb2.ControlResponse(
        #     code=0, message="", jobId=2, nodeLogPath=json.dumps(job_log_path), stageNodeLogPath=json.dumps(job_stages_log_path))
        # assert FedJob.status == status_pb2.TRAINING
    def test_recProgress(self, start_client, mocker):
        """recProgress records a stage's progress in FedJob.progress."""
        # mocker.patch.object(FedJob, 'progress', {0: 0})
        # request = scheduler_pb2.RecProgressRequest(stageId=0, progress=10)
        # response = start_client.recProgress(request)
        # assert response == scheduler_pb2.RecProgressResponse(code=0)
        # assert FedJob.progress[0] == 10
        mocker.patch.object(FedJob, 'job_id', 2)
        mocker.patch.object(FedJob, 'current_stage', 0)
        mocker.patch.object(FedJob, 'total_stage_num', 1)
        mocker.patch.object(FedJob, 'progress', {0: 0})
        mocker.patch.object(FedConfig, 'trainer_config', {
                            0: {'trainer': {'model_info': {'name': 'test'}}}})
        # persisting the progress snapshot to redis is stubbed out
        mocker.patch.object(RedisConn, 'set', return_value=None)
        request = scheduler_pb2.RecProgressRequest(stageId=0, progress=10)
        response = start_client.recProgress(request)
        assert response == scheduler_pb2.RecProgressResponse(code=0)
        assert FedJob.progress[0] == 10
    def test_status(self, start_client, mocker):
        """status reports live scheduler/trainer state and historical job state."""
        # current node status (jobId=0 queries the live state)
        mocker.patch.object(FedJob, 'job_id', 2)
        mocker.patch.object(FedJob, 'status', 2)
        mocker.patch('service.scheduler.get_trainer_status', return_value={
                     'node-1': status_pb2.Status(code=2, status='TRAINING')})
        request = status_pb2.StatusRequest(jobId=0)
        response = start_client.status(request)
        assert response.schedulerStatus == status_pb2.Status(
            code=2, status='TRAINING')
        service.scheduler.get_trainer_status.assert_called()
        assert response.trainerStatus == {
            'node-1': status_pb2.Status(code=2, status='TRAINING')}
        assert response.jobId == 2
        # request = status_pb2.StatusRequest(jobId=2)
        # response = start_client.status(request)
        # assert response.jobStatus == status_pb2.Status(
        #     code=2, status='TRAINING')
        # assert response.jobId == 2
        # historical job state is read from redis; SUCCESSFUL case
        mocker.patch.object(
            RedisConn, 'get', return_value=status_pb2.SUCCESSFUL)
        request = status_pb2.StatusRequest(jobId=1)
        response = start_client.status(request)
        # RedisConn.get.assert_called_once_with("XFL_JOB_STATUS_1")
        assert response.jobStatus == status_pb2.Status(
            code=3, status='SUCCESSFUL')
        # FAILED case
        mocker.patch.object(RedisConn, 'get', return_value=status_pb2.FAILED)
        request = status_pb2.StatusRequest(jobId=1)
        response = start_client.status(request)
        # RedisConn.get.assert_called_once_with("XFL_JOB_STATUS_1")
        assert response.jobStatus == status_pb2.Status(code=4, status='FAILED')
    @pytest.mark.parametrize('algo, config',
                             [
                                 ('vertical_xgboost', {
                                  "trainer": 'test', "label_trainer": 'test'}),
                                 ('local_normalization', {
                                  "trainer": 'test', "label_trainer": 'test'})
                             ])
    def test_getAlgorithmList(self, start_client, algo, config, mocker):
        """getAlgorithmList exposes the algorithm names and per-role default configs."""
        mocker.patch.object(FedConfig, 'algorithm_list', [
                            'vertical_xgboost', 'local_normalization'])
        mocker.patch.object(FedConfig, 'default_config_map', {'vertical_xgboost': {'trainer': {'info': 'test'}, 'label_trainer': {
            'info': 'test'}}, 'local_normalization': {'trainer': {'info': 'test'}, 'label_trainer': {'info': 'test'}}})
        # every config dict is serialized via json.dumps, stubbed to 'test'
        mocker.patch.object(json, 'dumps', return_value='test')
        request = scheduler_pb2.GetAlgorithmListRequest()
        response = start_client.getAlgorithmList(request)
        assert response.algorithmList == [
            'vertical_xgboost', 'local_normalization']
        assert response.defaultConfigMap[algo] == scheduler_pb2.DefaultConfig(
            config=config)
    def test_getStage(self, start_client, mocker):
        """getStage returns the stage snapshot stored in redis, or code 3 when absent."""
        mocker.patch.object(FedJob, 'current_stage', 2)
        mocker.patch.object(FedJob, 'total_stage_num', 3)
        progress = {0: 100, 1: 45}
        mocker.patch.object(FedJob, 'progress', progress)
        # build the snapshot the scheduler would have persisted
        stage_response = scheduler_pb2.GetStageResponse()
        stage_name = "test"
        stage_response.code = 0
        stage_response.currentStageId = 1
        stage_response.totalStageNum = 3
        stage_response.currentStageName = stage_name
        bar_response = scheduler_pb2.ProgressBar()
        # NOTE: the loop variable shadows the outer `progress` dict;
        # append() copies the message, so reusing bar_response is safe.
        for stage, progress in progress.items():
            bar_response.stageId = stage
            bar_response.stageProgress = progress
            stage_response.progressBar.append(bar_response)
        mocker.patch.object(RedisConn, 'get', return_value=json_format.MessageToJson(stage_response))
        request = scheduler_pb2.GetStageRequest()
        request.jobId = 0
        response = start_client.getStage(request)
        assert response.code == 0
        assert response.currentStageId == 1
        assert response.totalStageNum == 3
        assert response.currentStageName == 'test'
        assert response.progressBar[0].stageId == 0
        assert response.progressBar[0].stageProgress == 100
        assert response.progressBar[1].stageId == 1
        assert response.progressBar[1].stageProgress == 45
        # no snapshot in redis -> code 3
        mocker.patch.object(RedisConn, 'get', return_value=None)
        request = scheduler_pb2.GetStageRequest()
        request.jobId = 0
        response = start_client.getStage(request)
        assert response.code == 3
        # mocker.patch.object(FedJob, 'current_stage', 0)
        # mocker.patch.object(FedJob, 'total_stage_num', 1)
        # mocker.patch.object(FedJob, 'progress', {0: 0})
        # mocker.patch.object(FedConfig, 'trainer_config', {
        #     0: {'trainer': {'model_info': {'name': 'test'}}}})
        # request = scheduler_pb2.GetStageRequest()
        # response = start_client.getStage(request)
        # assert response.code == 0
        # assert response.currentStageId == 0
        # assert response.totalStageNum == 1
        # assert response.currentStageName == 'test'
        # assert response.progressBar[0].stageId == 0
        # assert response.progressBar[0].stageProgress == 0
        # mocker.patch.object(FedConfig, 'trainer_config', {0: {}})
        # request = scheduler_pb2.GetStageRequest()
        # response = start_client.getStage(request)
        # assert response.code == 1
        # assert response.currentStageName == ''
        # mocker.patch.object(FedConfig, 'trainer_config', [])
        # request = scheduler_pb2.GetStageRequest()
        # response = start_client.getStage(request)
        # assert response.code == 2
        # assert response.currentStageName == ''
    def test_checkTaskConfig(self, start_client, mocker):
        """Pin the cross-stage validation report produced by checkTaskConfig.

        Builds a six-stage pipeline config (binning -> feature selection ->
        pearson -> feature selection -> normalization -> logistic regression),
        sends it through the scheduler client, and asserts — via a
        whitespace-insensitive compare of the text-format protobuf dump — that
        the checker reports:
          * an output path duplicated across stages 4 and 5,
          * a blank input/output entry (stage 5's empty pretrained_model),
          * inputs whose files no earlier stage produced.
        """
        request = checker_pb2.CheckTaskConfigRequest()
        # NOTE(review): several "model_info.name" values below are overridden
        # to "vertical_logistic_regression" (the original operator names are
        # kept as comments) — presumably so every stage passes per-operator
        # schema validation while still exercising the cross-stage path
        # analysis; confirm against the checker implementation.
        conf = \
            [
                # Stage 0: binning / WOE-IV stage; reads a raw dataset that
                # does not exist -> expected "nonexistentInput" (no stageId).
                {
                    "identity": "label_trainer",
                    "model_info": {
                        # "name": "vertical_binning_woe_iv_fintech"
                        "name": "vertical_logistic_regression"
                    },
                    "input": {
                        "trainset": [
                            {
                                "type": "csv",
                                "path": "/opt/dataset/testing/fintech",
                                "name": "banking_guest_train_v01_20220216_TL.csv",
                                "has_id": True,
                                "has_label": True,
                                "nan_list": [
                                ]
                            }
                        ]
                    },
                    "output": {
                        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                        "model": {
                            "name": "vertical_binning_woe_iv_[STAGE_ID].json"
                        },
                        "iv": {
                            "name": "woe_iv_result_[STAGE_ID].json"
                        },
                        "split_points": {
                            "name": "binning_split_points_[STAGE_ID].json"
                        },
                        "trainset": {
                            "name": "fintech_woe_map_train_[STAGE_ID].csv"
                        }
                    },
                    "train_info": {
                        "interaction_params": {
                            "save_model": True
                        },
                        "train_params": {
                            "encryption": {
                                "paillier": {
                                    "key_bit_size": 2048,
                                    "precision": 7,
                                    "djn_on": True,
                                    "parallelize_on": True
                                }
                            },
                            "binning": {
                                "method": "equal_width",
                                "bins": 5
                            }
                        }
                    }
                },
                # Stage 1: feature selection driven by stage 0's IV result;
                # its raw trainset/valset are also missing -> two more
                # "nonexistentInput" findings (stageId: 1).
                {
                    "identity": "label_trainer",
                    "model_info": {
                        # "name": "vertical_feature_selection"
                        "name": "vertical_logistic_regression"
                    },
                    "input": {
                        "iv_result": {
                            "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                            "name": "woe_iv_result_[STAGE_ID-1].json"
                        },
                        "trainset": [
                            {
                                "type": "csv",
                                "path": "/opt/dataset/testing/fintech",
                                "name": "banking_guest_train_v01_20220216_TL.csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ],
                        "valset": [
                            {
                                "type": "csv",
                                "path": "/opt/dataset/testing/fintech",
                                "name": "banking_guest_train_v01_20220216_TL.csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ]
                    },
                    "output": {
                        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                        "model": {
                            "name": "feature_selection_[STAGE_ID].pkl"
                        },
                        "trainset": {
                            "name": "selected_train_[STAGE_ID].csv"
                        },
                        "valset": {
                            "name": "selected_val_[STAGE_ID].csv"
                        }
                    },
                    "train_info": {
                        "train_params": {
                            "filter": {
                                "common": {
                                    "metrics": "iv",
                                    "filter_method": "threshold",
                                    "threshold": 0.01
                                }
                            }
                        }
                    }
                },
                # Stage 2: pearson correlation over stage 1's selected set.
                {
                    "identity": "label_trainer",
                    "model_info": {
                        # "name": "vertical_pearson"
                        "name": "vertical_logistic_regression"
                    },
                    "input": {
                        "trainset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "selected_train_[STAGE_ID-1].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ]
                    },
                    "output": {
                        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                        "corr": {
                            "name": "vertical_pearson_[STAGE_ID].pkl"
                        }
                    },
                    "train_info": {
                        "train_params": {
                            "col_index": -1,
                            "col_names": "",
                            "encryption": {
                                "paillier": {
                                    "key_bit_size": 2048,
                                    "precision": 6,
                                    "djn_on": True,
                                    "parallelize_on": True
                                }
                            },
                            "max_num_cores": 999,
                            "sample_size": 9999
                        }
                    }
                },
                # Stage 3: feature selection on pearson + IV results. The
                # "iv_result" deliberately uses [STAGE_ID] instead of
                # [STAGE_ID-3], so it references a stage-3 file nothing has
                # produced -> "nonexistentInput" (stageId: 3).
                {
                    "identity": "label_trainer",
                    "model_info": {
                        # "name": "vertical_feature_selection"
                        "name": "vertical_logistic_regression"
                    },
                    "input": {
                        "corr_result": {
                            "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                            "name": "vertical_pearson_[STAGE_ID-1].pkl"
                        },
                        "iv_result": {
                            "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                            "name": "woe_iv_result_[STAGE_ID].json" # "name": "woe_iv_result_[STAGE_ID-3].json"
                        },
                        "trainset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "selected_train_[STAGE_ID-2].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ],
                        "valset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "selected_val_[STAGE_ID-2].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ]
                    },
                    "output": {
                        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                        "model": {
                            "name": "feature_selection_[STAGE_ID].pkl"
                        },
                        "trainset": {
                            "name": "selected_train_[STAGE_ID].csv"
                        },
                        "valset": {
                            "name": "selected_val_[STAGE_ID].csv"
                        }
                    },
                    "train_info": {
                        "train_params": {
                            "filter": {
                                "common": {
                                    "metrics": "iv",
                                    "filter_method": "threshold",
                                    "threshold": 0.01
                                },
                                "correlation": {
                                    "sort_metric": "iv",
                                    "correlation_threshold": 0.7
                                }
                            }
                        }
                    }
                },
                # Stage 4: local normalization; its model output name will be
                # duplicated by stage 5 (see below).
                {
                    "identity": "label_trainer",
                    "model_info": {
                        # "name": "local_normalization"
                        "name": "vertical_logistic_regression"
                    },
                    "input": {
                        "trainset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "selected_train_[STAGE_ID-1].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ],
                        "valset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "selected_val_[STAGE_ID-1].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ]
                    },
                    "output": {
                        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                        "model": {
                            # "name": "local_normalization_[STAGE_ID].pt"
                            "name": "vertical_logitstic_regression_[STAGE_ID].pt"
                        },
                        "trainset": {
                            "name": "normalized_train_[STAGE_ID].csv"
                        },
                        "valset": {
                            "name": "normalized_val_[STAGE_ID].csv"
                        }
                    },
                    "train_info": {
                        "train_params": {
                            "norm": "max",
                            "axis": 0
                        }
                    }
                },
                # Stage 5: vertical LR. Its model output uses [STAGE_ID - 1],
                # colliding with stage 4's model path ->
                # "duplicatedInputOutput"; its blank "pretrained_model"
                # triggers both "blankInputOutput" and "nonexistentInput".
                {
                    "identity": "label_trainer",
                    "model_info": {
                        "name": "vertical_logistic_regression"
                    },
                    "input": {
                        "trainset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "normalized_train_[STAGE_ID-1].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ],
                        "valset": [
                            {
                                "type": "csv",
                                "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                                "name": "normalized_val_[STAGE_ID-1].csv",
                                "has_id": True,
                                "has_label": True
                            }
                        ],
                        "pretrained_model": {
                            "path": "",
                            "name": ""
                        }
                    },
                    "output": {
                        "path": "/opt/checkpoints/[JOB_ID]/[NODE_ID]",
                        "model": {
                            # "name": "vertical_logitstic_regression_[STAGE_ID].pt"
                            "name": "vertical_logitstic_regression_[STAGE_ID - 1].pt"
                        },
                        "metric_train": {
                            "name": "lr_metric_train_[STAGE_ID].csv"
                        },
                        "metric_val": {
                            "name": "lr_metric_val_[STAGE_ID].csv"
                        },
                        "prediction_train": {
                            "name": "lr_prediction_train_[STAGE_ID].csv"
                        },
                        "prediction_val": {
                            "name": "lr_prediction_val_[STAGE_ID].csv"
                        },
                        "ks_plot_train": {
                            "name": "lr_ks_plot_train_[STAGE_ID].csv"
                        },
                        "ks_plot_val": {
                            "name": "lr_ks_plot_val_[STAGE_ID].csv"
                        },
                        "decision_table_train": {
                            "name": "lr_decision_table_train_[STAGE_ID].csv"
                        },
                        "decision_table_val": {
                            "name": "lr_decision_table_val_[STAGE_ID].csv"
                        },
                        "feature_importance": {
                            "name": "lr_feature_importance_[STAGE_ID].csv"
                        }
                    },
                    "train_info": {
                        "interaction_params": {
                            "save_frequency": -1,
                            "write_training_prediction": True,
                            "write_validation_prediction": True,
                            "echo_training_metrics": True
                        },
                        "train_params": {
                            "global_epoch": 2,
                            "batch_size": 512,
                            "encryption": {
                                "ckks": {
                                    "poly_modulus_degree": 8192,
                                    "coeff_mod_bit_sizes": [
                                        60,
                                        40,
                                        40,
                                        60
                                    ],
                                    "global_scale_bit_size": 40
                                }
                            },
                            "optimizer": {
                                "lr": 0.01,
                                "p": 2,
                                "alpha": 1e-4
                            },
                            "metric": {
                                "decision_table": {
                                    "method": "equal_frequency",
                                    "bins": 10
                                },
                                "acc": {},
                                "precision": {},
                                "recall": {},
                                "f1_score": {},
                                "auc": {},
                                "ks": {}
                            },
                            "early_stopping": {
                                "key": "acc",
                                "patience": 10,
                                "delta": 0
                            },
                            "random_seed": 50
                        }
                    }
                }
            ]
        request.dumpedTrainConfig = json.dumps(conf)
        # request.existedInputPath.append()
        response = start_client.checkTaskConfig(request)
        # print("-------")
        # print(text_format.MessageToString(response.multiStageResult))
        # print(response.message)
        # print(response.code)
        # print(response)
        m = text_format.MessageToString(response.crossStageResult)
        # Compare whitespace-insensitively: only the structure and values of
        # the findings matter, not text_format's indentation.
        assert m.replace(' ', '').replace('\n', '') == '''
duplicatedInputOutput {
dumpedValue: "\\"/opt/checkpoints/JOB_ID/NODE_ID/vertical_logitstic_regression_4.pt\\""
positionList {
stageId: 4
pathInfo {
dictPath {
key: "model"
}
}
}
positionList {
stageId: 5
pathInfo {
dictPath {
key: "model"
}
}
}
}
blankInputOutput {
dumpedValue: "\\"\\""
positionList {
stageId: 5
pathInfo {
dictPath {
key: "pretrained_model"
}
}
}
}
nonexistentInput {
dumpedValue: "\\"/opt/dataset/testing/fintech/banking_guest_train_v01_20220216_TL.csv\\""
positionList {
pathInfo {
dictPath {
key: "trainset"
}
}
}
}
nonexistentInput {
dumpedValue: "\\"/opt/dataset/testing/fintech/banking_guest_train_v01_20220216_TL.csv\\""
positionList {
stageId: 1
pathInfo {
dictPath {
key: "trainset"
}
}
}
}
nonexistentInput {
dumpedValue: "\\"/opt/dataset/testing/fintech/banking_guest_train_v01_20220216_TL.csv\\""
positionList {
stageId: 1
pathInfo {
dictPath {
key: "valset"
}
}
}
}
nonexistentInput {
dumpedValue: "\\"/opt/checkpoints/JOB_ID/NODE_ID/woe_iv_result_3.json\\""
positionList {
stageId: 3
pathInfo {
dictPath {
key: "iv_result"
}
}
}
}
nonexistentInput {
dumpedValue: "\\"\\""
positionList {
stageId: 5
pathInfo {
dictPath {
key: "pretrained_model"
}
}
}
}
'''.replace(' ', '').replace('\n', '')
| 31,059 | 40.303191 | 136 | py |
XFL | XFL-master/test/service/test_fed_config.py | # Copyright 2022 The XFL Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from common.communication.gRPC.python import scheduler_pb2
import service
from service.fed_config import FedConfig
from service.fed_job import FedJob
from service.fed_node import FedNode
class Test_FedConfig():
    """Unit tests for service.fed_config.FedConfig.

    All collaborators (FedNode, FedJob, gRPC stubs, config loaders, log
    handler factories) are replaced with pytest-mock patches so that only
    FedConfig's own logic executes.
    """
    @pytest.mark.parametrize('trainer_list, result',
                             [
                                 (['test_1', 'test_2'], ['test_1', 'test_2']),
                                 ([], [])
                             ])
    def test_get_label_trainer(self, mocker, trainer_list, result):
        """get_label_trainer returns stage_config's label_trainer list as-is."""
        mocker.patch.object(FedConfig, 'stage_config', {
            "fed_info": {"label_trainer": trainer_list}})
        res = FedConfig.get_label_trainer()
        assert res == result
    @pytest.mark.parametrize('trainer_list, result',
                             [
                                 (['test_1', 'test_2'], 'test_1'),
                                 ([], None)
                             ])
    def test_get_assist_trainer(self, mocker, trainer_list, result):
        """get_assist_trainer yields the first configured assist trainer, or None."""
        mocker.patch.object(FedConfig, 'stage_config', {
            "fed_info": {"assist_trainer": trainer_list}})
        res = FedConfig.get_assist_trainer()
        assert res == result
    @pytest.mark.parametrize('trainer_list, result',
                             [
                                 (['test_1', 'test_2'], ['test_1', 'test_2']),
                                 ([], [])
                             ])
    def test_get_trainer(self, mocker, trainer_list, result):
        """get_trainer returns stage_config's trainer list as-is."""
        mocker.patch.object(FedConfig, 'stage_config', {
            "fed_info": {"trainer": trainer_list}})
        res = FedConfig.get_trainer()
        assert res == result
    def test_load_config(self, mocker):
        """load_config attaches a per-job log handler and stores the trainer config."""
        mocker.patch.object(FedJob, 'job_id', 1)
        mocker.patch('service.fed_config.add_job_log_handler')
        mocker.patch.object(FedConfig, 'load_trainer_config', return_value={})
        FedConfig.load_config('test')
        # Handler must be keyed by the current job id (1, empty suffix).
        service.fed_config.add_job_log_handler.assert_called_once_with(1, '')
        assert FedConfig.trainer_config == {}
    def test_load_trainer_config(self, mocker):
        """load_trainer_config merges per-node config files into one stage map.

        Scenarios exercised in order:
          1. one trainer node with an existing plain config;
          2. trainer + assist_trainer where only the trainer's file exists;
          3. no config files at all -> empty result;
          4. trainer + label_trainer + assist_trainer, all files present;
          5. vertical_kmeans configs (result currently left unasserted).
        The side_effect helpers below are call-count state machines: each
        call to the patched function returns the next scripted value, so the
        order of FedConfig's internal lookups is significant.
        """
        # Scenario 1: single node, config file exists.
        mocker.patch.object(FedNode, 'trainers', {"node-1": "test"})
        mocker.patch('service.fed_config.load_json_config',
                     return_value=[{"identity": "trainer"}])
        mocker.patch("os.path.exists", return_value=True)
        trainer_config = FedConfig.load_trainer_config("test")
        assert trainer_config == {0: {"node-1": {"identity": "trainer", "fed_info": {
            "label_trainer": [],
            "trainer": ["node-1"],
            "assist_trainer": []
        }}}}
        ##
        # Scenario 2: trainer + assist_trainer; only every other
        # os.path.exists probe succeeds, so the assist_trainer gets an
        # empty config carrying only the derived fed_info.
        mocker.patch.object(FedNode, 'trainers', {"node-1": "test", "assist_trainer": "test2"})
        def mock_load_json_config(*args, **kwargs):
            # First lookup -> trainer config, second -> empty config.
            if load_json_config.call_count == 1:
                return [{
                    "identity": "trainer",
                    "model_info": {
                        "name": "vertical_xgboost"
                    }
                }]
            elif load_json_config.call_count == 2:
                return [{}]
        load_json_config = mocker.patch('service.fed_config.load_json_config', side_effect=mock_load_json_config)
        def mock_func(*args, **kwargs):
            # Odd probes exist, even probes do not.
            if os_path.call_count % 2 == 1:
                return True
            elif os_path.call_count % 2 == 0:
                return False
        os_path = mocker.patch("os.path.exists", side_effect=mock_func)
        trainer_config = FedConfig.load_trainer_config("test")
        assert trainer_config == \
            {
                0: {
                    'node-1': {
                        'identity': 'trainer',
                        'model_info': {
                            'name': 'vertical_xgboost'
                        },
                        'fed_info': {
                            'label_trainer': [],
                            'trainer': ['node-1'],
                            'assist_trainer': []
                        }
                    },
                    'assist_trainer': {
                        'fed_info': {
                            'label_trainer': [],
                            'trainer': ['node-1'],
                            'assist_trainer': []
                        }
                    }
                }
            }
        ##
        # Scenario 3: no config file exists anywhere -> empty map.
        def mock_func(*args, **kwargs):
            return False
        os_path = mocker.patch("os.path.exists", side_effect=mock_func)
        trainer_config = FedConfig.load_trainer_config("test")
        assert trainer_config == {}
        ##
        # Scenario 4: trainer + label_trainer + assist_trainer; first two
        # exists-probes succeed, the rest fail.
        mocker.patch.object(FedNode, 'trainers', {"node-1": "test", "node-2": "test3", "assist_trainer": "test2"})
        def mock_load_json_config(*args, **kwargs):
            if load_json_config.call_count == 1:
                return [{
                    "identity": "trainer",
                    "model_info": {
                        "name": "vertical_xgboost"
                    }
                }]
            elif load_json_config.call_count == 2:
                return [
                    {"identity": "label_trainer",
                     "model_info": {
                         "name": "vertical_logistic_regression"
                     }
                     }
                ]
            else:
                return [{
                    "identity": "assist_trainer",
                    "model_info": {
                        "name": "vertical_xgboost"
                    }
                }]
        load_json_config = mocker.patch('service.fed_config.load_json_config', side_effect=mock_load_json_config)
        def mock_func(*args, **kwargs):
            if os_path.call_count <= 2:
                return True
            else:
                return False
        os_path = mocker.patch("os.path.exists", side_effect=mock_func)
        trainer_config = FedConfig.load_trainer_config("test")
        assert trainer_config == \
            {
                0:
                    {
                        'node-1': {
                            'identity': 'trainer',
                            'model_info': {'name': 'vertical_xgboost'},
                            'fed_info': {'label_trainer': ['node-2'], 'trainer': ['node-1'], 'assist_trainer': []}
                        },
                        'node-2': {
                            'identity': 'label_trainer',
                            'model_info': {'name': 'vertical_logistic_regression'},
                            'fed_info': {'label_trainer': ['node-2'], 'trainer': ['node-1'], 'assist_trainer': []}
                        },
                    }
            }
        ###
        ##
        # Scenario 5: vertical_kmeans configs; the expected result is left
        # commented out below, so only "does not raise" is verified here.
        mocker.patch.object(FedNode, 'trainers', {"node-1": "test", "node-2": "test3", "assist_trainer": "test2"})
        def mock_load_json_config(*args, **kwargs):
            if load_json_config.call_count == 1:
                return [{
                    "identity": "trainer",
                    "model_info": {
                        "name": "vertical_kmeans"
                    }
                }]
            elif load_json_config.call_count == 2:
                return [{
                    "identity": "label_trainer",
                    "model_info": {
                        "name": "vertical_kmeans"
                    }
                }]
            else:
                return [{}]
        load_json_config = mocker.patch('service.fed_config.load_json_config', side_effect=mock_load_json_config)
        def mock_func(*args, **kwargs):
            if os_path.call_count <= 2:
                return True
            else:
                return False
        os_path = mocker.patch("os.path.exists", side_effect=mock_func)
        trainer_config = FedConfig.load_trainer_config("test")
        # assert trainer_config == \
        #     {
        #         0:
        #             {
        #                 'node-1': {
        #                     'identity': 'trainer',
        #                     'model_info': {'name': 'vertical_xgboost'},
        #                     'fed_info': {'label_trainer': ['node-2'], 'trainer': ['node-1'], 'assist_trainer': []}
        #                 },
        #                 'node-2': {
        #                     'identity': 'label_trainer',
        #                     'model_info': {'name': 'vertical_logistic_regression'},
        #                     'fed_info': {'label_trainer': ['node-2'], 'trainer': ['node-1'], 'assist_trainer': []}
        #                 },
        #             }
        #     }
    def test_get_config(self, mocker):
        """get_config fetches job config over gRPC and wires up logging/epochs.

        The scheduler stub is patched to return job 2 with a fixed config
        string; json.loads is patched so the parsed config carries a model
        name and a global_epoch, which must be propagated to FedJob.
        """
        mocker.patch.object(FedNode, "create_channel", return_value='55001')
        mocker.patch("service.fed_config.scheduler_pb2_grpc.SchedulerStub.__init__", side_effect=lambda x: None)
        mocker.patch("service.fed_config.scheduler_pb2_grpc.SchedulerStub.getConfig", create=True,
                     return_value=scheduler_pb2.GetConfigResponse(jobId=2, config="test_config"))
        mocker.patch.object(FedJob, "global_epoch", 0)
        mocker.patch("json.loads",
                     return_value={"model_info": {"name": "test"}, "train_info": {"train_params": {"global_epoch": 1}}})
        mocker.patch("service.fed_config.add_job_log_handler", return_value="job_log_handler")
        mocker.patch("service.fed_config.add_job_stage_log_handler", return_value="job_stage_log_handler")
        resp = FedConfig.get_config()
        FedNode.create_channel.assert_called_once_with("scheduler")
        assert FedConfig.job_log_handler == "job_log_handler"
        assert FedConfig.job_stage_log_handler == "job_stage_log_handler"
        service.fed_config.add_job_log_handler.assert_called_once_with(2, '')
        # Stage handler is keyed by (job_id, suffix, stage, model name).
        service.fed_config.add_job_stage_log_handler.assert_called_once_with(2, '', 0, "test")
        assert FedJob.global_epoch == 1
        assert resp.config == "test_config"
    def test_load_algorithm_list(self, mocker):
        """load_algorithm_list builds default_config_map keyed by algorithm/role."""
        def mock_load_json_config(args):
            # Dispatch on the requested config path to emulate per-role files.
            if '/algorithm/config/vertical_xgboost/trainer' in args:
                return {"identity": "trainer"}
            elif '/algorithm/config/vertical_xgboost/label_trainer' in args:
                return {"identity": "label_trainer"}
        mocker.patch('service.fed_config.load_json_config',
                     side_effect=mock_load_json_config)
        FedConfig.load_algorithm_list()
        assert FedConfig.default_config_map["vertical_xgboost"] == {"trainer": {
            "identity": "trainer"}, "label_trainer": {"identity": "label_trainer"}}
| 11,371 | 39.614286 | 120 | py |
XFL | XFL-master/test/common/test_xoperator.py | import pytest
from common.xoperator import get_operator
from algorithm.framework.vertical.xgboost.trainer import VerticalXgboostTrainer
@pytest.mark.parametrize("name, role", [
    ("vertical_xgboost", "trainer"), ("vertical_xgboost", "client"),
    ("mixed_xgboost", "label_trainer"), ("vertical_abc", "assist_trainer")
])
def test_get_operator(name, role):
    """Unknown algorithm names or unsupported roles raise ValueError;
    the one valid (name, role) pair resolves to the xgboost trainer class."""
    bad_lookup = role == "client" or name in ("mixed_xgboost", "vertical_abc")
    if bad_lookup:
        with pytest.raises(ValueError):
            get_operator(name, role)
    else:
        operator_cls = get_operator(name, role)
        assert operator_cls.__name__ == VerticalXgboostTrainer.__name__
| 614 | 33.166667 | 83 | py |
XFL | XFL-master/test/common/test_xregister.py | import pytest
from common.xregister import xregister, XRegister
from algorithm.framework.vertical.xgboost.trainer import VerticalXgboostTrainer
class Abc:
    """Minimal placeholder type used as a registration target in these tests."""
class TestXRegister():
    """Register / resolve / unregister round-trips on the global xregister."""
    @pytest.mark.parametrize("target", [
        (Abc), ("abc"), ("Abc"), (Abc), ("CDE")
    ])
    def test_register(self, target):
        # A class registers itself; a string name returns a decorator that
        # registers the decorated callable; decorating a non-callable fails.
        if target == "CDE":
            with pytest.raises(TypeError):
                xregister.register(target)("CDE")
        elif target == "abc":
            xregister.register(target)(lambda value: value + 2)
            assert 'abc' in xregister.__dict__
        else:
            xregister.register(target)
            assert 'Abc' in xregister.__dict__
    @pytest.mark.parametrize("name", ["Abc", "XYZ"])
    def test_call(self, name):
        # Only previously registered names resolve; unknown names raise.
        if name != "Abc":
            with pytest.raises(KeyError):
                xregister(name)
        else:
            assert xregister(name).__name__ == Abc.__name__
    @pytest.mark.parametrize("name", ["Abc", "XYZ"])
    def test_unregister(self, name):
        # Removing a name (known or not) leaves "Abc" absent afterwards.
        xregister.unregister(name)
        assert "Abc" not in xregister.__dict__
    def test_registered_object(self):
        # The registered_object view mirrors the registry's __dict__.
        assert xregister.registered_object == xregister.__dict__
    def test_get_class_name(self):
        # get_class_name reports the class's own name.
        assert XRegister.get_class_name() == "XRegister"
| 1,380 | 29.021739 | 79 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.