repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
FATE
|
FATE-master/python/federatedml/components/hetero_sshe_linr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

hetero_sshe_linr_cpn_meta = ComponentMeta("HeteroSSHELinR")


@hetero_sshe_linr_cpn_meta.bind_param
def hetero_sshe_linr_param():
    """Lazily import and return the parameter class for HeteroSSHELinR."""
    from federatedml.param.hetero_sshe_linr_param import HeteroSSHELinRParam

    param_cls = HeteroSSHELinRParam
    return param_cls


@hetero_sshe_linr_cpn_meta.bind_runner.on_guest
def hetero_sshe_linr_runner_guest():
    """Lazily import and return the guest-side runner class."""
    from federatedml.linear_model.bilateral_linear_model.hetero_sshe_linear_regression.hetero_linr_guest import (
        HeteroLinRGuest,
    )

    runner_cls = HeteroLinRGuest
    return runner_cls


@hetero_sshe_linr_cpn_meta.bind_runner.on_host
def hetero_sshe_linr_runner_host():
    """Lazily import and return the host-side runner class."""
    from federatedml.linear_model.bilateral_linear_model.hetero_sshe_linear_regression.hetero_linr_host import (
        HeteroLinRHost,
    )

    runner_cls = HeteroLinRHost
    return runner_cls
| 1,411
| 29.695652
| 113
|
py
|
FATE
|
FATE-master/python/federatedml/components/homo_secure_boost.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registered under two aliases for backward compatibility.
homo_secure_boost_cpn_meta = ComponentMeta("HomoSecureBoost", "HomoSecureboost")


@homo_secure_boost_cpn_meta.bind_param
def homo_secure_boost_param():
    """Lazily import and return the parameter class for HomoSecureBoost."""
    from federatedml.param.boosting_param import HomoSecureBoostParam

    param_cls = HomoSecureBoostParam
    return param_cls


@homo_secure_boost_cpn_meta.bind_runner.on_guest.on_host
def homo_secure_boost_runner_client():
    """Lazily import and return the client-side (guest/host) runner class."""
    from federatedml.ensemble import HomoSecureBoostingTreeClient

    runner_cls = HomoSecureBoostingTreeClient
    return runner_cls


@homo_secure_boost_cpn_meta.bind_runner.on_arbiter
def homo_secure_boost_runner_arbiter():
    """Lazily import and return the arbiter-side runner class."""
    from federatedml.ensemble import HomoSecureBoostingTreeArbiter

    runner_cls = HomoSecureBoostingTreeArbiter
    return runner_cls
| 1,324
| 30.547619
| 80
|
py
|
FATE
|
FATE-master/python/federatedml/components/secure_information_retrieval.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

secure_information_retrieval_cpn_meta = ComponentMeta("SecureInformationRetrieval")


@secure_information_retrieval_cpn_meta.bind_param
def secure_information_retrieval_param():
    """Lazily import and return the parameter class for SecureInformationRetrieval."""
    from federatedml.param.sir_param import SecureInformationRetrievalParam

    param_cls = SecureInformationRetrievalParam
    return param_cls


@secure_information_retrieval_cpn_meta.bind_runner.on_guest
def secure_information_retrieval_guest_runner():
    """Lazily import and return the guest-side runner class."""
    from federatedml.secure_information_retrieval.secure_information_retrieval_guest import (
        SecureInformationRetrievalGuest,
    )

    runner_cls = SecureInformationRetrievalGuest
    return runner_cls


@secure_information_retrieval_cpn_meta.bind_runner.on_host
def secure_information_retrieval_host_runner():
    """Lazily import and return the host-side runner class."""
    from federatedml.secure_information_retrieval.secure_information_retrieval_host import (
        SecureInformationRetrievalHost,
    )

    runner_cls = SecureInformationRetrievalHost
    return runner_cls
| 1,542
| 32.543478
| 93
|
py
|
FATE
|
FATE-master/python/federatedml/components/hetero_secure_boost.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

hetero_secure_boost_cpn_meta = ComponentMeta("HeteroSecureBoost")


@hetero_secure_boost_cpn_meta.bind_param
def hetero_secure_boost_param():
    """Lazily import and return the parameter class for HeteroSecureBoost."""
    from federatedml.param.boosting_param import HeteroSecureBoostParam

    param_cls = HeteroSecureBoostParam
    return param_cls


@hetero_secure_boost_cpn_meta.bind_runner.on_guest
def hetero_secure_boost_guest_runner():
    """Lazily import and return the guest-side runner class."""
    from federatedml.ensemble import HeteroSecureBoostingTreeGuest

    runner_cls = HeteroSecureBoostingTreeGuest
    return runner_cls


@hetero_secure_boost_cpn_meta.bind_runner.on_host
def hetero_secure_boost_host_runner():
    """Lazily import and return the host-side runner class."""
    from federatedml.ensemble import HeteroSecureBoostingTreeHost

    runner_cls = HeteroSecureBoostingTreeHost
    return runner_cls
| 1,310
| 30.214286
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/dataio.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

dataio_cpn_meta = ComponentMeta("DataIO")


@dataio_cpn_meta.bind_param
def dataio_param():
    """Lazily import and return the parameter class for DataIO."""
    from federatedml.param.dataio_param import DataIOParam

    param_cls = DataIOParam
    return param_cls


@dataio_cpn_meta.bind_runner.on_guest.on_host
def dataio_runner():
    """Lazily import and return the runner class shared by guest and host."""
    from federatedml.util.data_io import DataIO

    runner_cls = DataIO
    return runner_cls
| 968
| 26.685714
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/feature_scale.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

feature_scale_cpn_meta = ComponentMeta("FeatureScale")


@feature_scale_cpn_meta.bind_param
def feature_scale_param():
    """Lazily import and return the parameter class for FeatureScale."""
    from federatedml.param.scale_param import ScaleParam

    param_cls = ScaleParam
    return param_cls


@feature_scale_cpn_meta.bind_runner.on_guest.on_host
def feature_scale_runner():
    """Lazily import and return the runner class shared by guest and host."""
    from federatedml.feature.scale import Scale

    runner_cls = Scale
    return runner_cls
| 1,005
| 27.742857
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/homo_onehot_encoder.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

homo_onehot_encoder_cpn_meta = ComponentMeta("HomoOneHotEncoder")


@homo_onehot_encoder_cpn_meta.bind_param
def homo_onehot_encoder_param():
    """Lazily import and return the parameter class for HomoOneHotEncoder."""
    from federatedml.param.homo_onehot_encoder_param import HomoOneHotParam

    param_cls = HomoOneHotParam
    return param_cls


@homo_onehot_encoder_cpn_meta.bind_runner.on_guest.on_host
def homo_onehot_encoder_client():
    """Lazily import and return the client-side (guest/host) runner class."""
    from federatedml.feature.homo_onehot.homo_ohe_base import HomoOneHotBase

    runner_cls = HomoOneHotBase
    return runner_cls


@homo_onehot_encoder_cpn_meta.bind_runner.on_arbiter
def homo_onehot_encoder_arbiter():
    """Lazily import and return the arbiter-side runner class."""
    from federatedml.feature.homo_onehot.homo_ohe_arbiter import HomoOneHotArbiter

    runner_cls = HomoOneHotArbiter
    return runner_cls
| 1,305
| 30.095238
| 82
|
py
|
FATE
|
FATE-master/python/federatedml/components/scorecard.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

scorecard_cpn_meta = ComponentMeta("Scorecard")


@scorecard_cpn_meta.bind_param
def scorecard_param():
    """Lazily import and return the parameter class for Scorecard."""
    from federatedml.param.scorecard_param import ScorecardParam

    param_cls = ScorecardParam
    return param_cls


@scorecard_cpn_meta.bind_runner.on_guest.on_host
def scorecard_client_runner():
    """Lazily import and return the runner class shared by guest and host."""
    from federatedml.statistic.scorecard.score_transformer import Scorecard

    runner_cls = Scorecard
    return runner_cls
| 1,033
| 28.542857
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/hetero_linr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

hetero_linr_cpn_meta = ComponentMeta("HeteroLinR")


@hetero_linr_cpn_meta.bind_param
def hetero_linr_param():
    """Lazily import and return the parameter class for HeteroLinR."""
    from federatedml.param.linear_regression_param import LinearParam

    param_cls = LinearParam
    return param_cls


@hetero_linr_cpn_meta.bind_runner.on_guest
def hetero_linr_runner_guest():
    """Lazily import and return the guest-side runner class."""
    from federatedml.linear_model.coordinated_linear_model.linear_regression.hetero_linear_regression.hetero_linr_guest import (
        HeteroLinRGuest,
    )

    runner_cls = HeteroLinRGuest
    return runner_cls


@hetero_linr_cpn_meta.bind_runner.on_host
def hetero_linr_runner_host():
    """Lazily import and return the host-side runner class."""
    from federatedml.linear_model.coordinated_linear_model.linear_regression.hetero_linear_regression.hetero_linr_host import (
        HeteroLinRHost,
    )

    runner_cls = HeteroLinRHost
    return runner_cls


@hetero_linr_cpn_meta.bind_runner.on_arbiter
def hetero_linr_runner_arbiter():
    """Lazily import and return the arbiter-side runner class."""
    from federatedml.linear_model.coordinated_linear_model.linear_regression.hetero_linear_regression.hetero_linr_arbiter import (
        HeteroLinRArbiter,
    )

    runner_cls = HeteroLinRArbiter
    return runner_cls
| 1,650
| 30.75
| 130
|
py
|
FATE
|
FATE-master/python/federatedml/components/onehot_encoder.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

onehot_encoder_cpn_meta = ComponentMeta("OneHotEncoder")


@onehot_encoder_cpn_meta.bind_param
def onehot_encoder_param():
    """Lazily import and return the parameter class for OneHotEncoder."""
    from federatedml.param.onehot_encoder_param import OneHotEncoderParam

    param_cls = OneHotEncoderParam
    return param_cls


@onehot_encoder_cpn_meta.bind_runner.on_guest.on_host
def onehot_encoder_client_runner():
    """Lazily import and return the runner class shared by guest and host."""
    from federatedml.feature.one_hot_encoder import OneHotEncoder

    runner_cls = OneHotEncoder
    return runner_cls
| 1,069
| 29.571429
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/feldman_verifiable_sum.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

feldman_verifiable_sum_cpn_meta = ComponentMeta("FeldmanVerifiableSum")


@feldman_verifiable_sum_cpn_meta.bind_param
def feldman_verifiable_sum_param():
    """Lazily import and return the parameter class for FeldmanVerifiableSum."""
    from federatedml.param.feldman_verifiable_sum_param import FeldmanVerifiableSumParam

    param_cls = FeldmanVerifiableSumParam
    return param_cls


@feldman_verifiable_sum_cpn_meta.bind_runner.on_guest
def feldman_verifiable_sum_guest_runner():
    """Lazily import and return the guest-side runner class."""
    from federatedml.statistic.feldman_verifiable_sum.feldman_verifiable_sum_guest import (
        FeldmanVerifiableSumGuest,
    )

    runner_cls = FeldmanVerifiableSumGuest
    return runner_cls


@feldman_verifiable_sum_cpn_meta.bind_runner.on_host
def feldman_verifiable_sum_host_runner():
    """Lazily import and return the host-side runner class."""
    from federatedml.statistic.feldman_verifiable_sum.feldman_verifiable_sum_host import (
        FeldmanVerifiableSumHost,
    )

    runner_cls = FeldmanVerifiableSumHost
    return runner_cls
| 1,473
| 31.043478
| 91
|
py
|
FATE
|
FATE-master/python/federatedml/components/union.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

union_cpn_meta = ComponentMeta("Union")


@union_cpn_meta.bind_param
def union_param():
    """Lazily import and return the parameter class for Union."""
    from federatedml.param.union_param import UnionParam

    param_cls = UnionParam
    return param_cls


@union_cpn_meta.bind_runner.on_guest.on_host
def union_client_runner():
    """Lazily import and return the runner class shared by guest and host."""
    from federatedml.statistic.union.union import Union

    runner_cls = Union
    return runner_cls
| 973
| 26.828571
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/column_expand.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

column_expand_cpn_meta = ComponentMeta("ColumnExpand")


@column_expand_cpn_meta.bind_param
def column_expand_param():
    """Lazily import and return the parameter class for ColumnExpand."""
    from federatedml.param.column_expand_param import ColumnExpandParam

    param_cls = ColumnExpandParam
    return param_cls


@column_expand_cpn_meta.bind_runner.on_guest.on_host
def column_expand_runner():
    """Lazily import and return the runner class shared by guest and host."""
    from federatedml.feature.column_expand import ColumnExpand

    runner_cls = ColumnExpand
    return runner_cls
| 1,049
| 29
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/hetero_fast_secure_boost.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registers the "HeteroFastSecureBoost" component.  The module-level
# identifiers keep their original (copy-pasted) spelling so any external
# reference to them keeps working.
hetero_secure_boost_cpn_meta = ComponentMeta("HeteroFastSecureBoost")


@hetero_secure_boost_cpn_meta.bind_param
def hetero_secure_boost_param():
    """Lazily import and return the parameter class for HeteroFastSecureBoost.

    Bug fix: this module previously bound ``HeteroSecureBoostParam`` — the
    plain (non-fast) SecureBoost parameter class — leftover from copying
    hetero_secure_boost.py.  The Fast variant must use
    ``HeteroFastSecureBoostParam``.
    """
    from federatedml.param.boosting_param import HeteroFastSecureBoostParam

    return HeteroFastSecureBoostParam


@hetero_secure_boost_cpn_meta.bind_runner.on_guest
def hetero_secure_boost_guest_runner():
    """Lazily import and return the guest-side Fast SecureBoost runner.

    Bug fix: previously returned ``HeteroSecureBoostingTreeGuest`` (the
    non-fast runner), so the HeteroFastSecureBoost component ran the wrong
    algorithm on guest.
    """
    from federatedml.ensemble import HeteroFastSecureBoostingTreeGuest

    return HeteroFastSecureBoostingTreeGuest


@hetero_secure_boost_cpn_meta.bind_runner.on_host
def hetero_secure_boost_host_runner():
    """Lazily import and return the host-side Fast SecureBoost runner.

    Bug fix: previously returned ``HeteroSecureBoostingTreeHost`` (the
    non-fast runner), so the HeteroFastSecureBoost component ran the wrong
    algorithm on host.
    """
    from federatedml.ensemble import HeteroFastSecureBoostingTreeHost

    return HeteroFastSecureBoostingTreeHost
| 1,314
| 30.309524
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/homo_data_split.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

homo_data_split_cpn_meta = ComponentMeta("HomoDataSplit")


@homo_data_split_cpn_meta.bind_param
def homo_data_split_param():
    """Lazily import and return the parameter class for HomoDataSplit."""
    from federatedml.param.data_split_param import DataSplitParam

    param_cls = DataSplitParam
    return param_cls


@homo_data_split_cpn_meta.bind_runner.on_guest
def homo_data_split_guest_runner():
    """Lazily import and return the guest-side runner class."""
    from federatedml.model_selection.data_split.homo_data_split import (
        HomoDataSplitGuest,
    )

    runner_cls = HomoDataSplitGuest
    return runner_cls


@homo_data_split_cpn_meta.bind_runner.on_host
def homo_data_split_host_runner():
    """Lazily import and return the host-side runner class."""
    from federatedml.model_selection.data_split.homo_data_split import (
        HomoDataSplitHost,
    )

    runner_cls = HomoDataSplitHost
    return runner_cls
| 1,301
| 28.590909
| 88
|
py
|
FATE
|
FATE-master/python/federatedml/components/feature_inputation.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

feature_imputation_cpn_meta = ComponentMeta("FeatureImputation")


@feature_imputation_cpn_meta.bind_param
def feature_imputation_param():
    """Lazily import and return the parameter class for FeatureImputation."""
    from federatedml.param.feature_imputation_param import FeatureImputationParam

    param_cls = FeatureImputationParam
    return param_cls


@feature_imputation_cpn_meta.bind_runner.on_guest.on_host
def feature_imputation_runner():
    """Lazily import and return the runner class shared by guest and host."""
    from federatedml.feature.feature_imputation import FeatureImputation

    runner_cls = FeatureImputation
    return runner_cls
| 1,109
| 30.714286
| 81
|
py
|
FATE
|
FATE-master/python/federatedml/components/homo_feature_binning.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

homo_feature_binning_cpn_meta = ComponentMeta("HomoFeatureBinning")


@homo_feature_binning_cpn_meta.bind_param
def homo_feature_binning_param():
    """Lazily import and return the parameter class for HomoFeatureBinning."""
    from federatedml.param.feature_binning_param import HomoFeatureBinningParam

    param_cls = HomoFeatureBinningParam
    return param_cls


@homo_feature_binning_cpn_meta.bind_runner.on_guest.on_host
def homo_feature_binning_client_runner():
    """Lazily import and return the client-side (guest/host) runner class."""
    from federatedml.feature.homo_feature_binning.homo_binning_cpn import (
        HomoBinningClient,
    )

    runner_cls = HomoBinningClient
    return runner_cls


@homo_feature_binning_cpn_meta.bind_runner.on_arbiter
def homo_feature_binning_arbiter_runner():
    """Lazily import and return the arbiter-side runner class."""
    from federatedml.feature.homo_feature_binning.homo_binning_cpn import (
        HomoBinningArbiter,
    )

    runner_cls = HomoBinningArbiter
    return runner_cls
| 1,402
| 29.5
| 79
|
py
|
FATE
|
FATE-master/python/federatedml/components/psi.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

psi_cpn_meta = ComponentMeta("PSI")


@psi_cpn_meta.bind_param
def psi_param():
    """Lazily import and return the parameter class for PSI."""
    from federatedml.param.psi_param import PSIParam

    param_cls = PSIParam
    return param_cls


@psi_cpn_meta.bind_runner.on_guest.on_host
def psi_client_runner():
    """Lazily import and return the runner class shared by guest and host."""
    from federatedml.statistic.psi.psi import PSI

    runner_cls = PSI
    return runner_cls
| 947
| 26.085714
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/federated_sample.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

federated_sample_cpn_meta = ComponentMeta("FederatedSample")


@federated_sample_cpn_meta.bind_param
def federated_sample_param():
    """Lazily import and return the parameter class for FederatedSample."""
    from federatedml.param.sample_param import SampleParam

    param_cls = SampleParam
    return param_cls


@federated_sample_cpn_meta.bind_runner.on_guest.on_host
def federated_sample_runner():
    """Lazily import and return the runner class shared by guest and host."""
    from federatedml.feature.sampler import Sampler

    runner_cls = Sampler
    return runner_cls
| 1,032
| 28.514286
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/components.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import inspect
import typing
from pathlib import Path
from federatedml.model_base import ModelBase
from federatedml.param.base_param import BaseParam
from federatedml.util import LOGGER
# Repository "python" root, three directory levels above this file.
# NOTE(review): not referenced elsewhere in this module — presumably consumed
# by other modules importing it; confirm before removing.
_ml_base = Path(__file__).resolve().parent.parent.parent
class _RunnerDecorator:
    """Chained role selector returned by ``ComponentMeta.bind_runner``.

    Each ``on_*`` property records one role and returns ``self`` so the
    selectors can be chained (e.g. ``bind_runner.on_guest.on_host``).
    Calling the instance as a decorator then binds the decorated object to
    every recorded role on the owning :class:`ComponentMeta`.
    """

    def __init__(self, meta: "ComponentMeta") -> None:
        self._meta = meta
        self._roles = set()

    def _with_role(self, role):
        # Record the role and keep the chain going.
        self._roles.add(role)
        return self

    @property
    def on_guest(self):
        return self._with_role("guest")

    @property
    def on_host(self):
        return self._with_role("host")

    @property
    def on_arbiter(self):
        return self._with_role("arbiter")

    @property
    def on_local(self):
        return self._with_role("local")

    def __call__(self, cls):
        """Bind *cls* (a ModelBase subclass or a lazy getter function)."""
        if inspect.isclass(cls) and issubclass(cls, ModelBase):
            registry = self._meta._role_to_runner_cls
        elif inspect.isfunction(cls):
            # A plain function is treated as a zero-arg lazy getter.
            registry = self._meta._role_to_runner_cls_getter
        else:
            raise NotImplementedError(f"type of {cls} not supported")
        for role in self._roles:
            registry[role] = cls
        return cls
class ComponentMeta:
    """Registry entry describing one federatedml component.

    Holds the component's name and aliases, its parameter class, and its
    per-role runner classes.  Runner/param bindings may be eager (a class)
    or lazy (a zero-arg getter function that imports and returns the class).
    Every instance registers itself under each alias in the class-level
    ``__name_to_obj`` registry for later lookup via :meth:`get_meta`.
    """

    # Shared registry: alias -> ComponentMeta instance (name-mangled).
    __name_to_obj: typing.Dict[str, "ComponentMeta"] = {}

    def __init__(self, name, *others) -> None:
        if len(others) > 0:
            # Multiple aliases: display name joins them with "|".
            self._alias = [name, *others]
            self._name = "|".join(self._alias)
        else:
            self._alias = [name]
            self._name = name
        # role -> runner class (filled when a class is decorated directly).
        self._role_to_runner_cls = {}
        # role -> zero-arg getter returning the runner class.
        self._role_to_runner_cls_getter = {}  # lazy
        self._param_cls = None
        self._param_cls_getter = None  # lazy
        for alias in self._alias:
            self.__name_to_obj[alias] = self

    @property
    def name(self):
        """Display name ("|"-joined aliases when there are several)."""
        return self._name

    @property
    def alias(self):
        """List of registered aliases; the first one is the canonical name."""
        return self._alias

    @classmethod
    def get_meta(cls, name):
        """Look up the ComponentMeta registered under *name* (KeyError if absent)."""
        return cls.__name_to_obj[name]

    @property
    def bind_runner(self):
        """Start a role-selection chain, e.g. ``bind_runner.on_guest.on_host``."""
        return _RunnerDecorator(self)

    @property
    def bind_param(self):
        """Decorator binding the parameter class (or its lazy getter)."""
        def _wrap(cls):
            if inspect.isclass(cls) and issubclass(cls, BaseParam):
                self._param_cls = cls
            elif inspect.isfunction(cls):
                self._param_cls_getter = cls
            else:
                raise NotImplementedError(f"type of {cls} not supported")
            return cls

        return _wrap

    def _get_runner(self, role: str):
        """Resolve the runner class for *role*, preferring the eager binding.

        Raises ModuleNotFoundError when no binding exists for *role*.
        """
        if role in self._role_to_runner_cls:
            runner_class = self._role_to_runner_cls[role]
        elif role in self._role_to_runner_cls_getter:
            # Lazy path: invoke the getter to import the class on first use.
            runner_class = self._role_to_runner_cls_getter[role]()
        else:
            raise ModuleNotFoundError(
                f"Runner for component `{self.name}` at role `{role}` not found"
            )
        # Tag the class with the canonical (first) alias before returning.
        runner_class.set_component_name(self.alias[0])
        return runner_class

    def get_run_obj(self, role: str):
        """Instantiate and return the runner for *role*."""
        return self._get_runner(role)()

    def get_run_obj_name(self, role: str) -> str:
        """Return the class name of the runner bound to *role*."""
        return self._get_runner(role).__name__

    def get_param_obj(self, cpn_name: str):
        """Instantiate the parameter object, named ``<meta name>#<cpn_name>``.

        Raises ModuleNotFoundError when no parameter binding exists.
        """
        if self._param_cls is not None:
            param_obj = self._param_cls()
        elif self._param_cls_getter is not None:
            # Lazy path: getter returns the class, which is then instantiated.
            param_obj = self._param_cls_getter()()
        else:
            raise ModuleNotFoundError(f"Param for component `{self.name}` not found")
        return param_obj.set_name(f"{self.name}#{cpn_name}")

    def get_supported_roles(self):
        """Return the set of roles with either an eager or a lazy runner binding."""
        return set(self._role_to_runner_cls) | set(self._role_to_runner_cls_getter)
def _get_module_name_by_path(path, base):
return '.'.join(path.resolve().relative_to(base.resolve()).with_suffix('').parts)
def _search_components(path, base):
    """Import the module at *path* and return its ComponentMeta members.

    Returns ``(pairs, module_name)`` where *pairs* is a list of
    ``(attribute_name, ComponentMeta)`` tuples found in the module.
    ImportError is re-raised rather than swallowed.
    """
    try:
        module_name = _get_module_name_by_path(path, base)
        module = importlib.import_module(module_name)
    except ImportError as e:
        # or skip ?
        raise e

    def _is_component_meta(obj):
        return isinstance(obj, ComponentMeta)

    return inspect.getmembers(module, _is_component_meta), module_name
class Components:
    """Discovers and imports component modules under ``<provider_path>/components``."""

    provider_version = None
    provider_name = None
    provider_path = None

    @classmethod
    def _module_base(cls):
        """Directory against which dotted module names are computed."""
        return Path(cls.provider_path).resolve().parent

    @classmethod
    def _components_base(cls):
        """Directory scanned (recursively) for component modules."""
        return Path(cls.provider_path, 'components').resolve()

    @classmethod
    def get_names(cls) -> typing.Dict[str, dict]:
        """Import every component module; map each alias to its module name."""
        names = {}
        base = cls._module_base()
        for source_file in cls._components_base().glob("**/*.py"):
            metas, module_name = _search_components(source_file, base)
            for _, meta in metas:
                for alias in meta.alias:
                    names[alias] = {"module": module_name}
                LOGGER.info(
                    f"component register {meta.name} with cache info {module_name}"
                )
        return names

    @classmethod
    def get(cls, name: str, cache) -> ComponentMeta:
        """Return the ComponentMeta registered under *name*.

        With a truthy *cache*, only the cached module for *name* is
        imported; otherwise every module under the components directory is
        imported so all registrations run.
        """
        if cache:
            importlib.import_module(cache[name]["module"])
        else:
            base = cls._module_base()
            for source_file in cls._components_base().glob("**/*.py"):
                importlib.import_module(_get_module_name_by_path(source_file, base))
        return ComponentMeta.get_meta(name)
| 5,958
| 28.944724
| 87
|
py
|
FATE
|
FATE-master/python/federatedml/components/hetero_poisson.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

hetero_poisson_cpn_meta = ComponentMeta("HeteroPoisson")


@hetero_poisson_cpn_meta.bind_param
def hetero_poisson_param():
    """Lazily import and return the parameter class for HeteroPoisson."""
    from federatedml.param.poisson_regression_param import PoissonParam

    param_cls = PoissonParam
    return param_cls


@hetero_poisson_cpn_meta.bind_runner.on_guest
def hetero_poisson_runner_guest():
    """Lazily import and return the guest-side runner class."""
    from federatedml.linear_model.coordinated_linear_model.poisson_regression.hetero_poisson_regression.hetero_poisson_guest import (
        HeteroPoissonGuest,
    )

    runner_cls = HeteroPoissonGuest
    return runner_cls


@hetero_poisson_cpn_meta.bind_runner.on_host
def hetero_poisson_runner_host():
    """Lazily import and return the host-side runner class."""
    from federatedml.linear_model.coordinated_linear_model.poisson_regression.hetero_poisson_regression.hetero_poisson_host import (
        HeteroPoissonHost,
    )

    runner_cls = HeteroPoissonHost
    return runner_cls


@hetero_poisson_cpn_meta.bind_runner.on_arbiter
def hetero_poisson_runner_arbiter():
    """Lazily import and return the arbiter-side runner class."""
    from federatedml.linear_model.coordinated_linear_model.poisson_regression.hetero_poisson_regression.hetero_poisson_arbiter import (
        HeteroPoissonArbiter,
    )

    runner_cls = HeteroPoissonArbiter
    return runner_cls
| 1,716
| 32.019231
| 135
|
py
|
FATE
|
FATE-master/python/federatedml/components/ftl.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

ftl_cpn_meta = ComponentMeta("FTL")


@ftl_cpn_meta.bind_param
def ftl_param():
    """Lazily import and return the parameter class for FTL."""
    from federatedml.param.ftl_param import FTLParam

    param_cls = FTLParam
    return param_cls


@ftl_cpn_meta.bind_runner.on_guest
def ftl_guest_runner():
    """Lazily import and return the guest-side runner class."""
    from federatedml.transfer_learning.hetero_ftl.ftl_guest import FTLGuest

    runner_cls = FTLGuest
    return runner_cls


@ftl_cpn_meta.bind_runner.on_host
def ftl_host_runner():
    """Lazily import and return the host-side runner class."""
    from federatedml.transfer_learning.hetero_ftl.ftl_host import FTLHost

    runner_cls = FTLHost
    return runner_cls
| 1,122
| 25.738095
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/homo_nn.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the HomoNN component.
homo_nn_cpn_meta = ComponentMeta("HomoNN")


@homo_nn_cpn_meta.bind_param
def homo_nn_param():
    """Return the parameter class for HomoNN."""
    from federatedml.param.homo_nn_param import HomoNNParam as param_cls

    return param_cls


@homo_nn_cpn_meta.bind_runner.on_guest.on_host
def homo_nn_runner_client():
    """Return the client-side (guest and host) runner for HomoNN."""
    from federatedml.nn.homo.client import HomoNNClient as client_cls

    return client_cls


@homo_nn_cpn_meta.bind_runner.on_arbiter
def homo_nn_runner_arbiter():
    """Return the arbiter-side runner for HomoNN."""
    from federatedml.nn.homo.server import HomoNNServer as server_cls

    return server_cls
| 1,149
| 26.380952
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/spdz_test_cpn.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the SPDZTest component.
spdz_secure_cpn_meta = ComponentMeta("SPDZTest")


@spdz_secure_cpn_meta.bind_param
def spdz_test_param():
    """Return the parameter class for SPDZTest."""
    from federatedml.test.spdz_test.spdz_test_param import SPDZTestParam as param_cls

    return param_cls


@spdz_secure_cpn_meta.bind_runner.on_guest.on_host
def spdz_test_runner():
    """Return the runner (shared by guest and host) for SPDZTest."""
    from federatedml.test.spdz_test.spdz_test import SPDZTest as runner_cls

    return runner_cls
| 1,022
| 29.088235
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/positive_unlabeled.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the PositiveUnlabeled component.
positive_unlabeled_cpn_meta = ComponentMeta("PositiveUnlabeled")


@positive_unlabeled_cpn_meta.bind_param
def positive_unlabeled_param():
    """Return the parameter class for PositiveUnlabeled."""
    from federatedml.param.positive_unlabeled_param import PositiveUnlabeledParam as param_cls

    return param_cls


@positive_unlabeled_cpn_meta.bind_runner.on_guest.on_host
def positive_unlabeled_client_runner():
    """Return the client-side (guest and host) runner for PositiveUnlabeled."""
    from federatedml.semi_supervised_learning.positive_unlabeled.positive_unlabeled_transformer import PositiveUnlabeled as runner_cls

    return runner_cls
| 1,164
| 32.285714
| 120
|
py
|
FATE
|
FATE-master/python/federatedml/components/hetero_pearson.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the HeteroPearson component.
hetero_pearson_cpn_meta = ComponentMeta("HeteroPearson")


@hetero_pearson_cpn_meta.bind_param
def hetero_pearson_param():
    """Return the parameter class for HeteroPearson."""
    from federatedml.param.pearson_param import PearsonParam as param_cls

    return param_cls


@hetero_pearson_cpn_meta.bind_runner.on_guest.on_host
def hetero_pearson_runner():
    """Return the runner (shared by guest and host) for HeteroPearson."""
    from federatedml.statistic.correlation.hetero_pearson import HeteroPearson as runner_cls

    return runner_cls
| 1,056
| 29.2
| 78
|
py
|
FATE
|
FATE-master/python/federatedml/components/data_statistics.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the DataStatistics component.
data_statistic_cpn_meta = ComponentMeta("DataStatistics")


@data_statistic_cpn_meta.bind_param
def data_statistics_param():
    """Return the parameter class for DataStatistics."""
    from federatedml.param.statistics_param import StatisticsParam as param_cls

    return param_cls


@data_statistic_cpn_meta.bind_runner.on_guest.on_host
def data_statistics_runner():
    """Return the runner (shared by guest and host) for DataStatistics."""
    from federatedml.statistic.data_statistics import DataStatistics as runner_cls

    return runner_cls
| 1,059
| 29.285714
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/homo_lr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the HomoLR component.
homo_lr_cpn_meta = ComponentMeta("HomoLR")


@homo_lr_cpn_meta.bind_param
def homo_lr_param():
    """Return the parameter class for HomoLR."""
    from federatedml.param.logistic_regression_param import HomoLogisticParam as param_cls

    return param_cls


@homo_lr_cpn_meta.bind_runner.on_guest
def homo_lr_runner_guest():
    """Return the guest-side runner for HomoLR (same class as the host's)."""
    from federatedml.linear_model.coordinated_linear_model.logistic_regression.homo_logistic_regression.homo_lr_client import HomoLRClient as client_cls

    return client_cls


@homo_lr_cpn_meta.bind_runner.on_host
def homo_lr_runner_host():
    """Return the host-side runner for HomoLR (same class as the guest's)."""
    from federatedml.linear_model.coordinated_linear_model.logistic_regression.homo_logistic_regression.homo_lr_client import HomoLRClient as client_cls

    return client_cls


@homo_lr_cpn_meta.bind_runner.on_arbiter
def homo_lr_runner_arbiter():
    """Return the arbiter-side runner for HomoLR."""
    from federatedml.linear_model.coordinated_linear_model.logistic_regression.homo_logistic_regression.homo_lr_server import HomoLRServer as server_cls

    return server_cls
| 1,600
| 29.788462
| 127
|
py
|
FATE
|
FATE-master/python/federatedml/components/hetero_feature_binning.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the HeteroFeatureBinning component.
hetero_binning_cpn_meta = ComponentMeta("HeteroFeatureBinning")


@hetero_binning_cpn_meta.bind_param
def hetero_feature_binning_param():
    """Return the parameter class for HeteroFeatureBinning."""
    from federatedml.param.feature_binning_param import HeteroFeatureBinningParam as param_cls

    return param_cls


@hetero_binning_cpn_meta.bind_runner.on_guest
def hetero_feature_binning_guest_runner():
    """Return the guest-side runner for HeteroFeatureBinning."""
    from federatedml.feature.hetero_feature_binning.hetero_binning_guest import HeteroFeatureBinningGuest as guest_cls

    return guest_cls


@hetero_binning_cpn_meta.bind_runner.on_host
def hetero_feature_binning_host_runner():
    """Return the host-side runner for HeteroFeatureBinning."""
    from federatedml.feature.hetero_feature_binning.hetero_binning_host import HeteroFeatureBinningHost as host_cls

    return host_cls
| 1,414
| 29.76087
| 81
|
py
|
FATE
|
FATE-master/python/federatedml/components/hetero_sshe_lr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the HeteroSSHELR component.
hetero_sshe_lr_cpn_meta = ComponentMeta("HeteroSSHELR")


@hetero_sshe_lr_cpn_meta.bind_param
def hetero_sshe_lr_param():
    """Return the parameter class for HeteroSSHELR."""
    from federatedml.param.hetero_sshe_lr_param import HeteroSSHELRParam as param_cls

    return param_cls


@hetero_sshe_lr_cpn_meta.bind_runner.on_guest
def hetero_sshe_lr_runner_guest():
    """Return the guest-side runner for HeteroSSHELR."""
    from federatedml.linear_model.bilateral_linear_model.hetero_sshe_logistic_regression.hetero_lr_guest import HeteroLRGuest as guest_cls

    return guest_cls


@hetero_sshe_lr_cpn_meta.bind_runner.on_host
def hetero_sshe_lr_runner_host():
    """Return the host-side runner for HeteroSSHELR."""
    from federatedml.linear_model.bilateral_linear_model.hetero_sshe_logistic_regression.hetero_lr_host import HeteroLRHost as host_cls

    return host_cls
| 1,381
| 29.043478
| 113
|
py
|
FATE
|
FATE-master/python/federatedml/components/secure_add_example.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the SecureAddExample (toy) component.
secure_add_example_cpn_meta = ComponentMeta("SecureAddExample")


@secure_add_example_cpn_meta.bind_param
def secure_add_example_param():
    """Return the parameter class for SecureAddExample."""
    from federatedml.param.secure_add_example_param import SecureAddExampleParam as param_cls

    return param_cls


@secure_add_example_cpn_meta.bind_runner.on_guest
def secure_add_example_guest_runner():
    """Return the guest-side runner for SecureAddExample."""
    from federatedml.toy_example.secure_add_guest import SecureAddGuest as guest_cls

    return guest_cls


@secure_add_example_cpn_meta.bind_runner.on_host
def secure_add_example_host_runner():
    """Return the host-side runner for SecureAddExample."""
    from federatedml.toy_example.secure_add_host import SecureAddHost as host_cls

    return host_cls
| 1,285
| 29.619048
| 80
|
py
|
FATE
|
FATE-master/python/federatedml/components/cust_nn.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the CustNN component.
# NOTE: this meta and its binder functions were previously named ``homo_nn_*``
# (a copy-paste leftover from homo_nn.py); renamed to match the component they
# actually register. The registered component name ("CustNN") is unchanged.
cust_nn_cpn_meta = ComponentMeta("CustNN")


@cust_nn_cpn_meta.bind_param
def cust_nn_param():
    """Return the parameter class for CustNN."""
    from federatedml.custom_nn.homo_nn_param import CustNNParam

    return CustNNParam


@cust_nn_cpn_meta.bind_runner.on_guest.on_host
def cust_nn_runner_client():
    """Return the client-side (guest and host) runner for CustNN."""
    from federatedml.custom_nn.homo_nn_client import HomoNNClient

    return HomoNNClient


@cust_nn_cpn_meta.bind_runner.on_arbiter
def cust_nn_runner_arbiter():
    """Return the arbiter-side runner for CustNN."""
    from federatedml.custom_nn.homo_nn_arbiter import HomoNNArbiter

    return HomoNNArbiter
| 1,176
| 27.02381
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/local_baseline.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the LocalBaseline component.
local_baseline_cpn_meta = ComponentMeta("LocalBaseline")


@local_baseline_cpn_meta.bind_param
def local_baseline_param():
    """Return the parameter class for LocalBaseline."""
    from federatedml.param.local_baseline_param import LocalBaselineParam as param_cls

    return param_cls


@local_baseline_cpn_meta.bind_runner.on_guest.on_host
def local_baseline_client_runner():
    """Return the client-side (guest and host) runner for LocalBaseline."""
    from federatedml.local_baseline.local_baseline import LocalBaseline as runner_cls

    return runner_cls
| 1,075
| 29.742857
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/sample_weight.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the SampleWeight component.
sample_weight_cpn_meta = ComponentMeta("SampleWeight")


@sample_weight_cpn_meta.bind_param
def sample_weight_param():
    """Return the parameter class for SampleWeight."""
    from federatedml.param.sample_weight_param import SampleWeightParam as param_cls

    return param_cls


@sample_weight_cpn_meta.bind_runner.on_guest.on_host
def sample_weight_client_runner():
    """Return the client-side (guest and host) runner for SampleWeight."""
    from federatedml.util.sample_weight import SampleWeight as runner_cls

    return runner_cls
| 1,053
| 29.114286
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/label_transform.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the LabelTransform component.
label_transform_cpn_meta = ComponentMeta("LabelTransform")


@label_transform_cpn_meta.bind_param
def label_transform_param():
    """Return the parameter class for LabelTransform."""
    from federatedml.param.label_transform_param import LabelTransformParam as param_cls

    return param_cls


@label_transform_cpn_meta.bind_runner.on_guest.on_host
def label_transform_client_runner():
    """Return the client-side (guest and host) runner for LabelTransform."""
    from federatedml.util.label_transform import LabelTransformer as runner_cls

    return runner_cls
| 1,081
| 29.914286
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/hetero_kmeans.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the HeteroKmeans component.
hetero_kmeans_cpn_meta = ComponentMeta("HeteroKmeans")


@hetero_kmeans_cpn_meta.bind_param
def hetero_kmeans_param():
    """Return the parameter class for HeteroKmeans."""
    from federatedml.param.hetero_kmeans_param import KmeansParam as param_cls

    return param_cls


@hetero_kmeans_cpn_meta.bind_runner.on_guest
def hetero_kmeans_runner_guest():
    """Return the guest-side runner for HeteroKmeans."""
    from federatedml.unsupervised_learning.kmeans.hetero_kmeans.hetero_kmeans_client import HeteroKmeansGuest as guest_cls

    return guest_cls


@hetero_kmeans_cpn_meta.bind_runner.on_host
def hetero_kmeans_runner_host():
    """Return the host-side runner for HeteroKmeans."""
    from federatedml.unsupervised_learning.kmeans.hetero_kmeans.hetero_kmeans_client import HeteroKmeansHost as host_cls

    return host_cls


@hetero_kmeans_cpn_meta.bind_runner.on_arbiter
def hetero_kmeans_runner_arbiter():
    """Return the arbiter-side runner for HeteroKmeans."""
    from federatedml.unsupervised_learning.kmeans.hetero_kmeans.hetero_kmeans_arbiter import HeteroKmeansArbiter as arbiter_cls

    return arbiter_cls
| 1,585
| 27.836364
| 94
|
py
|
FATE
|
FATE-master/python/federatedml/components/hetero_data_split.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the HeteroDataSplit component.
hetero_data_split_cpn_meta = ComponentMeta("HeteroDataSplit")


@hetero_data_split_cpn_meta.bind_param
def hetero_data_split_param():
    """Return the parameter class for HeteroDataSplit."""
    from federatedml.param.data_split_param import DataSplitParam as param_cls

    return param_cls


@hetero_data_split_cpn_meta.bind_runner.on_guest
def hetero_data_split_guest_runner():
    """Return the guest-side runner for HeteroDataSplit."""
    from federatedml.model_selection.data_split.hetero_data_split import HeteroDataSplitGuest as guest_cls

    return guest_cls


@hetero_data_split_cpn_meta.bind_runner.on_host
def hetero_data_split_host_runner():
    """Return the host-side runner for HeteroDataSplit."""
    from federatedml.model_selection.data_split.hetero_data_split import HeteroDataSplitHost as host_cls

    return host_cls
| 1,346
| 28.282609
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 617
| 35.352941
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/evaluation.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the Evaluation component.
evaluation_cpn_meta = ComponentMeta("Evaluation")


@evaluation_cpn_meta.bind_param
def evaluation_param():
    """Return the parameter class for Evaluation."""
    from federatedml.param.evaluation_param import EvaluateParam as param_cls

    return param_cls


@evaluation_cpn_meta.bind_runner.on_guest.on_host.on_arbiter
def evaluation_runner():
    """Return the runner for Evaluation (shared by every role)."""
    from federatedml.evaluation.evaluation import Evaluation as runner_cls

    return runner_cls
| 1,028
| 28.4
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/data_transform.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the DataTransform component.
data_transform_cpn_meta = ComponentMeta("DataTransform")


@data_transform_cpn_meta.bind_param
def data_transform_param():
    """Return the parameter class for DataTransform.

    NOTE: this binder was previously misnamed ``intersection_param`` (a
    copy-paste leftover from the intersection component); renamed to match
    the sibling modules' ``<component>_param`` convention. The registered
    parameter class is unchanged.
    """
    from federatedml.param.data_transform_param import DataTransformParam

    return DataTransformParam


@data_transform_cpn_meta.bind_runner.on_guest.on_host
def data_transform_runner():
    """Return the runner (shared by guest and host) for DataTransform."""
    from federatedml.util.data_transform import DataTransform

    return DataTransform
| 1,056
| 29.2
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/hetero_feature_selection.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the HeteroFeatureSelection component.
hetero_feature_selection_cpn_meta = ComponentMeta("HeteroFeatureSelection")


@hetero_feature_selection_cpn_meta.bind_param
def hetero_feature_selection_param():
    """Return the parameter class for HeteroFeatureSelection."""
    from federatedml.param.feature_selection_param import FeatureSelectionParam as param_cls

    return param_cls


@hetero_feature_selection_cpn_meta.bind_runner.on_guest.on_host
def hetero_feature_selection_runner():
    """Return the runner (shared by guest and host) for HeteroFeatureSelection."""
    from federatedml.feature.hetero_feature_selection.base_feature_selection import BaseHeteroFeatureSelection as runner_cls

    return runner_cls
| 1,205
| 31.594595
| 85
|
py
|
FATE
|
FATE-master/python/federatedml/components/hetero_nn.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the HeteroNN component.
hetero_nn_cpn_meta = ComponentMeta("HeteroNN")


@hetero_nn_cpn_meta.bind_param
def hetero_nn_param():
    """Return the parameter class for HeteroNN."""
    from federatedml.param.hetero_nn_param import HeteroNNParam as param_cls

    return param_cls


@hetero_nn_cpn_meta.bind_runner.on_guest
def hetero_nn_guest_runner():
    """Return the guest-side runner for HeteroNN."""
    from federatedml.nn.hetero.guest import HeteroNNGuest as guest_cls

    return guest_cls


@hetero_nn_cpn_meta.bind_runner.on_host
def hetero_nn_host_runner():
    """Return the host-side runner for HeteroNN."""
    from federatedml.nn.hetero.host import HeteroNNHost as host_cls

    return host_cls
| 1,159
| 26.619048
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/components/hetero_lr.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .components import ComponentMeta

# Registration entry for the HeteroLR component.
hetero_lr_cpn_meta = ComponentMeta("HeteroLR")


@hetero_lr_cpn_meta.bind_param
def hetero_lr_param():
    """Return the parameter class for HeteroLR."""
    from federatedml.param.logistic_regression_param import HeteroLogisticParam as param_cls

    return param_cls


@hetero_lr_cpn_meta.bind_runner.on_guest
def hetero_lr_runner_guest():
    """Return the guest-side runner for HeteroLR."""
    from federatedml.linear_model.coordinated_linear_model.logistic_regression.hetero_logistic_regression.hetero_lr_guest import HeteroLRGuest as guest_cls

    return guest_cls


@hetero_lr_cpn_meta.bind_runner.on_host
def hetero_lr_runner_host():
    """Return the host-side runner for HeteroLR."""
    from federatedml.linear_model.coordinated_linear_model.logistic_regression.hetero_logistic_regression.hetero_lr_host import HeteroLRHost as host_cls

    return host_cls


@hetero_lr_cpn_meta.bind_runner.on_arbiter
def hetero_lr_runner_arbiter():
    """Return the arbiter-side runner for HeteroLR."""
    from federatedml.linear_model.coordinated_linear_model.logistic_regression.hetero_logistic_regression.hetero_lr_arbiter import HeteroLRArbiter as arbiter_cls

    return arbiter_cls
| 1,642
| 30.596154
| 132
|
py
|
FATE
|
FATE-master/python/federatedml/local_baseline/local_baseline.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from sklearn.linear_model import LogisticRegression
from federatedml.model_base import ModelBase
from federatedml.param.local_baseline_param import LocalBaselineParam
from federatedml.protobuf.generated import lr_model_meta_pb2, lr_model_param_pb2
from federatedml.statistic import data_overview
from federatedml.util import LOGGER
from federatedml.util import abnormal_detection
from federatedml.util.io_check import assert_io_num_rows_equal
class LocalBaseline(ModelBase):
def __init__(self):
super(LocalBaseline, self).__init__()
self.model_param = LocalBaselineParam()
self.model_name = "LocalBaseline"
self.metric_type = ""
self.model_param_name = "LocalBaselineParam"
self.model_meta_name = "LocalBaselineMeta"
# one_ve_rest parameter
self.need_one_vs_rest = None
self.one_vs_rest_classes = []
self.one_vs_rest_obj = None
def _init_model(self, params):
self.model_name = params.model_name
self.model_opts = params.model_opts
self.predict_param = params.predict_param
self.model = None
self.model_fit = None
self.header = None
self.model_weights = None
def get_model(self):
# extend in future with more model types
model = LogisticRegression(**self.model_opts)
self.model = copy.deepcopy(model)
return model
def _get_model_param(self):
model = self.model_fit
n_iter = int(model.n_iter_[0])
is_converged = bool(n_iter < model.max_iter)
coef = model.coef_[0]
#LOGGER.debug(f"model coef len {coef.shape[0]}, value: {coef}")
weight_dict = dict(zip(self.header, [float(i) for i in coef]))
#LOGGER.debug(f"model weight dict {weight_dict}")
# intercept is in array format if fit_intercept
intercept = model.intercept_[0] if model.fit_intercept else model.intercept_
result = {'iters': n_iter,
'is_converged': is_converged,
'weight': weight_dict,
'intercept': intercept,
'header': self.header,
'best_iteration': -1
}
return result
def _get_model_param_ovr(self):
model = self.model_fit
n_iter = int(model.n_iter_[0])
is_converged = bool(n_iter < model.max_iter)
classes = model.classes_
coef_all = model.coef_
intercept_all = model.intercept_
ovr_pb_objs = []
ovr_pb_classes = []
for i, label in enumerate(classes):
coef = coef_all[i, ]
weight_dict = dict(zip(self.header, list(coef)))
intercept = intercept_all[i] if model.fit_intercept else intercept_all
result = {'iters': n_iter,
'is_converged': is_converged,
'weight': weight_dict,
'intercept': intercept,
'header': self.header,
'best_iteration': -1
}
param_protobuf_obj = lr_model_param_pb2.SingleModel(**result)
ovr_pb_objs.append(param_protobuf_obj)
ovr_pb_classes.append(str(label))
one_vs_rest_result = {
'completed_models': ovr_pb_objs,
'one_vs_rest_classes': ovr_pb_classes
}
param_result = {'one_vs_rest_result': one_vs_rest_result,
'need_one_vs_rest': True,
'header': self.header}
return param_result
def _get_param(self):
header = self.header
#LOGGER.debug("In get_param, header: {}".format(header))
if header is None:
param_protobuf_obj = lr_model_param_pb2.LRModelParam()
return param_protobuf_obj
if self.need_one_vs_rest:
result = self._get_model_param_ovr()
param_protobuf_obj = lr_model_param_pb2.LRModelParam(**result)
else:
result = self._get_model_param()
param_protobuf_obj = lr_model_param_pb2.LRModelParam(**result)
#LOGGER.debug("in _get_param, result: {}".format(result))
return param_protobuf_obj
def _get_meta(self):
model = self.model_fit
predict_param = lr_model_meta_pb2.PredictMeta(**{"threshold": self.predict_param.threshold})
result = {'penalty': model.penalty,
'tol': model.tol,
'fit_intercept': model.fit_intercept,
'optimizer': model.solver,
'need_one_vs_rest': self.need_one_vs_rest,
'max_iter': model.max_iter,
'predict_param': predict_param
}
meta_protobuf_obj = lr_model_meta_pb2.LRModelMeta(**result)
return meta_protobuf_obj
def export_model(self):
if not self.need_run:
return
meta_obj = self._get_meta()
param_obj = self._get_param()
result = {
self.model_meta_name: meta_obj,
self.model_param_name: param_obj
}
return result
def get_model_summary(self):
header = self.header
if header is None:
return {}
if not self.need_one_vs_rest:
param = self._get_model_param()
summary = {
'coef': param['weight'],
'intercept': param['intercept'],
'is_converged': param['is_converged'],
'iters': param['iters'],
'one_vs_rest': False
}
else:
model = self.model_fit
n_iter = int(model.n_iter_[0])
is_converged = bool(n_iter < model.max_iter)
classes = model.classes_
coef_all = model.coef_
intercept_all = model.intercept_
summary = {}
for i, label in enumerate(classes):
coef = coef_all[i, ]
weight_dict = dict(zip(self.header, [float(i) for i in coef]))
intercept = float(intercept_all[i]) if model.fit_intercept else float(intercept_all)
single_summary = {
'coef': weight_dict,
'intercept': intercept,
'is_converged': is_converged,
'iters': n_iter
}
single_key = f"{label}"
summary[single_key] = single_summary
summary['one_vs_rest'] = True
return summary
    # NOTE(review): @assert_io_num_rows_equal is normally applied to
    # Table-in/Table-out methods (e.g. predict); its purpose on this
    # coefficient loader is unclear from here — confirm it is intentional.
    @assert_io_num_rows_equal
    def _load_single_coef(self, result_obj):
        """Rebuild the coefficient vector in ``self.header`` order.

        ``result_obj`` carries a ``weight`` mapping keyed by feature name;
        a header name missing from the map would yield None and fail the
        float assignment below.
        """
        feature_shape = len(self.header)
        tmp_vars = np.zeros(feature_shape)
        weight_dict = dict(result_obj.weight)
        for idx, header_name in enumerate(self.header):
            tmp_vars[idx] = weight_dict.get(header_name)
        return tmp_vars
def _load_single_model(self, result_obj):
coef = self._load_single_coef(result_obj)
self.model_fit.__setattr__('coef_', np.array([coef]))
self.model_fit.__setattr__('intercept_', np.array([result_obj.intercept]))
self.model_fit.__setattr__('classes_', np.array([0, 1]))
self.model_fit.__setattr__('n_iter_', [result_obj.iters])
return
def _load_ovr_model(self, result_obj):
one_vs_rest_result = result_obj.one_vs_rest_result
classes = np.array([int(i) for i in one_vs_rest_result.one_vs_rest_classes])
models = one_vs_rest_result.completed_models
class_count, feature_shape = len(classes), len(self.header)
coef_all = np.zeros((class_count, feature_shape))
intercept_all = np.zeros(class_count)
iters = -1
for i, label in enumerate(classes):
model = models[i]
coef = self._load_single_coef(model)
coef_all[i, ] = coef
intercept_all[i] = model.intercept
iters = model.iters
self.model_fit.__setattr__('coef_', coef_all)
self.model_fit.__setattr__('intercept_', intercept_all)
self.model_fit.__setattr__('classes_', classes)
self.model_fit.__setattr__('n_iter_', [iters])
return
def _load_model_meta(self, meta_obj):
self.model_fit.__setattr__('penalty', meta_obj.penalty)
self.model_fit.__setattr__('tol', meta_obj.tol)
self.model_fit.__setattr__('fit_intercept', meta_obj.fit_intercept)
self.model_fit.__setattr__('solver', meta_obj.optimizer)
self.model_fit.__setattr__('max_iter', meta_obj.max_iter)
def load_model(self, model_dict):
    """Rebuild the sklearn LogisticRegression from exported meta/param protobufs.

    Restores hyper-parameters, header and either the binary or one-vs-rest
    coefficient layout depending on the exported need_one_vs_rest flag.
    """
    model_content = list(model_dict.get('model').values())[0]
    result_obj = model_content.get(self.model_param_name)
    meta_obj = model_content.get(self.model_meta_name)
    self.model_fit = LogisticRegression()
    self._load_model_meta(meta_obj)
    self.header = list(result_obj.header)
    self.need_one_vs_rest = meta_obj.need_one_vs_rest
    LOGGER.debug("in _load_model need_one_vs_rest: {}".format(self.need_one_vs_rest))
    if self.need_one_vs_rest:
        self._load_ovr_model(result_obj)
    else:
        self._load_single_model(result_obj)
    return
@assert_io_num_rows_equal
def predict(self, data_instances):
    """Score data_instances with the fitted sklearn model.

    Uses predict_proba row-by-row: the full probability vector in
    one-vs-rest mode, or the positive-class probability in the binary case,
    then converts scores to the standard predict-result table via
    predict_score_to_output. Returns None when need_run is False.
    """
    if not self.need_run:
        return
    model_fit = self.model_fit
    classes = [int(x) for x in model_fit.classes_]
    if self.need_one_vs_rest:
        pred_prob = data_instances.mapValues(lambda v: model_fit.predict_proba(v.features[None, :])[0])
    else:
        # [0][1]: probability of the positive class for this single row
        pred_prob = data_instances.mapValues(lambda v: model_fit.predict_proba(v.features[None, :])[0][1])
    predict_result = self.predict_score_to_output(data_instances=data_instances, predict_score=pred_prob,
                                                  classes=classes, threshold=self.predict_param.threshold)
    return predict_result
def fit(self, data_instances, validate_data=None):
    """Fit the local sklearn baseline model on the fully collected dataset.

    validate_data is accepted for interface compatibility but unused here.
    The whole table is pulled to the driver as numpy arrays, so this is only
    suitable for datasets that fit in local memory.
    """
    if not self.need_run:
        return
    # check if empty table
    LOGGER.info("Enter Local Baseline fit")
    abnormal_detection.empty_table_detection(data_instances)
    abnormal_detection.empty_feature_detection(data_instances)
    # get model
    model = self.get_model()
    # get header
    self.header = data_overview.get_header(data_instances)
    X_table = data_instances.mapValues(lambda v: v.features)
    y_table = data_instances.mapValues(lambda v: v.label)
    # NOTE(review): X, y (and w) are collected in separate passes; this relies
    # on collect() returning rows in a stable order across calls — confirm for
    # the underlying computing backend.
    X = np.array([v[1] for v in list(X_table.collect())])
    y = np.array([v[1] for v in list(y_table.collect())])
    w = None
    if data_overview.with_weight(data_instances):
        LOGGER.info(f"Input Data with Weight. Weight will be used to fit model.")
        weight_table = data_instances.mapValues(lambda v: v.weight)
        w = np.array([v[1] for v in list(weight_table.collect())])
    self.model_fit = model.fit(X, y, w)
    # more than two classes -> sklearn fitted a one-vs-rest scheme
    self.need_one_vs_rest = len(self.model_fit.classes_) > 2
    self.set_summary(self.get_model_summary())
| 11,763
| 37.953642
| 110
|
py
|
FATE
|
FATE-master/python/federatedml/local_baseline/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 661
| 35.777778
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/local_baseline/test/local_baseline_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import unittest
import uuid
from fate_arch.common import profile
from fate_arch.session import computing_session as session
from federatedml.local_baseline.local_baseline import LocalBaseline
from federatedml.param.local_baseline_param import LocalBaselineParam
from federatedml.feature.instance import Instance
from sklearn.linear_model import LogisticRegression
profile._PROFILE_LOG_ENABLED = False  # silence computing-profile logging during tests


class TestLocalBaseline(unittest.TestCase):
    """Checks LocalBaseline.predict against a plain sklearn LogisticRegression."""

    def setUp(self):
        # fresh computing session per test run
        self.job_id = str(uuid.uuid1())
        session.init("test_random_sampler_" + self.job_id)
        data_num = 100
        feature_num = 8
        self.prepare_data(data_num, feature_num)
        params = LocalBaselineParam()
        local_baseline_obj = LocalBaseline()
        local_baseline_obj._init_model(params)
        local_baseline_obj.need_run = True
        local_baseline_obj.header = ["x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8"]
        local_baseline_obj.model_name = "LogisticRegression"
        local_baseline_obj.model_opts = {}
        self.local_baseline_obj = local_baseline_obj

    def prepare_data(self, data_num, feature_num):
        # random integer features and binary labels, parallelized into a table
        self.X = np.random.randint(0, 10, (data_num, feature_num))
        self.y = np.random.randint(0, 2, data_num)
        final_result = []
        for i in range(data_num):
            tmp = self.X[i, :]
            inst = Instance(inst_id=i, features=tmp, label=self.y[i])
            final_result.append((i, inst))
        table = session.parallelize(final_result,
                                    include_key=True,
                                    partition=3)
        self.table = table

    def test_predict(self):
        glm = LogisticRegression().fit(self.X, self.y)
        real_predict_result = glm.predict(self.X)
        real_predict_result = dict(zip(range(self.X.shape[0]), real_predict_result))
        self.local_baseline_obj.model_fit = glm
        model_predict_result = self.local_baseline_obj.predict(self.table)
        # features[1] of each result Instance — presumably the predicted label
        # produced by predict_score_to_output; verify against that helper.
        model_predict_result = {v[0]: v[1].features[1] for v in model_predict_result.collect()}
        self.assertDictEqual(model_predict_result, real_predict_result)

    def tearDown(self):
        session.stop()


if __name__ == '__main__':
    unittest.main()
| 2,878
| 36.881579
| 95
|
py
|
FATE
|
FATE-master/python/federatedml/local_baseline/test/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616
| 37.5625
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/__init__.py
| 0
| 0
| 0
|
py
|
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/ftl_dataloder.py
|
import numpy as np
import tensorflow as tf
from federatedml.util import LOGGER
class FTLDataLoader(tf.keras.utils.Sequence):
    """Keras-style sequence that materializes FATE tables into numpy batches for FTL.

    The guest side holds overlap + non-overlap samples (with labels); the host
    side holds overlap samples only. Overlap samples are always stored first,
    so overlap indices form a contiguous prefix of self.x / self.y.
    """

    def __init__(self, non_overlap_samples, overlap_samples, batch_size, guest_side=True):
        self.batch_size = batch_size
        self.guest_side = guest_side
        self._overlap_index = []
        self._non_overlap_index = []
        if guest_side:
            self.size = non_overlap_samples.count() + overlap_samples.count()
        else:
            self.size = overlap_samples.count()
        # infer feature shape from one sample
        _, one_data = overlap_samples.first()
        self.y_shape = (1,)
        self.x_shape = one_data.features.shape
        self.x = np.zeros((self.size, *self.x_shape))
        self.y = np.zeros((self.size, *self.y_shape))
        index = 0
        self._overlap_keys = []
        self._non_overlap_keys = []
        # overlap samples first (both sides)
        for k, inst in overlap_samples.collect():
            self._overlap_keys.append(k)
            self.x[index] = inst.features
            if guest_side:
                self.y[index] = inst.label
            index += 1
        # guest side appends its non-overlap samples after the overlap block
        if self.guest_side:
            for k, inst in non_overlap_samples.collect():
                self._non_overlap_keys.append(k)
                self.x[index] = inst.features
                if guest_side:
                    self.y[index] = inst.label
                index += 1
        if guest_side:
            self._overlap_index = np.array(list(range(0, overlap_samples.count())))
            self._non_overlap_index = np.array(list(range(overlap_samples.count(), self.size)))
        else:
            # NOTE: plain list on host side — get_relative_overlap_index relies
            # on numpy boolean indexing and is therefore guest-side only.
            self._overlap_index = list(range(len(self.x)))

    def get_overlap_indexes(self):
        return self._overlap_index

    def get_non_overlap_indexes(self):
        return self._non_overlap_index

    def get_batch_indexes(self, batch_index):
        """Return [start, end) sample positions for the given batch."""
        start = self.batch_size * batch_index
        end = self.batch_size * (batch_index + 1)
        return start, end

    def get_relative_overlap_index(self, batch_index):
        """Positions of overlap samples inside the given batch (guest side)."""
        start, end = self.get_batch_indexes(batch_index)
        return self._overlap_index[(self._overlap_index >= start) & (self._overlap_index < end)] % self.batch_size

    def get_overlap_x(self):
        return self.x[self._overlap_index]

    def get_overlap_y(self):
        return self.y[self._overlap_index]

    def get_overlap_keys(self):
        return self._overlap_keys

    def get_non_overlap_keys(self):
        return self._non_overlap_keys

    def __getitem__(self, index):
        """Return one batch: (x, y) on guest side, x only on host side."""
        start, end = self.get_batch_indexes(index)
        if self.guest_side:
            return self.x[start: end], self.y[start: end]
        else:
            return self.x[start: end]

    def __len__(self):
        return int(np.ceil(self.size / float(self.batch_size)))

    def get_idx(self):
        """Return all sample keys in storage order (overlap first, then non-overlap).

        Fix: this previously returned self._keys, an attribute never assigned
        anywhere in this class, so any call raised AttributeError.
        """
        return self._overlap_keys + self._non_overlap_keys

    def data_basic_info(self):
        return 'total sample num is {}, overlap sample num is {}, non_overlap sample is {},'\
               'x_shape is {}'.format(self.size, len(self._overlap_index), len(self._non_overlap_index),
                                      self.x_shape)
| 3,111
| 31.416667
| 114
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/ftl_host.py
|
import numpy as np
from federatedml.transfer_learning.hetero_ftl.ftl_base import FTL
from federatedml.statistic.intersect import RsaIntersectionHost
from federatedml.util import LOGGER
from federatedml.transfer_learning.hetero_ftl.ftl_dataloder import FTLDataLoader
from federatedml.util import consts
from federatedml.secureprotol.paillier_tensor import PaillierTensor
from federatedml.util.io_check import assert_io_num_rows_equal
from federatedml.statistic import data_overview
class FTLHost(FTL):
    """Host-side party of hetero federated transfer learning (FTL).

    Trains a local bottom model on host features, exchanges (optionally
    Paillier-encrypted) intermediate components with the guest each epoch,
    and assists the guest with masked decryption in 'encrypted' mode.
    """

    def __init__(self):
        super(FTLHost, self).__init__()
        self.overlap_ub = None  # u_b: host embeddings of overlap samples
        self.overlap_ub_2 = None  # u_b squared (per-sample outer products)
        self.mapping_comp_b = None  # -κ * u_b, sent to guest
        self.constant_k = None  # κ = 1 / feat_dim (set in fit)
        self.feat_dim = None  # output feature dimension of bottom model
        self.m_b = None  # random mask
        self.role = consts.HOST

    def init_intersect_obj(self):
        """Build the RSA intersection runner used to align overlap sample IDs."""
        LOGGER.debug('creating intersect obj done')
        intersect_obj = RsaIntersectionHost()
        intersect_obj.host_party_id = self.component_properties.local_partyid
        intersect_obj.host_party_id_list = self.component_properties.host_party_idlist
        intersect_obj.load_params(self.intersect_param)
        return intersect_obj

    def batch_compute_components(self, data_loader: FTLDataLoader):
        """Run the bottom model over all batches and compute host components.

        Returns (u_b, per-sample u_b outer products, -κ*u_b).
        """
        overlap_ub = []
        for i in range(len(data_loader)):
            batch_x = data_loader[i]
            ub_batch = self.nn.predict(batch_x)
            overlap_ub.append(ub_batch)
        overlap_ub = np.concatenate(overlap_ub, axis=0)
        # per-sample outer product: [n, d, 1] x [n, 1, d] -> [n, d, d]
        overlap_ub_2 = np.matmul(np.expand_dims(overlap_ub, axis=2), np.expand_dims(overlap_ub, axis=1))
        mapping_comp_b = - overlap_ub * self.constant_k
        if self.verbose:
            LOGGER.debug('overlap_ub is {}'.format(overlap_ub))
            LOGGER.debug('overlap_ub_2 is {}'.format(overlap_ub_2))
        return overlap_ub, overlap_ub_2, mapping_comp_b

    def exchange_components(self, comp_to_send, epoch_idx):
        """Send host components to the guest and receive guest components.

        In 'encrypted' mode the outgoing components are Paillier-encrypted and
        received guest tables are wrapped as PaillierTensor.
        """
        if self.mode == 'encrypted':
            comp_to_send = self.encrypt_tensor(comp_to_send)
        # receiving guest components
        y_overlap_2_phi_2 = self.transfer_variable.y_overlap_2_phi_2.get(idx=0, suffix=(epoch_idx, ))
        y_overlap_phi = self.transfer_variable.y_overlap_phi.get(idx=0, suffix=(epoch_idx, ))
        mapping_comp_a = self.transfer_variable.mapping_comp_a.get(idx=0, suffix=(epoch_idx, ))
        guest_components = [y_overlap_2_phi_2, y_overlap_phi, mapping_comp_a]
        # sending host components
        self.transfer_variable.overlap_ub.remote(comp_to_send[0], suffix=(epoch_idx, ))
        self.transfer_variable.overlap_ub_2.remote(comp_to_send[1], suffix=(epoch_idx, ))
        self.transfer_variable.mapping_comp_b.remote(comp_to_send[2], suffix=(epoch_idx, ))
        if self.mode == 'encrypted':
            guest_paillier_tensors = [PaillierTensor(tb, partitions=self.partitions) for tb in guest_components]
            return guest_paillier_tensors
        else:
            return guest_components

    def decrypt_guest_data(self, epoch_idx, local_round=-1):
        """Decrypt guest-side masked gradients/constants with the host key and return them."""
        encrypted_consts = self.transfer_variable.guest_side_const.get(suffix=(epoch_idx, local_round, ),
                                                                       idx=0)
        grad_table = self.transfer_variable.guest_side_gradients.get(suffix=(epoch_idx, local_round, ),
                                                                     idx=0)
        inter_grad = PaillierTensor(grad_table, partitions=self.partitions)
        decrpyted_grad = inter_grad.decrypt(self.encrypter)
        decrypted_const = self.encrypter.recursive_decrypt(encrypted_consts)
        self.transfer_variable.decrypted_guest_const.remote(decrypted_const,
                                                            suffix=(epoch_idx, local_round, ))
        self.transfer_variable.decrypted_guest_gradients.remote(decrpyted_grad.get_obj(),
                                                                suffix=(epoch_idx, local_round, ))

    def decrypt_inter_result(self, loss_grad_b, epoch_idx, local_round=-1):
        """Mask encrypted host gradients with random noise, let guest decrypt, then unmask."""
        rand_0 = PaillierTensor(
            self.rng_generator.generate_random_number(
                loss_grad_b.shape),
            partitions=self.partitions)
        grad_a_overlap = loss_grad_b + rand_0
        self.transfer_variable.host_side_gradients.remote(grad_a_overlap.get_obj(),
                                                          suffix=(epoch_idx, local_round, 'host_de_send'))
        de_loss_grad_b = self.transfer_variable.decrypted_host_gradients\
            .get(suffix=(epoch_idx, local_round, 'host_de_get'), idx=0)
        # subtract the mask to recover the true decrypted gradient
        de_loss_grad_b = PaillierTensor(de_loss_grad_b, partitions=self.partitions) - rand_0
        return de_loss_grad_b

    def compute_backward_gradients(self, guest_components, data_loader: FTLDataLoader, epoch_idx, local_round=-1):
        """
        Compute host bottom-model gradients from the guest components.

        Plain mode computes them directly; encrypted mode computes on
        PaillierTensors and round-trips through the masked decryption protocol.
        """
        y_overlap_2_phi_2, y_overlap_phi, mapping_comp_a = guest_components[0], guest_components[1], guest_components[2]
        ub_overlap_ex = np.expand_dims(self.overlap_ub, axis=1)
        if self.mode == 'plain':
            ub_overlap_y_overlap_2_phi_2 = np.matmul(ub_overlap_ex, y_overlap_2_phi_2)
            l1_grad_b = np.squeeze(ub_overlap_y_overlap_2_phi_2, axis=1) + y_overlap_phi
            loss_grad_b = self.alpha * l1_grad_b + mapping_comp_a
            return loss_grad_b
        if self.mode == 'encrypted':
            ub_overlap_ex = np.expand_dims(self.overlap_ub, axis=1)
            ub_overlap_y_overlap_2_phi_2 = y_overlap_2_phi_2.matmul_3d(ub_overlap_ex, multiply='right')
            ub_overlap_y_overlap_2_phi_2 = ub_overlap_y_overlap_2_phi_2.squeeze(axis=1)
            l1_grad_b = ub_overlap_y_overlap_2_phi_2 + y_overlap_phi
            en_loss_grad_b = l1_grad_b * self.alpha + mapping_comp_a
            # serve guest decryption first, then get our own gradients decrypted
            self.decrypt_guest_data(epoch_idx, local_round=local_round)
            loss_grad_b = self.decrypt_inter_result(en_loss_grad_b, epoch_idx, local_round=local_round)
            return loss_grad_b.numpy()

    def compute_loss(self, epoch_idx):
        """
        Help guest compute the FTL loss: a no-op in plain mode; in encrypted
        mode decrypt the received loss and send it back.
        """
        if self.mode == 'plain':
            return
        elif self.mode == 'encrypted':
            encrypted_loss = self.transfer_variable.encrypted_loss.get(idx=0, suffix=(epoch_idx, 'send_loss'))
            rs = self.encrypter.recursive_decrypt(encrypted_loss)
            self.transfer_variable.decrypted_loss.remote(rs, suffix=(epoch_idx, 'get_loss'))

    def fit(self, data_inst, validate_data=None):
        """Train the host bottom model in lockstep with the guest."""
        LOGGER.info('start to fit a ftl model, '
                    'run mode is {},'
                    'communication efficient mode is {}'.format(self.mode, self.comm_eff))
        data_loader, self.x_shape, self.data_num, self.overlap_num = self.prepare_data(self.init_intersect_obj(),
                                                                                       data_inst, guest_side=False)
        self.input_dim = self.x_shape[0]
        # cache data_loader for faster validation
        self.cache_dataloader[self.get_dataset_key(data_inst)] = data_loader
        self.partitions = data_inst.partitions
        self.initialize_nn(input_shape=self.x_shape)
        self.feat_dim = self.nn._model.output_shape[1]
        self.constant_k = 1 / self.feat_dim
        self.callback_list.on_train_begin(data_inst, validate_data)
        for epoch_idx in range(self.epochs):
            LOGGER.debug('fitting epoch {}'.format(epoch_idx))
            self.callback_list.on_epoch_begin(epoch_idx)
            self.overlap_ub, self.overlap_ub_2, self.mapping_comp_b = self.batch_compute_components(data_loader)
            send_components = [self.overlap_ub, self.overlap_ub_2, self.mapping_comp_b]
            guest_components = self.exchange_components(send_components, epoch_idx)
            # communication-efficient mode runs several local updates per exchange
            for local_round_idx in range(self.local_round):
                if self.comm_eff:
                    LOGGER.debug('running local iter {}'.format(local_round_idx))
                grads = self.compute_backward_gradients(guest_components, data_loader, epoch_idx,
                                                        local_round=local_round_idx)
                self.update_nn_weights(grads, data_loader, epoch_idx, decay=self.comm_eff)
                if local_round_idx == 0:
                    self.compute_loss(epoch_idx)
                if local_round_idx + 1 != self.local_round:
                    self.overlap_ub, self.overlap_ub_2, self.mapping_comp_b = self.batch_compute_components(data_loader)
            self.callback_list.on_epoch_end(epoch_idx)
            if self.n_iter_no_change is True:
                # guest decides convergence; host only syncs the stop flag
                stop_flag = self.sync_stop_flag(epoch_idx)
                if stop_flag:
                    break
            LOGGER.debug('fitting epoch {} done'.format(epoch_idx))
        self.callback_list.on_train_end()
        self.set_summary(self.generate_summary())

    def generate_summary(self):
        """Return the model summary recorded at the end of training."""
        summary = {"best_iteration": self.callback_variables.best_iteration}
        return summary

    @assert_io_num_rows_equal
    def predict(self, data_inst):
        """Compute host embeddings and send them to the guest; returns None."""
        LOGGER.debug('host start to predict')
        self.transfer_variable.predict_host_u.disable_auto_clean()
        data_loader_key = self.get_dataset_key(data_inst)
        data_inst_ = data_overview.header_alignment(data_inst, self.store_header)
        if data_loader_key in self.cache_dataloader:
            data_loader = self.cache_dataloader[data_loader_key]
        else:
            data_loader, _, _, _ = self.prepare_data(self.init_intersect_obj(), data_inst_, guest_side=False)
            self.cache_dataloader[data_loader_key] = data_loader
        ub_batches = []
        for i in range(len(data_loader)):
            batch_x = data_loader[i]
            ub_batch = self.nn.predict(batch_x)
            ub_batches.append(ub_batch)
        predicts = np.concatenate(ub_batches, axis=0)
        self.transfer_variable.predict_host_u.remote(predicts, suffix=(0, 'host_u'))
        LOGGER.debug('ftl host prediction done')
        return None

    def export_model(self):
        """Export host meta and param protobufs."""
        return {"FTLHostMeta": self.get_model_meta(), "FTLHostParam": self.get_model_param()}

    def load_model(self, model_dict):
        """Restore the host model from exported protobufs (matched by name suffix)."""
        model_param = None
        model_meta = None
        for _, value in model_dict["model"].items():
            for model in value:
                if model.endswith("Meta"):
                    model_meta = value[model]
                if model.endswith("Param"):
                    model_param = value[model]
        LOGGER.info("load model")
        self.set_model_meta(model_meta)
        self.set_model_param(model_param)
| 11,050
| 41.667954
| 120
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/ftl_guest.py
|
import numpy as np
from fate_arch.session import computing_session as session
from federatedml.util import consts
from federatedml.transfer_learning.hetero_ftl.ftl_base import FTL
from federatedml.util import LOGGER
from federatedml.transfer_learning.hetero_ftl.ftl_dataloder import FTLDataLoader
from federatedml.statistic.intersect import RsaIntersectionGuest
from federatedml.model_base import Metric
from federatedml.model_base import MetricMeta
from federatedml.optim.convergence import converge_func_factory
from federatedml.secureprotol.paillier_tensor import PaillierTensor
from federatedml.optim.activation import sigmoid
from federatedml.statistic import data_overview
class FTLGuest(FTL):
def __init__(self):
super(FTLGuest, self).__init__()
self.phi = None # Φ_A
self.phi_product = None # (Φ_A)‘(Φ_A) [feature_dim, feature_dim]
self.overlap_y = None # y_i ∈ N_c
self.overlap_y_2 = None # (y_i ∈ N_c )^2
self.overlap_ua = None # u_i ∈ N_AB
self.constant_k = None # κ
self.feat_dim = None # output feature dimension
self.send_components = None # components to send
self.convergence = None
self.overlap_y_pt = None # paillier tensor
self.history_loss = [] # list to record history loss
self.role = consts.GUEST
def init_intersect_obj(self):
intersect_obj = RsaIntersectionGuest()
intersect_obj.guest_party_id = self.component_properties.local_partyid
intersect_obj.host_party_id_list = self.component_properties.host_party_idlist
intersect_obj.load_params(self.intersect_param)
LOGGER.debug('intersect done')
return intersect_obj
def check_convergence(self, loss):
LOGGER.info("check convergence")
if self.convergence is None:
self.convergence = converge_func_factory("diff", self.tol)
return self.convergence.is_converge(loss)
def compute_phi_and_overlap_ua(self, data_loader: FTLDataLoader):
"""
compute Φ and ua of overlap samples
"""
phi = None # [1, feature_dim] Φ_A
overlap_ua = []
for i in range(len(data_loader)):
batch_x, batch_y = data_loader[i]
ua_batch = self.nn.predict(batch_x) # [batch_size, feature_dim]
relative_overlap_index = data_loader.get_relative_overlap_index(i)
if len(relative_overlap_index) != 0:
if self.verbose:
LOGGER.debug('batch {}/{} overlap index is {}'.format(i, len(data_loader), relative_overlap_index))
overlap_ua.append(ua_batch[relative_overlap_index])
phi_tmp = np.expand_dims(np.sum(batch_y * ua_batch, axis=0), axis=0)
if phi is None:
phi = phi_tmp
else:
phi += phi_tmp
phi = phi / self.data_num
return phi, overlap_ua
def batch_compute_components(self, data_loader: FTLDataLoader):
"""
compute guest components
"""
phi, overlap_ua = self.compute_phi_and_overlap_ua(data_loader) # Φ_A [1, feature_dim]
phi_product = np.matmul(phi.transpose(), phi) # (Φ_A)‘(Φ_A) [feature_dim, feature_dim]
if self.overlap_y is None:
self.overlap_y = data_loader.get_overlap_y() # {C(y)=y} [1, feat_dim]
if self.overlap_y_2 is None:
self.overlap_y_2 = self.overlap_y * self.overlap_y # {D(y)=y^2} # [1, feat_dim]
overlap_ua = np.concatenate(overlap_ua, axis=0) # [overlap_num, feat_dim]
# 3 components will be sent to host
y_overlap_2_phi_2 = 0.25 * np.expand_dims(self.overlap_y_2, axis=2) * phi_product
y_overlap_phi = -0.5 * self.overlap_y * phi
mapping_comp_a = -overlap_ua * self.constant_k
return phi, phi_product, overlap_ua, [y_overlap_2_phi_2, y_overlap_phi, mapping_comp_a]
def exchange_components(self, comp_to_send, epoch_idx):
"""
send guest components and get host components
"""
if self.mode == 'encrypted':
comp_to_send = self.encrypt_tensor(comp_to_send)
# sending [y_overlap_2_phi_2, y_overlap_phi, mapping_comp_a]
self.transfer_variable.y_overlap_2_phi_2.remote(comp_to_send[0], suffix=(epoch_idx, ))
self.transfer_variable.y_overlap_phi.remote(comp_to_send[1], suffix=(epoch_idx, ))
self.transfer_variable.mapping_comp_a.remote(comp_to_send[2], suffix=(epoch_idx, ))
# receiving [overlap_ub, overlap_ub_2, mapping_comp_b]
overlap_ub = self.transfer_variable.overlap_ub.get(idx=0, suffix=(epoch_idx, ))
overlap_ub_2 = self.transfer_variable.overlap_ub_2.get(idx=0, suffix=(epoch_idx, ))
mapping_comp_b = self.transfer_variable.mapping_comp_b.get(idx=0, suffix=(epoch_idx, ))
host_components = [overlap_ub, overlap_ub_2, mapping_comp_b]
if self.mode == 'encrypted':
host_paillier_tensors = [PaillierTensor(tb, partitions=self.partitions) for tb in host_components]
return host_paillier_tensors
else:
return host_components
def decrypt_inter_result(self, encrypted_const, grad_a_overlap, epoch_idx, local_round=-1):
"""
add random mask to encrypted inter-result, get decrypted data from host add subtract random mask
"""
rand_0 = self.rng_generator.generate_random_number(encrypted_const.shape)
encrypted_const = encrypted_const + rand_0
rand_1 = PaillierTensor(
self.rng_generator.generate_random_number(
grad_a_overlap.shape),
partitions=self.partitions)
grad_a_overlap = grad_a_overlap + rand_1
self.transfer_variable.guest_side_const.remote(encrypted_const, suffix=(epoch_idx,
local_round,))
self.transfer_variable.guest_side_gradients.remote(grad_a_overlap.get_obj(), suffix=(epoch_idx,
local_round,))
const = self.transfer_variable.decrypted_guest_const.get(suffix=(epoch_idx, local_round, ), idx=0)
grad = self.transfer_variable.decrypted_guest_gradients.get(suffix=(epoch_idx, local_round, ), idx=0)
const = const - rand_0
grad_a_overlap = PaillierTensor(grad, partitions=self.partitions) - rand_1
return const, grad_a_overlap
def decrypt_host_data(self, epoch_idx, local_round=-1):
inter_grad = self.transfer_variable.host_side_gradients.get(suffix=(epoch_idx,
local_round,
'host_de_send'), idx=0)
inter_grad_pt = PaillierTensor(inter_grad, partitions=self.partitions)
self.transfer_variable.decrypted_host_gradients.remote(inter_grad_pt.decrypt(self.encrypter).get_obj(),
suffix=(epoch_idx,
local_round,
'host_de_get'))
def decrypt_loss_val(self, encrypted_loss, epoch_idx):
self.transfer_variable.encrypted_loss.remote(encrypted_loss, suffix=(epoch_idx, 'send_loss'))
decrypted_loss = self.transfer_variable.decrypted_loss.get(idx=0, suffix=(epoch_idx, 'get_loss'))
return decrypted_loss
def compute_backward_gradients(self, host_components, data_loader: FTLDataLoader, epoch_idx, local_round=-1):
"""
compute backward gradients using host components
"""
# they are Paillier tensors or np array
overlap_ub, overlap_ub_2, mapping_comp_b = host_components[0], host_components[1], host_components[2]
y_overlap_2_phi = np.expand_dims(self.overlap_y_2 * self.phi, axis=1)
if self.mode == 'plain':
loss_grads_const_part1 = 0.25 * np.squeeze(np.matmul(y_overlap_2_phi, overlap_ub_2), axis=1)
loss_grads_const_part2 = self.overlap_y * overlap_ub
const = np.sum(loss_grads_const_part1, axis=0) - 0.5 * np.sum(loss_grads_const_part2, axis=0)
grad_a_nonoverlap = self.alpha * const * \
data_loader.y[data_loader.get_non_overlap_indexes()] / self.data_num
grad_a_overlap = self.alpha * const * self.overlap_y / self.data_num + mapping_comp_b
return np.concatenate([grad_a_overlap, grad_a_nonoverlap], axis=0)
elif self.mode == 'encrypted':
loss_grads_const_part1 = overlap_ub_2.matmul_3d(0.25 * y_overlap_2_phi, multiply='right')
loss_grads_const_part1 = loss_grads_const_part1.squeeze(axis=1)
if self.overlap_y_pt is None:
self.overlap_y_pt = PaillierTensor(self.overlap_y, partitions=self.partitions)
loss_grads_const_part2 = overlap_ub * self.overlap_y_pt
encrypted_const = loss_grads_const_part1.reduce_sum() - 0.5 * loss_grads_const_part2.reduce_sum()
grad_a_overlap = self.overlap_y_pt.map_ndarray_product(
(self.alpha / self.data_num * encrypted_const)) + mapping_comp_b
const, grad_a_overlap = self.decrypt_inter_result(
encrypted_const, grad_a_overlap, epoch_idx=epoch_idx, local_round=local_round)
self.decrypt_host_data(epoch_idx, local_round=local_round)
grad_a_nonoverlap = self.alpha * const * \
data_loader.y[data_loader.get_non_overlap_indexes()] / self.data_num
return np.concatenate([grad_a_overlap.numpy(), grad_a_nonoverlap], axis=0)
def compute_loss(self, host_components, epoch_idx, overlap_num):
"""
compute training loss
"""
overlap_ub, overlap_ub_2, mapping_comp_b = host_components[0], host_components[1], host_components[2]
if self.mode == 'plain':
loss_overlap = np.sum((-self.overlap_ua * self.constant_k) * overlap_ub)
ub_phi = np.matmul(overlap_ub, self.phi.transpose())
part1 = -0.5 * np.sum(self.overlap_y * ub_phi)
part2 = 1.0 / 8 * np.sum(ub_phi * ub_phi)
part3 = len(self.overlap_y) * np.log(2)
loss_y = part1 + part2 + part3
return self.alpha * (loss_y / overlap_num) + loss_overlap / overlap_num
elif self.mode == 'encrypted':
loss_overlap = overlap_ub.element_wise_product((-self.overlap_ua * self.constant_k))
sum = np.sum(loss_overlap.reduce_sum())
ub_phi = overlap_ub.T.fast_matmul_2d(self.phi.transpose())
part1 = -0.5 * np.sum((self.overlap_y * ub_phi))
ub_2 = overlap_ub_2.reduce_sum()
enc_phi_uB_2_phi = np.matmul(np.matmul(self.phi, ub_2), self.phi.transpose())
part2 = 1 / 8 * np.sum(enc_phi_uB_2_phi)
part3 = len(self.overlap_y) * np.log(2)
loss_y = part1 + part2 + part3
en_loss = (self.alpha / self.overlap_num) * loss_y + sum / overlap_num
loss_val = self.decrypt_loss_val(en_loss, epoch_idx)
return loss_val
@staticmethod
def sigmoid(x):
return np.array(list(map(sigmoid, x)))
def generate_summary(self):
summary = {'loss_history': self.history_loss,
"best_iteration": self.callback_variables.best_iteration}
summary['validation_metrics'] = self.callback_variables.validation_summary
return summary
def check_host_number(self):
host_num = len(self.component_properties.host_party_idlist)
LOGGER.info('host number is {}'.format(host_num))
if host_num != 1:
raise ValueError('only 1 host party is allowed')
def fit(self, data_inst, validate_data=None):
LOGGER.debug('in training, partitions is {}'.format(data_inst.partitions))
LOGGER.info('start to fit a ftl model, '
'run mode is {},'
'communication efficient mode is {}'.format(self.mode, self.comm_eff))
self.check_host_number()
data_loader, self.x_shape, self.data_num, self.overlap_num = self.prepare_data(self.init_intersect_obj(),
data_inst, guest_side=True)
self.input_dim = self.x_shape[0]
# cache data_loader for faster validation
self.cache_dataloader[self.get_dataset_key(data_inst)] = data_loader
self.partitions = data_inst.partitions
LOGGER.debug('self partitions is {}'.format(self.partitions))
self.initialize_nn(input_shape=self.x_shape)
self.feat_dim = self.nn._model.output_shape[1]
self.constant_k = 1 / self.feat_dim
self.callback_list.on_train_begin(train_data=data_inst, validate_data=validate_data)
self.callback_meta("loss",
"train",
MetricMeta(name="train",
metric_type="LOSS",
extra_metas={"unit_name": "iters"}))
# compute intermediate result of first epoch
self.phi, self.phi_product, self.overlap_ua, self.send_components = self.batch_compute_components(data_loader)
for epoch_idx in range(self.epochs):
LOGGER.debug('fitting epoch {}'.format(epoch_idx))
self.callback_list.on_epoch_begin(epoch_idx)
host_components = self.exchange_components(self.send_components, epoch_idx=epoch_idx)
loss = None
for local_round_idx in range(self.local_round):
if self.comm_eff:
LOGGER.debug('running local iter {}'.format(local_round_idx))
grads = self.compute_backward_gradients(host_components, data_loader, epoch_idx=epoch_idx,
local_round=local_round_idx)
self.update_nn_weights(grads, data_loader, epoch_idx, decay=self.comm_eff)
if local_round_idx == 0:
loss = self.compute_loss(host_components, epoch_idx, len(data_loader.get_overlap_indexes()))
if local_round_idx + 1 != self.local_round:
self.phi, self.overlap_ua = self.compute_phi_and_overlap_ua(data_loader)
self.callback_metric("loss", "train", [Metric(epoch_idx, loss)])
self.history_loss.append(loss)
# updating variables for next epochs
if epoch_idx + 1 == self.epochs:
# only need to update phi in last epochs
self.phi, _ = self.compute_phi_and_overlap_ua(data_loader)
else:
# compute phi, phi_product, overlap_ua etc. for next epoch
self.phi, self.phi_product, self.overlap_ua, self.send_components = self.batch_compute_components(
data_loader)
self.callback_list.on_epoch_end(epoch_idx)
# check n_iter_no_change
if self.n_iter_no_change is True:
if self.check_convergence(loss):
self.sync_stop_flag(epoch_idx, stop_flag=True)
break
else:
self.sync_stop_flag(epoch_idx, stop_flag=False)
LOGGER.debug('fitting epoch {} done, loss is {}'.format(epoch_idx, loss))
self.callback_list.on_train_end()
self.callback_meta("loss",
"train",
MetricMeta(name="train",
metric_type="LOSS",
extra_metas={"Best": min(self.history_loss)}))
self.set_summary(self.generate_summary())
LOGGER.debug('fitting ftl model done')
    def predict(self, data_inst):
        """Produce predictions for guest-side samples.

        Only samples in the guest/host overlap receive scores: the host sends
        its bottom-model outputs, which are combined with the locally stored
        ``self.phi`` via a matrix product and squashed with sigmoid.
        NOTE(review): assumes fit() or load_model() has populated self.phi.

        :param data_inst: Table of input instances to predict on
        :return: prediction Table produced by predict_score_to_output
        """
        LOGGER.debug('guest start to predict')
        data_loader_key = self.get_dataset_key(data_inst)
        # align input columns with the header recorded at fit time
        data_inst_ = data_overview.header_alignment(data_inst, self.store_header)
        if data_loader_key in self.cache_dataloader:
            data_loader = self.cache_dataloader[data_loader_key]
        else:
            data_loader, _, _, _ = self.prepare_data(self.init_intersect_obj(), data_inst_, guest_side=True)
            self.cache_dataloader[data_loader_key] = data_loader
        LOGGER.debug('try to get predict u from host, suffix is {}'.format((0, 'host_u')))
        host_predicts = self.transfer_variable.predict_host_u.get(idx=0, suffix=(0, 'host_u'))
        predict_score = np.matmul(host_predicts, self.phi.transpose())
        predicts = self.sigmoid(predict_score)  # convert to predict scores
        predicts = list(map(float, predicts))
        # scores exist only for overlapping sample keys
        predict_tb = session.parallelize(zip(data_loader.get_overlap_keys(), predicts,), include_key=True,
                                         partition=data_inst.partitions)
        threshold = self.predict_param.threshold
        predict_result = self.predict_score_to_output(data_inst_, predict_tb, classes=[0, 1], threshold=threshold)
        LOGGER.debug('ftl guest prediction done')
        return predict_result
    def export_model(self):
        """Serialize the trained model (nn weights + phi vector) into protobuf parts."""
        model_param = self.get_model_param()
        # phi is stored as a 1 x dim row vector; flatten it into the proto field
        model_param.phi_a.extend(self.phi.tolist()[0])
        # NOTE(review): the guest's param is keyed "FTLHostParam". load_model
        # matches only on the "Meta"/"Param" name suffix, so this still works,
        # but the key name looks like a copy-paste slip -- confirm before renaming.
        return {"FTLGuestMeta": self.get_model_meta(), "FTLHostParam": model_param}
def load_model(self, model_dict):
model_param = None
model_meta = None
for _, value in model_dict["model"].items():
for model in value:
if model.endswith("Meta"):
model_meta = value[model]
if model.endswith("Param"):
model_param = value[model]
LOGGER.info("load model")
self.set_model_meta(model_meta)
self.set_model_param(model_param)
self.phi = np.array([model_param.phi_a])
| 18,034
| 42.880779
| 119
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/ftl_base.py
|
import copy
import json
import functools
import numpy as np
from federatedml.util import LOGGER
from federatedml.transfer_learning.hetero_ftl.backend.nn_model import get_nn_builder
from federatedml.model_base import ModelBase
from federatedml.param.ftl_param import FTLParam
from federatedml.transfer_learning.hetero_ftl.backend.tf_keras.nn_model import KerasNNModel
from federatedml.util.classify_label_checker import ClassifyLabelChecker
from federatedml.transfer_variable.transfer_class.ftl_transfer_variable import FTLTransferVariable
from federatedml.transfer_learning.hetero_ftl.ftl_dataloder import FTLDataLoader
from federatedml.transfer_learning.hetero_ftl.backend.tf_keras.data_generator import KerasSequenceDataConverter
from federatedml.nn.backend.utils import rng as random_number_generator
from federatedml.secureprotol import PaillierEncrypt
from federatedml.util import consts
from federatedml.secureprotol.paillier_tensor import PaillierTensor
from federatedml.protobuf.generated.ftl_model_param_pb2 import FTLModelParam
from federatedml.protobuf.generated.ftl_model_meta_pb2 import FTLModelMeta, FTLPredictParam, FTLOptimizerParam
class FTL(ModelBase):
    """Common base for the guest/host sides of hetero Federated Transfer Learning.

    Handles parameter parsing, Paillier encryption setup, intersection-based
    data preparation, and the local bottom neural network shared by both roles.
    """

    def __init__(self):
        super(FTL, self).__init__()

        # ---- configuration copied from FTLParam in _init_model ----
        self.nn_define = None
        self.alpha = None
        self.tol = None
        self.learning_rate = None
        self.n_iter_no_change = None
        self.validation_freqs = None
        self.early_stopping_rounds = None
        self.use_first_metric_only = None
        self.optimizer = None
        self.intersect_param = None
        self.config_type = 'keras'
        self.comm_eff = None
        self.local_round = 1

        # ---- runtime state ----
        self.verbose = False
        self.nn: KerasNNModel = None
        self.nn_builder = None
        self.model_param = FTLParam()
        self.x_shape = None
        self.input_dim = None
        self.data_num = 0
        self.overlap_num = 0
        self.transfer_variable = FTLTransferVariable()
        self.data_convertor = KerasSequenceDataConverter()
        self.mode = 'plain'
        self.encrypter = None
        self.partitions = 16
        self.batch_size = None
        self.epochs = None
        self.store_header = None  # header of input data table
        self.model_float_type = np.float32
        self.cache_dataloader = {}
        self.validation_strategy = None

    def _init_model(self, param: FTLParam):
        """Copy user parameters onto the instance and build the encrypter."""
        self.nn_define = param.nn_define
        self.alpha = param.alpha
        self.tol = param.tol
        self.n_iter_no_change = param.n_iter_no_change
        self.validation_freqs = param.validation_freqs
        self.optimizer = param.optimizer
        self.intersect_param = param.intersect_param
        self.batch_size = param.batch_size
        self.epochs = param.epochs
        self.mode = param.mode
        self.comm_eff = param.communication_efficient
        self.local_round = param.local_round
        assert 'learning_rate' in self.optimizer.kwargs, 'optimizer setting must contain learning_rate'
        self.learning_rate = self.optimizer.kwargs['learning_rate']
        if not self.comm_eff:
            # local_round > 1 only makes sense in communication-efficient mode
            self.local_round = 1
            LOGGER.debug('communication efficient mode is not enabled, local_round set as 1')
        self.encrypter = self.generate_encrypter(param)
        self.predict_param = param.predict_param
        self.rng_generator = random_number_generator.RandomNumberGenerator()

    @staticmethod
    def debug_data_inst(data_inst):
        """Dump every row of a Table to the debug log (development helper)."""
        collect_data = list(data_inst.collect())
        LOGGER.debug('showing Table')
        for d in collect_data:
            LOGGER.debug('key {} id {}, features {} label {}'.format(d[0], d[1].inst_id, d[1].features, d[1].label))

    @staticmethod
    def reset_label(inst, mapping):
        """Return a deep copy of *inst* with its label remapped through *mapping*."""
        new_inst = copy.deepcopy(inst)
        new_inst.label = mapping[new_inst.label]
        return new_inst

    @staticmethod
    def check_label(data_inst):
        """
        check label. FTL only supports binary classification, and labels should be 1 or -1
        """
        LOGGER.debug('checking label')
        label_checker = ClassifyLabelChecker()
        num_class, class_set = label_checker.validate_label(data_inst)
        if num_class != 2:
            raise ValueError(
                'ftl only support binary classification, however {} labels are provided.'.format(num_class))
        if 1 in class_set and -1 in class_set:
            return data_inst
        # remap the two observed labels onto {-1, 1}: larger label -> 1
        sorted_class_set = sorted(list(class_set))
        new_label_mapping = {sorted_class_set[1]: 1, sorted_class_set[0]: -1}
        reset_label = functools.partial(FTL.reset_label, mapping=new_label_mapping)
        new_table = data_inst.mapValues(reset_label)
        new_table.schema = copy.deepcopy(data_inst.schema)
        return new_table

    def generate_encrypter(self, param) -> PaillierEncrypt:
        """Create the Paillier encrypter configured by *param*.

        :raises NotImplementedError: when a non-Paillier method is requested
        """
        LOGGER.info("generate encrypter")
        if param.encrypt_param.method.lower() != consts.PAILLIER.lower():
            raise NotImplementedError("encrypt method not supported yet!!!")
        encrypter = PaillierEncrypt()
        encrypter.generate_key(param.encrypt_param.key_length)
        return encrypter

    def encrypt_tensor(self, components, return_dtable=True):
        """
        transform numpy array into Paillier tensor and encrypt
        """
        encrypted_tensors = []
        for comp in components:
            paillier_tensor = PaillierTensor(comp, partitions=self.partitions)
            encrypted = paillier_tensor.encrypt(self.encrypter)
            # return either the raw distributed object or the tensor wrapper
            encrypted_tensors.append(encrypted.get_obj() if return_dtable else encrypted)
        return encrypted_tensors

    def learning_rate_decay(self, learning_rate, epoch):
        """1/sqrt(t+1) learning-rate decay schedule."""
        return learning_rate * 1 / np.sqrt(epoch + 1)

    def sync_stop_flag(self, num_round, stop_flag=None):
        """
        Early-stop flag exchange for n_iter_no_change: guest broadcasts it,
        host receives and returns it.
        """
        LOGGER.info("sync stop flag, boosting round is {}".format(num_round))
        if self.role == consts.GUEST:
            self.transfer_variable.stop_flag.remote(stop_flag,
                                                    role=consts.HOST,
                                                    idx=-1,
                                                    suffix=(num_round,))
        elif self.role == consts.HOST:
            return self.transfer_variable.stop_flag.get(idx=0, suffix=(num_round, ))

    def prepare_data(self, intersect_obj, data_inst, guest_side=False):
        """
        Find intersect ids and build the FTLDataLoader.

        :return: (data_loader, x_shape, total_sample_num, overlap_sample_num)
        :raises ValueError: when no overlap samples exist, or (guest only)
            when there are no non-overlap samples
        """
        if guest_side:
            data_inst = self.check_label(data_inst)
        overlap_samples = intersect_obj.run_intersect(data_inst)  # find intersect ids
        overlap_samples = intersect_obj.get_value_from_data(overlap_samples, data_inst)
        overlap_count = overlap_samples.count()
        non_overlap_samples = data_inst.subtractByKey(overlap_samples)
        non_overlap_count = non_overlap_samples.count()
        LOGGER.debug('num of overlap/non-overlap samples: {}/{}'.format(overlap_count, non_overlap_count))
        if overlap_count == 0:
            raise ValueError('no overlap samples')
        # bug fix: the old code compared the Table object itself with 0
        # (always False), so this guard never fired; compare the count instead
        if guest_side and non_overlap_count == 0:
            raise ValueError('non-overlap samples are required in guest side')
        self.store_header = data_inst.schema['header']
        LOGGER.debug('data inst header is {}'.format(self.store_header))
        LOGGER.debug('has {} overlap samples'.format(overlap_count))
        batch_size = self.batch_size
        if self.batch_size == -1:
            batch_size = data_inst.count() + 1  # make sure larger than sample number
        data_loader = FTLDataLoader(non_overlap_samples=non_overlap_samples,
                                    batch_size=batch_size, overlap_samples=overlap_samples, guest_side=guest_side)
        LOGGER.debug("data details are :{}".format(data_loader.data_basic_info()))
        return data_loader, data_loader.x_shape, data_inst.count(), len(data_loader.get_overlap_indexes())

    def get_model_float_type(self, nn):
        """Record the float dtype of the nn weights for later gradient casting."""
        weights = nn.get_trainable_weights()
        self.model_float_type = weights[0].dtype

    def initialize_nn(self, input_shape):
        """
        initializing nn weights
        """
        loss = "keep_predict_loss"
        self.nn_builder = get_nn_builder(config_type=self.config_type)
        self.nn = self.nn_builder(loss=loss, nn_define=self.nn_define, optimizer=self.optimizer, metrics=None,
                                  input_shape=input_shape)
        self.get_model_float_type(self.nn)
        LOGGER.debug('printing nn layers structure')
        for layer in self.nn._model.layers:
            LOGGER.debug('input shape {}, output shape {}'.format(layer.input_shape, layer.output_shape))

    def generate_mask(self, shape):
        """
        generate random number mask
        """
        return self.rng_generator.generate_random_number(shape)

    def _batch_gradient_update(self, X, grads):
        """
        compute and update gradients for all samples
        """
        data = self.data_convertor.convert_data(X, grads)
        self.nn.train(data)

    def _get_mini_batch_gradient(self, X_batch, backward_grads_batch):
        """
        compute weight gradients for one mini batch
        """
        X_batch = X_batch.astype(self.model_float_type)
        backward_grads_batch = backward_grads_batch.astype(self.model_float_type)
        return self.nn.get_weight_gradients(X_batch, backward_grads_batch)

    def update_nn_weights(self, backward_grads, data_loader: FTLDataLoader, epoch_idx, decay=False):
        """
        updating bottom nn model weights using backward gradients
        """
        LOGGER.debug('updating grads at epoch {}'.format(epoch_idx))
        assert len(data_loader.x) == len(backward_grads)
        weight_grads = []
        for batch_idx in range(len(data_loader)):
            start, end = data_loader.get_batch_indexes(batch_idx)
            batch_weight_grads = self._get_mini_batch_gradient(data_loader.x[start: end],
                                                               backward_grads[start: end])
            if not weight_grads:
                weight_grads.extend(batch_weight_grads)
            else:
                # accumulate gradients across mini batches, in place
                for w, bw in zip(weight_grads, batch_weight_grads):
                    w += bw
        if decay:
            new_learning_rate = self.learning_rate_decay(self.learning_rate, epoch_idx)
            self.nn.set_learning_rate(new_learning_rate)
            LOGGER.debug('epoch {} optimizer details are {}'.format(epoch_idx, self.nn.export_optimizer_config()))
        self.nn.apply_gradients(weight_grads)

    def export_nn(self):
        """Return the serialized bottom nn model bytes."""
        return self.nn.export_model()

    @staticmethod
    def get_dataset_key(data_inst):
        """Cache key for a data table: its python object id."""
        return id(data_inst)

    def get_model_meta(self):
        """Build the FTLModelMeta protobuf from current settings."""
        model_meta = FTLModelMeta()
        model_meta.config_type = self.config_type
        model_meta.nn_define = json.dumps(self.nn_define)
        model_meta.batch_size = self.batch_size
        model_meta.epochs = self.epochs
        model_meta.tol = self.tol
        model_meta.input_dim = self.input_dim
        predict_param = FTLPredictParam()
        optimizer_param = FTLOptimizerParam()
        optimizer_param.optimizer = self.optimizer.optimizer
        optimizer_param.kwargs = json.dumps(self.optimizer.kwargs)
        model_meta.optimizer_param.CopyFrom(optimizer_param)
        model_meta.predict_param.CopyFrom(predict_param)
        return model_meta

    def get_model_param(self):
        """Build the FTLModelParam protobuf (model bytes + stored header)."""
        model_param = FTLModelParam()
        model_param.model_bytes = self.nn.export_model()
        model_param.header.extend(list(self.store_header))
        return model_param

    def set_model_meta(self, model_meta):
        """Restore settings from FTLModelMeta and rebuild the nn."""
        self.config_type = model_meta.config_type
        self.nn_define = json.loads(model_meta.nn_define)
        self.batch_size = model_meta.batch_size
        self.epochs = model_meta.epochs
        self.tol = model_meta.tol
        self.optimizer = FTLParam()._parse_optimizer(FTLParam().optimizer)
        self.input_dim = model_meta.input_dim
        self.optimizer.optimizer = model_meta.optimizer_param.optimizer
        self.optimizer.kwargs = json.loads(model_meta.optimizer_param.kwargs)
        self.initialize_nn((self.input_dim,))

    def set_model_param(self, model_param):
        """Restore nn weights and the stored data header from FTLModelParam."""
        # bug fix: KerasNNModel.restore_model is a staticmethod that RETURNS a
        # new model; the old code discarded the return value, so the trained
        # weights were never installed. Keep the restored model.
        self.nn = self.nn.restore_model(model_param.model_bytes)
        self.store_header = list(model_param.header)
        LOGGER.debug('stored header load, is {}'.format(self.store_header))
| 12,882
| 37.804217
| 116
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/__init__.py
| 0
| 0
| 0
|
py
|
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/nn_model.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from federatedml.framework.weights import Weights
class NNModel(object):
    """Abstract interface of a trainable neural-network backend."""

    def get_model_weights(self) -> Weights:
        """Return the current trainable weights; no-op in the base class."""
        return None

    def set_model_weights(self, weights: Weights):
        """Install *weights* into the model; no-op in the base class."""
        return None

    def export_model(self):
        """Serialize the model; no-op in the base class."""
        return None

    def load_model(self):
        """Deserialize the model; no-op in the base class."""
        return None

    def train(self, data, **kwargs):
        """Fit on *data*; no-op in the base class."""
        return None

    def predict(self, data, **kwargs):
        """Predict on *data*; no-op in the base class."""
        return None

    def evaluate(self, data, **kwargs):
        """Evaluate on *data*; no-op in the base class."""
        return None

    def modify(self, func: typing.Callable[[Weights], Weights]) -> Weights:
        """Apply *func* to the current weights, install the result, and
        return the weights as they were BEFORE the modification."""
        previous = self.get_model_weights()
        updated = func(previous)
        self.set_model_weights(updated)
        return previous
class DataConverter(object):
    """Interface for converting raw FATE tables into model-consumable datasets."""

    def convert(self, data, *args, **kwargs):
        """Convert *data*; the base implementation is a no-op returning None."""
        return None
def get_nn_builder(config_type):
    """Return the model-builder function registered for *config_type*.

    Only the ``keras`` backend is supported; any other value raises ValueError.
    """
    if config_type != "keras":
        raise ValueError(f"{config_type} is not supported")
    from federatedml.transfer_learning.hetero_ftl.backend.tf_keras.nn_model import build_keras
    return build_keras
| 1,611
| 25
| 98
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/data_generator.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
import numpy as np
class KerasSequenceData(tf.keras.utils.Sequence):
    """Wrap arrays X (and optional y) as a single-batch keras Sequence."""

    def __init__(self, X, y=None):
        if X.shape[0] == 0:
            raise ValueError("Data is empty!")
        self.X = X
        # default to a zero label vector when no labels are supplied
        self.y = np.zeros(X.shape[0]) if y is None else y

    def __len__(self):
        # the whole dataset is served as one batch
        return 1

    def __getitem__(self, idx):
        return self.X, self.y
class KerasSequenceDataConverter(object):
    """Factory turning raw (x, y) arrays into a KerasSequenceData."""

    @classmethod
    def convert_data(cls, x=None, y=None):
        """Build a single-batch keras Sequence from *x* and optional *y*."""
        return KerasSequenceData(x, y)
| 1,238
| 25.361702
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/losses.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.keras.losses import *
from tensorflow.python.keras import backend as K
from tensorflow.python.framework import ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.losses.keep_predict_loss')
def keep_predict_loss(y_true, y_pred):
    """Pseudo-loss returning sum(y_true * y_pred).

    NOTE(review): in this FTL backend y_true appears to carry externally
    computed backward gradients (see the data convertor usage), so that
    minimizing this "loss" back-propagates exactly those gradients -- confirm.
    """
    y_pred = ops.convert_to_tensor(y_pred)
    return K.sum(y_true * y_pred)
| 1,019
| 33
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/nn_model.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import io
import json
import os
import uuid
import zipfile
import numpy as np
import tensorflow as tf
from federatedml.framework.weights import OrderDictWeights, Weights
from federatedml.transfer_learning.hetero_ftl.backend.tf_keras import losses
from federatedml.transfer_learning.hetero_ftl.backend.nn_model import DataConverter, NNModel
def _zip_dir_as_bytes(path):
with io.BytesIO() as io_bytes:
with zipfile.ZipFile(io_bytes, "w", zipfile.ZIP_DEFLATED) as zipper:
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
full_path = os.path.join(root, name)
relative_path = os.path.relpath(full_path, path)
zipper.write(filename=full_path, arcname=relative_path)
for name in dirs:
full_path = os.path.join(root, name)
relative_path = os.path.relpath(full_path, path)
zipper.write(filename=full_path, arcname=relative_path)
zip_bytes = io_bytes.getvalue()
return zip_bytes
def _modify_model_input_shape(nn_struct, input_shape):
if not input_shape:
return json.dumps(nn_struct)
if isinstance(input_shape, int):
input_shape = [input_shape]
else:
input_shape = list(input_shape)
struct = copy.deepcopy(nn_struct)
if (
not struct.get("config")
or not struct["config"].get("layers")
or not struct["config"]["layers"][0].get("config")
):
return json.dumps(struct)
if struct["config"]["layers"][0].get("config"):
struct["config"]["layers"][0]["config"]["batch_input_shape"] = [
None,
*input_shape,
]
return json.dumps(struct)
else:
return json.dump(struct)
def build_keras(nn_define, loss, optimizer, metrics, **kwargs):
    """Create and compile a KerasNNModel from a json-style layer definition.

    ``kwargs['input_shape']``, when present, overrides the first layer's
    batch input shape before the model is instantiated.
    """
    input_shape = kwargs.get("input_shape", None)
    nn_json = _modify_model_input_shape(nn_define, input_shape)
    raw_model = tf.keras.models.model_from_json(nn_json, custom_objects={})
    wrapped = KerasNNModel(raw_model)
    wrapped.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    return wrapped
class KerasNNModel(NNModel):
    """NNModel implementation wrapping a tf.keras model."""

    def __init__(self, model):
        self._model: tf.keras.Sequential = model
        # weight-variable name -> tf.Variable, used by get/set_model_weights
        self._trainable_weights = {v.name: v for v in self._model.trainable_weights}
        self._loss = None
        self._loss_fn = None

    def compile(self, loss, optimizer, metrics):
        """Resolve the optimizer by name and compile the wrapped model;
        the loss is looked up in the local ``losses`` module."""
        optimizer_instance = getattr(tf.keras.optimizers, optimizer.optimizer)(
            **optimizer.kwargs
        )
        self._loss_fn = getattr(losses, loss)
        self._model.compile(
            optimizer=optimizer_instance, loss=self._loss_fn, metrics=metrics
        )

    def get_model_weights(self) -> OrderDictWeights:
        return OrderDictWeights(self._trainable_weights)

    def set_model_weights(self, weights: Weights):
        # assign each named variable from the unboxed weight dict
        unboxed = weights.unboxed
        for name, v in self._trainable_weights.items():
            v.assign(unboxed[name])

    def get_layer_by_index(self, layer_idx):
        return self._model.layers[layer_idx]

    def set_layer_weights_by_index(self, layer_idx, weights):
        self._model.layers[layer_idx].set_weights(weights)

    def get_input_gradients(self, X, y):
        # gradient of the loss w.r.t. the input tensor X (not the weights)
        with tf.GradientTape() as tape:
            X = tf.constant(X)
            y = tf.constant(y)
            tape.watch(X)
            loss = self._loss_fn(y, self._model(X))
        return [tape.gradient(loss, X).numpy()]

    def get_trainable_gradients(self, X, y):
        return self._get_gradients(X, y, self._trainable_weights)

    def apply_gradients(self, grads):
        # grads must be ordered like self._model.trainable_variables
        self._model.optimizer.apply_gradients(
            zip(grads, self._model.trainable_variables)
        )

    def get_weight_gradients(self, X, y):
        return self._get_gradients(X, y, self._model.trainable_variables)

    def get_trainable_weights(self):
        return [w.numpy() for w in self._model.trainable_weights]

    def get_loss(self):
        # loss history recorded by the last call to train()
        return self._loss

    def get_forward_loss_from_input(self, X, y):
        loss = self._loss_fn(tf.constant(y), self._model(X))
        return loss.numpy()

    def _get_gradients(self, X, y, variable):
        # generic helper: gradient of the loss w.r.t. *variable*, as numpy arrays
        with tf.GradientTape() as tape:
            y = tf.constant(y)
            loss = self._loss_fn(y, self._model(X))
            g = tape.gradient(loss, variable)
            if isinstance(g, list):
                return [t.numpy() for t in g]
            else:
                return [g.numpy()]

    def set_learning_rate(self, learning_rate):
        self._model.optimizer.learning_rate.assign(learning_rate)

    def train(self, data: tf.keras.utils.Sequence, **kwargs):
        """Fit on *data*; returns epochs * number of batches processed."""
        epochs = 1
        left_kwargs = copy.deepcopy(kwargs)
        if "aggregate_every_n_epoch" in kwargs:
            epochs = kwargs["aggregate_every_n_epoch"]
            del left_kwargs["aggregate_every_n_epoch"]
        # History callback records the per-epoch loss, read back below
        left_kwargs["callbacks"] = [tf.keras.callbacks.History()]
        self._model.fit(x=data, epochs=epochs, verbose=1, shuffle=True, **left_kwargs)
        self._loss = left_kwargs["callbacks"][0].history["loss"]
        return epochs * len(data)

    def evaluate(self, data: tf.keras.utils.Sequence, **kwargs):
        """Return a dict of metric name -> value from keras evaluate()."""
        names = self._model.metrics_names
        values = self._model.evaluate(x=data, verbose=1)
        if not isinstance(values, list):
            values = [values]
        return dict(zip(names, values))

    def predict(self, data: tf.keras.utils.Sequence, **kwargs):
        return self._model.predict(data)

    def export_model(self):
        # save to a uuid-named dir under ./saved_model, then zip it to bytes
        model_base = "./saved_model"
        if not os.path.exists(model_base):
            os.mkdir(model_base)
        model_path = f"{model_base}/{uuid.uuid1()}"
        os.mkdir(model_path)
        self._model.save(model_path)
        model_bytes = _zip_dir_as_bytes(model_path)
        return model_bytes

    @staticmethod
    def restore_model(
        model_bytes,
    ):  # todo: restore optimizer to support incremental learning
        # NOTE: static -- RETURNS a new KerasNNModel; it does not mutate an
        # existing instance even when invoked through one.
        model_base = "./restore_model"
        if not os.path.exists(model_base):
            os.mkdir(model_base)
        model_path = f"{model_base}/{uuid.uuid1()}"
        os.mkdir(model_path)
        with io.BytesIO(model_bytes) as bytes_io:
            with zipfile.ZipFile(bytes_io, "r", zipfile.ZIP_DEFLATED) as f:
                f.extractall(model_path)
        # add custom objects
        from federatedml.transfer_learning.hetero_ftl.backend.tf_keras.losses import keep_predict_loss
        tf.keras.utils.get_custom_objects().update(
            {"keep_predict_loss": keep_predict_loss}
        )
        model = tf.keras.models.load_model(f"{model_path}")
        return KerasNNModel(model)

    def export_optimizer_config(self):
        return self._model.optimizer.get_config()
class KerasSequenceData(tf.keras.utils.Sequence):
    """In-memory keras Sequence built from a FATE data_instances table.

    Labels are one-hot encoded when ``encode_label`` is True (requires more
    than two classes); otherwise the mapped scalar label is stored.

    :raises ValueError: on empty input or an unsupported label count.
    """

    def get_shape(self):
        """Return (feature shape, label shape)."""
        return self.x_shape, self.y_shape

    def __init__(self, data_instances, batch_size, encode_label, label_mapping):
        self.size = data_instances.count()
        if self.size <= 0:
            raise ValueError("empty data")

        _, one_data = data_instances.first()
        self.x_shape = one_data.features.shape
        num_label = len(label_mapping)
        # bug fix: removed a stray debug print(label_mapping)

        if encode_label:
            if num_label > 2:
                self.y_shape = (num_label,)
            else:
                raise ValueError(f"num_label is {num_label}")
        else:
            if num_label >= 2:
                self.y_shape = (1,)
            else:
                raise ValueError(f"num_label is {num_label}")

        self.x = np.zeros((self.size, *self.x_shape))
        self.y = np.zeros((self.size, *self.y_shape))
        self._keys = []
        for index, (k, inst) in enumerate(data_instances.collect()):
            self._keys.append(k)
            self.x[index] = inst.features
            if encode_label:
                # one-hot encode the mapped label index
                self.y[index][label_mapping[inst.label]] = 1
            else:
                self.y[index] = label_mapping[inst.label]

        # batch_size <= 0 means "one batch holding everything"
        self.batch_size = batch_size if batch_size > 0 else self.size

    def __getitem__(self, index):
        """Gets batch at position `index`.

        # Arguments
            index: position of the batch in the Sequence.
        # Returns
            A batch
        """
        start = self.batch_size * index
        end = self.batch_size * (index + 1)
        return self.x[start:end], self.y[start:end]

    def __len__(self):
        """Number of batch in the Sequence.

        # Returns
            The number of batches in the Sequence.
        """
        return int(np.ceil(self.size / float(self.batch_size)))

    def get_keys(self):
        """Sample keys, in the same order as the x/y rows."""
        return self._keys
class KerasSequenceDataConverter(DataConverter):
    """DataConverter producing KerasSequenceData instances."""

    def convert(self, data, *args, **kwargs):
        """Wrap *data* (a data_instances table) as a KerasSequenceData."""
        return KerasSequenceData(data, *args, **kwargs)
| 9,949
| 33.548611
| 102
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/data_generator.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
import numpy as np
class KerasSequenceData(tf.keras.utils.Sequence):
    """Wrap arrays X (and optional y) as a single-batch keras Sequence."""

    def __init__(self, X, y=None):
        if X.shape[0] == 0:
            raise ValueError("Data is empty!")
        self.X = X
        # default to a zero label vector when no labels are supplied
        self.y = np.zeros(X.shape[0]) if y is None else y

    def __len__(self):
        # the whole dataset is served as one batch
        return 1

    def __getitem__(self, idx):
        return self.X, self.y
class KerasSequenceDataConverter(object):
    """Factory turning raw (x, y) arrays into a KerasSequenceData."""

    @classmethod
    def convert_data(cls, x=None, y=None):
        """Build a single-batch keras Sequence from *x* and optional *y*."""
        return KerasSequenceData(x, y)
| 1,238
| 25.361702
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616
| 37.5625
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/layers/pooling.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.python.keras import layers
def _build_maxpooling1d(pool_size=2,
                        strides=None,
                        padding='valid',
                        data_format=None,
                        **kwargs):
    """Build a keras 1-D max-pooling layer from plain keyword settings."""
    layer_cls = layers.pooling.MaxPooling1D
    return layer_cls(pool_size=pool_size, strides=strides, padding=padding,
                     data_format=data_format, **kwargs)
def _build_maxpooling2d(pool_size=(2, 2),
                        strides=None,
                        padding='valid',
                        data_format=None,
                        **kwargs):
    """Build a keras 2-D max-pooling layer from plain keyword settings."""
    layer_cls = layers.pooling.MaxPooling2D
    return layer_cls(pool_size=pool_size, strides=strides, padding=padding,
                     data_format=data_format, **kwargs)
def _build_maxpooling3d(pool_size=(2, 2, 2),
                        strides=None,
                        padding='valid',
                        data_format=None,
                        **kwargs):
    """Build a keras 3-D max-pooling layer from plain keyword settings."""
    layer_cls = layers.pooling.MaxPooling3D
    return layer_cls(pool_size=pool_size, strides=strides, padding=padding,
                     data_format=data_format, **kwargs)
def _build_averagepooling1d(pool_size=2,
                            strides=None,
                            padding='valid',
                            data_format=None,
                            **kwargs):
    """Build a keras 1-D average-pooling layer from plain keyword settings."""
    layer_cls = layers.pooling.AveragePooling1D
    return layer_cls(pool_size=pool_size, strides=strides, padding=padding,
                     data_format=data_format, **kwargs)
def _build_averagepooling2d(pool_size=(2, 2),
                            strides=None,
                            padding='valid',
                            data_format=None,
                            **kwargs):
    """Build a keras 2-D average-pooling layer from plain keyword settings."""
    layer_cls = layers.pooling.AveragePooling2D
    return layer_cls(pool_size=pool_size, strides=strides, padding=padding,
                     data_format=data_format, **kwargs)
def _build_averagepooling3d(pool_size=(2, 2, 2),
                            strides=None,
                            padding='valid',
                            data_format=None,
                            **kwargs):
    """Build a keras 3-D average-pooling layer from plain keyword settings."""
    layer_cls = layers.pooling.AveragePooling3D
    return layer_cls(pool_size=pool_size, strides=strides, padding=padding,
                     data_format=data_format, **kwargs)
# Builders for the global pooling layers.
# Bug fix: these previously aliased the classes' ``__init__`` methods, so
# calling e.g. ``_build_global_maxpooling1d(...)`` raised a TypeError (no
# ``self``) instead of returning a layer. Aliasing the class itself makes
# each builder return a constructed layer, consistent with the other
# _build_* helpers in this module.
_build_global_averagepooling1d = layers.pooling.GlobalAveragePooling1D
_build_global_averagepooling2d = layers.pooling.GlobalAveragePooling2D
_build_global_averagepooling3d = layers.pooling.GlobalAveragePooling3D
_build_global_maxpooling1d = layers.pooling.GlobalMaxPooling1D
_build_global_maxpooling2d = layers.pooling.GlobalMaxPooling2D
_build_global_maxpooling3d = layers.pooling.GlobalMaxPooling3D
| 4,149
| 39.291262
| 79
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/layers/baisc.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.python.keras import layers
from .util import _get_initializer
def _build_dense(units, activation, use_bias=True, kernel_initializer="glorot_uniform",
                 bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None, seed=None, **kwargs):
    """Build a keras Dense layer; *seed* is threaded into both initializers."""
    kernel_init = _get_initializer(kernel_initializer, seed)
    bias_init = _get_initializer(bias_initializer, seed)
    return layers.Dense(units=units,
                        activation=activation,
                        use_bias=use_bias,
                        kernel_initializer=kernel_init,
                        bias_initializer=bias_init,
                        kernel_regularizer=kernel_regularizer,
                        bias_regularizer=bias_regularizer,
                        activity_regularizer=activity_regularizer,
                        kernel_constraint=kernel_constraint,
                        bias_constraint=bias_constraint,
                        **kwargs)
def _build_dropout(rate, noise_shape=None, seed=None, **kwargs):
    """Build a keras Dropout layer."""
    dropout_cls = layers.Dropout
    return dropout_cls(rate, noise_shape=noise_shape, seed=seed, **kwargs)
def _build_flatten(data_format=None, **kwargs):
    """Build a keras Flatten layer."""
    flatten_cls = layers.Flatten
    return flatten_cls(data_format=data_format, **kwargs)
| 1,881
| 43.809524
| 117
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/layers/util.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.python.keras import initializers
def _get_initializer(initializer, seed):
if not seed:
return initializer
initializer_class = getattr(initializers, initializer, None)
if initializer_class:
initializer_instance = initializer_class()
if hasattr(initializer_instance, "seed"):
initializer_instance.seed = seed
return initializer_instance
return initializer
| 1,052
| 30.909091
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/layers/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .baisc import _build_dense, _build_dropout, _build_flatten
from .conv import _build_conv1d, _build_conv2d, _build_conv3d
from .pooling import _build_maxpooling1d, _build_maxpooling2d, _build_maxpooling3d, _build_averagepooling3d
# Canonical (lower-cased) layer names recognized by this backend.
DENSE = "Dense".lower()
DROPOUT = "Dropout".lower()
FLATTEN = "Flatten".lower()
CONV_1D = "Conv1D".lower()
CONV_2D = "Conv2D".lower()
CONV_3D = "Conv3D".lower()
MAX_POOLING_1D = "MaxPooling1D".lower()
MAX_POOLING_2D = "MaxPooling2D".lower()
MAX_POOLING_3D = "MaxPooling3D".lower()
# Fix: _build_averagepooling3d was imported but never registered, so
# "AveragePooling3D" layers could not be built despite a builder existing.
AVERAGE_POOLING_3D = "AveragePooling3D".lower()

# Case-insensitive layer-name -> builder-function registry.
layer2builder = {
    DENSE: _build_dense,
    DROPOUT: _build_dropout,
    FLATTEN: _build_flatten,
    CONV_1D: _build_conv1d,
    CONV_2D: _build_conv2d,
    CONV_3D: _build_conv3d,
    MAX_POOLING_1D: _build_maxpooling1d,
    MAX_POOLING_2D: _build_maxpooling2d,
    MAX_POOLING_3D: _build_maxpooling3d,
    AVERAGE_POOLING_3D: _build_averagepooling3d,
}


def get_builder(layer):
    """Return the builder function for ``layer`` (case-insensitive), or None."""
    return layer2builder.get(layer.lower())


def has_builder(layer):
    """Return True if a builder is registered for ``layer`` (case-insensitive)."""
    return layer.lower() in layer2builder
| 1,583
| 29.461538
| 107
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/backend/tf_keras/layers/conv.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from tensorflow.python.keras import layers
def _build_conv1d(filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1,
                  activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros',
                  kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None,
                  bias_constraint=None, **kwargs):
    """Build a Keras 1-D convolution layer from the given hyper-parameters."""
    conv_config = dict(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
    )
    # Double-unpacking keeps the original duplicate-keyword TypeError behavior.
    return layers.convolutional.Conv1D(**conv_config, **kwargs)
def _build_conv2d(filters, kernel_size, strides=(1, 1), padding='valid', data_format='channels_last',
                  dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform',
                  bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                  kernel_constraint=None, bias_constraint=None, **kwargs):
    """Build a Keras 2-D convolution layer from the given hyper-parameters."""
    conv_config = dict(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
    )
    # Double-unpacking keeps the original duplicate-keyword TypeError behavior.
    return layers.convolutional.Conv2D(**conv_config, **kwargs)
def _build_conv3d(filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format='channels_last',
                  dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform',
                  bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
                  kernel_constraint=None, bias_constraint=None, **kwargs):
    """Build a Keras 3-D convolution layer from the given hyper-parameters."""
    conv_config = dict(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
    )
    # Double-unpacking keeps the original duplicate-keyword TypeError behavior.
    return layers.convolutional.Conv3D(**conv_config, **kwargs)
| 3,710
| 41.655172
| 118
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/test/test_ftl_modules.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
from federatedml.util import consts
from federatedml.nn.homo_nn.nn_model import get_nn_builder
import json
from federatedml.param.ftl_param import FTLParam
from numpy import array
from fate_arch.session import computing_session as session
import pandas as pd
from federatedml.nn.hetero_nn.backend.tf_keras.data_generator import KerasSequenceDataConverter
from federatedml.transfer_learning.hetero_ftl.ftl_guest import FTLGuest
from federatedml.transfer_learning.hetero_ftl.ftl_host import FTLHost
from federatedml.transfer_learning.hetero_ftl.ftl_base import FTL
from federatedml.param.ftl_param import FTLParam
from federatedml.feature.instance import Instance
import json
class TestFTL(unittest.TestCase):
    """Unit tests for the hetero federated transfer learning (FTL) modules."""

    def setUp(self):
        # A local computing session is required for the table operations below.
        session.init('test', 0)

    def test_guest_model_init(self):
        # Build the guest-side model from a serialized Keras Sequential
        # definition, then initialize its bottom network and dump the weights.
        model = FTLGuest()
        param = FTLParam(
            nn_define=json.loads('{"class_name": "Sequential", "config": {"name": "sequential_1", "layers": [{"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "batch_input_shape": [null, 32], "dtype": "float32", "units": 64, "activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "RandomNormal", "config": {"mean": 0.0, "stddev": 1.0, "seed": 100, "dtype": "float32"}}, "bias_initializer": {"class_name": "Constant", "config": {"value": 0, "dtype": "float32"}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.2.4-tf", "backend": "tensorflow"}')
        )
        param.check()
        model._init_model(param)
        model.initialize_nn(input_shape=100)
        print(model.nn.get_trainable_weights())

    def test_host_model_init(self):
        # Same as the guest test, but for the host-side model.
        model = FTLHost()
        param = FTLParam(
            nn_define=json.loads('{"class_name": "Sequential", "config": {"name": "sequential_1", "layers": [{"class_name": "Dense", "config": {"name": "dense_1", "trainable": true, "batch_input_shape": [null, 32], "dtype": "float32", "units": 64, "activation": "sigmoid", "use_bias": true, "kernel_initializer": {"class_name": "RandomNormal", "config": {"mean": 0.0, "stddev": 1.0, "seed": 100, "dtype": "float32"}}, "bias_initializer": {"class_name": "Constant", "config": {"value": 0, "dtype": "float32"}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}}]}, "keras_version": "2.2.4-tf", "backend": "tensorflow"}')
        )
        param.check()
        model._init_model(param)
        model.initialize_nn(input_shape=100)
        print(model.nn.get_trainable_weights())

    def test_label_reset(self):
        # Build 100 negative (-1) and 100 positive (+1) instances; labels are
        # assigned after append, which is fine since `inst` is a reference.
        l = []
        for i in range(100):
            inst = Instance()
            inst.features = np.random.random(20)
            l.append(inst)
            inst.label = -1
        for i in range(100):
            inst = Instance()
            inst.features = np.random.random(20)
            l.append(inst)
            inst.label = 1
        table = session.parallelize(l, partition=4, include_key=False)
        # check_label is expected to remap labels into the form FTL uses;
        # the result is only printed here, not asserted.
        rs = FTL().check_label(table)
        new_label = [i[1].label for i in list(rs.collect())]
        print(new_label)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 3,947
| 44.906977
| 705
|
py
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/test/__init__.py
| 0
| 0
| 0
|
py
|
|
FATE
|
FATE-master/python/federatedml/transfer_learning/hetero_ftl/test/test_paillier_tensor.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.secureprotol.paillier_tensor import PaillierTensor
from federatedml.secureprotol import PaillierEncrypt
from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator
from federatedml.param.encrypt_param import EncryptParam
from federatedml.param.encrypted_mode_calculation_param import EncryptedModeCalculatorParam
from federatedml.nn.hetero_nn.util import random_number_generator
import unittest
from federatedml.util import consts
from fate_arch.session import computing_session as session
class TestPaillierTensor(unittest.TestCase):
    """Smoke tests for PaillierTensor arithmetic and encryption helpers."""

    def setUp(self):
        # A local computing session is required to parallelize tensors.
        session.init('test', 0)

    def test_tensor_op(self):
        # Shapes chosen to exercise 3-D elementwise and matmul_3d code paths.
        arr1 = np.ones((10, 1, 3))
        arr1[0] = np.array([[2, 3, 4]])
        arr2 = np.ones((10, 3, 3))
        arr3 = np.ones([1, 1, 3])
        arr4 = np.ones([50, 1])
        arr5 = np.ones([32])
        pt = PaillierTensor(arr1)
        pt2 = PaillierTensor(arr2)
        pt3 = PaillierTensor(arr3)
        pt4 = PaillierTensor(arr4)
        pt5 = PaillierTensor(arr5)
        encrypter = PaillierEncrypt()
        encrypter.generate_key(EncryptParam().key_length)
        encrypted_calculator = EncryptModeCalculator(encrypter,
                                                     EncryptedModeCalculatorParam().mode,
                                                     EncryptedModeCalculatorParam().re_encrypted_rate)
        # NOTE(review): the results below are computed but never asserted —
        # this test only verifies that the operations run without raising.
        rs1 = pt * arr2
        rs2 = pt * pt2
        rs3 = pt.matmul_3d(pt2)
        enpt = pt2.encrypt(encrypted_calculator)
        enrs = enpt.matmul_3d(arr1, multiply='right')
        rng_generator = random_number_generator.RandomNumberGenerator()
        enpt2 = pt4.encrypt(encrypted_calculator)
        random_num = rng_generator.generate_random_number(enpt2.shape)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 2,450
| 34.014286
| 102
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/paillier_tensor.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from federatedml.util import LOGGER
from fate_arch.session import computing_session
from fate_arch.abc import CTableABC
class PaillierTensor(object):
    """Distributed tensor backed by a computing table.

    Each table row holds one slice along axis 0 (row key = axis-0 index),
    so a tensor of shape (m, ...) is stored as m keyed numpy arrays. Values
    may be plaintext or Paillier-encrypted numbers.
    """

    def __init__(self, obj, partitions=1):
        # Accept an in-memory list/ndarray (parallelized into a table) or an
        # existing computing table (CTableABC). Anything else is rejected.
        if obj is None:
            raise ValueError("Cannot convert None to Paillier tensor")
        if isinstance(obj, (list, np.ndarray)):
            self._ori_data = obj
            self._partitions = partitions
            self._obj = computing_session.parallelize(obj,
                                                      include_key=False,
                                                      partition=partitions)
        elif isinstance(obj, CTableABC):
            # Table input: the in-memory copy is materialized lazily (numpy()).
            self._ori_data = None
            self._partitions = obj.partitions
            self._obj = obj
        else:
            raise ValueError(f"Cannot convert obj to Paillier tensor, object type is {type(obj)}")
        LOGGER.debug("tensor's partition is {}".format(self._partitions))

    def __add__(self, other):
        # Elementwise addition; scalars/ndarrays broadcast onto every row.
        if isinstance(other, PaillierTensor):
            return PaillierTensor(self._obj.join(other._obj, lambda v1, v2: v1 + v2))
        elif isinstance(other, CTableABC):
            return PaillierTensor(self._obj.join(other, lambda v1, v2: v1 + v2))
        elif isinstance(other, (np.ndarray, int, float)):
            return PaillierTensor(self._obj.mapValues(lambda v: v + other))
        else:
            raise ValueError(f"Unrecognized type {type(other)}, dose not support subtraction")

    def __radd__(self, other):
        # Addition is commutative, so the reflected form is identical.
        return self.__add__(other)

    def __sub__(self, other):
        # Elementwise subtraction (self - other).
        if isinstance(other, PaillierTensor):
            return PaillierTensor(self._obj.join(other._obj, lambda v1, v2: v1 - v2))
        elif isinstance(other, CTableABC):
            return PaillierTensor(self._obj.join(other, lambda v1, v2: v1 - v2))
        elif isinstance(other, (np.ndarray, int, float)):
            return PaillierTensor(self._obj.mapValues(lambda v: v - other))
        else:
            raise ValueError(f"Unrecognized type {type(other)}, dose not support subtraction")

    def __rsub__(self, other):
        # NOTE(review): this returns self - other, not other - self, so the
        # reflected subtraction is not sign-correct — confirm callers rely on
        # the current behavior before changing it.
        return self.__sub__(other)

    def __mul__(self, other):
        # scalar: elementwise scale; ndarray: per-row np.matmul against
        # `other`; tensor: elementwise product via materialized numpy arrays.
        if isinstance(other, (int, float)):
            return PaillierTensor(self._obj.mapValues(lambda val: val * other))
        elif isinstance(other, np.ndarray):
            return PaillierTensor(self._obj.mapValues(lambda val: np.matmul(val, other)))
        elif isinstance(other, CTableABC):
            other = PaillierTensor(other)
            return self.__mul__(other)
        elif isinstance(other, PaillierTensor):
            ret = self.numpy() * other.numpy()
            return PaillierTensor(ret, partitions=max(self.partitions, other.partitions))
        # NOTE(review): unsupported types silently fall through and return None.

    def matmul(self, other):
        # 2-D matrix multiplication entry point; see fast_matmul_2d for the
        # shape contract. NOTE(review): non-ndarray inputs return None.
        if isinstance(other, np.ndarray):
            if len(other.shape) != 2:
                raise ValueError("Only Support 2-D multiplication in matmul op, "
                                 "if you want to do 3-D, use fast_multiply_3d")
            return self.fast_matmul_2d(other)

    def multiply(self, other):
        # Elementwise (Hadamard) product.
        if isinstance(other, np.ndarray):
            if other.shape != self.shape:
                raise ValueError(f"operands could not be broadcast together with shapes {self.shape} {other.shape}")
            rhs = PaillierTensor(other)
            # NOTE(review): this wraps the PaillierTensor result of
            # self.multiply(rhs) in another PaillierTensor, which __init__
            # rejects — looks like it should be `return self.multiply(rhs)`;
            # confirm before relying on the ndarray path.
            return PaillierTensor(self.multiply(rhs))
        elif isinstance(other, CTableABC):
            other = PaillierTensor(other)
            return self.multiply(other)
        elif isinstance(other, PaillierTensor):
            return PaillierTensor(self._obj.join(other._obj, lambda v1, v2: v1 * v2))
        else:
            raise ValueError(f"Not support type in multiply op {type(other)}")

    @property
    def T(self):
        # Transpose: materializes the full tensor in memory first.
        if self._ori_data is None:
            self._ori_data = self.numpy()
        new_data = self._ori_data.T
        return PaillierTensor(new_data, self.partitions)

    @property
    def partitions(self):
        return self._partitions

    def get_obj(self):
        # The underlying computing table.
        return self._obj

    @property
    def shape(self):
        # Axis 0 is the row count of the table; remaining axes come from the
        # shape of any one row value.
        if self._ori_data is not None:
            return self._ori_data.shape
        else:
            first_dim = self._obj.count()
            if not first_dim:
                return (0, )
            second_dim = self._obj.first()[1].shape
            return tuple([first_dim] + list(second_dim))

    def mean(self, axis=-1):
        # axis == -1: scalar mean over all elements. Otherwise axis is mapped
        # to axis-1 within each row, because axis 0 is the table key.
        # NOTE(review): the non-default branch reads self._ori_data only in the
        # axis == -1 path; confirm mean(-1) is called after numpy() for
        # table-backed tensors.
        if axis == -1:
            size = 1
            for shape in self._ori_data.shape:
                size *= shape
            if not size:
                raise ValueError("shape of data is zero, it should be positive")
            return self._obj.mapValues(lambda val: np.sum(val)).reduce(lambda val1, val2: val1 + val2) / size
        else:
            ret_obj = self._obj.mapValues(lambda val: np.mean(val, axis - 1))
            return PaillierTensor(ret_obj)

    def reduce_sum(self):
        # Sum all rows together (result is a single numpy array).
        return self._obj.reduce(lambda t1, t2: t1 + t2)

    def map_ndarray_product(self, other):
        # Elementwise product of every row with the same ndarray.
        if isinstance(other, np.ndarray):
            return PaillierTensor(self._obj.mapValues(lambda val: val * other))
        else:
            raise ValueError('only support numpy array')

    def numpy(self):
        # Materialize the table into an in-memory ndarray (cached).
        if self._ori_data is not None:
            return self._ori_data
        arr = [None for i in range(self._obj.count())]
        for k, v in self._obj.collect():
            arr[k] = v
        self._ori_data = np.array(arr, dtype=arr[0].dtype)
        return self._ori_data

    def encrypt(self, encrypt_tool):
        # Paillier-encrypt every row with the given EncryptModeCalculator.
        return PaillierTensor(encrypt_tool.distribute_encrypt(self._obj))

    def decrypt(self, decrypt_tool):
        return PaillierTensor(self._obj.mapValues(lambda val: decrypt_tool.recursive_decrypt(val)))

    def encode(self, encoder):
        return PaillierTensor(self._obj.mapValues(lambda val: encoder.encode(val)))

    def decode(self, decoder):
        return PaillierTensor(self._obj.mapValues(lambda val: decoder.decode(val)))

    @staticmethod
    def _vector_mul(kv_iters):
        # Partition-level accumulation of outer products for fast_matmul_2d.
        ret_mat = None
        for k, v in kv_iters:
            tmp_mat = np.outer(v[0], v[1])
            if ret_mat is not None:
                ret_mat += tmp_mat
            else:
                ret_mat = tmp_mat
        return ret_mat

    def fast_matmul_2d(self, other):
        """
        Matrix multiplication between two matrix, please ensure that self's shape is (m, n) and other's shape is (m, k)
        Their result is a matrix of (n, k)
        """
        if isinstance(other, np.ndarray):
            mat_tensor = PaillierTensor(other, partitions=self.partitions)
            return self.fast_matmul_2d(mat_tensor)
        if isinstance(other, CTableABC):
            other = PaillierTensor(other)
        func = self._vector_mul
        # Sum of per-row outer products == self.T @ other.
        ret_mat = self._obj.join(other.get_obj(), lambda vec1, vec2: (vec1, vec2)).applyPartitions(func).reduce(
            lambda mat1, mat2: mat1 + mat2)
        return ret_mat

    def matmul_3d(self, other, multiply='left'):
        # Batched matmul over axis 0: per row, tensordot contracts axis 1 of
        # the left operand with axis 0 of the right. `multiply` selects which
        # operand is on the left.
        assert multiply in ['left', 'right']
        if isinstance(other, PaillierTensor):
            mat = other
        elif isinstance(other, CTableABC):
            mat = PaillierTensor(other)
        elif isinstance(other, np.ndarray):
            mat = PaillierTensor(other, partitions=self.partitions)
        else:
            raise ValueError('only support numpy array and Paillier Tensor')
        if multiply == 'left':
            return PaillierTensor(self._obj.join(mat._obj, lambda val1, val2: np.tensordot(val1, val2, (1, 0))),
                                  partitions=self._partitions)
        if multiply == 'right':
            return PaillierTensor(mat._obj.join(self._obj, lambda val1, val2: np.tensordot(val1, val2, (1, 0))),
                                  partitions=self._partitions)

    def element_wise_product(self, other):
        # Elementwise product, accepting ndarray / table / tensor operands.
        if isinstance(other, np.ndarray):
            mat = PaillierTensor(other, partitions=self.partitions)
        elif isinstance(other, CTableABC):
            mat = PaillierTensor(other)
        else:
            mat = other
        return PaillierTensor(self._obj.join(mat._obj, lambda val1, val2: val1 * val2))

    def squeeze(self, axis):
        # axis 0: drop the table dimension by taking the first row's value;
        # other axes: squeeze within each row (shifted by 1 for the table key).
        if axis == 0:
            return PaillierTensor(list(self._obj.collect())[0][1], partitions=self.partitions)
        else:
            return PaillierTensor(self._obj.mapValues(lambda val: np.squeeze(val, axis=axis - 1)))

    def select_columns(self, select_table):
        # Per-row fancy indexing: select_table holds the indices to keep.
        return PaillierTensor(self._obj.join(select_table, lambda v1, v2: v1[v2]))
| 9,312
| 36.103586
| 119
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/gmpy_math.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import random
import gmpy2
POWMOD_GMP_SIZE = pow(2, 64)
def powmod(a, b, c):
    """Modular exponentiation: return (a ** b) % c as an int.

    Uses the built-in pow for small operands and gmpy2.powmod once any
    operand reaches POWMOD_GMP_SIZE (2 ** 64).
    """
    if a == 1:
        return 1
    small_enough = max(a, b, c) < POWMOD_GMP_SIZE
    if small_enough:
        return pow(a, b, c)
    return int(gmpy2.powmod(a, b, c))
def crt_coefficient(p, q):
    """Return the CRT recombination coefficients for the prime pair (p, q)."""
    q_inv_mod_p = gmpy2.invert(q, p)
    p_inv_mod_q = gmpy2.invert(p, q)
    return q_inv_mod_p * q, p_inv_mod_q * p
def powmod_crt(x, d, n, p, q, cp, cq):
    """Return (x ** d) % n, computed via the Chinese Remainder Theorem.

    p and q are the prime factors of n; cp and cq are the CRT coefficients
    produced by crt_coefficient(p, q).
    """
    part_p = gmpy2.powmod(x, d % (p - 1), p)
    part_q = gmpy2.powmod(x, d % (q - 1), q)
    combined = part_p * cp + part_q * cq
    return int(combined % n)
def invert(a, b):
    """Return x such that (a * x) % b == 1; raise if no inverse exists."""
    result = int(gmpy2.invert(a, b))
    if result == 0:
        # gmpy2 signals "no inverse" by returning 0.
        raise ZeroDivisionError("invert(a, b) no inverse exists")
    return result
def getprimeover(n):
    """Return a random n-bit prime number."""
    candidate = gmpy2.mpz(random.SystemRandom().getrandbits(n))
    # Force the top bit so the prime really has n bits.
    candidate = gmpy2.bit_set(candidate, n - 1)
    return int(gmpy2.next_prime(candidate))
def isqrt(n):
    """Return the integer square root of n."""
    root = gmpy2.isqrt(n)
    return int(root)
def is_prime(n):
    """Return True if n is probably prime, False otherwise."""
    value = int(n)
    return gmpy2.is_prime(value)
def legendre(a, p):
    """Euler's criterion: return a ** ((p - 1) / 2) mod p."""
    exponent = (p - 1) // 2
    return pow(a, exponent, p)


def tonelli(n, p):
    """Tonelli-Shanks: return r such that r * r == n (mod p) for odd prime p.

    Assumes n is a quadratic residue mod p (legendre(n, p) == 1).
    """
    # Factor p - 1 as q * 2 ** s with q odd.
    q, s = p - 1, 0
    while q % 2 == 0:
        q //= 2
        s += 1
    if s == 1:
        # p % 4 == 3: the root is a single power.
        return pow(n, (p + 1) // 4, p)
    # Find a quadratic non-residue z (legendre == p - 1 means non-residue).
    for z in range(2, p):
        if p - 1 == legendre(z, p):
            break
    c = pow(z, q, p)
    r = pow(n, (q + 1) // 2, p)
    t = pow(n, q, p)
    m = s
    # Iteratively correct r until t == 1 (mod p).
    while (t - 1) % p != 0:
        t2 = (t * t) % p
        for i in range(1, m):
            if (t2 - 1) % p == 0:
                break
            t2 = (t2 * t2) % p
        factor = pow(c, 1 << (m - i - 1), p)
        r = (r * factor) % p
        c = (factor * factor) % p
        t = (t * c) % p
        m = i
    return r
def gcd(a, b):
    """Return the greatest common divisor of a and b."""
    value = gmpy2.gcd(a, b)
    return int(value)
def next_prime(n):
    """Return the smallest prime strictly greater than n."""
    value = gmpy2.next_prime(n)
    return int(value)
def mpz(n):
    """Return n as a gmpy2 multiple-precision integer."""
    value = gmpy2.mpz(n)
    return value
| 2,784
| 19.62963
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/elliptic_curve_encryption.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_crypto.psi import Curve25519
class EllipticCurve(object):
    """Curve25519 wrapper for ECDH-style private set intersection.

    Encrypts identifiers with a party-private scalar and applies the shared
    Diffie-Hellman transform over computing tables.
    """

    def __init__(self, curve_name, curve_key=None):
        # NOTE(review): `curve_name` is accepted but ignored by
        # __get_curve_instance — only Curve25519 is constructed.
        self.curve = self.__get_curve_instance(curve_name, curve_key)

    @staticmethod
    def __get_curve_instance(curve_name, curve_key):
        # A fresh random private key is generated when none is supplied.
        if curve_key is None:
            return Curve25519()
        return Curve25519(curve_key)

    def get_curve_key(self):
        # Serialized private key, allowing the curve state to be restored.
        return self.curve.get_private_key()

    def encrypt(self, plaintext):
        """
        Encrypt `plaintext` with this party's private scalar.
        :param plaintext: bytes-like identifier to encrypt
        :return: the curve-encrypted value
        """
        return self.curve.encrypt(plaintext)

    def sign(self, ciphertext):
        # Apply this party's scalar to the peer's ciphertext (DH step).
        return self.curve.diffie_hellman(ciphertext)

    def map_hash_encrypt(self, plaintable, mode, hash_operator, salt):
        """
        adapted from CryptorExecutor
        Hash each key (with `salt`) then encrypt; output layout per `mode`:
        (k, enc_k) for mode == 0
        (enc_k, -1) for mode == 1
        (enc_k, v) for mode == 2
        (k, (enc_k, v)) for mode == 3
        (enc_k, k) for mode == 4
        (enc_k, (k, v)) for mode == 5
        :param plaintable: Table
        :param mode: int
        :return: Table
        """
        if mode == 0:
            return plaintable.map(
                lambda k, v: (k, self.curve.encrypt(hash_operator.compute(k, suffix_salt=salt))))
        elif mode == 1:
            return plaintable.map(
                lambda k, v: (self.curve.encrypt(hash_operator.compute(k, suffix_salt=salt)), -1))
        elif mode == 2:
            return plaintable.map(
                lambda k, v: (self.curve.encrypt(hash_operator.compute(k, suffix_salt=salt)), v))
        elif mode == 3:
            return plaintable.map(
                lambda k, v: (k, (self.curve.encrypt(hash_operator.compute(k, suffix_salt=salt)), v)))
        elif mode == 4:
            return plaintable.map(
                lambda k, v: (self.curve.encrypt(hash_operator.compute(k, suffix_salt=salt)), k))
        elif mode == 5:
            return plaintable.map(
                lambda k, v: (self.curve.encrypt(hash_operator.compute(k, suffix_salt=salt)), (k, v)))
        else:
            raise ValueError("Unsupported mode for elliptic curve map encryption")

    def map_encrypt(self, plaintable, mode):
        """
        adapted from CryptorExecutor
        Encrypt each key directly (no hashing); output layout per `mode`:
        (k, enc_k) for mode == 0
        (enc_k, -1) for mode == 1
        (enc_k, v) for mode == 2
        (k, (enc_k, v)) for mode == 3
        (enc_k, k) for mode == 4
        (enc_k, (k, v)) for mode == 5
        :param plaintable: Table
        :param mode: int
        :return: Table
        """
        if mode == 0:
            return plaintable.map(lambda k, v: (k, self.curve.encrypt(k)))
        elif mode == 1:
            return plaintable.map(lambda k, v: (self.curve.encrypt(k), -1))
        elif mode == 2:
            return plaintable.map(lambda k, v: (self.curve.encrypt(k), v))
        elif mode == 3:
            return plaintable.map(lambda k, v: (k, (self.curve.encrypt(k), v)))
        elif mode == 4:
            return plaintable.map(lambda k, v: (self.curve.encrypt(k), k))
        elif mode == 5:
            return plaintable.map(lambda k, v: (self.curve.encrypt(k), (k, v)))
        else:
            raise ValueError("Unsupported mode for elliptic curve map encryption")

    def map_sign(self, plaintable, mode):
        """
        adapted from CryptorExecutor
        Apply the Diffie-Hellman transform to each key; layout per `mode`:
        (k, enc_k) for mode == 0
        (enc_k, -1) for mode == 1
        (enc_k, v) for mode == 2
        (k, (enc_k, v)) for mode == 3
        (enc_k, k) for mode == 4
        (enc_k, (k, v)) for mode == 5
        :param plaintable: Table
        :param mode: int
        :return: Table
        """
        if mode == 0:
            return plaintable.map(lambda k, v: (k, self.curve.diffie_hellman(k)))
        elif mode == 1:
            return plaintable.map(lambda k, v: (self.curve.diffie_hellman(k), -1))
        elif mode == 2:
            return plaintable.map(lambda k, v: (self.curve.diffie_hellman(k), v))
        elif mode == 3:
            return plaintable.map(lambda k, v: (k, (self.curve.diffie_hellman(k), v)))
        elif mode == 4:
            return plaintable.map(lambda k, v: (self.curve.diffie_hellman(k), k))
        elif mode == 5:
            return plaintable.map(lambda k, v: (self.curve.diffie_hellman(k), (k, v)))
        else:
            raise ValueError("Unsupported mode for elliptic curve map sign")
| 5,609
| 33.62963
| 102
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/random.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from numpy.random import RandomState
class RandomPads(object):
    """Random-pad utilities for secret homogeneous aggregation.

    Backed by numpy.random.RandomState (bit generator MT19937); other
    algorithms such as PCG or xoroshiro may be supported in the future.
    """

    def __init__(self, init_seed=None):
        self._rand = RandomState(init_seed)

    def rand(self, d0, *more, **kwargs):
        """Uniform U[0, 1) samples; same signature as RandomState.rand."""
        return self._rand.rand(d0, *more, **kwargs)

    def randn(self, d0, *more, **kwargs):
        """Standard-normal N(0, 1) samples; same signature as RandomState.randn."""
        return self._rand.randn(d0, *more, **kwargs)

    def add_randn_pads(self, a, w):
        """Return a + r * w, where r ~ N(0, 1) and r.shape == a.shape."""
        pads = self._rand.randn(*a.shape)
        return a + pads * w

    def add_rand_pads(self, a, w):
        """Return a + r * w, where r ~ U[0, 1) and r.shape == a.shape."""
        pads = self._rand.rand(*a.shape)
        return a + pads * w
| 1,583
| 33.434783
| 87
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/fixedpoint.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import math
import sys
import numpy as np
class FixedPointNumber(object):
    """Fixed-point encoding of an int or float.

    A value is represented as ``encoding * BASE ** -exponent`` in the ring
    of integers modulo ``n``; encodings above ``n - max_int`` represent
    negative values.
    """

    BASE = 16
    LOG2_BASE = math.log(BASE, 2)
    FLOAT_MANTISSA_BITS = sys.float_info.mant_dig

    # Default modulus used when no `n` is supplied.
    Q = 293973345475167247070445277780365744413 ** 2

    def __init__(self, encoding, exponent, n=None, max_int=None):
        # Without an explicit modulus, fall back to the class default Q and
        # split the ring in half for positive/negative representation.
        if n is None:
            self.n = FixedPointNumber.Q
            self.max_int = self.n // 2
        else:
            self.n = n
            if max_int is None:
                self.max_int = self.n // 2
            else:
                self.max_int = max_int
        self.encoding = encoding
        self.exponent = exponent

    @classmethod
    def calculate_exponent_from_precision(cls, precision):
        # Number of BASE-digits required for the requested precision.
        exponent = math.floor(math.log(precision, cls.BASE))
        return exponent

    @classmethod
    def encode(cls, scalar, n=None, max_int=None, precision=None, max_exponent=None):
        """Return a FixedPointNumber encoding of an int or float scalar."""
        # Calculate the maximum exponent for desired precision
        exponent = None
        # Too low value preprocess;
        # avoid "OverflowError: int too large to convert to float"
        if np.abs(scalar) < 1e-200:
            scalar = 0
        if n is None:
            n = cls.Q
            max_int = n // 2
        if precision is None:
            # Integers need no fractional digits; floats get enough BASE
            # digits to preserve the full mantissa.
            if isinstance(scalar, int) or isinstance(scalar, np.int16) or \
                    isinstance(scalar, np.int32) or isinstance(scalar, np.int64):
                exponent = 0
            elif isinstance(scalar, float) or isinstance(scalar, np.float16) \
                    or isinstance(scalar, np.float32) or isinstance(scalar, np.float64):
                flt_exponent = math.frexp(scalar)[1]
                lsb_exponent = cls.FLOAT_MANTISSA_BITS - flt_exponent
                exponent = math.floor(lsb_exponent / cls.LOG2_BASE)
            else:
                raise TypeError("Don't know the precision of type %s."
                                % type(scalar))
        else:
            exponent = cls.calculate_exponent_from_precision(precision)
        if max_exponent is not None:
            # NOTE(review): takes the *larger* of the two despite the
            # parameter name — confirm this is the intended semantics.
            exponent = max(max_exponent, exponent)
        int_fixpoint = int(round(scalar * pow(cls.BASE, exponent)))
        if abs(int_fixpoint) > max_int:
            raise ValueError(f"Integer needs to be within +/- {max_int},but got {int_fixpoint},"
                             f"basic info, scalar={scalar}, base={cls.BASE}, exponent={exponent}"
                             )
        return cls(int_fixpoint % n, exponent, n, max_int)

    def decode(self):
        """Return the decoded plaintext value (int or float)."""
        if self.encoding >= self.n:
            # Should be mod n
            raise ValueError('Attempted to decode corrupted number')
        elif self.encoding <= self.max_int:
            # Positive
            mantissa = self.encoding
        elif self.encoding >= self.n - self.max_int:
            # Negative
            mantissa = self.encoding - self.n
        else:
            # Middle band of the ring: neither valid positive nor negative.
            raise OverflowError(f'Overflow detected in decode number, encoding: {self.encoding},'
                                f'{self.exponent}'
                                f' {self.n}')
        return mantissa * pow(self.BASE, -self.exponent)

    def increase_exponent_to(self, new_exponent):
        """Return a new encoding of the same value with a greater exponent."""
        if new_exponent < self.exponent:
            raise ValueError('New exponent %i should be greater than'
                             'old exponent %i' % (new_exponent, self.exponent))
        factor = pow(self.BASE, new_exponent - self.exponent)
        new_encoding = self.encoding * factor % self.n
        return FixedPointNumber(new_encoding, new_exponent, self.n, self.max_int)

    def __align_exponent(self, x, y):
        """Return (x, y) rescaled to share the larger of the two exponents."""
        if x.exponent < y.exponent:
            x = x.increase_exponent_to(y.exponent)
        elif x.exponent > y.exponent:
            y = y.increase_exponent_to(x.exponent)
        return x, y

    def __truncate(self, a):
        # Re-encode through the plaintext to reduce the exponent again.
        scalar = a.decode()
        return FixedPointNumber.encode(scalar, n=self.n, max_int=self.max_int)

    def __add__(self, other):
        # Dispatch on operand type; Paillier ciphertexts are handled by
        # delegating to the ciphertext's own arithmetic.
        if isinstance(other, FixedPointNumber):
            return self.__add_fixedpointnumber(other)
        elif type(other).__name__ == "PaillierEncryptedNumber":
            return other + self.decode()
        else:
            return self.__add_scalar(other)

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        if isinstance(other, FixedPointNumber):
            return self.__sub_fixedpointnumber(other)
        elif type(other).__name__ == "PaillierEncryptedNumber":
            # self - cipher == -(cipher - self)
            return (other - self.decode()) * -1
        else:
            return self.__sub_scalar(other)

    def __rsub__(self, other):
        # other - self, computed as -(self - other).
        if type(other).__name__ == "PaillierEncryptedNumber":
            return other - self.decode()
        x = self.__sub__(other)
        x = -1 * x.decode()
        return self.encode(x, n=self.n, max_int=self.max_int)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __mul__(self, other):
        if isinstance(other, FixedPointNumber):
            return self.__mul_fixedpointnumber(other)
        elif type(other).__name__ == "PaillierEncryptedNumber":
            return other * self.decode()
        else:
            return self.__mul_scalar(other)

    def __truediv__(self, other):
        # Division implemented as multiplication by the reciprocal.
        if isinstance(other, FixedPointNumber):
            scalar = other.decode()
        else:
            scalar = other
        return self.__mul__(1 / scalar)

    def __rtruediv__(self, other):
        # other / self, re-encoded as a fixed-point number.
        res = 1.0 / self.__truediv__(other).decode()
        return FixedPointNumber.encode(res, n=self.n, max_int=self.max_int)

    # Comparisons all decode both sides and compare the plaintext values.
    def __lt__(self, other):
        x = self.decode()
        if isinstance(other, FixedPointNumber):
            y = other.decode()
        else:
            y = other
        if x < y:
            return True
        else:
            return False

    def __gt__(self, other):
        x = self.decode()
        if isinstance(other, FixedPointNumber):
            y = other.decode()
        else:
            y = other
        if x > y:
            return True
        else:
            return False

    def __le__(self, other):
        x = self.decode()
        if isinstance(other, FixedPointNumber):
            y = other.decode()
        else:
            y = other
        if x <= y:
            return True
        else:
            return False

    def __ge__(self, other):
        x = self.decode()
        if isinstance(other, FixedPointNumber):
            y = other.decode()
        else:
            y = other
        if x >= y:
            return True
        else:
            return False

    def __eq__(self, other):
        x = self.decode()
        if isinstance(other, FixedPointNumber):
            y = other.decode()
        else:
            y = other
        if x == y:
            return True
        else:
            return False

    def __ne__(self, other):
        x = self.decode()
        if isinstance(other, FixedPointNumber):
            y = other.decode()
        else:
            y = other
        if x != y:
            return True
        else:
            return False

    def __add_fixedpointnumber(self, other):
        # Mismatched moduli: re-encode the other operand into our ring first.
        if self.n != other.n:
            other = self.encode(other.decode(), n=self.n, max_int=self.max_int)
        x, y = self.__align_exponent(self, other)
        encoding = (x.encoding + y.encoding) % self.n
        return FixedPointNumber(encoding, x.exponent, n=self.n, max_int=self.max_int)

    def __add_scalar(self, scalar):
        encoded = self.encode(scalar, n=self.n, max_int=self.max_int)
        return self.__add_fixedpointnumber(encoded)

    def __sub_fixedpointnumber(self, other):
        if self.n != other.n:
            other = self.encode(other.decode(), n=self.n, max_int=self.max_int)
        x, y = self.__align_exponent(self, other)
        encoding = (x.encoding - y.encoding) % self.n
        return FixedPointNumber(encoding, x.exponent, n=self.n, max_int=self.max_int)

    def __sub_scalar(self, scalar):
        scalar = -1 * scalar
        return self.__add_scalar(scalar)

    def __mul_fixedpointnumber(self, other):
        return self.__mul_scalar(other.decode())

    def __mul_scalar(self, scalar):
        # Multiply in plaintext and re-encode to keep the exponent bounded.
        val = self.decode()
        z = val * scalar
        z_encode = FixedPointNumber.encode(z, n=self.n, max_int=self.max_int)
        return z_encode

    def __abs__(self):
        if self.encoding <= self.max_int:
            # Positive
            return self
        elif self.encoding >= self.n - self.max_int:
            # Negative
            return self * -1
        # NOTE(review): mid-band encodings fall through and return None.

    def __mod__(self, other):
        # Raw modulus on the encoding; exponent is kept unchanged.
        return FixedPointNumber(self.encoding % other, self.exponent, n=self.n, max_int=self.max_int)
class FixedPointEndec(object):
    """Encoder/decoder applying fixed-point transforms to scalars, ndarrays and tables.

    Parameters
    ----------
    n : int, optional
        Field modulus; defaults to ``FixedPointNumber.Q``.
    max_int : int, optional
        Largest positive plaintext; defaults to ``n // 2``.
    precision : int, optional
        Fixed-point precision forwarded to ``FixedPointNumber.encode``.
    """
    def __init__(self, n=None, max_int=None, precision=None, *args, **kwargs):
        if n is None:
            self.n = FixedPointNumber.Q
            self.max_int = self.n // 2
        else:
            self.n = n
            self.max_int = self.n // 2 if max_int is None else max_int
        self.precision = precision
    @classmethod
    def _transform_op(cls, tensor, op):
        """Apply `op` element-wise to a scalar, ndarray, or distributed table."""
        from fate_arch.session import is_table
        def _transform(x):
            out = np.zeros(shape=x.shape, dtype=object)
            flat_out = out.view().reshape(-1)
            flat_in = x.view().reshape(-1)
            for idx in range(out.size):
                flat_out[idx] = op(flat_in[idx])
            return out
        scalar_types = (int, np.int16, np.int32, np.int64,
                        float, np.float16, np.float32, np.float64,
                        FixedPointNumber)
        if isinstance(tensor, scalar_types):
            return op(tensor)
        if isinstance(tensor, np.ndarray):
            return _transform(tensor)
        if is_table(tensor):
            # Distributed table: transform each partition value lazily.
            return tensor.mapValues(functools.partial(_transform))
        raise ValueError(f"unsupported type: {type(tensor)}")
    def _encode(self, scalar):
        return FixedPointNumber.encode(scalar,
                                       n=self.n,
                                       max_int=self.max_int,
                                       precision=self.precision)
    def _decode(self, number):
        return number.decode()
    def _truncate(self, number):
        # Re-encode through plaintext to normalise precision after arithmetic.
        return FixedPointNumber.encode(number.decode(), n=self.n, max_int=self.max_int)
    def encode(self, float_tensor):
        return self._transform_op(float_tensor, op=self._encode)
    def decode(self, integer_tensor):
        return self._transform_op(integer_tensor, op=self._decode)
    def truncate(self, integer_tensor, *args, **kwargs):
        return self._transform_op(integer_tensor, op=self._truncate)
| 11,822
| 31.303279
| 101
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/encrypt.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import hashlib
from collections import Iterable
import numpy as np
from federatedml.util import LOGGER
from Cryptodome import Random
from Cryptodome.PublicKey import RSA
from federatedml.feature.instance import Instance
from federatedml.secureprotol import gmpy_math
from federatedml.secureprotol.fate_paillier import PaillierKeypair
from federatedml.secureprotol.fate_paillier import PaillierEncryptedNumber
from federatedml.secureprotol.random import RandomPads
try:
from ipcl_python import PaillierKeypair as IpclPaillierKeypair
except ImportError:
pass
_TORCH_VALID = False
try:
import torch
_TORCH_VALID = True
except ImportError:
pass
class Encrypt(object):
    """Abstract encryptor interface.

    Subclasses implement key management plus (raw_)encrypt/(raw_)decrypt;
    this base class supplies list, recursive, and distributed-table helpers
    built on those primitives. The abstract methods return None.
    """
    def __init__(self):
        self.public_key = None
        self.privacy_key = None
    def generate_key(self, n_length=0):
        pass
    def set_public_key(self, public_key):
        pass
    def get_public_key(self):
        pass
    def set_privacy_key(self, privacy_key):
        pass
    def get_privacy_key(self):
        pass
    def encrypt(self, value):
        pass
    def decrypt(self, value):
        pass
    def raw_encrypt(self, value):
        pass
    def raw_decrypt(self, value):
        pass
    def encrypt_list(self, values):
        return [self.encrypt(v) for v in values]
    def decrypt_list(self, values):
        return [self.decrypt(v) for v in values]
    def distribute_decrypt(self, X):
        # Recursively decrypt every value of a distributed table.
        return X.mapValues(lambda v: self.recursive_decrypt(v))
    def distribute_encrypt(self, X):
        return X.mapValues(lambda v: self.recursive_encrypt(v))
    def distribute_raw_decrypt(self, X):
        return X.mapValues(lambda v: self.recursive_raw_decrypt(v))
    def distribute_raw_encrypt(self, X):
        return X.mapValues(lambda v: self.recursive_raw_encrypt(v))
    def _recursive_func(self, obj, func):
        """Apply `func` to every leaf of nested ndarrays/iterables, preserving structure."""
        if isinstance(obj, np.ndarray):
            if len(obj.shape) == 1:
                return np.reshape([func(val) for val in obj], obj.shape)
            # Recurse over the leading axis, then restore the original shape.
            return np.reshape([self._recursive_func(sub, func) for sub in obj], obj.shape)
        if isinstance(obj, Iterable):
            return type(obj)(
                self._recursive_func(o, func) if isinstance(o, Iterable) else func(o)
                for o in obj
            )
        return func(obj)
    def recursive_encrypt(self, X):
        return self._recursive_func(X, self.encrypt)
    def recursive_decrypt(self, X):
        return self._recursive_func(X, self.decrypt)
    def recursive_raw_encrypt(self, X):
        return self._recursive_func(X, self.raw_encrypt)
    def recursive_raw_decrypt(self, X):
        return self._recursive_func(X, self.raw_decrypt)
class RsaEncrypt(Encrypt):
    """RSA encryptor; keys may be generated locally or injected via setters."""
    def __init__(self):
        super(RsaEncrypt, self).__init__()
        # Public exponent, private exponent, modulus and its prime factors.
        self.e = self.d = self.n = self.p = self.q = None
    def generate_key(self, rsa_bit=1024):
        """Generate a fresh RSA keypair of the given bit length."""
        rng = Random.new().read
        keypair = RSA.generate(rsa_bit, rng)
        self.e = keypair.e
        self.d = keypair.d
        self.n = keypair.n
        self.p = keypair.p
        self.q = keypair.q
    def get_key_pair(self):
        return self.e, self.d, self.n, self.p, self.q
    def set_public_key(self, public_key):
        self.e = public_key["e"]
        self.n = public_key["n"]
    def get_public_key(self):
        return self.e, self.n
    def set_privacy_key(self, privacy_key):
        self.d = privacy_key["d"]
        self.n = privacy_key["n"]
    def get_privacy_key(self):
        return self.d, self.n
    def encrypt(self, value):
        """value^e mod n; uses the CRT shortcut when both primes are known."""
        if self.e is None or self.n is None:
            return None
        if self.p is not None and self.q is not None:
            cp, cq = gmpy_math.crt_coefficient(self.p, self.q)
            return gmpy_math.powmod_crt(value, self.e, self.n, self.p, self.q, cp, cq)
        return gmpy_math.powmod(value, self.e, self.n)
    def decrypt(self, value):
        """value^d mod n; None when the private key is not set."""
        if self.d is None or self.n is None:
            return None
        return gmpy_math.powmod(value, self.d, self.n)
class PaillierEncrypt(Encrypt):
    """Paillier additively homomorphic encryptor."""
    def __init__(self):
        super(PaillierEncrypt, self).__init__()
    def generate_key(self, n_length=1024):
        """Generate a fresh keypair of the requested modulus length."""
        keypair = PaillierKeypair.generate_keypair(n_length=n_length)
        self.public_key, self.privacy_key = keypair
    def get_key_pair(self):
        return self.public_key, self.privacy_key
    def set_public_key(self, public_key):
        self.public_key = public_key
    def get_public_key(self):
        return self.public_key
    def set_privacy_key(self, privacy_key):
        self.privacy_key = privacy_key
    def get_privacy_key(self):
        return self.privacy_key
    def encrypt(self, value):
        """Encrypt `value`; returns None when no public key is set."""
        if self.public_key is None:
            return None
        return self.public_key.encrypt(value)
    def decrypt(self, value):
        """Decrypt `value`; returns None when no private key is set."""
        if self.privacy_key is None:
            return None
        return self.privacy_key.decrypt(value)
    def raw_encrypt(self, plaintext, exponent=0):
        """Encrypt an already-encoded integer and wrap it with the given exponent."""
        cipher_int = self.public_key.raw_encrypt(plaintext)
        return PaillierEncryptedNumber(public_key=self.public_key,
                                       ciphertext=cipher_int,
                                       exponent=exponent)
    def raw_decrypt(self, ciphertext):
        """Decrypt back to the raw encoded integer (no fixed-point decoding)."""
        return self.privacy_key.raw_decrypt(ciphertext.ciphertext())
    def recursive_raw_encrypt(self, X, exponent=0):
        raw_en_func = functools.partial(self.raw_encrypt, exponent=exponent)
        return self._recursive_func(X, raw_en_func)
class IpclPaillierEncrypt(Encrypt):
    """Paillier encryptor backed by the Intel Paillier Cryptosystem Library (IPCL).

    IPCL packs multiple raw values into a single ciphertext, hence the
    list-oriented overrides below.
    """
    def __init__(self):
        super(IpclPaillierEncrypt, self).__init__()
    def generate_key(self, n_length=1024):
        """Generate a fresh IPCL keypair of the requested modulus length."""
        keypair = IpclPaillierKeypair.generate_keypair(n_length=n_length)
        self.public_key, self.privacy_key = keypair
    def get_key_pair(self):
        return self.public_key, self.privacy_key
    def set_public_key(self, public_key):
        self.public_key = public_key
    def get_public_key(self):
        return self.public_key
    def set_privacy_key(self, privacy_key):
        self.privacy_key = privacy_key
    def get_privacy_key(self):
        return self.privacy_key
    def encrypt(self, value):
        """Encrypt `value`; returns None when no public key is set."""
        if self.public_key is None:
            return None
        return self.public_key.encrypt(value)
    def decrypt(self, value):
        """Decrypt `value`; returns None when no private key is set."""
        if self.privacy_key is None:
            return None
        return self.privacy_key.decrypt(value)
    def raw_encrypt(self, plaintext, exponent=0):
        """Encrypt without applying the obfuscator.

        Returns:
            (PaillierEncryptedNumber from `ipcl_python`): one ciphertext
        """
        return self.public_key.raw_encrypt(plaintext)
    def raw_decrypt(self, ciphertext):
        """Decrypt without constructing a `FixedPointNumber`.

        Returns:
            (int or list): raw value(s)
        """
        return self.privacy_key.raw_decrypt(ciphertext)
    def encrypt_list(self, values):
        """Encrypt a list of raw values into one packed ciphertext.

        Returns:
            (PaillierEncryptedNumber from `ipcl_python`): all values in a single ciphertext
        """
        return self.encrypt(values)
    def decrypt_list(self, values):
        """Decrypt `values` back to a list of raw values.

        A list or 1-d ndarray is handled element-wise by the parent class;
        a 0-d ndarray wraps one packed ciphertext, fetched via ``item(0)``.
        """
        if np.ndim(values) >= 1:
            return super().decrypt_list(values)
        return self.decrypt(values.item(0))
    def recursive_raw_encrypt(self, X, exponent=0):
        raw_en_func = functools.partial(self.raw_encrypt, exponent=exponent)
        return self._recursive_func(X, raw_en_func)
class PadsCipher(Encrypt):
    """Pairwise random-pad masking cipher (secure-aggregation style).

    Each party adds pads derived from seeds exchanged pairwise with every
    other party; the sign of each pad depends on uuid ordering, so when all
    parties' masked values are summed the pads cancel out.
    """
    def __init__(self):
        super().__init__()
        self._uuid = None
        self._rands = None
        self._amplify_factor = 1
    def set_self_uuid(self, uuid):
        self._uuid = uuid
    def set_amplify_factor(self, factor):
        self._amplify_factor = factor
    def set_exchanged_keys(self, keys):
        """Store pairwise seeds/generators for every peer (self excluded)."""
        self._seeds = {
            uid: v & 0xFFFFFFFF for uid, v in keys.items() if uid != self._uuid
        }
        self._rands = {
            uid: RandomPads(v & 0xFFFFFFFF)
            for uid, v in keys.items()
            if uid != self._uuid
        }
    def _apply_pads(self, arr):
        # Pads toward peers with a larger uuid are added, smaller subtracted,
        # so the pads cancel when all parties' masked values are summed.
        ret = arr
        for uid, rand in self._rands.items():
            sign = 1.0 if uid > self._uuid else -1.0
            ret = rand.add_rand_pads(ret, sign * self._amplify_factor)
        return ret
    def encrypt(self, value):
        """Mask an ndarray, torch tensor, or scalar with pairwise pads."""
        if isinstance(value, np.ndarray):
            return self._apply_pads(value)
        if _TORCH_VALID and isinstance(value, torch.Tensor):
            return torch.Tensor(self._apply_pads(value.numpy()))
        ret = value
        for uid, rand in self._rands.items():
            pad = rand.rand(1)[0] * self._amplify_factor
            ret = ret + pad if uid > self._uuid else ret - pad
        return ret
    def encrypt_table(self, table):
        """Mask every row of a distributed table with per-row pads.

        All state (seeds, factor, uuid) is passed through functools.partial so
        the mapped function does not close over `self`.
        """
        def _pad(key, value, seeds, amplify_factor, self_uuid):
            # Derive per-row seeds from the row key so every row gets
            # independent but reproducible pads across parties.
            has_key = int(hashlib.md5(f"{key}".encode("ascii")).hexdigest(), 16)
            cur_seeds = {uid: has_key + seed for uid, seed in seeds.items()}
            rands = {uid: RandomPads(v & 0xFFFFFFFF) for uid, v in cur_seeds.items()}
            def _apply(arr):
                ret = arr
                for uid, rand in rands.items():
                    sign = 1.0 if uid > self_uuid else -1.0
                    ret = rand.add_rand_pads(ret, sign * amplify_factor)
                return ret
            if isinstance(value, np.ndarray):
                return key, _apply(value)
            if isinstance(value, Instance):
                value.features = _apply(value.features)
                return key, value
            ret = value
            for uid, rand in rands.items():
                # BUG FIX: the scalar branch previously used self._amplify_factor
                # instead of the amplify_factor parameter carried by the partial
                # (same value, but it forced `self` into the mapped closure).
                pad = rand.rand(1)[0] * amplify_factor
                ret = ret + pad if uid > self_uuid else ret - pad
            return key, ret
        f = functools.partial(
            _pad,
            seeds=self._seeds,
            amplify_factor=self._amplify_factor,
            self_uuid=self._uuid,
        )
        return table.map(f)
    def decrypt(self, value):
        # Pads cancel on aggregation; no per-value decryption is needed.
        return value
| 12,519
| 29.990099
| 116
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.secureprotol.encrypt import RsaEncrypt, PaillierEncrypt, IpclPaillierEncrypt
from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator
__all__ = ['RsaEncrypt', 'PaillierEncrypt', 'IpclPaillierEncrypt', 'EncryptModeCalculator']
| 876
| 40.761905
| 93
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/encode.py
|
import base64
import hashlib
from federatedml.util import LOGGER
class Encode:
    """Hash-based encoder supporting several digest algorithms and optional base64 output.

    Parameters
    ----------
    method : str
        One of md5/sha1/sha224/sha256/sha384/sha512.
    base64 : int
        1 -> base64-encoded digest, otherwise hex digest.
    """
    def __init__(self, method, base64=0):
        self.method = method
        self.base64 = base64
        self.dist_encode_function = {
            "md5": self.__compute_md5,
            "sha1": self.__compute_sha1,
            "sha224": self.__compute_sha224,
            "sha256": self.__compute_sha256,
            "sha384": self.__compute_sha384,
            "sha512": self.__compute_sha512,
        }
    @staticmethod
    def is_support(method):
        """Whether `method` names a supported digest algorithm."""
        return method in {"md5", "sha1", "sha224", "sha256", "sha384", "sha512"}
    def __digest(self, algo, value):
        # Shared helper: hash UTF-8 bytes of `value`, emit base64 or hex.
        h = algo(bytes(value, encoding='utf-8'))
        if self.base64 == 1:
            return str(base64.b64encode(h.digest()), "utf-8")
        return h.hexdigest()
    def __compute_md5(self, value):
        return self.__digest(hashlib.md5, value)
    def __compute_sha256(self, value):
        return self.__digest(hashlib.sha256, value)
    def __compute_sha1(self, value):
        return self.__digest(hashlib.sha1, value)
    def __compute_sha224(self, value):
        return self.__digest(hashlib.sha224, value)
    def __compute_sha512(self, value):
        return self.__digest(hashlib.sha512, value)
    def __compute_sha384(self, value):
        return self.__digest(hashlib.sha384, value)
    def compute(self, value, pre_salt=None, postfit_salt=None):
        """Hash `value`, optionally surrounded by salts; unsupported methods pass through."""
        if not Encode.is_support(self.method):
            LOGGER.warning("Encode module do not support method:{}".format(self.method))
            return value
        if pre_salt is not None:
            value = pre_salt + value
        if postfit_salt is not None:
            value = value + postfit_salt
        return self.dist_encode_function[self.method](value)
| 2,703
| 36.041096
| 106
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/encrypt_mode.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
from federatedml.secureprotol import PaillierEncrypt
from federatedml.util import LOGGER
class EncryptModeCalculator(object):
    """Compatibility wrapper around an encrypter.

    Historically offered several encryption modes trading security for speed;
    only 'strict' (re-encrypt on every call) remains supported, and any other
    mode is coerced to 'strict' with a deprecation warning.

    Parameters
    ----------
    encrypter: object, fate-paillier object, object to encrypt numbers
    mode: str, only 'strict' is effective; other values are reset to it.
    re_encrypted_rate: kept for interface compatibility.
    """
    def __init__(self, encrypter=None, mode="strict", re_encrypted_rate=1):
        self.encrypter = encrypter
        self.mode = mode
        self.re_encrypted_rate = re_encrypted_rate
        self.prev_data = None
        self.prev_encrypted_data = None
        self.enc_zeros = None
        self.align_to_input_data = True
        if self.mode != "strict":
            self.mode = "strict"
            LOGGER.warning("encrypted_mode_calculator will be remove in later version, "
                           "but in current version user can still use it, but it only supports strict mode, "
                           "other mode will be reset to strict for compatibility")
    @staticmethod
    def add_enc_zero(obj, enc_zero):
        # Kept for interface compatibility; no-op in strict mode.
        pass
    def encrypt_data(self, input_data, enc_func):
        return input_data.mapValues(enc_func)
    def get_enc_func(self, encrypter, raw_enc=False, exponent=0):
        """Pick the (raw-)encryption callable for the given encrypter."""
        if not raw_enc:
            return encrypter.recursive_encrypt
        if isinstance(self.encrypter, PaillierEncrypt):
            # Paillier raw encryption needs the exponent bound in.
            return functools.partial(self.encrypter.recursive_raw_encrypt, exponent=exponent)
        return self.encrypter.recursive_raw_encrypt
    def encrypt(self, input_data):
        """Encrypt every value of `input_data` (a Table) with the wrapped encrypter.

        Returns
        -------
        Table: encrypted result of input_data
        """
        enc_func = self.get_enc_func(self.encrypter, raw_enc=False)
        return self.encrypt_data(input_data, enc_func)
    def raw_encrypt(self, input_data, exponent=0):
        enc_func = self.get_enc_func(self.encrypter, raw_enc=True, exponent=exponent)
        return self.encrypt_data(input_data, enc_func)
    def init_enc_zero(self, input_data, raw_en=False, exponent=0):
        # Kept for interface compatibility; no-op in strict mode.
        pass
    def recursive_encrypt(self, input_data):
        return self.encrypter.recursive_encrypt(input_data)
    def distribute_encrypt(self, input_data):
        return self.encrypt(input_data)
    def distribute_decrypt(self, input_data):
        return self.encrypter.distribute_decrypt(input_data)
    def recursive_decrypt(self, input_data):
        return self.encrypter.recursive_decrypt(input_data)
| 3,520
| 32.533333
| 109
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/diffie_hellman.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import gmpy2
from gmpy2 import mpz
class DiffieHellman(object):
    """Diffie-Hellman key exchange using fixed, well-known MODP groups.

    Group parameters (p, g) are taken verbatim from RFC 2409, RFC 3526 and
    RFC 5114 rather than generated, since finding a generator of a large
    prime-order subgroup at runtime is prohibitively slow and reusing
    published groups is safe (see ``key_pair`` docstring).
    """
    @staticmethod
    def _decode_hex_string(number_str):
        # Strip whitespace/newlines from the RFC-formatted hex dump and parse as hex.
        return mpz("0x{0}".format("".join(number_str.split())))
    @staticmethod
    def _oakley_group_1024_1024():
        """
        from RFC 2409, refer to https://tools.ietf.org/html/rfc2409#page-22
        """
        p = DiffieHellman._decode_hex_string("""
            FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
            29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
            EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
            E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
            EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE65381
            FFFFFFFF FFFFFFFF
            """)
        g = DiffieHellman._decode_hex_string("2")
        return p, g
    @staticmethod
    def _oakley_group_2048_2048():
        """
        from RFC 3526, refer to: https://datatracker.ietf.org/doc/html/rfc3526#section-3
        """
        p = DiffieHellman._decode_hex_string("""
            FFFFFFFF FFFFFFFF C90FDAA2 2168C234 C4C6628B 80DC1CD1
            29024E08 8A67CC74 020BBEA6 3B139B22 514A0879 8E3404DD
            EF9519B3 CD3A431B 302B0A6D F25F1437 4FE1356D 6D51C245
            E485B576 625E7EC6 F44C42E9 A637ED6B 0BFF5CB6 F406B7ED
            EE386BFB 5A899FA5 AE9F2411 7C4B1FE6 49286651 ECE45B3D
            C2007CB8 A163BF05 98DA4836 1C55D39A 69163FA8 FD24CF5F
            83655D23 DCA3AD96 1C62F356 208552BB 9ED52907 7096966D
            670C354E 4ABC9804 F1746C08 CA18217C 32905E46 2E36CE3B
            E39E772C 180E8603 9B2783A2 EC07A28F B5C55DF0 6F4C52C9
            DE2BCBF6 95581718 3995497C EA956AE5 15D22618 98FA0510
            """)
        g = DiffieHellman._decode_hex_string("2")
        return p, g
    @staticmethod
    def _additional_group_1024_160():
        """
        from RFC 5114, has 160 bits subgroup size:
        0xF518AA8781A8DF278ABA4E7D64B7CB9D49462353
        refer to https://tools.ietf.org/html/rfc5114
        """
        p = DiffieHellman._decode_hex_string("""
            B10B8F96 A080E01D DE92DE5E AE5D54EC 52C99FBC FB06A3C6
            9A6A9DCA 52D23B61 6073E286 75A23D18 9838EF1E 2EE652C0
            13ECB4AE A9061123 24975C3C D49B83BF ACCBDD7D 90C4BD70
            98488E9C 219A7372 4EFFD6FA E5644738 FAA31A4F F55BCCC0
            A151AF5F 0DC8B4BD 45BF37DF 365C1A65 E68CFDA7 6D4DA708
            DF1FB2BC 2E4A4371
            """)
        g = DiffieHellman._decode_hex_string("""
            A4D1CBD5 C3FD3412 6765A442 EFB99905 F8104DD2 58AC507F
            D6406CFF 14266D31 266FEA1E 5C41564B 777E690F 5504F213
            160217B4 B01B886A 5E91547F 9E2749F4 D7FBD7D3 B9A92EE1
            909D0D22 63F80A76 A6A24C08 7A091F53 1DBF0A01 69B6A28A
            D662A4D1 8E73AFA3 2D779D59 18D08BC8 858F4DCE F97C2A24
            855E6EEB 22B3B2E5
            """)
        return p, g
    @staticmethod
    def _additional_group_2048_224():
        """
        from RFC 5114, has 224 bits subgroup size:
        0x801C0D34C58D93FE997177101F80535A4738CEBCBF389A99B36371EB
        refer to https://tools.ietf.org/html/rfc5114
        """
        p = DiffieHellman._decode_hex_string("""
            AD107E1E 9123A9D0 D660FAA7 9559C51F A20D64E5 683B9FD1
            B54B1597 B61D0A75 E6FA141D F95A56DB AF9A3C40 7BA1DF15
            EB3D688A 309C180E 1DE6B85A 1274A0A6 6D3F8152 AD6AC212
            9037C9ED EFDA4DF8 D91E8FEF 55B7394B 7AD5B7D0 B6C12207
            C9F98D11 ED34DBF6 C6BA0B2C 8BBC27BE 6A00E0A0 B9C49708
            B3BF8A31 70918836 81286130 BC8985DB 1602E714 415D9330
            278273C7 DE31EFDC 7310F712 1FD5A074 15987D9A DC0A486D
            CDF93ACC 44328387 315D75E1 98C641A4 80CD86A1 B9E587E8
            BE60E69C C928B2B9 C52172E4 13042E9B 23F10B0E 16E79763
            C9B53DCF 4BA80A29 E3FB73C1 6B8E75B9 7EF363E2 FFA31F71
            CF9DE538 4E71B81C 0AC4DFFE 0C10E64F
            """)
        g = DiffieHellman._decode_hex_string("""
            AC4032EF 4F2D9AE3 9DF30B5C 8FFDAC50 6CDEBE7B 89998CAF
            74866A08 CFE4FFE3 A6824A4E 10B9A6F0 DD921F01 A70C4AFA
            AB739D77 00C29F52 C57DB17C 620A8652 BE5E9001 A8D66AD7
            C1766910 1999024A F4D02727 5AC1348B B8A762D0 521BC98A
            E2471504 22EA1ED4 09939D54 DA7460CD B5F6C6B2 50717CBE
            F180EB34 118E98D1 19529A45 D6F83456 6E3025E3 16A330EF
            BB77A86F 0C1AB15B 051AE3D4 28C8F8AC B70A8137 150B8EEB
            10E183ED D19963DD D9E263E4 770589EF 6AA21E7F 5F2FF381
            B539CCE3 409D13CD 566AFBB4 8D6C0191 81E1BCFE 94B30269
            EDFE72FE 9B6AA4BD 7B5A0F1C 71CFFF4C 19C418E1 F6EC0179
            81BC087F 2A7065B3 84B890D3 191F2BFA
            """)
        return p, g
    @staticmethod
    def _additional_group_2048_256():
        """
        from RFC 5114, has 256 bits subgroup size:
        0x8CF83642A709A097B447997640129DA299B1A47D1EB3750BA308B0FE64F5FBD3
        refer to https://tools.ietf.org/html/rfc5114
        """
        p = DiffieHellman._decode_hex_string("""
            87A8E61D B4B6663C FFBBD19C 65195999 8CEEF608 660DD0F2
            5D2CEED4 435E3B00 E00DF8F1 D61957D4 FAF7DF45 61B2AA30
            16C3D911 34096FAA 3BF4296D 830E9A7C 209E0C64 97517ABD
            5A8A9D30 6BCF67ED 91F9E672 5B4758C0 22E0B1EF 4275BF7B
            6C5BFC11 D45F9088 B941F54E B1E59BB8 BC39A0BF 12307F5C
            4FDB70C5 81B23F76 B63ACAE1 CAA6B790 2D525267 35488A0E
            F13C6D9A 51BFA4AB 3AD83477 96524D8E F6A167B5 A41825D9
            67E144E5 14056425 1CCACB83 E6B486F6 B3CA3F79 71506026
            C0B857F6 89962856 DED4010A BD0BE621 C3A3960A 54E710C3
            75F26375 D7014103 A4B54330 C198AF12 6116D227 6E11715F
            693877FA D7EF09CA DB094AE9 1E1A1597
            """)
        g = DiffieHellman._decode_hex_string("""
            3FB32C9B 73134D0B 2E775066 60EDBD48 4CA7B18F 21EF2054
            07F4793A 1A0BA125 10DBC150 77BE463F FF4FED4A AC0BB555
            BE3A6C1B 0C6B47B1 BC3773BF 7E8C6F62 901228F8 C28CBB18
            A55AE313 41000A65 0196F931 C77A57F2 DDF463E5 E9EC144B
            777DE62A AAB8A862 8AC376D2 82D6ED38 64E67982 428EBC83
            1D14348F 6F2F9193 B5045AF2 767164E1 DFC967C1 FB3F2E55
            A4BD1BFF E83B9C80 D052B985 D182EA0A DB2A3B73 13D3FE14
            C8484B1E 052588B9 B7D2BBD2 DF016199 ECD06E15 57CD0915
            B3353BBB 64E0EC37 7FD02837 0DF92B52 C7891428 CDC67EB6
            184B523D 1DB246C3 2F630784 90F00EF8 D647D148 D4795451
            5E2327CF EF98C582 664B4C0F 6CC41659
            """)
        return p, g
    @staticmethod
    def _oakley_group_key_pair(num_bits=1024):
        # Dispatch table from modulus size to the matching Oakley group.
        available = {
            1024: DiffieHellman._oakley_group_1024_1024,
            2048: DiffieHellman._oakley_group_2048_2048
        }
        assert num_bits in available, \
            "key pairs with specified size({0} bits) not found, please use one of {1}".format(
                num_bits, available.keys()
            )
        return available[num_bits].__call__()
    @staticmethod
    def generate_oakley_group_key_pair(num_bits=1024, pair_name=None):
        # Select an Oakley group either by size or by explicit name.
        if pair_name is None:
            return DiffieHellman._oakley_group_key_pair(num_bits)
        assert pair_name in {
            "oakley_group_1024_1024", "oakley_group_2048_2048"
        }, "unsupported pair name: {0}".format(pair_name)
        if pair_name == "oakley_group_1024_1024":
            return DiffieHellman._oakley_group_1024_1024()
        if pair_name == "oakley_group_2048_2048":
            return DiffieHellman._oakley_group_2048_2048()
    @staticmethod
    def _key_pair(num_bits=1024):
        # Pick a random group among all published groups of the requested size.
        available = {
            1024: [
                DiffieHellman._oakley_group_1024_1024,
                DiffieHellman._additional_group_1024_160
            ],
            2048: [
                DiffieHellman._oakley_group_2048_2048,
                DiffieHellman._additional_group_2048_224,
                DiffieHellman._additional_group_2048_256
            ]
        }
        assert num_bits in available,\
            "key pairs with specified size({0} bits) not found, please use one of {1}".format(
                num_bits, available.keys()
            )
        return random.choice(available[num_bits]).__call__()
    @staticmethod
    def key_pair(num_bits=1024, pair_name=None):
        """
        Generate a primitive root for a big prime number is really slow!
        Notice the fact that:
        1. we don't need the generator to be a primitive element of the group
        but the one generates a large prime order.
        2. There is no security issue with Diffie-Hellman if you reuse previously generated 𝑝 and 𝑔.
        We simply use key pairs from RFC 5114 and RFC 2409
        @:param pair_name: one of "additional_group_1024_160", "additional_group_2048_224",
        "additional_group_2048_256", "oakley_group_1024_1024"
        use additional_group_1024_160 as default
        @:param num_bits: specify size of p
        @:return p, g, where p is a prime number, g is a generator
        """
        if pair_name is None:
            if num_bits:
                return DiffieHellman._key_pair(num_bits)
            else:
                return DiffieHellman._additional_group_1024_160()
        assert pair_name in {
            "additional_group_1024_160", "additional_group_2048_224", "additional_group_2048_256",
            "oakley_group_1024_1024", "oakley_group_2048_2048"
        }, "unsupported pair name: {0}".format(pair_name)
        if pair_name == "additional_group_1024_160":
            return DiffieHellman._additional_group_1024_160()
        if pair_name == "additional_group_2048_224":
            return DiffieHellman._additional_group_2048_224()
        if pair_name == "additional_group_2048_256":
            return DiffieHellman._additional_group_2048_256()
        if pair_name == "oakley_group_1024_1024":
            return DiffieHellman._oakley_group_1024_1024()
        if pair_name == "oakley_group_2048_2048":
            return DiffieHellman._oakley_group_2048_2048()
    # noinspection PyArgumentList
    @staticmethod
    def generate_secret(p, num_bits=1024):
        # Private exponent: cryptographically random bits reduced mod p.
        return mpz(random.SystemRandom().getrandbits(num_bits)) % p
    @staticmethod
    def encrypt(g, r, p):
        # Public share: g^r mod p.
        return gmpy2.powmod(g, r, p)
    @staticmethod
    def decrypt(gr, r, p):
        # Shared secret: (g^r')^r mod p.
        return gmpy2.powmod(gr, r, p)
| 10,705
| 40.657588
| 100
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/fate_paillier.py
|
"""Paillier encryption library for partially homomorphic encryption."""
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from federatedml.secureprotol import gmpy_math
from federatedml.secureprotol.fixedpoint import FixedPointNumber
class PaillierKeypair(object):
    """Factory for Paillier public/private key pairs."""
    def __init__(self):
        pass
    @staticmethod
    def generate_keypair(n_length=1024):
        """return a new :class:`PaillierPublicKey` and :class:`PaillierPrivateKey`.
        """
        while True:
            # Draw two distinct primes of half the target length.
            p = gmpy_math.getprimeover(n_length // 2)
            q = p
            while p == q:
                q = gmpy_math.getprimeover(n_length // 2)
            n = p * q
            # Retry until the modulus hits exactly the requested bit length.
            if n.bit_length() == n_length:
                break
        public_key = PaillierPublicKey(n)
        return public_key, PaillierPrivateKey(public_key, p, q)
class PaillierPublicKey(object):
    """Contains a public key and associated encryption methods.
    """
    def __init__(self, n):
        # Standard Paillier choice g = n + 1 makes raw encryption computable
        # as (n*m + 1) mod n^2, avoiding a modular exponentiation.
        self.g = n + 1
        self.n = n
        self.nsquare = n * n
        # Largest encodable positive magnitude (n // 3 - 1 leaves headroom
        # for homomorphic additions before wrap-around).
        self.max_int = n // 3 - 1
    def __repr__(self):
        hashcode = hex(hash(self))[2:]
        return "<PaillierPublicKey {}>".format(hashcode[:10])
    def __eq__(self, other):
        # Keys are equal iff their moduli match.
        return self.n == other.n
    def __hash__(self):
        return hash(self.n)
    def apply_obfuscator(self, ciphertext, random_value=None):
        """Randomize a ciphertext by multiplying with r^n mod n^2 (semantic security)."""
        r = random_value or random.SystemRandom().randrange(1, self.n)
        obfuscator = gmpy_math.powmod(r, self.n, self.nsquare)
        return (ciphertext * obfuscator) % self.nsquare
    def raw_encrypt(self, plaintext, random_value=None):
        """Encrypt an integer plaintext (already encoded) to a raw ciphertext int."""
        if not isinstance(plaintext, int):
            raise TypeError("plaintext should be int, but got: %s" %
                            type(plaintext))
        if plaintext >= (self.n - self.max_int) and plaintext < self.n:
            # Very large plaintext, take a sneaky shortcut using inverses
            neg_plaintext = self.n - plaintext  # = abs(plaintext - nsquare)
            neg_ciphertext = (self.n * neg_plaintext + 1) % self.nsquare
            ciphertext = gmpy_math.invert(neg_ciphertext, self.nsquare)
        else:
            # g^m = (n+1)^m = n*m + 1 (mod n^2), so no exponentiation needed.
            ciphertext = (self.n * plaintext + 1) % self.nsquare
        ciphertext = self.apply_obfuscator(ciphertext, random_value)
        return ciphertext
    def encrypt(self, value, precision=None, random_value=None):
        """Encode and Paillier encrypt a real number value.
        """
        if isinstance(value, FixedPointNumber):
            value = value.decode()
        encoding = FixedPointNumber.encode(value, self.n, self.max_int, precision)
        obfuscator = random_value or 1
        ciphertext = self.raw_encrypt(encoding.encoding, random_value=obfuscator)
        encryptednumber = PaillierEncryptedNumber(self, ciphertext, encoding.exponent)
        if random_value is None:
            # No explicit randomness supplied: obfuscate with a fresh random r.
            encryptednumber.apply_obfuscator()
        return encryptednumber
class PaillierPrivateKey(object):
    """Contains a private key and associated decryption method.

    Holds the prime factors p and q of the public modulus, plus the
    precomputed values (hp, hq, q_inverse) needed for CRT-based
    decryption as described in Paillier's paper.
    """
    def __init__(self, public_key, p, q):
        if not p * q == public_key.n:
            raise ValueError("given public key does not match the given p and q")
        if p == q:
            raise ValueError("p and q have to be different")
        self.public_key = public_key
        # Normalize so that self.p <= self.q; q_inverse = q^-1 mod p below.
        if q < p:
            self.p = q
            self.q = p
        else:
            self.p = p
            self.q = q
        self.psquare = self.p * self.p
        self.qsquare = self.q * self.q
        self.q_inverse = gmpy_math.invert(self.q, self.p)
        # h-function values precomputed once so each decrypt is cheap.
        self.hp = self.h_func(self.p, self.psquare)
        self.hq = self.h_func(self.q, self.qsquare)
    def __eq__(self, other):
        return self.p == other.p and self.q == other.q
    def __hash__(self):
        return hash((self.p, self.q))
    def __repr__(self):
        hashcode = hex(hash(self))[2:]
        return "<PaillierPrivateKey {}>".format(hashcode[:10])
    def h_func(self, x, xsquare):
        """Computes the h-function as defined in Paillier's paper page.

        h(x) = (L(g^(x-1) mod x^2, x))^-1 mod x, with g = n + 1.
        """
        return gmpy_math.invert(self.l_func(gmpy_math.powmod(self.public_key.g,
                                                             x - 1, xsquare), x), x)
    def l_func(self, x, p):
        """computes the L function as defined in Paillier's paper.

        L(x, p) = (x - 1) // p.
        """
        return (x - 1) // p
    def crt(self, mp, mq):
        """the Chinese Remainder Theorem as needed for decryption.
        return the solution modulo n=pq.
        """
        u = (mp - mq) * self.q_inverse % self.p
        x = (mq + (u * self.q)) % self.public_key.n
        return x
    def raw_decrypt(self, ciphertext):
        """return raw plaintext.

        Decrypts modulo p and modulo q separately using the precomputed
        hp/hq values, then recombines via crt().  Raises TypeError when
        *ciphertext* is not an int.
        """
        if not isinstance(ciphertext, int):
            raise TypeError("ciphertext should be an int, not: %s" %
                            type(ciphertext))
        mp = self.l_func(gmpy_math.powmod(ciphertext,
                                          self.p - 1, self.psquare),
                         self.p) * self.hp % self.p
        mq = self.l_func(gmpy_math.powmod(ciphertext,
                                          self.q - 1, self.qsquare),
                         self.q) * self.hq % self.q
        return self.crt(mp, mq)
    def decrypt(self, encrypted_number):
        """return the decrypted & decoded plaintext of encrypted_number.

        Raises TypeError for a non-PaillierEncryptedNumber argument and
        ValueError when the number was encrypted under a different key.
        """
        if not isinstance(encrypted_number, PaillierEncryptedNumber):
            raise TypeError("encrypted_number should be an PaillierEncryptedNumber, \
                             not: %s" % type(encrypted_number))
        if self.public_key != encrypted_number.public_key:
            raise ValueError("encrypted_number was encrypted against a different key!")
        # be_secure=False: no need to obfuscate a ciphertext we decrypt locally.
        encoded = self.raw_decrypt(encrypted_number.ciphertext(be_secure=False))
        encoded = FixedPointNumber(encoded,
                                   encrypted_number.exponent,
                                   self.public_key.n,
                                   self.public_key.max_int)
        decrypt_value = encoded.decode()
        return decrypt_value
class PaillierEncryptedNumber(object):
    """Represents the Paillier encryption of a float or int.

    Wraps an int ciphertext together with the fixed-point *exponent* of
    the encoded plaintext.  Obfuscation is applied lazily: the ciphertext
    is only re-randomized when handed out via ciphertext(be_secure=True).
    Supports homomorphic +, -, * (by scalar) and / (by scalar).
    """
    def __init__(self, public_key, ciphertext, exponent=0):
        self.public_key = public_key
        self.__ciphertext = ciphertext
        self.exponent = exponent
        # Tracks whether this ciphertext has been randomized yet.
        self.__is_obfuscator = False
        if not isinstance(self.__ciphertext, int):
            raise TypeError("ciphertext should be an int, not: %s" % type(self.__ciphertext))
        if not isinstance(self.public_key, PaillierPublicKey):
            raise TypeError("public_key should be a PaillierPublicKey, not: %s" % type(self.public_key))
    def ciphertext(self, be_secure=True):
        """return the ciphertext of the PaillierEncryptedNumber.

        With be_secure=True (the default) the ciphertext is obfuscated
        first, if it has not been already, so it is safe to share.
        """
        if be_secure and not self.__is_obfuscator:
            self.apply_obfuscator()
        return self.__ciphertext
    def apply_obfuscator(self):
        """ciphertext by multiplying by r ** n with random r
        """
        self.__ciphertext = self.public_key.apply_obfuscator(self.__ciphertext)
        self.__is_obfuscator = True
    def __add__(self, other):
        # Dispatch: ciphertext + ciphertext vs ciphertext + plaintext scalar.
        if isinstance(other, PaillierEncryptedNumber):
            return self.__add_encryptednumber(other)
        else:
            return self.__add_scalar(other)
    def __radd__(self, other):
        return self.__add__(other)
    def __sub__(self, other):
        return self + (other * -1)
    def __rsub__(self, other):
        return other + (self * -1)
    def __rmul__(self, scalar):
        return self.__mul__(scalar)
    def __truediv__(self, scalar):
        # Division by a scalar is multiplication by its reciprocal.
        return self.__mul__(1 / scalar)
    def __mul__(self, scalar):
        """return Multiply by an scalar(such as int, float)

        The scalar is fixed-point encoded and the product is computed as a
        modular exponentiation of the ciphertext; encodings of negative
        scalars are handled via the modular inverse of the ciphertext.
        """
        if isinstance(scalar, FixedPointNumber):
            scalar = scalar.decode()
        encode = FixedPointNumber.encode(scalar, self.public_key.n, self.public_key.max_int)
        plaintext = encode.encoding
        if plaintext < 0 or plaintext >= self.public_key.n:
            raise ValueError("Scalar out of bounds: %i" % plaintext)
        if plaintext >= self.public_key.n - self.public_key.max_int:
            # Very large plaintext, play a sneaky trick using inverses
            neg_c = gmpy_math.invert(self.ciphertext(False), self.public_key.nsquare)
            neg_scalar = self.public_key.n - plaintext
            ciphertext = gmpy_math.powmod(neg_c, neg_scalar, self.public_key.nsquare)
        else:
            ciphertext = gmpy_math.powmod(self.ciphertext(False), plaintext, self.public_key.nsquare)
        # Exponents add under multiplication, mirroring fixed-point semantics.
        exponent = self.exponent + encode.exponent
        return PaillierEncryptedNumber(self.public_key, ciphertext, exponent)
    def increase_exponent_to(self, new_exponent):
        """return PaillierEncryptedNumber:
        new PaillierEncryptedNumber with same value but having great exponent.
        """
        if new_exponent < self.exponent:
            raise ValueError("New exponent %i should be great than old exponent %i" % (new_exponent, self.exponent))
        # Multiplying by BASE**(diff) raises the exponent without changing the value.
        factor = pow(FixedPointNumber.BASE, new_exponent - self.exponent)
        new_encryptednumber = self.__mul__(factor)
        new_encryptednumber.exponent = new_exponent
        return new_encryptednumber
    def __align_exponent(self, x, y):
        """return x,y with same exponet

        Raises the smaller exponent of the pair so addition is well defined.
        """
        if x.exponent < y.exponent:
            x = x.increase_exponent_to(y.exponent)
        elif x.exponent > y.exponent:
            y = y.increase_exponent_to(x.exponent)
        return x, y
    def __add_scalar(self, scalar):
        """return PaillierEncryptedNumber: z = E(x) + y
        """
        if isinstance(scalar, FixedPointNumber):
            scalar = scalar.decode()
        # Cap the encoding exponent at self.exponent so no decrease is needed.
        encoded = FixedPointNumber.encode(scalar,
                                          self.public_key.n,
                                          self.public_key.max_int,
                                          max_exponent=self.exponent)
        return self.__add_fixpointnumber(encoded)
    def __add_fixpointnumber(self, encoded):
        """return PaillierEncryptedNumber: z = E(x) + FixedPointNumber(y)
        """
        if self.public_key.n != encoded.n:
            raise ValueError("Attempted to add numbers encoded against different public keys!")
        # their exponents must match, and align.
        x, y = self.__align_exponent(self, encoded)
        # r = 1: no fresh randomness needed, the sum inherits x's obfuscation.
        encrypted_scalar = x.public_key.raw_encrypt(y.encoding, 1)
        encryptednumber = self.__raw_add(x.ciphertext(False), encrypted_scalar, x.exponent)
        return encryptednumber
    def __add_encryptednumber(self, other):
        """return PaillierEncryptedNumber: z = E(x) + E(y)
        """
        if self.public_key != other.public_key:
            raise ValueError("add two numbers have different public key!")
        # their exponents must match, and align.
        x, y = self.__align_exponent(self, other)
        encryptednumber = self.__raw_add(x.ciphertext(False), y.ciphertext(False), x.exponent)
        return encryptednumber
    def __raw_add(self, e_x, e_y, exponent):
        """return the integer E(x + y) given ints E(x) and E(y).

        Homomorphic addition is ciphertext multiplication mod n**2.
        """
        ciphertext = gmpy_math.mpz(e_x) * gmpy_math.mpz(e_y) % self.public_key.nsquare
        return PaillierEncryptedNumber(self.public_key, int(ciphertext), exponent)
| 12,222
| 34.531977
| 116
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/hash/hash_factory.py
|
import base64
import hashlib
from fate_crypto.hash import sm3_hash
from federatedml.util import consts
SUPPORT_METHOD = [consts.MD5, consts.SHA1, consts.SHA224, consts.SHA256,
consts.SHA384, consts.SHA512, consts.SM3, "none"]
def compute_md5(value):
    """Return the MD5 digest of *value* (a str) as a lowercase hex string."""
    return hashlib.md5(value.encode("utf-8")).hexdigest()
def compute_md5_base64(value):
    """Return the MD5 digest of *value* (a str), base64-encoded, as a str."""
    raw_digest = hashlib.md5(value.encode("utf-8")).digest()
    return base64.b64encode(raw_digest).decode("utf-8")
def compute_md5_bytes(value):
    """Return the raw MD5 digest of *value* (a str) as bytes."""
    return hashlib.md5(value.encode("utf-8")).digest()
def compute_sha256(value):
    """Return the SHA-256 digest of *value* (a str) as a lowercase hex string."""
    return hashlib.sha256(value.encode("utf-8")).hexdigest()
def compute_sha256_base64(value):
    """Return the SHA-256 digest of *value* (a str), base64-encoded, as a str."""
    raw_digest = hashlib.sha256(value.encode("utf-8")).digest()
    return base64.b64encode(raw_digest).decode("utf-8")
def compute_sha256_bytes(value):
    """Return the raw SHA-256 digest of *value* (a str) as bytes."""
    return hashlib.sha256(value.encode("utf-8")).digest()
def compute_sha1(value):
    """Return the SHA-1 digest of *value* (a str) as a lowercase hex string."""
    return hashlib.sha1(value.encode("utf-8")).hexdigest()
def compute_sha1_base64(value):
    """Return the SHA-1 digest of *value* (a str), base64-encoded, as a str."""
    raw_digest = hashlib.sha1(value.encode("utf-8")).digest()
    return base64.b64encode(raw_digest).decode("utf-8")
def compute_sha1_bytes(value):
    """Return the raw SHA-1 digest of *value* (a str) as bytes."""
    return hashlib.sha1(value.encode("utf-8")).digest()
def compute_sha224(value):
    """Return the SHA-224 digest of *value* (a str) as a lowercase hex string."""
    return hashlib.sha224(value.encode("utf-8")).hexdigest()
def compute_sha224_base64(value):
    """Return the SHA-224 digest of *value* (a str), base64-encoded, as a str."""
    raw_digest = hashlib.sha224(value.encode("utf-8")).digest()
    return base64.b64encode(raw_digest).decode("utf-8")
def compute_sha224_bytes(value):
    """Return the raw SHA-224 digest of *value* (a str) as bytes."""
    return hashlib.sha224(value.encode("utf-8")).digest()
def compute_sha512(value):
    """Return the SHA-512 digest of *value* (a str) as a lowercase hex string."""
    return hashlib.sha512(value.encode("utf-8")).hexdigest()
def compute_sha512_base64(value):
    """Return the SHA-512 digest of *value* (a str), base64-encoded, as a str."""
    raw_digest = hashlib.sha512(value.encode("utf-8")).digest()
    return base64.b64encode(raw_digest).decode("utf-8")
def compute_sha512_bytes(value):
    """Return the raw SHA-512 digest of *value* (a str) as bytes."""
    return hashlib.sha512(value.encode("utf-8")).digest()
def compute_sha384(value):
    """Return the SHA-384 digest of *value* (a str) as a lowercase hex string."""
    return hashlib.sha384(value.encode("utf-8")).hexdigest()
def compute_sha384_base64(value):
    """Return the SHA-384 digest of *value* (a str), base64-encoded, as a str."""
    raw_digest = hashlib.sha384(value.encode("utf-8")).digest()
    return base64.b64encode(raw_digest).decode("utf-8")
def compute_sha384_bytes(value):
    """Return the raw SHA-384 digest of *value* (a str) as bytes."""
    return hashlib.sha384(value.encode("utf-8")).digest()
def compute_sm3(value):
    """Return the SM3 digest of *value* (a str) as a lowercase hex string."""
    return sm3_hash(value.encode("utf-8")).hex()
def compute_sm3_base64(value):
    """Return the SM3 digest of *value* (a str), base64-encoded, as a str."""
    raw_digest = sm3_hash(value.encode("utf-8"))
    return base64.b64encode(raw_digest).decode("utf-8")
def compute_sm3_bytes(value):
    """Return the raw SM3 digest of *value* (a str) as bytes."""
    return bytes(sm3_hash(value.encode("utf-8")))
def compute_no_hash(value):
    """Identity "hash": return *value* converted to str, unhashed."""
    return str(value)
def compute_no_hash_base64(value):
    """Return *value* (a str) base64-encoded as a str, without hashing."""
    return base64.b64encode(value.encode("utf-8")).decode("utf-8")
def compute_no_hash_bytes(value):
    """Return str(*value*) encoded as UTF-8 bytes, without hashing."""
    return str(value).encode("utf-8")
# Dispatch tables mapping each supported method name (see SUPPORT_METHOD)
# to the digest function that produces the corresponding output encoding.
# Hex-string output:
HASH_HEX_FUNCTION = {
    consts.MD5: compute_md5,
    consts.SHA1: compute_sha1,
    consts.SHA224: compute_sha224,
    consts.SHA256: compute_sha256,
    consts.SHA384: compute_sha384,
    consts.SHA512: compute_sha512,
    consts.SM3: compute_sm3,
    "none": compute_no_hash
}
# Raw-bytes output:
HASH_BYTE_FUNCTION = {
    consts.MD5: compute_md5_bytes,
    consts.SHA1: compute_sha1_bytes,
    consts.SHA224: compute_sha224_bytes,
    consts.SHA256: compute_sha256_bytes,
    consts.SHA384: compute_sha384_bytes,
    consts.SHA512: compute_sha512_bytes,
    consts.SM3: compute_sm3_bytes,
    "none": compute_no_hash_bytes
}
# Base64-string output:
HASH_BASE64_FUNCTION = {
    consts.MD5: compute_md5_base64,
    consts.SHA1: compute_sha1_base64,
    consts.SHA224: compute_sha224_base64,
    consts.SHA256: compute_sha256_base64,
    consts.SHA384: compute_sha384_base64,
    consts.SHA512: compute_sha512_base64,
    consts.SM3: compute_sm3_base64,
    "none": compute_no_hash_base64
}
class Hash:
    """Configurable hashing helper.

    Picks a digest function once at construction from the module-level
    dispatch tables, based on *method* and the requested output encoding.

    Parameters
    ----------
    method : str
        One of SUPPORT_METHOD (md5 / sha1 / sha224 / sha256 / sha384 /
        sha512 / sm3, or "none" for no hashing).
    base64 : int
        Truthy to emit a base64-encoded digest.
    hex_output : bool
        Used only when *base64* is falsy: True for a hex string,
        False for raw bytes.

    Raises
    ------
    ValueError
        If *method* is not in SUPPORT_METHOD.
    """

    def __init__(self, method, base64=0, hex_output=True):
        self.method = method
        if self.method not in SUPPORT_METHOD:
            raise ValueError("Hash does not support method:{}".format(self.method))
        self.base64 = base64
        self.hex_output = hex_output
        # Bug fix: the base64 selection used to be unconditionally
        # overwritten by the hex/bytes branch below (hex_output defaults
        # to True), so base64 output could never actually be produced.
        # base64 now takes precedence, as its parameter implies.
        if self.base64:
            self.hash_operator = HASH_BASE64_FUNCTION[self.method]
        elif self.hex_output:
            self.hash_operator = HASH_HEX_FUNCTION[self.method]
        else:
            self.hash_operator = HASH_BYTE_FUNCTION[self.method]

    def compute(self, value, prefix_salt=None, suffix_salt=None):
        """Hash str(*value*), optionally salted on either side.

        Returns hex str, base64 str, or bytes per the constructor choice.
        """
        value = str(value)
        if prefix_salt:
            value = prefix_salt + value
        if suffix_salt:
            value = value + suffix_salt
        return self.hash_operator(value)
| 4,552
| 26.427711
| 98
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/hash/__init__.py
| 0
| 0
| 0
|
py
|
|
FATE
|
FATE-master/python/federatedml/secureprotol/random_oracle/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 663
| 33.947368
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/random_oracle/base_random_oracle.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class RandomOracle(object):
    """Abstract base class for random-oracle style primitives.

    Concrete subclasses (hash functions, MACs) carry their own state;
    this base holds none.
    """

    def __init__(self):
        # Intentionally empty: nothing to initialize at this level.
        pass
| 774
| 28.807692
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/random_oracle/hash_function/sha256.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
from federatedml.secureprotol.random_oracle.hash_function.hash_function import HashFunction
class Sha256(HashFunction):
    """HashFunction backed by SHA-256 from hashlib."""

    def __init__(self):
        super(Sha256, self).__init__(hashlib.sha256())

    def digest(self, message):
        """Return the SHA-256 digest (bytes) of *message* (bytes).

        A fresh hash object is created per call and kept on self.function.
        """
        hasher = hashlib.sha256()
        hasher.update(message)
        self.function = hasher
        return hasher.digest()
| 1,030
| 30.242424
| 91
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/random_oracle/hash_function/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 663
| 33.947368
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/random_oracle/hash_function/hash_function.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.secureprotol.random_oracle.base_random_oracle import RandomOracle
class HashFunction(RandomOracle):
    """Random oracle realized by a cryptographic hash function.

    Stores the underlying hash object on self.function; subclasses
    implement digest().
    """

    def __init__(self, function):
        super(HashFunction, self).__init__()
        self.function = function

    def digest(self, message):
        """Consume *message* (bytes) and return a digest (bytes).

        Abstract here (returns None); overridden by concrete subclasses.
        """
        pass
| 1,115
| 28.368421
| 82
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/random_oracle/message_authentication_code/hash_mac.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.secureprotol.random_oracle.message_authentication_code.mac import MessageAuthenticationCode
class HashBasedMessageAuthenticationCode(MessageAuthenticationCode):
    """MAC built on a hash function (HMAC-style).

    Stores the underlying MAC object on self.function; subclasses
    implement digest().
    """

    def __init__(self, function):
        super(HashBasedMessageAuthenticationCode, self).__init__()
        self.function = function

    def digest(self, message):
        """Consume *message* (bytes) and return a digest (bytes).

        Abstract here (returns None); overridden by concrete subclasses.
        """
        pass
| 1,184
| 30.184211
| 108
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/random_oracle/message_authentication_code/sha256_mac.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hmac
from federatedml.secureprotol.random_oracle.message_authentication_code.hash_mac import \
HashBasedMessageAuthenticationCode
class Sha256MAC(HashBasedMessageAuthenticationCode):
    """HMAC keyed with *key* (bytes) using SHA-256 as the inner hash."""

    def __init__(self, key):
        self.mode = 'sha256'
        self.key = key
        initial_mac = hmac.new(self.key, digestmod=self.mode)
        super(Sha256MAC, self).__init__(initial_mac)

    def digest(self, message):
        """Return the HMAC-SHA256 digest (bytes) of *message* (bytes).

        A fresh HMAC object is created per call and kept on self.function.
        """
        mac = hmac.new(self.key, digestmod=self.mode)
        mac.update(message)
        self.function = mac
        return mac.digest()
| 1,232
| 30.615385
| 89
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/random_oracle/message_authentication_code/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 663
| 33.947368
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/random_oracle/message_authentication_code/mac.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.secureprotol.random_oracle.base_random_oracle import RandomOracle
class MessageAuthenticationCode(RandomOracle):
    """Random oracle realized by a message authentication code (MAC).

    Marker base class; concrete MAC behavior lives in subclasses.
    """

    def __init__(self):
        super(MessageAuthenticationCode, self).__init__()
| 902
| 30.137931
| 82
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/test/fixedpoint_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import unittest
from federatedml.secureprotol.fixedpoint import FixedPointNumber
class TestFixedPointNumber(unittest.TestCase):
    """Unit tests for FixedPointNumber: encode/decode round-trips plus the
    arithmetic (+, -, *, /) and comparison operator overloads."""
    def setUp(self):
        unittest.TestCase.setUp(self)
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def test_encode_decode(self):
        # Exact round-trip for positive and negative ints.
        for i in range(100):
            en_i = FixedPointNumber.encode(i)
            de_en_i = en_i.decode()
            self.assertEqual(de_en_i, i)
            en_i = FixedPointNumber.encode(-i)
            de_en_i = en_i.decode()
            self.assertEqual(de_en_i, -i)
        # Approximate round-trip for floats.
        for i in range(100):
            x = i * 0.6
            en_x = FixedPointNumber.encode(x)
            de_en_x = en_x.decode()
            self.assertAlmostEqual(de_en_x, x)
        elem = np.ones(100) * np.random.rand()
        for x in elem:
            en_x = FixedPointNumber.encode(x)
            de_en_x = en_x.decode()
            self.assertAlmostEqual(de_en_x, x)
        elem = np.ones(100) * np.random.randint(100)
        for x in elem:
            en_x = FixedPointNumber.encode(x)
            de_en_x = en_x.decode()
            self.assertAlmostEqual(de_en_x, x)
    def test_add(self):
        # Mixed-sign addition of int, float and range values.
        x_li = np.ones(100) * np.random.randint(100)
        y_li = np.ones(100) * np.random.randint(1000)
        z_li = np.ones(100) * np.random.rand()
        t_li = range(100)
        for i in range(x_li.shape[0]):
            x = x_li[i]
            y = y_li[i]
            z = z_li[i]
            t = t_li[i]
            en_x = FixedPointNumber.encode(x)
            en_y = FixedPointNumber.encode(y)
            en_z = FixedPointNumber.encode(-z)
            en_t = FixedPointNumber.encode(-t)
            en_res = en_x + en_y + en_z + en_t
            res = x + y + (-z) + (-t)
            de_en_res = en_res.decode()
            self.assertAlmostEqual(de_en_res, res)
    def test_sub(self):
        x_li = np.ones(100) * np.random.randint(100)
        y_li = np.ones(100) * np.random.randint(1000)
        z_li = np.ones(100) * np.random.rand()
        t_li = range(100)
        for i in range(x_li.shape[0]):
            x = x_li[i]
            y = y_li[i]
            z = z_li[i]
            t = t_li[i]
            en_x = FixedPointNumber.encode(x)
            en_y = FixedPointNumber.encode(y)
            en_z = FixedPointNumber.encode(z)
            en_t = FixedPointNumber.encode(t)
            en_res = en_x - en_y - en_z - en_t
            res = x - y - z - t
            de_en_res = en_res.decode()
            self.assertAlmostEqual(de_en_res, res)
    def test_mul(self):
        # Combined multiply/add expressions, including negative factors.
        x_li = np.ones(100) * np.random.randint(100)
        y_li = np.ones(100) * np.random.randint(1000) * -1
        z_li = np.ones(100) * np.random.rand()
        t_li = range(0, 100)
        for i in range(x_li.shape[0]):
            x = x_li[i]
            y = y_li[i]
            z = z_li[i]
            t = t_li[i]
            en_x = FixedPointNumber.encode(x)
            en_res = (en_x * y + z) * t
            res = (x * y + z) * t
            de_en_res = en_res.decode()
            self.assertAlmostEqual(de_en_res, res)
        # Repeated accumulation should not drift.
        x = 9
        en_x = FixedPointNumber.encode(x)
        for i in range(100):
            en_x = en_x + 5000 - 0.2
            x = x + 5000 - 0.2
        de_en_x = en_x.decode()
        self.assertAlmostEqual(de_en_x, x)
    def test_div(self):
        for i in range(100):
            x = np.random.randn() * 100
            y = np.random.randn() * 100
            en_x = FixedPointNumber.encode(x)
            en_y = FixedPointNumber.encode(y)
            z = x / y
            en_z = en_x / en_y
            de_en_z = en_z.decode()
            self.assertAlmostEqual(de_en_z, z)
    def test_lt(self):
        # Comparisons on encoded values must match plain-value comparisons.
        for i in range(100):
            x = np.random.randn() * 100
            y = np.random.randn() * 100
            en_x = FixedPointNumber.encode(x)
            en_y = FixedPointNumber.encode(y)
            z = x < y
            en_z = en_x < en_y
            self.assertEqual(en_z, z)
    def test_gt(self):
        for i in range(100):
            x = np.random.randn() * 100
            y = np.random.randn() * 100
            en_x = FixedPointNumber.encode(x)
            en_y = FixedPointNumber.encode(y)
            z = x > y
            en_z = en_x > en_y
            self.assertEqual(en_z, z)
    def test_le(self):
        # Small random ints so the equal case is actually exercised.
        for i in range(100):
            x = np.random.randint(10)
            y = np.random.randint(10)
            en_x = FixedPointNumber.encode(x)
            en_y = FixedPointNumber.encode(y)
            z = x <= y
            en_z = en_x <= en_y
            self.assertEqual(en_z, z)
    def test_ge(self):
        for i in range(100):
            x = np.random.randint(10)
            y = np.random.randint(10)
            en_x = FixedPointNumber.encode(x)
            en_y = FixedPointNumber.encode(y)
            z = x >= y
            en_z = en_x >= en_y
            self.assertEqual(en_z, z)
    def test_eq(self):
        for i in range(100):
            x = np.random.randint(10)
            y = np.random.randint(10)
            en_x = FixedPointNumber.encode(x)
            en_y = FixedPointNumber.encode(y)
            z = x == y
            en_z = en_x == en_y
            self.assertEqual(en_z, z)
    def test_ne(self):
        for i in range(100):
            x = np.random.randint(10)
            y = np.random.randint(10)
            en_x = FixedPointNumber.encode(x)
            en_y = FixedPointNumber.encode(y)
            z = x != y
            en_z = en_x != en_y
            self.assertEqual(en_z, z)
if __name__ == '__main__':
    unittest.main()
| 6,296
| 29.717073
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/test/fate_paillier_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import unittest
from federatedml.secureprotol.fate_paillier import PaillierKeypair
from federatedml.secureprotol.fate_paillier import PaillierPublicKey
from federatedml.secureprotol.fate_paillier import PaillierPrivateKey
from federatedml.secureprotol.fate_paillier import PaillierEncryptedNumber
class TestPaillierEncryptedNumber(unittest.TestCase):
    """Unit tests for homomorphic add/multiply on PaillierEncryptedNumber."""
    def setUp(self):
        # Fresh keypair per test; generate_keypair uses its default key size.
        self.public_key, self.private_key = PaillierKeypair.generate_keypair()
    def tearDown(self):
        unittest.TestCase.tearDown(self)
    def test_add(self):
        # Sum of four ciphertexts must decrypt to the plaintext sum.
        x_li = np.ones(100) * np.random.randint(100)
        y_li = np.ones(100) * np.random.randint(1000)
        z_li = np.ones(100) * np.random.rand()
        t_li = range(100)
        for i in range(x_li.shape[0]):
            x = x_li[i]
            y = y_li[i]
            z = z_li[i]
            t = t_li[i]
            en_x = self.public_key.encrypt(x)
            en_y = self.public_key.encrypt(y)
            en_z = self.public_key.encrypt(z)
            en_t = self.public_key.encrypt(t)
            en_res = en_x + en_y + en_z + en_t
            res = x + y + z + t
            de_en_res = self.private_key.decrypt(en_res)
            self.assertAlmostEqual(de_en_res, res)
    def test_mul(self):
        # Ciphertext-by-scalar products (incl. negatives) decrypt correctly.
        x_li = np.ones(100) * np.random.randint(100)
        y_li = np.ones(100) * np.random.randint(1000) * -1
        z_li = np.ones(100) * np.random.rand()
        t_li = range(100)
        for i in range(x_li.shape[0]):
            x = x_li[i]
            y = y_li[i]
            z = z_li[i]
            t = t_li[i]
            en_x = self.public_key.encrypt(x)
            en_res = (en_x * y + z) * t
            res = (x * y + z) * t
            de_en_res = self.private_key.decrypt(en_res)
            self.assertAlmostEqual(de_en_res, res)
        # Repeated accumulation on a single ciphertext should not drift.
        x = 9
        en_x = self.public_key.encrypt(x)
        for i in range(100):
            en_x = en_x + 5000 - 0.2
            x = x + 5000 - 0.2
        de_en_x = self.private_key.decrypt(en_x)
        self.assertAlmostEqual(de_en_x, x)
if __name__ == '__main__':
    unittest.main()
| 2,751
| 30.632184
| 78
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/test/__init__.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 616
| 37.5625
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/test/encrypt_mode_test.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import numpy as np
import unittest
class TestEncryptModeCalculator(unittest.TestCase):
    """Unit tests for EncryptModeCalculator over list/tuple/ndarray rows in a
    distributed table, across all supported encryption modes."""
    def setUp(self):
        from fate_arch.session import computing_session as session
        session.init("test_encrypt_mode_calculator")
        # Build 30 parallel rows of 20 ints in three container flavors.
        self.list_data = []
        self.tuple_data = []
        self.numpy_data = []
        for i in range(30):
            list_value = [100 * i + j for j in range(20)]
            tuple_value = tuple(list_value)
            numpy_value = np.array(list_value, dtype="int")
            self.list_data.append(list_value)
            self.tuple_data.append(tuple_value)
            self.numpy_data.append(numpy_value)
        self.data_list = session.parallelize(self.list_data, include_key=False, partition=10)
        self.data_tuple = session.parallelize(self.tuple_data, include_key=False, partition=10)
        self.data_numpy = session.parallelize(self.numpy_data, include_key=False, partition=10)
    def test_data_type(self, mode="strict", re_encrypted_rate=0.2):
        # Encryption must preserve the container type and length of each row.
        from federatedml.secureprotol import PaillierEncrypt
        from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator
        encrypter = PaillierEncrypt()
        encrypter.generate_key(1024)
        encrypted_calculator = EncryptModeCalculator(encrypter, mode, re_encrypted_rate)
        data_list = dict(encrypted_calculator.encrypt(self.data_list).collect())
        data_tuple = dict(encrypted_calculator.encrypt(self.data_tuple).collect())
        data_numpy = dict(encrypted_calculator.encrypt(self.data_numpy).collect())
        for key, value in data_list.items():
            self.assertTrue(isinstance(value, list))
            self.assertTrue(len(value) == len(self.list_data[key]))
        for key, value in data_tuple.items():
            self.assertTrue(isinstance(value, tuple))
            self.assertTrue(len(value) == len(self.tuple_data[key]))
        for key, value in data_numpy.items():
            self.assertTrue(type(value).__name__ == "ndarray")
            self.assertTrue(value.shape[0] == self.numpy_data[key].shape[0])
    def test_data_type_with_diff_mode(self):
        # Re-run the type-preservation check for every supported mode.
        mode_list = ["strict", "fast", "confusion_opt", "balance", "confusion_opt_balance"]
        for mode in mode_list:
            self.test_data_type(mode=mode)
    def test_diff_mode(self, round=10, mode="strict", re_encrypted_rate=0.2):
        # Encrypt shifted copies over several rounds and verify decryption.
        from federatedml.secureprotol.encrypt_mode import EncryptModeCalculator
        from federatedml.secureprotol import PaillierEncrypt
        encrypter = PaillierEncrypt()
        encrypter.generate_key(1024)
        encrypted_calculator = EncryptModeCalculator(encrypter, mode, re_encrypted_rate)
        for i in range(round):
            # NOTE(review): the lambdas close over the loop variable i; this is
            # fine here because each is consumed before i changes.
            data_i = self.data_numpy.mapValues(lambda v: v + i)
            data_i = encrypted_calculator.encrypt(data_i)
            decrypt_data_i = dict(data_i.mapValues(lambda arr: np.array(
                [encrypter.decrypt(val) for val in arr])).collect())
            for j in range(30):
                self.assertTrue(np.fabs(self.numpy_data[j] - decrypt_data_i[j] + i).all() < 1e-5)
if __name__ == '__main__':
    unittest.main()
| 3,784
| 41.055556
| 97
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/test/encode_test.py
|
import unittest
from federatedml.secureprotol.encode import Encode
class TestEncode(unittest.TestCase):
    """Tests for Encode: salted hashing of string values with optional
    base64-encoded output.

    Fixes applied: removed ~80 lines of commented-out dead code and
    collapsed six near-identical copy-paste test sections into a single
    data-driven loop over an expected-digest table.  The active assertions
    are unchanged: each supported method is checked against known digests
    of pre_salt + value + postfit_salt, `is_support` is checked for all
    supported and one unsupported method, and an unknown method must fall
    back to returning the input value unchanged.
    """

    def test_compute(self):
        """Verify compute() for every supported hash method, with and
        without base64 output, plus is_support() and the unknown-method
        fallback."""
        value_list = ["12345", "54321", "111111"]
        pre_salt = ""
        postfit_salt = "12345"
        # Single value actually hashed below; digests were produced for
        # pre_salt + value + postfit_salt == "1234512345".
        value = value_list[0]

        # method -> (hex digest, base64 digest) of the salted value.
        expected = {
            "md5": (
                "8cfa2282b17de0a598c010f5f0109e7d",
                "jPoigrF94KWYwBD18BCefQ==",
            ),
            "sha1": (
                "6eeafaef013319822a1f30407a5353f778b59790",
                "bur67wEzGYIqHzBAelNT93i1l5A=",
            ),
            "sha224": (
                "b95b108d21c512ada040b8b1ca6158bf921e86de29f6ff8c1ebeb893",
                "uVsQjSHFEq2gQLixymFYv5Ieht4p9v+MHr64kw==",
            ),
            "sha256": (
                "e4a0a90e5ac07d5435c6f25c4cf7cc565becb797bb5b83c515bc427ef32a4770",
                "5KCpDlrAfVQ1xvJcTPfMVlvst5e7W4PFFbxCfvMqR3A=",
            ),
            "sha384": (
                "55063a4b478a3c3eca0582ca781674d18b3c66bd476145b80fd27cda199a0c3064369a2af66bd6721468fe2b2bffd09b",
                "VQY6S0eKPD7KBYLKeBZ00Ys8Zr1HYUW4D9J82hmaDDBkNpoq9mvWchRo/isr/9Cb",
            ),
            "sha512": (
                "6e784ee231562819f1c01968c08db395a17470b44594314804b2358ef448f783"
                "a1aba2a4be16529be1e80ba6f310f6881738c1cc6c7790e6652dd9cd94d25a56",
                "bnhO4jFWKBnxwBlowI2zlaF0cLRFlDFIBLI1jvRI94Ohq6KkvhZSm+HoC6bzEPaI"
                "FzjBzGx3kOZlLdnNlNJaVg==",
            ),
        }

        for method, (hex_digest, b64_digest) in expected.items():
            self.assertEqual(
                Encode(method, base64=0).compute(value, pre_salt, postfit_salt),
                hex_digest,
            )
            self.assertEqual(
                Encode(method, base64=1).compute(value, pre_salt, postfit_salt),
                b64_digest,
            )

        # is_support recognises exactly the implemented methods.
        for method in expected:
            self.assertTrue(Encode.is_support(method))
        self.assertFalse(Encode.is_support("sha2"))

        # test compute with unsupported method: the value passes through
        # unchanged.
        test_compute = Encode("sha3840000", base64=0)
        self.assertEqual(test_compute.compute(value, pre_salt, postfit_salt), value)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 10,159
| 47.61244
| 143
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/secret_sharing/verifiable_secret_sharing/feldman_verifiable_secret_sharing.py
|
import random
from federatedml.secureprotol import gmpy_math
from gmpy2 import mpz
class FeldmanVerifiableSecretSharing(object):
    """Feldman's verifiable secret sharing (VSS).

    A secret is split into `share_amount` polynomial shares over the prime
    subgroup of order q (parameters p, g, q from RFC 5114, set by
    key_pair()).  Alongside the shares, `g ** coefficient mod p`
    commitments are published so each share can be verified without
    revealing the secret.  Fractional secrets are embedded as fixed-point
    integers via encode()/decode().
    """

    def __init__(self):
        # Fixed-point precision: secrets are scaled by 10 ** Q_n in encode().
        self.Q_n = 6
        # Group parameters; all None until key_pair() is called.
        self.p = None  # prime modulus
        self.g = None  # generator of the order-q subgroup
        self.q = None  # prime order of the subgroup
        # Number of shares produced; -1 until set_share_amount() is called.
        self.share_amount = -1
        self.commitments = []

    def set_share_amount(self, host_count):
        # One share per host plus one extra (presumably for the local/guest
        # party — confirm against callers).
        self.share_amount = host_count + 1

    def encrypt(self, secret):
        """Split *secret* into shares.

        Returns a tuple ``(f_x, commitment)`` where ``f_x`` is a list of
        ``(x, f(x) mod q)`` share points for x = 1..share_amount, and
        ``commitment`` holds ``g ** a_i mod p`` for every polynomial
        coefficient ``a_i``.
        """
        # a_0 is the encoded secret; the remaining share_amount-1
        # coefficients are drawn from a CSPRNG in [0, p - 1].
        coefficient = [self.encode(secret)]
        for i in range(self.share_amount - 1):
            random_coefficient = random.SystemRandom().randint(0, self.p - 1)
            coefficient.append(random_coefficient)
        f_x = []
        for x in range(1, self.share_amount + 1):
            y = 0
            # Horner evaluation of the polynomial at x, reduced mod q at
            # every step to keep intermediate values small.
            for c in reversed(coefficient):
                y *= x % self.q
                y += c % self.q
                y %= self.q
            f_x.append((x, y))
        # Feldman commitments, one per coefficient (index 0 commits to the
        # secret itself).
        commitment = list(map(self.calculate_commitment, coefficient))
        return f_x, commitment

    def decrypt(self, x_values, y_values):
        """Reconstruct the secret from share points via Lagrange
        interpolation at x = 0, then decode it back to its original scale."""
        k = len(x_values)
        assert k == len(set(x_values)), 'x_values points must be distinct'
        secret = 0
        for i in range(k):
            numerator, denominator = 1, 1
            for j in range(k):
                if i == j:
                    continue
                # compute a fraction & update the existing numerator + denominator
                numerator = (numerator * (0 - x_values[j]))
                denominator = (denominator * (x_values[i] - x_values[j]))
            # get the polynomial from the numerator + denominator mod inverse
            lagrange_polynomial = (numerator * gmpy_math.invert(denominator, self.q)) % self.q
            # multiply the current y & the evaluated polynomial & add it to f(x)
            secret = (self.q + secret + (y_values[i] * lagrange_polynomial)) % self.q
        return self.decode(secret)

    def calculate_commitment(self, coefficient):
        """Return the Feldman commitment ``g ** coefficient mod p``."""
        return gmpy_math.powmod(self.g, coefficient, self.p)

    def verify(self, f_x, commitment):
        """Check one share point against the published commitments.

        Verifies ``g ** y == prod(commitment[i] ** (x ** i)) (mod p)``;
        returns True iff the share is consistent.
        """
        x, y = f_x[0], f_x[1]
        v1 = gmpy_math.powmod(self.g, y, self.p)
        v2 = 1
        for i in range(len(commitment)):
            v2 *= gmpy_math.powmod(commitment[i], (x**i), self.p)
        v2 = v2 % self.p
        if v1 != v2:
            return False
        return True

    def encode(self, x):
        """Embed *x* as a fixed-point integer scaled by 10 ** Q_n.

        NOTE(review): the overflow assertion only fires for integer
        inputs; float secrets are scaled without a range check.
        """
        upscaled = int(x * (10 ** self.Q_n))
        if isinstance(x, int):
            assert (abs(upscaled) < (self.q / (2 * self.share_amount))), (
                f"{x} cannot be correctly embedded: choose bigger q or a lower precision"
            )
        return upscaled

    def decode(self, s):
        """Map a field element back to a signed fixed-point number.

        Values above q/2 are interpreted as negatives (two's-complement
        style wraparound in the field).
        """
        gate = s > self.q / 2
        neg_nums = (s - self.q) * gate
        pos_nums = s * (1 - gate)
        integer, fraction = divmod((neg_nums + pos_nums), (10 ** self.Q_n))
        # Return a plain int when there is no fractional part, else a float.
        result = integer if fraction == 0 else integer + fraction / (10**self.Q_n)
        return result

    @staticmethod
    def _decode_hex_string(number_str):
        # Strip all whitespace from the spaced hex dump and parse as an
        # integer (via gmpy2's mpz for speed on big numbers).
        return int(mpz("0x{0}".format("".join(number_str.split()))))

    def key_pair(self):
        """Load the fixed RFC 5114 1024-bit MODP group parameters with a
        160-bit prime-order subgroup:
        q = 0xF518AA8781A8DF278ABA4E7D64B7CB9D49462353
        refer to https://tools.ietf.org/html/rfc5114
        """
        self.p = FeldmanVerifiableSecretSharing._decode_hex_string("""
            B10B8F96 A080E01D DE92DE5E AE5D54EC 52C99FBC FB06A3C6
            9A6A9DCA 52D23B61 6073E286 75A23D18 9838EF1E 2EE652C0
            13ECB4AE A9061123 24975C3C D49B83BF ACCBDD7D 90C4BD70
            98488E9C 219A7372 4EFFD6FA E5644738 FAA31A4F F55BCCC0
            A151AF5F 0DC8B4BD 45BF37DF 365C1A65 E68CFDA7 6D4DA708
            DF1FB2BC 2E4A4371
            """)
        self.g = FeldmanVerifiableSecretSharing._decode_hex_string("""
            A4D1CBD5 C3FD3412 6765A442 EFB99905 F8104DD2 58AC507F
            D6406CFF 14266D31 266FEA1E 5C41564B 777E690F 5504F213
            160217B4 B01B886A 5E91547F 9E2749F4 D7FBD7D3 B9A92EE1
            909D0D22 63F80A76 A6A24C08 7A091F53 1DBF0A01 69B6A28A
            D662A4D1 8E73AFA3 2D779D59 18D08BC8 858F4DCE F97C2A24
            855E6EEB 22B3B2E5
            """)
        self.q = FeldmanVerifiableSecretSharing._decode_hex_string("""
            F518AA87 81A8DF27 8ABA4E7D 64B7CB9D 49462353
            """)
| 4,278
| 36.535088
| 94
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/secret_sharing/verifiable_secret_sharing/__init__.py
| 0
| 0
| 0
|
py
|
|
FATE
|
FATE-master/python/federatedml/secureprotol/number_theory/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 663
| 33.947368
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/number_theory/group/cyclc_group.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.secureprotol.number_theory.group.base_group import GroupElement, GroupArithmetic
class CyclicGroupElement(GroupElement):
    """An element of a cyclic group."""

    def __init__(self):
        super().__init__()
class CyclicGroupArithmetic(GroupArithmetic):
    """Arithmetic operators for a cyclic group with a distinguished generator."""

    def __init__(self, identity, generator):
        super().__init__(identity)
        # Generator of the cyclic group.
        self.generator = generator

    def get_generator(self):
        """Return the group generator supplied at construction."""
        return self.generator
| 1,246
| 28.690476
| 97
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/number_theory/group/base_group.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class GroupElement(object):
    """Base class for elements of a mathematical group.

    Carries no state of its own; concrete element types subclass this.
    """

    def __init__(self):
        pass
class GroupArithmetic(object):
    """Abstract collection of arithmetic operators for a group.

    Subclasses override the operator methods; this base only stores the
    identity element and exposes it via get_identity().  The operator
    placeholders return None.
    """

    def __init__(self, identity):
        # Identity element of the group.
        self.identity = identity

    def add(self, a, b):
        """Return a + b (placeholder; subclasses implement)."""
        pass

    def neg(self, a):
        """Return -a (placeholder; subclasses implement)."""
        pass

    def sub(self, a, b):
        """Return a - b (placeholder; subclasses implement)."""
        pass

    def mul(self, scalar, a):
        """Return scalar * a (placeholder; subclasses implement)."""
        pass

    def get_identity(self):
        """Return the identity element supplied at construction."""
        return self.identity
| 1,291
| 19.507937
| 75
|
py
|
FATE
|
FATE-master/python/federatedml/secureprotol/number_theory/group/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 663
| 33.947368
| 75
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.