code stringlengths 17 6.64M |
|---|
class DeepFM(BaseModel):
    """Instantiates the DeepFM Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param use_fm: bool, whether to use the FM part.
    :param dnn_hidden_units: list of positive integer or empty list, the layer number and units in each layer of DNN.
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param init_std: float, std used to initialize embedding vectors.
    :param seed: integer, random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :param device: str, ``"cpu"`` or ``"cuda:0"``.
    :return: A PyTorch model instance.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, use_fm=True,
                 dnn_hidden_units=(256, 128), l2_reg_linear=1e-05, l2_reg_embedding=1e-05,
                 l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0,
                 dnn_activation='relu', dnn_use_bn=False, task='binary', device='cpu'):
        super(DeepFM, self).__init__(linear_feature_columns, dnn_feature_columns,
                                     l2_reg_linear=l2_reg_linear,
                                     l2_reg_embedding=l2_reg_embedding,
                                     init_std=init_std, seed=seed, task=task, device=device)
        self.use_fm = use_fm
        # The deep part is active only when there are features AND hidden layers.
        self.use_dnn = bool(dnn_feature_columns) and bool(dnn_hidden_units)
        if use_fm:
            self.fm = FM()
        if self.use_dnn:
            self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
                           activation=dnn_activation, l2_reg=l2_reg_dnn,
                           dropout_rate=dnn_dropout, use_bn=dnn_use_bn,
                           init_std=init_std, device=device)
            self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
            # Regularize DNN weights (skipping BatchNorm parameters) and the
            # final projection.
            reg_params = [p for p in self.dnn.named_parameters()
                          if 'weight' in p[0] and 'bn' not in p[0]]
            self.add_regularization_weight(reg_params, l2_reg_dnn)
            self.add_regularization_weight(self.dnn_linear.weight, l2_reg_dnn)
        self.to(device)

    def forward(self, X):
        sparse_embedding_list, dense_value_list = self.input_from_feature_columns(
            X, self.dnn_feature_columns, self.embedding_dict)
        # Start from the wide/linear logit and add FM and DNN contributions.
        logit = self.linear_model(X)
        if self.use_fm and len(sparse_embedding_list) > 0:
            logit += self.fm(torch.cat(sparse_embedding_list, dim=1))
        if self.use_dnn:
            deep_in = combined_dnn_input(sparse_embedding_list, dense_value_list)
            logit += self.dnn_linear(self.dnn(deep_in))
        return self.out(logit)
|
class DIN(BaseModel):
    """Instantiates the Deep Interest Network architecture.

    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param history_feature_list: list, names of the sequence sparse fields (behavior features).
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net.
    :param dnn_hidden_units: list of positive integer or empty list, the layer number and units in each layer of deep net.
    :param dnn_activation: Activation function to use in deep net.
    :param att_hidden_size: list of positive integer, the layer number and units in each layer of attention net.
    :param att_activation: Activation function to use in attention net.
    :param att_weight_normalization: bool. Whether normalize the attention score of local activation unit.
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param init_std: float, to use as the initialize std of embedding vector.
    :param seed: integer, to use as random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :return: A PyTorch model instance.
    """

    def __init__(self, dnn_feature_columns, history_feature_list, dnn_use_bn=False, dnn_hidden_units=(256, 128), dnn_activation='relu', att_hidden_size=(64, 16), att_activation='Dice', att_weight_normalization=False, l2_reg_dnn=0.0, l2_reg_embedding=1e-06, dnn_dropout=0, init_std=0.0001, seed=1024, task='binary', device='cpu'):
        # DIN has no wide/linear part, hence the empty linear feature list.
        super(DIN, self).__init__([], dnn_feature_columns, l2_reg_linear=0, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device)
        self.sparse_feature_columns = (list(filter((lambda x: isinstance(x, SparseFeat)), dnn_feature_columns)) if dnn_feature_columns else [])
        self.varlen_sparse_feature_columns = (list(filter((lambda x: isinstance(x, VarLenSparseFeat)), dnn_feature_columns)) if dnn_feature_columns else [])
        self.history_feature_list = history_feature_list
        self.history_feature_columns = []
        self.sparse_varlen_feature_columns = []
        # Behavior-sequence columns are identified by the 'hist_' name prefix.
        self.history_fc_names = list(map((lambda x: ('hist_' + x)), history_feature_list))
        # Split var-len columns into history columns (attention keys) and other
        # sequence columns (which get ordinary pooling in forward()).
        for fc in self.varlen_sparse_feature_columns:
            feature_name = fc.name
            if (feature_name in self.history_fc_names):
                self.history_feature_columns.append(fc)
            else:
                self.sparse_varlen_feature_columns.append(fc)
        att_emb_dim = self._compute_interest_dim()
        self.attention = AttentionSequencePoolingLayer(att_hidden_units=att_hidden_size, embedding_dim=att_emb_dim, activation=att_activation, return_score=False, supports_masking=False, weight_normalization=att_weight_normalization)
        self.dnn = DNN(inputs_dim=self.compute_input_dim(dnn_feature_columns), hidden_units=dnn_hidden_units, activation=dnn_activation, dropout_rate=dnn_dropout, l2_reg=l2_reg_dnn, use_bn=dnn_use_bn)
        self.dnn_linear = nn.Linear(dnn_hidden_units[(- 1)], 1, bias=False).to(device)
        self.to(device)

    def forward(self, X):
        (sparse_embedding_list, dense_value_list) = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)
        # Query: candidate-item embeddings; keys: historical behavior embeddings.
        query_emb_list = embedding_lookup(X, self.embedding_dict, self.feature_index, self.sparse_feature_columns, self.history_feature_list, self.history_feature_list, to_list=True)
        keys_emb_list = embedding_lookup(X, self.embedding_dict, self.feature_index, self.history_feature_columns, self.history_fc_names, self.history_fc_names, to_list=True)
        dnn_input_emb_list = embedding_lookup(X, self.embedding_dict, self.feature_index, self.sparse_feature_columns, mask_feat_list=self.history_feature_list, to_list=True)
        # Non-history variable-length features are pooled and appended to the DNN input.
        sequence_embed_dict = varlen_embedding_lookup(X, self.embedding_dict, self.feature_index, self.sparse_varlen_feature_columns)
        sequence_embed_list = get_varlen_pooling_list(sequence_embed_dict, X, self.feature_index, self.sparse_varlen_feature_columns, self.device)
        dnn_input_emb_list += sequence_embed_list
        query_emb = torch.cat(query_emb_list, dim=(- 1))
        keys_emb = torch.cat(keys_emb_list, dim=(- 1))
        # NOTE(review): keys_length is hard-coded to 1 for every sample, so the
        # attention layer is told each behavior sequence has a single valid key;
        # presumably this should be looked up from the sequences' actual lengths
        # — confirm against the attention layer's masking semantics.
        keys_length = torch.ones((query_emb.size(0), 1)).to(self.device)
        deep_input_emb = torch.cat(dnn_input_emb_list, dim=(- 1))
        hist = self.attention(query_emb, keys_emb, keys_length)
        # Concatenate attention-pooled history onto the flat embedding vector.
        deep_input_emb = torch.cat((deep_input_emb, hist), dim=(- 1))
        deep_input_emb = deep_input_emb.view(deep_input_emb.size(0), (- 1))
        dnn_input = combined_dnn_input([deep_input_emb], dense_value_list)
        dnn_output = self.dnn(dnn_input)
        dnn_logit = self.dnn_linear(dnn_output)
        y_pred = self.out(dnn_logit)
        return y_pred

    def _compute_interest_dim(self):
        # Total embedding width of the sparse features that take part in attention.
        interest_dim = 0
        for feat in self.sparse_feature_columns:
            if (feat.name in self.history_feature_list):
                interest_dim += feat.embedding_dim
        return interest_dim
|
class FiBiNET(BaseModel):
    """Instantiates the Feature Importance and Bilinear feature Interaction NETwork architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param bilinear_type: str, bilinear function type used in Bilinear Interaction Layer, can be ``'all'``, ``'each'`` or ``'interaction'``.
    :param reduction_ratio: integer in [1,inf), reduction ratio used in SENET Layer.
    :param dnn_hidden_units: list of positive integer or empty list, the layer number and units in each layer of DNN.
    :param l2_reg_linear: float. L2 regularizer strength applied to the wide part.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param init_std: float, to use as the initialize std of embedding vector.
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :param device: str, ``"cpu"`` or ``"cuda:0"``.
    :return: A PyTorch model instance.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, bilinear_type='interaction',
                 reduction_ratio=3, dnn_hidden_units=(128, 128), l2_reg_linear=1e-05,
                 l2_reg_embedding=1e-05, l2_reg_dnn=0, init_std=0.0001, seed=1024,
                 dnn_dropout=0, dnn_activation='relu', task='binary', device='cpu'):
        super(FiBiNET, self).__init__(linear_feature_columns, dnn_feature_columns,
                                      l2_reg_linear=l2_reg_linear,
                                      l2_reg_embedding=l2_reg_embedding,
                                      init_std=init_std, seed=seed, task=task, device=device)
        self.linear_feature_columns = linear_feature_columns
        self.dnn_feature_columns = dnn_feature_columns
        self.filed_size = len(self.embedding_dict)
        self.SE = SENETLayer(self.filed_size, reduction_ratio, seed, device)
        self.Bilinear = BilinearInteraction(self.filed_size, self.embedding_size, bilinear_type, seed, device)
        self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
                       activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout,
                       use_bn=False, init_std=init_std, device=device)
        self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
        # Fix: move all registered submodules/parameters to the target device.
        # Every sibling model in this file ends __init__ with this call; it was
        # missing here, leaving parameters on CPU regardless of `device`.
        self.to(device)

    def compute_input_dim(self, feature_columns, include_sparse=True, include_dense=True):
        """Input width of the DNN: pairwise bilinear interactions (from both the
        SENET branch and the raw branch, hence field*(field-1) pairs in total,
        i.e. 2 * C(field, 2)) plus the raw dense dimensions."""
        sparse_feature_columns = (list(filter(lambda x: isinstance(x, (SparseFeat, VarLenSparseFeat)),
                                              feature_columns)) if len(feature_columns) else [])
        dense_feature_columns = (list(filter(lambda x: isinstance(x, DenseFeat),
                                             feature_columns)) if len(feature_columns) else [])
        field_size = len(sparse_feature_columns)
        dense_input_dim = sum(map(lambda x: x.dimension, dense_feature_columns))
        # Fix: guard against an empty sparse-column list (previously raised
        # IndexError on sparse_feature_columns[0]).
        embedding_size = sparse_feature_columns[0].embedding_dim if sparse_feature_columns else 0
        sparse_input_dim = field_size * (field_size - 1) * embedding_size
        input_dim = 0
        if include_sparse:
            input_dim += sparse_input_dim
        if include_dense:
            input_dim += dense_input_dim
        return input_dim

    def forward(self, X):
        sparse_embedding_list, dense_value_list = self.input_from_feature_columns(
            X, self.dnn_feature_columns, self.embedding_dict)
        sparse_embedding_input = torch.cat(sparse_embedding_list, dim=1)
        # Two bilinear-interaction branches: one on SENET-reweighted embeddings,
        # one on the raw embeddings.
        senet_output = self.SE(sparse_embedding_input)
        senet_bilinear_out = self.Bilinear(senet_output)
        bilinear_out = self.Bilinear(sparse_embedding_input)
        linear_logit = self.linear_model(X)
        temp = torch.split(torch.cat((senet_bilinear_out, bilinear_out), dim=1), 1, dim=1)
        dnn_input = combined_dnn_input(temp, dense_value_list)
        dnn_output = self.dnn(dnn_input)
        dnn_logit = self.dnn_linear(dnn_output)
        if len(self.linear_feature_columns) > 0 and len(self.dnn_feature_columns) > 0:
            final_logit = linear_logit + dnn_logit
        elif len(self.linear_feature_columns) == 0:
            final_logit = dnn_logit
        elif len(self.dnn_feature_columns) == 0:
            final_logit = linear_logit
        else:
            raise NotImplementedError
        y_pred = self.out(final_logit)
        return y_pred
|
class MLR(BaseModel):
    """Instantiates the Mixed Logistic Regression/Piece-wise Linear Model.

    :param region_feature_columns: An iterable containing all the features used by the region part of the model.
    :param base_feature_columns: An iterable containing all the features used by the base part of the model.
    :param region_num: integer > 1, indicates the piece number.
    :param l2_reg_linear: float. L2 regularizer strength applied to weights.
    :param init_std: float, to use as the initialize std of embedding vector.
    :param seed: integer, to use as random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :param bias_feature_columns: An iterable containing all the features used by the bias part of the model.
    :param device: str, ``"cpu"`` or ``"cuda:0"``.
    :return: A PyTorch model instance.
    """

    def __init__(self, region_feature_columns, base_feature_columns=None, bias_feature_columns=None,
                 region_num=4, l2_reg_linear=1e-05, init_std=0.0001, seed=1024,
                 task='binary', device='cpu'):
        super(MLR, self).__init__(region_feature_columns, region_feature_columns, task=task, device=device)
        if region_num <= 1:
            raise ValueError('region_num must > 1')
        self.l2_reg_linear = l2_reg_linear
        self.init_std = init_std
        self.seed = seed
        self.device = device
        self.region_num = region_num
        self.region_feature_columns = region_feature_columns
        self.base_feature_columns = base_feature_columns
        self.bias_feature_columns = bias_feature_columns
        # Fall back to the region features when no separate base features given.
        if base_feature_columns is None or len(base_feature_columns) == 0:
            self.base_feature_columns = region_feature_columns
        if bias_feature_columns is None:
            self.bias_feature_columns = []
        self.feature_index = build_input_features(
            self.region_feature_columns + self.base_feature_columns + self.bias_feature_columns)
        # One linear gating model and one linear base learner per region.
        self.region_linear_model = nn.ModuleList([
            Linear(self.region_feature_columns, self.feature_index, self.init_std, self.device)
            for i in range(self.region_num)])
        self.base_linear_model = nn.ModuleList([
            Linear(self.base_feature_columns, self.feature_index, self.init_std, self.device)
            for i in range(self.region_num)])
        if self.bias_feature_columns is not None and len(self.bias_feature_columns) > 0:
            self.bias_model = nn.Sequential(
                Linear(self.bias_feature_columns, self.feature_index, self.init_std, self.device),
                PredictionLayer(task='binary', use_bias=False))
        self.prediction_layer = PredictionLayer(task=task, use_bias=False)
        self.to(self.device)

    def get_region_score(self, inputs, region_number):
        """Softmax gate over the per-region linear models."""
        region_logit = torch.cat([self.region_linear_model[i](inputs)
                                  for i in range(region_number)], dim=-1)
        region_score = nn.Softmax(dim=-1)(region_logit)
        return region_score

    def get_learner_score(self, inputs, region_number):
        """Per-region learner predictions.

        Fix: this must use ``base_linear_model`` (the learners built from
        ``base_feature_columns``); it previously reused ``region_linear_model``,
        which made ``base_feature_columns``/``base_linear_model`` dead weight.
        """
        learner_score = self.prediction_layer(
            torch.cat([self.base_linear_model[i](inputs)
                       for i in range(region_number)], dim=-1))
        return learner_score

    def forward(self, X):
        region_score = self.get_region_score(X, self.region_num)
        learner_score = self.get_learner_score(X, self.region_num)
        # Gated mixture: sum_i p_region(i) * learner_i(x).
        final_logit = torch.sum(region_score * learner_score, dim=-1, keepdim=True)
        if self.bias_feature_columns is not None and len(self.bias_feature_columns) > 0:
            bias_score = self.bias_model(X)
            final_logit = final_logit * bias_score
        return final_logit
|
class NFM(BaseModel):
    """Instantiates the NFM Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param dnn_hidden_units: list of positive integer or empty list, the layer number and units in each layer of deep net.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors.
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param init_std: float, to use as the initialize std of embedding vector.
    :param seed: integer, to use as random seed.
    :param bi_dropout: When > 0, the probability we will drop out the output of the BiInteractionPooling layer.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in deep net.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :param device: str, ``"cpu"`` or ``"cuda:0"``.
    :return: A PyTorch model instance.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(128, 128),
                 l2_reg_embedding=1e-05, l2_reg_linear=1e-05, l2_reg_dnn=0, init_std=0.0001,
                 seed=1024, bi_dropout=0, dnn_dropout=0, dnn_activation='relu',
                 task='binary', device='cpu'):
        super(NFM, self).__init__(linear_feature_columns, dnn_feature_columns,
                                  l2_reg_linear=l2_reg_linear, l2_reg_embedding=l2_reg_embedding,
                                  init_std=init_std, seed=seed, task=task, device=device)
        # The DNN consumes the dense features plus ONE pooled embedding vector
        # (the output of BiInteractionPooling), hence "+ embedding_size".
        dnn_input_dim = self.compute_input_dim(dnn_feature_columns, include_sparse=False) + self.embedding_size
        self.dnn = DNN(dnn_input_dim, dnn_hidden_units, activation=dnn_activation,
                       l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=False,
                       init_std=init_std, device=device)
        self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
        reg_params = [p for p in self.dnn.named_parameters()
                      if 'weight' in p[0] and 'bn' not in p[0]]
        self.add_regularization_weight(reg_params, l2_reg_dnn)
        self.add_regularization_weight(self.dnn_linear.weight, l2_reg_dnn)
        self.bi_pooling = BiInteractionPooling()
        self.bi_dropout = bi_dropout
        if self.bi_dropout > 0:
            self.dropout = nn.Dropout(bi_dropout)
        self.to(device)

    def forward(self, X):
        sparse_embedding_list, dense_value_list = self.input_from_feature_columns(
            X, self.dnn_feature_columns, self.embedding_dict)
        linear_logit = self.linear_model(X)
        # Pool all field embeddings into a single interaction vector.
        pooled = self.bi_pooling(torch.cat(sparse_embedding_list, dim=1))
        if self.bi_dropout:
            pooled = self.dropout(pooled)
        deep_out = self.dnn(combined_dnn_input([pooled], dense_value_list))
        logit = linear_logit + self.dnn_linear(deep_out)
        return self.out(logit)
|
class PNN(BaseModel):
    """Instantiates the Product-based Neural Network architecture.

    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param dnn_hidden_units: list of positive integer or empty list, the layer number and units in each layer of deep net.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param init_std: float, to use as the initialize std of embedding vector.
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN.
    :param use_inner: bool, whether to use the inner-product layer.
    :param use_outter: bool, whether to use the outter-product layer.
    :param kernel_type: str, kernel type used in outter-product, can be ``'mat'``, ``'vec'`` or ``'num'``.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :param device: str, ``"cpu"`` or ``"cuda:0"``.
    :return: A PyTorch model instance.
    """

    def __init__(self, dnn_feature_columns, dnn_hidden_units=(128, 128), l2_reg_embedding=1e-05,
                 l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu',
                 use_inner=True, use_outter=False, kernel_type='mat', task='binary', device='cpu'):
        # PNN has no wide/linear part, hence the empty linear feature list.
        super(PNN, self).__init__([], dnn_feature_columns, l2_reg_linear=0,
                                  l2_reg_embedding=l2_reg_embedding, init_std=init_std,
                                  seed=seed, task=task, device=device)
        if kernel_type not in ('mat', 'vec', 'num'):
            raise ValueError('kernel_type must be mat,vec or num')
        self.use_inner = use_inner
        self.use_outter = use_outter
        self.kernel_type = kernel_type
        self.task = task
        num_inputs = self.compute_input_dim(dnn_feature_columns, include_dense=False, feature_group=True)
        # Number of unordered field pairs: C(num_inputs, 2).
        num_pairs = num_inputs * (num_inputs - 1) // 2
        product_out_dim = 0
        if self.use_inner:
            product_out_dim += num_pairs
            self.innerproduct = InnerProductLayer(device=device)
        if self.use_outter:
            product_out_dim += num_pairs
            self.outterproduct = OutterProductLayer(num_inputs, self.embedding_size,
                                                    kernel_type=kernel_type, device=device)
        self.dnn = DNN(product_out_dim + self.compute_input_dim(dnn_feature_columns),
                       dnn_hidden_units, activation=dnn_activation, l2_reg=l2_reg_dnn,
                       dropout_rate=dnn_dropout, use_bn=False, init_std=init_std, device=device)
        self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
        reg_params = [p for p in self.dnn.named_parameters()
                      if 'weight' in p[0] and 'bn' not in p[0]]
        self.add_regularization_weight(reg_params, l2_reg_dnn)
        self.add_regularization_weight(self.dnn_linear.weight, l2_reg_dnn)
        self.to(device)

    def forward(self, X):
        sparse_embedding_list, dense_value_list = self.input_from_feature_columns(
            X, self.dnn_feature_columns, self.embedding_dict)
        # Assemble the product layer: raw signal, then inner, then outer products.
        parts = [torch.flatten(concat_fun(sparse_embedding_list), start_dim=1)]
        if self.use_inner:
            parts.append(torch.flatten(self.innerproduct(sparse_embedding_list), start_dim=1))
        if self.use_outter:
            parts.append(self.outterproduct(sparse_embedding_list))
        product_layer = torch.cat(parts, dim=1) if len(parts) > 1 else parts[0]
        dnn_input = combined_dnn_input([product_layer], dense_value_list)
        dnn_logit = self.dnn_linear(self.dnn(dnn_input))
        return self.out(dnn_logit)
|
class WDL(BaseModel):
    """Instantiates the Wide&Deep Learning architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param dnn_hidden_units: list of positive integer or empty list, the layer number and units in each layer of DNN.
    :param l2_reg_linear: float. L2 regularizer strength applied to the wide part.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param init_std: float, to use as the initialize std of embedding vector.
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :param device: str, ``"cpu"`` or ``"cuda:0"``.
    :return: A PyTorch model instance.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 128),
                 l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_dnn=0, init_std=0.0001,
                 seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False,
                 task='binary', device='cpu'):
        super(WDL, self).__init__(linear_feature_columns, dnn_feature_columns,
                                  l2_reg_linear=l2_reg_linear, l2_reg_embedding=l2_reg_embedding,
                                  init_std=init_std, seed=seed, task=task, device=device)
        # The deep part is active only when there are features AND hidden layers.
        self.use_dnn = bool(dnn_feature_columns) and bool(dnn_hidden_units)
        if self.use_dnn:
            self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units,
                           activation=dnn_activation, l2_reg=l2_reg_dnn,
                           dropout_rate=dnn_dropout, use_bn=dnn_use_bn,
                           init_std=init_std, device=device)
            self.dnn_linear = nn.Linear(dnn_hidden_units[-1], 1, bias=False).to(device)
            reg_params = [p for p in self.dnn.named_parameters()
                          if 'weight' in p[0] and 'bn' not in p[0]]
            self.add_regularization_weight(reg_params, l2_reg_dnn)
            self.add_regularization_weight(self.dnn_linear.weight, l2_reg_dnn)
        self.to(device)

    def forward(self, X):
        sparse_embedding_list, dense_value_list = self.input_from_feature_columns(
            X, self.dnn_feature_columns, self.embedding_dict)
        # Wide logit plus (optionally) the deep logit.
        logit = self.linear_model(X)
        if self.use_dnn:
            deep_in = combined_dnn_input(sparse_embedding_list, dense_value_list)
            logit += self.dnn_linear(self.dnn(deep_in))
        return self.out(logit)
|
class xDeepFM(BaseModel):
    """Instantiates the xDeepFM architecture.

    :param linear_feature_columns: An iterable containing all the features used by linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param dnn_hidden_units: list of positive integer or empty list, the layer number and units in each layer of deep net.
    :param cin_layer_size: list of positive integer or empty list, the feature maps in each hidden layer of the Compressed Interaction Network.
    :param cin_split_half: bool. If set to True, half of the feature maps in each hidden layer connect to the output unit.
    :param cin_activation: activation function used on feature maps.
    :param l2_reg_linear: float. L2 regularizer strength applied to linear part.
    :param l2_reg_embedding: L2 regularizer strength applied to embedding vector.
    :param l2_reg_dnn: L2 regularizer strength applied to deep net.
    :param l2_reg_cin: L2 regularizer strength applied to CIN.
    :param init_std: float, to use as the initialize std of embedding vector.
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in DNN.
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in DNN.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :param device: str, ``"cpu"`` or ``"cuda:0"``.
    :return: A PyTorch model instance.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, dnn_hidden_units=(256, 256), cin_layer_size=(256, 128), cin_split_half=True, cin_activation='relu', l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_dnn=0, l2_reg_cin=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary', device='cpu'):
        super(xDeepFM, self).__init__(linear_feature_columns, dnn_feature_columns, l2_reg_linear=l2_reg_linear, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device)
        self.dnn_hidden_units = dnn_hidden_units
        # The DNN branch is active only when there are features AND hidden layers.
        self.use_dnn = ((len(dnn_feature_columns) > 0) and (len(dnn_hidden_units) > 0))
        if self.use_dnn:
            self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units, activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn, init_std=init_std, device=device)
            self.dnn_linear = nn.Linear(dnn_hidden_units[(- 1)], 1, bias=False).to(device)
            # Regularize DNN weights (skipping BatchNorm parameters) and the projection.
            self.add_regularization_weight(filter((lambda x: (('weight' in x[0]) and ('bn' not in x[0]))), self.dnn.named_parameters()), l2_reg_dnn)
            self.add_regularization_weight(self.dnn_linear.weight, l2_reg_dnn)
        self.cin_layer_size = cin_layer_size
        self.use_cin = ((len(self.cin_layer_size) > 0) and (len(dnn_feature_columns) > 0))
        if self.use_cin:
            field_num = len(self.embedding_dict)
            if (cin_split_half == True):
                # With split-half, only half of each intermediate layer's feature
                # maps feed the output; the last layer contributes fully.
                self.featuremap_num = ((sum(cin_layer_size[:(- 1)]) // 2) + cin_layer_size[(- 1)])
            else:
                self.featuremap_num = sum(cin_layer_size)
            self.cin = CIN(field_num, cin_layer_size, cin_activation, cin_split_half, l2_reg_cin, seed, device=device)
            self.cin_linear = nn.Linear(self.featuremap_num, 1, bias=False).to(device)
            self.add_regularization_weight(filter((lambda x: ('weight' in x[0])), self.cin.named_parameters()), l2_reg_cin)
        self.to(device)

    def forward(self, X):
        (sparse_embedding_list, dense_value_list) = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)
        linear_logit = self.linear_model(X)
        if self.use_cin:
            cin_input = torch.cat(sparse_embedding_list, dim=1)
            cin_output = self.cin(cin_input)
            cin_logit = self.cin_linear(cin_output)
        if self.use_dnn:
            dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
            dnn_output = self.dnn(dnn_input)
            dnn_logit = self.dnn_linear(dnn_output)
        # Combine the branch logits according to which branches are configured.
        # NOTE(review): these branches test the configured layer sizes rather
        # than use_cin/use_dnn; if dnn_feature_columns is empty while the layer
        # sizes are non-empty, cin_logit/dnn_logit are unbound here — presumably
        # callers always supply dnn_feature_columns. Confirm.
        if ((len(self.dnn_hidden_units) == 0) and (len(self.cin_layer_size) == 0)):
            final_logit = linear_logit
        elif ((len(self.dnn_hidden_units) == 0) and (len(self.cin_layer_size) > 0)):
            final_logit = (linear_logit + cin_logit)
        elif ((len(self.dnn_hidden_units) > 0) and (len(self.cin_layer_size) == 0)):
            final_logit = (linear_logit + dnn_logit)
        elif ((len(self.dnn_hidden_units) > 0) and (len(self.cin_layer_size) > 0)):
            final_logit = ((linear_logit + dnn_logit) + cin_logit)
        else:
            raise NotImplementedError
        y_pred = self.out(final_logit)
        return y_pred
|
class EncoderBase(object):
    """Common base for the feature encoders.

    Keeps a list of fitted transformations (``trans_ls``) that subclass
    ``transform`` implementations replay against a dataframe.
    """

    def __init__(self):
        self.trans_ls = []

    def reset(self):
        # Drop every previously fitted transformation.
        self.trans_ls = []

    def check_var(self, df):
        """Raise if any fitted target column is missing from ``df``.

        Each ``trans_ls`` entry is a 4-tuple whose third element is the
        target column name.
        """
        for _, _, target, _ in self.trans_ls:
            if target not in df.columns:
                raise Exception('The columns to be transformed are not in the dataframe.')
|
class LabelEncoder(EncoderBase):
    """Maps each distinct value of the target columns to an integer index,
    in order of first appearance (as returned by ``Series.unique``)."""

    def __init__(self):
        super(LabelEncoder, self).__init__()

    def fit(self, df, targets):
        """Learn a value -> index mapping for every column in ``targets``."""
        self.reset()
        for target in targets:
            uniques = df[target].unique()
            mapping = {value: idx for idx, value in enumerate(uniques)}
            self.trans_ls.append((target, mapping))

    def transform(self, df):
        """Return a copy of ``df`` with every fitted column mapped to indices.

        Values unseen during ``fit`` raise ``KeyError`` from the mapping lookup.
        """
        df_copy = df.copy(deep=True)
        for name, mapping in self.trans_ls:
            df_copy[name] = df_copy[name].map(lambda x: mapping[x])
        return df_copy
|
class NANEncoder(EncoderBase):
    """Simple missing-value imputer.

    Columns whose names start with ``continuous_`` are filled with the median;
    columns starting with ``discrete_`` are filled with the mode.
    """

    def __init__(self):
        warnings.warn("This is a simple application in order to perform the simpliest imputation. It is strongly suggest to use R's mice package instead. ")
        super().__init__()

    def fit(self, df, targets, method='simple_impute'):
        """Record, per target column, the scalar value used for imputation."""
        self.reset()
        for target in targets:
            if method == 'simple_impute':
                if target.startswith('continuous_'):
                    self.trans_ls.append((target, df[target].median()))
                elif target.startswith('discrete_'):
                    # Fix: Series.mode() returns a Series, not a scalar; take
                    # the first (smallest) mode so transform fills with a
                    # scalar. An all-NaN column has no mode -> record None.
                    modes = df[target].mode()
                    fill_value = modes.iloc[0] if not modes.empty else None
                    self.trans_ls.append((target, fill_value))

    def transform(self, df):
        """Return a copy of ``df`` with the recorded NaN fills applied."""
        df_copy = df.copy(deep=True)
        for target, value in self.trans_ls:
            if value is not None:
                df_copy[target] = df_copy[target].fillna(value)
        return df_copy
|
class ScaleEncoder(EncoderBase):
def __init__(self):
super().__init__()
def fit(self, df, targets, configs):
'\n :param df: the dataframe to transform\n :param targets: a list of variables to perform the scaling\n :param configs: the scaling methods\n '
for target in targets:
for (method, param) in configs:
if (method == 'std'):
self._fit_standardize(df, method, target, param)
elif (method == 'minmax'):
self._fit_min_max(df, method, target, param)
elif (method == 'trunc_upper'):
self._fit_trunc_upper_quantile(df, method, target, param)
elif (method == 'trunc_lower'):
self._fit_trunc_lower_quantile(df, method, target, param)
elif (method == 'trunc_lower_upper'):
self._fit_trunc_lowerupper_quantile(df, method, target, param)
else:
raise NotImplementedError()
def _fit_standardize(self, method, df, target, param):
mean = df[target].mean()
std = df[target].std()
param_dict = {'mean': mean, 'std': std}
name = (target + '_std')
self.trans_ls.append((method, target, name, param_dict))
def _fit_min_max(self, df, method, target, param):
min_value = df[target].min()
max_value = df[target].max()
param_dict = {'min': min_value, 'max': max_value}
name = (target + '_minmax')
self.trans_ls.append((method, target, name, param_dict))
def _fit_trunc_upper_quantile(self, df, method, target, param):
upper_quantile = df[target].quantile(param['upper_quantile'])
name = (target + '_upper_q')
self.trans_ls.append((method, target, name, upper_quantile))
def _fit_trunc_lower_quantile(self, df, method, target, param):
lower_quantile = df[target].quantile(param['lower_quantile'])
name = (target + '_lower_q')
self.trans_ls.append((method, target, name, lower_quantile))
def _fit_trunc_lowerupper_quantile(self, df, method, target, param):
lower_quantile = df[target].quantile(param['lower_quantile'])
upper_quantile = df[target].quantile(param['upper_quantile'])
param_dict = {'lower_quantile': lower_quantile, upper_quantile: 'upper_quantile'}
name = (target + '_lower_upper_q')
self.trans_ls.append((method, target, name, param_dict))
def transform(self, df):
    """Apply every fitted scaling, appending the results as new columns.

    :param df: dataframe to transform (left unmodified; a deep copy is used)
    :return: copy of ``df`` with one additional column per fitted transform
    :raises NotImplementedError: if an unknown method tag was registered

    Bug fix: the min-max branch computed ``(x + min) / (max - min)`` instead
    of the standard ``(x - min) / (max - min)``, so results were not scaled
    to [0, 1].
    """
    df_copy = df.copy(deep=True)
    for method, target, name, param in self.trans_ls:
        if method == 'std':
            df_copy[name] = (df_copy[target] - param['mean']) / param['std']
        elif method == 'minmax':
            df_copy[name] = (df_copy[target] - param['min']) / (param['max'] - param['min'])
        elif method == 'trunc_upper':
            # NOTE(review): NaN compares False, so missing values come out as
            # the cap value here — preserved from the original; confirm intended.
            df_copy[name] = df_copy[target].apply(lambda x: x if x <= param else param)
        elif method == 'trunc_lower':
            df_copy[name] = df_copy[target].apply(lambda x: x if x >= param else param)
        elif method == 'trunc_lower_upper':
            def trunc(x):
                # Clip into [lower_quantile, upper_quantile]; NaN falls through.
                if x >= param['upper_quantile']:
                    return param['upper_quantile']
                elif x <= param['lower_quantile']:
                    return param['lower_quantile']
                else:
                    return x
            df_copy[name] = df_copy[target].apply(trunc)
        else:
            raise NotImplementedError()
    return df_copy
|
class ClusteringEncoder(EncoderBase):
    """Fits clustering/mixture models on groups of columns and appends the
    predicted cluster id as a new feature in ``transform``.

    Bug fixes relative to the original:
    * ``_fit_meanshit`` removed ``quantile``/``n_samples`` from the *caller's*
      config instead of the local copy, so ``MeanShift(**config_cp)`` received
      unexpected keyword arguments (and the caller's dict was mutated).
    * ``transform`` called ``df.copy`` without parentheses (binding the method
      instead of copying) and never returned the result.
    """

    def __init__(self):
        super().__init__()

    def fit(self, df, targets, configs):
        """
        :param df: the dataframe to train the clustering algorithm.
        :param targets: a list of lists of variables (one model per group).
        :param configs: configurations for the clustering algorithms; each dict
            carries a 'method' key plus the estimator's keyword arguments.
        :raises NotImplementedError: for an unrecognized method name
        """
        self.reset()
        dispatch = {
            'kmeans': self._fit_kmeans,
            'meanshift': self._fit_meanshit,
            'affinitypropagation': self._fit_affinitypropagation,
            'spectralclustering': self._fit_spectralclustering,
            'agglomerativeclustering': self._fit_agglomerativeclustering,
            'DBSCAN': self._fit_DBSCAN,
            'OPTICS': self._fit_OPTICS,
            'birch': self._fit_birch,
            'gaussianmixture': self._fit_gaussianmixture,
            'latentdirichletallocation': self._fit_latentdirichletallocation,
        }
        for target in targets:
            for config in configs:
                method = config['method']
                if method not in dispatch:
                    raise NotImplementedError()
                dispatch[method](df, target, config)

    def _fit_generic(self, df, target, config, estimator_cls, tag):
        # Shared body for all single-estimator fits: strip the 'method' key,
        # fit on the target columns and register the fitted estimator.
        params = copy.deepcopy(config)
        del params['method']
        encoder = estimator_cls(**params).fit(df[target])
        name = '_'.join(target) + '_' + tag
        self.trans_ls.append((tag, name, target, encoder))

    def _fit_kmeans(self, df, target, config):
        self._fit_generic(df, target, config, KMeans, 'kmeans')

    def _fit_meanshit(self, df, target, config):
        # NOTE: method name kept as-is ('meanshit') for backward compatibility.
        config_cp = copy.deepcopy(config)
        # Bandwidth-estimation options are consumed here, not by MeanShift
        # itself.  Keywords are used because recent sklearn versions make
        # estimate_bandwidth's quantile/n_samples keyword-only.
        # NOTE(review): the bandwidth is estimated on the whole ``df`` while
        # the model is fit on ``df[target]`` — preserved from the original;
        # confirm intended.
        bandwidth = estimate_bandwidth(df, quantile=config_cp['quantile'],
                                       n_samples=config_cp['n_samples'])
        # Bug fix: remove the keys from the local copy (the original deleted
        # them from ``config``, leaving them in ``config_cp`` and mutating the
        # caller's configuration).
        del config_cp['quantile']
        del config_cp['n_samples']
        del config_cp['method']
        config_cp['bandwidth'] = bandwidth
        encoder = MeanShift(**config_cp).fit(df[target])
        name = '_'.join(target) + '_meanshift'
        self.trans_ls.append(('meanshift', name, target, encoder))

    def _fit_affinitypropagation(self, df, target, config):
        self._fit_generic(df, target, config, AffinityPropagation, 'affinitypropagation')

    def _fit_spectralclustering(self, df, target, config):
        self._fit_generic(df, target, config, SpectralClustering, 'spectralclustering')

    def _fit_agglomerativeclustering(self, df, target, config):
        self._fit_generic(df, target, config, AgglomerativeClustering, 'agglomerativeclustering')

    def _fit_DBSCAN(self, df, target, config):
        self._fit_generic(df, target, config, DBSCAN, 'DBSCAN')

    def _fit_OPTICS(self, df, target, config):
        self._fit_generic(df, target, config, OPTICS, 'OPTICS')

    def _fit_birch(self, df, target, config):
        self._fit_generic(df, target, config, Birch, 'birch')

    def _fit_gaussianmixture(self, df, target, config):
        self._fit_generic(df, target, config, GaussianMixture, 'gaussianmixture')

    def _fit_latentdirichletallocation(self, df, target, config):
        self._fit_generic(df, target, config, LatentDirichletAllocation, 'latentdirichletallocation')

    def transform(self, df):
        """Append one cluster-id column per fitted model and return the copy."""
        # Bug fix: was ``df.copy`` (bound method, never called).
        df_copy = df.copy(deep=True)
        known = ['kmeans', 'meanshift', 'affinitypropagation', 'spectralclustering',
                 'agglomerativeclustering', 'DBSCAN', 'OPTICS', 'birch',
                 'gaussianmixture', 'latentdirichletallocation']
        for method, name, target, encoder in self.trans_ls:
            if method in known:
                # NOTE(review): SpectralClustering / AgglomerativeClustering /
                # DBSCAN / OPTICS expose no ``predict``; calling this for those
                # estimators raises AttributeError — confirm intended usage.
                df_copy[name] = encoder.predict(df_copy[target])
            else:
                raise NotImplementedError()
        # Bug fix: the original never returned the transformed dataframe.
        return df_copy
|
class CategoryEncoder(EncoderBase):
    """Categorical-variable encoders backed by ``category_encoders``.

    Bug fixes relative to the original:
    * ``transform`` matched on tags ``'js'``, ``'leave-one-out'`` and ``'sum'``
      while ``fit`` registers ``'jsencoder'``, ``'leave_one_out'`` and
      ``'sum_encoder'`` — those encoders were silently never applied, and the
      ``'polynomial'`` tag was missing entirely.
    * supervised encoders crashed with ``df[None]`` when ``transform`` was
      called without ``y``; they now fall back to an X-only transform.
    * the corrected spelling ``'polynomial'`` is accepted as a config key;
      the original misspelling ``'polinomial'`` is kept for compatibility.
    * the ``y`` checks in ``transform`` are normalized to ``y is not None``.
    """

    def __init__(self):
        super().__init__()

    def fit(self, df, y, targets, configurations):
        """
        :param df: the data frame to be fitted; can be different from the transformed ones.
        :param y: the y variable
        :param targets: the variables to be transformed
        :param configurations: a list of (method, parameter) pairs, where method is one of
            ['woe', 'one-hot', 'ordinal', 'hash', 'target', 'catboost', 'glm', 'js',
             'leave_one_out', 'polynomial', 'sum', 'thermo'] and parameter is a dict
            pertaining to each encoding method
        """
        self.reset()
        for target in targets:
            for config in configurations:
                self._fit_one(df, y, target, config)

    def _fit_one(self, df, y, target, config):
        # Dispatch a single (method, parameter) config to its _fit_* helper.
        method, parameter = config[0], config[1]
        if method == 'woe':
            self._fit_woe(df, y, target)
        elif method == 'one-hot':
            self._fit_one_hot(df, target)
        elif method == 'ordinal':
            self._fit_ordinal(df, target)
        elif method == 'hash':
            self._fit_hash(df, target)
        elif method == 'target':
            self._fit_target(df, y, target, parameter)
        elif method == 'catboost':
            self._fit_catboost(df, y, target, parameter)
        elif method == 'glm':
            self._fit_glm(df, y, target, parameter)
        elif method == 'js':
            self._fit_js(df, y, target, parameter)
        elif method == 'leave_one_out':
            self._fit_leave_one_out(df, y, target, parameter)
        elif method in ('polynomial', 'polinomial'):
            # 'polinomial' (original typo) kept for backward compatibility.
            self._fit_polynomial(df, y, target, parameter)
        elif method == 'sum':
            self._fit_sum(df, y, target, parameter)
        elif method == 'thermo':
            self._fit_thermo(df, y, target, parameter)
        else:
            logging.error('The method you input is %s, and is not supported.' % method)
            raise NotImplementedError()

    def _fit_polynomial(self, df, y, target, parameter):
        poly_encoder = ce.PolynomialEncoder()
        poly_encoder.fit(df[target].map(to_str), df[y])
        name = [('continuous_' + remove_continuous_discrete_prefix(x) + '_poly') for x in poly_encoder.get_feature_names()]
        self.trans_ls.append(('polynomial', name, target, poly_encoder))

    def _fit_sum(self, df, y, target, parameter):
        sum_encoder = ce.SumEncoder()
        sum_encoder.fit(df[target].map(to_str))
        name = [('continuous_' + remove_continuous_discrete_prefix(x) + '_sum') for x in sum_encoder.get_feature_names()]
        self.trans_ls.append(('sum_encoder', name, target, sum_encoder))

    def _fit_js(self, df, y, target, parameter):
        js_encoder = ce.JamesSteinEncoder()
        js_encoder.fit(df[target].map(to_str), df[y])
        name = [('continuous_' + remove_continuous_discrete_prefix(x) + '_js') for x in js_encoder.get_feature_names()]
        self.trans_ls.append(('jsencoder', name, target, js_encoder))

    def _fit_leave_one_out(self, df, y, target, parameter):
        loo_encoder = ce.LeaveOneOutEncoder()
        loo_encoder.fit(df[target].map(to_str), df[y])
        name = [('continuous_' + remove_continuous_discrete_prefix(x) + '_leave_one_out') for x in loo_encoder.get_feature_names()]
        self.trans_ls.append(('leave_one_out', name, target, loo_encoder))

    def _fit_catboost(self, df, y, target, parameter):
        cat_encoder = ce.CatBoostEncoder()
        cat_encoder.fit(df[target].map(to_str), df[y])
        name = [('continuous_' + remove_continuous_discrete_prefix(x) + '_catboost') for x in cat_encoder.get_feature_names()]
        self.trans_ls.append(('catboost', name, target, cat_encoder))

    def _fit_glm(self, df, y, target, parameter):
        glm_encoder = ce.GLMMEncoder()
        glm_encoder.fit(df[target].map(to_str), df[y])
        name = [('continuous_' + remove_continuous_discrete_prefix(x) + '_glm') for x in glm_encoder.get_feature_names()]
        self.trans_ls.append(('glm', name, target, glm_encoder))

    def _fit_hash(self, df, target):
        hash_encoder = ce.HashingEncoder()
        hash_encoder.fit(df[target].map(to_str))
        name = [('continuous_' + remove_continuous_discrete_prefix(x) + '_hash') for x in hash_encoder.get_feature_names()]
        self.trans_ls.append(('hash', name, target, hash_encoder))

    def _fit_ordinal(self, df, target):
        ordinal_encoder = ce.OrdinalEncoder()
        ordinal_encoder.fit(df[target].map(to_str))
        name = [('continuous_' + remove_continuous_discrete_prefix(x) + '_ordinal') for x in ordinal_encoder.get_feature_names()]
        self.trans_ls.append(('ordinal', name, target, ordinal_encoder))

    def _fit_target(self, df, y, target, parameter):
        smoothing = parameter['smoothing']
        target_encoder = ce.TargetEncoder(smoothing=smoothing)
        target_encoder.fit(df[target].map(to_str), df[y])
        name = [('continuous_' + remove_continuous_discrete_prefix(x) + '_smooth_' + str(smoothing) + '_target') for x in target_encoder.get_feature_names()]
        self.trans_ls.append(('target', name, target, target_encoder))

    def _fit_one_hot(self, df, target):
        one_hot_encoder = ce.OneHotEncoder()
        target_copy = df[target].copy(deep=True)
        target_copy = target_copy.map(to_str)
        one_hot_encoder.fit(target_copy)
        name = [(x + '_one_hot') for x in one_hot_encoder.get_feature_names()]
        self.trans_ls.append(('one-hot', name, target, one_hot_encoder))

    def _fit_woe(self, df, y, target):
        woe_encoder = ce.woe.WOEEncoder(cols=target)
        woe_encoder.fit(df[target].map(to_str), df[y].map(to_str))
        name = 'continuous_' + remove_continuous_discrete_prefix(target) + '_woe'
        self.trans_ls.append(('woe', name, target, woe_encoder))

    def _fit_thermo(self, df, y, target, parameter):
        encoder = ThermoEncoder()
        encoder.fit(df[target].map(to_str))
        name = 'continuous_' + remove_continuous_discrete_prefix(target) + '_thermo'
        self.trans_ls.append(('thermo', name, target, encoder))

    def transform(self, df, y=None):
        """
        :param df: The data frame to be transformed.
        :param y: The name of the y variable; used for leave-one-out style
            transforms (WOE, target, catboost, ...) when provided.
        :return: The transformed dataset
        """
        for _, _, target, _ in self.trans_ls:
            if target not in df.columns:
                raise Exception('The columns to be transformed are not in the dataframe.')
        result_df = df.copy(deep=True)
        # Tags below must exactly match the tags registered by the _fit_*
        # helpers (bug fix: 'jsencoder', 'leave_one_out', 'sum_encoder' and
        # 'polynomial' were mismatched or missing before).
        supervised = ['woe', 'target', 'catboost', 'glm', 'jsencoder',
                      'leave_one_out', 'sum_encoder', 'polynomial', 'thermo']
        unsupervised = ['one-hot', 'hash', 'ordinal']
        for method, name, target, encoder in self.trans_ls:
            xs = df[target].map(to_str)
            if method in supervised:
                # NOTE(review): ThermoEncoder was fit X-only above — confirm
                # its transform accepts a y argument as the original assumed.
                if y is not None:
                    result_df[name] = encoder.transform(xs, df[y])
                else:
                    result_df[name] = encoder.transform(xs)
            elif method in unsupervised:
                result_df[name] = encoder.transform(xs)
        return result_df
|
class DiscreteEncoder(EncoderBase):
    """Bins continuous variables into integer-labelled intervals."""

    def __init__(self):
        super(DiscreteEncoder, self).__init__()

    def fit(self, df, targets, configurations):
        """
        :param df: the dataframe to be fitted; can be different from the transformed one
        :param targets: the variables to be transformed
        :param configurations: a list of (method, parameter) pairs where method is
            'quantile' or 'uniform' and parameter contains a key 'nbins'
        """
        self.reset()
        for target in targets:
            for method, parameter in configurations:
                self._fit_one(df, target, method, parameter['nbins'])

    def _fit_one(self, df, target, method, nbins):
        # Compute bin edges for one column and register them for transform().
        if method == 'uniform':
            edges = self._get_uniform_intervals(df, target, nbins)
            suffix = '_uniform_dis_encoder'
        elif method == 'quantile':
            edges = self._get_quantile_intervals(df, target, nbins)
            suffix = '_quantile_dis_encoder'
        else:
            raise Exception('Not Implemented Yet')
        name = 'discrete_' + remove_continuous_discrete_prefix(target) + '_nbins_' + str(nbins) + suffix
        self.trans_ls.append((target, name, edges))

    def transform(self, df):
        """Return a copy of ``df`` with one labelled-bin column per fitted target."""
        for target, _, _ in self.trans_ls:
            if target not in df.columns:
                raise Exception('The columns to be transformed are not in the dataframe.')
        result = df.copy(deep=True)
        for target, name, edges in self.trans_ls:
            binned = result[target].map(lambda v: get_interval(v, edges))
            result[name] = encode_label(binned)
        return result

    def _get_uniform_intervals(self, df, target, nbins):
        # Ignore +/-inf entries when determining the value range.
        column = df[target]
        lo = column[column != -np.inf].min()
        hi = column[column != np.inf].max()
        return get_uniform_interval(lo, hi, nbins)

    def _get_quantile_intervals(self, df, target, nbins):
        return get_quantile_interval(df[target], nbins)
|
class UnaryContinuousVarEncoder(EncoderBase):
    """Registers element-wise unary transforms (trig, log, power, Box-Cox,
    Yeo-Johnson, ...) and applies them as new columns in ``transform``."""

    def __init__(self):
        super(UnaryContinuousVarEncoder, self).__init__()

    def fit(self, targets, config):
        """Register one encoder per (target, method) combination.

        Unknown method names are silently ignored, matching the original
        sequential ``if`` dispatch.
        """
        self.reset()
        simple = {
            'sin': self._fit_sin, 'cos': self._fit_cos, 'tan': self._fit_tan,
            'log': self._fit_log, 'exp': self._fit_exp, 'abs': self._fit_abs,
            'neg': self._fit_neg, 'inv': self._fit_inv, 'sqrt': self._fit_sqrt,
        }
        parameterized = {
            'power': self._fit_power,
            'box_cox': self._fit_box_cox,
            'yeo_johnson': self._fit_yeo_johnson,
        }
        for target in targets:
            for method, parameter in config:
                if method in simple:
                    simple[method](target)
                elif method in parameterized:
                    parameterized[method](target, parameter)

    def transform(self, df):
        """Apply each registered unary encoder, appending the result columns."""
        result = df.copy(deep=True)
        for _method, column, target, fn in self.trans_ls:
            result[column] = result[target].apply(fn)
        return result

    def _register_simple(self, tag, target, fn):
        # Common registration path for parameter-free element-wise transforms.
        column = 'continuous_' + remove_continuous_discrete_prefix(target) + '_' + tag
        self.trans_ls.append((tag, column, target, fn))

    def _fit_box_cox(self, target, parameter):
        # NOTE: column name intentionally matches the original scheme
        # (no 'continuous_' prefix for box_cox/yeo_johnson).
        name = target + str(parameter['lambda']) + '_box_cox'

        def encoder(x):
            lam = parameter['lambda']
            if x <= 0:
                # Box-Cox is only defined for positive inputs.
                warnings.warn('Box Cox transformation only applies to positive numbers! Returns 0!')
                return 0
            return ((x ** lam) - 1) / lam if lam != 0 else np.log(x)

        self.trans_ls.append(('box_cox', name, target, encoder))

    def _fit_yeo_johnson(self, target, parameter):
        lam = parameter['lambda']
        name = target + str(lam) + '_yeo_johnson'

        def encoder(x):
            # Standard Yeo-Johnson piecewise definition; x == 0 takes the
            # non-negative branch, as in the original ordering of conditions.
            if x >= 0:
                return np.log(x + 1) if lam == 0 else (((x + 1) ** lam) - 1) / lam
            return -np.log(-x + 1) if lam == 2 else -((((-x) + 1) ** (2 - lam)) - 1) / (2 - lam)

        self.trans_ls.append(('yeo_johnson', name, target, encoder))

    def _fit_power(self, target, parameter):
        order = parameter['order']
        column = 'continuous_' + remove_continuous_discrete_prefix(target) + '_power_' + str(order)
        self.trans_ls.append(('power', column, target, lambda v: np.power(v, order)))

    def _fit_sin(self, target):
        self._register_simple('sin', target, lambda v: np.sin(v))

    def _fit_cos(self, target):
        self._register_simple('cos', target, lambda v: np.cos(v))

    def _fit_tan(self, target):
        self._register_simple('tan', target, lambda v: np.tan(v))

    def _fit_log(self, target):
        self._register_simple('log', target, lambda v: np.log(v))

    def _fit_exp(self, target):
        self._register_simple('exp', target, lambda v: np.exp(v))

    def _fit_abs(self, target):
        self._register_simple('abs', target, lambda v: np.abs(v))

    def _fit_neg(self, target):
        self._register_simple('neg', target, lambda v: -v)

    def _fit_inv(self, target):
        self._register_simple('inv', target, lambda v: np.divide(1, v))

    def _fit_sqrt(self, target):
        self._register_simple('sqrt', target, lambda v: np.sqrt(v))
|
class BinaryContinuousVarEncoder(EncoderBase):
    """Registers pairwise arithmetic transforms (add/sub/mul/div) of two
    continuous columns and applies them as new columns in ``transform``.

    Consistency fix: ``fit`` now calls ``self.reset()`` first, like every
    other encoder in this module; previously repeated ``fit`` calls kept
    accumulating transforms from earlier fits.
    """

    def __init__(self):
        super(BinaryContinuousVarEncoder, self).__init__()

    def fit(self, targets_pairs, config):
        """
        :param targets_pairs: iterable of (column1, column2) pairs
        :param config: iterable of method names among 'add', 'sub', 'mul', 'div'
            (unknown names are silently ignored, as in the original)
        """
        self.reset()  # consistency with the other EncoderBase subclasses
        for target1, target2 in targets_pairs:
            for method in config:
                if method == 'add':
                    self._fit_add(target1, target2)
                if method == 'sub':
                    self._fit_sub(target1, target2)
                if method == 'mul':
                    self._fit_mul(target1, target2)
                if method == 'div':
                    self._fit_div(target1, target2)

    def transform(self, df):
        """Apply every registered pairwise operation, appending result columns."""
        result = df.copy(deep=True)
        for method, new_name, target1, target2, encoder in self.trans_ls:
            result[new_name] = result.apply(lambda row: encoder(row[target1], row[target2]), axis=1)
        return result

    def _new_name(self, target1, target2, suffix):
        # Shared naming scheme: continuous_<t1>_<t2>_<op>
        return ('continuous_' + remove_continuous_discrete_prefix(target1) + '_'
                + remove_continuous_discrete_prefix(target2) + '_' + suffix)

    def _fit_add(self, target1, target2):
        self.trans_ls.append(('add', self._new_name(target1, target2, 'add'),
                              target1, target2, lambda x, y: np.add(x, y)))

    def _fit_sub(self, target1, target2):
        self.trans_ls.append(('sub', self._new_name(target1, target2, 'sub'),
                              target1, target2, lambda x, y: x - y))

    def _fit_mul(self, target1, target2):
        self.trans_ls.append(('mul', self._new_name(target1, target2, 'mul'),
                              target1, target2, lambda x, y: np.multiply(x, y)))

    def _fit_div(self, target1, target2):
        self.trans_ls.append(('div', self._new_name(target1, target2, 'div'),
                              target1, target2, lambda x, y: np.divide(x, y)))
|
class BoostTreeEncoder(EncoderBase):
    """Encodes feature groups by the leaf each row falls into for every tree
    of a fitted gradient-boosting model (xgboost or lightgbm).

    Bug fixes relative to the original:
    * ``__init__`` inverted the ``nthread`` logic: a user-supplied value was
      replaced by the *uncalled* ``cpu_count`` function object, while the
      default stayed ``None``.
    * the lightgbm branch read the number of boosting rounds from
      ``num_threads`` instead of ``num_boost_round``.
    * ``_boost_transform`` enumerated tree ids from the ``Node`` column
      instead of ``Tree``, so most trees were skipped or duplicated.
    * the bookkeeping keys (``num_boost_round``, ``pos``) are stripped from
      the parameter dict before it is passed to the trainers; the no-op
      ``df.fillna(np.NaN)`` (result discarded) was removed.
    """

    def __init__(self, nthread=None):
        super(BoostTreeEncoder, self).__init__()
        # Default to all cores when no thread count is given.
        self.nthread = nthread if nthread else cpu_count()

    def fit(self, df, y, targets_list, config):
        """
        :param df: training dataframe
        :param y: name of the label column in ``df``
        :param targets_list: list of feature-name groups; one model per group
        :param config: list of (method, parameter); method 'xgboost' or 'lightgbm'
        """
        self.reset()
        for method, parameter in config:
            if method == 'xgboost':
                self._fit_xgboost(df, y, targets_list, parameter)
            if method == 'lightgbm':
                self._fit_lightgbm(df, y, targets_list, parameter)

    def _fit_xgboost(self, df, y, targets_list, parameter):
        for targets in targets_list:
            params = copy.deepcopy(parameter)
            if 'nthread' not in params:
                params['nthread'] = self.nthread
            if 'objective' not in params:
                # Binary label -> logistic; otherwise multiclass softmax.
                # NOTE(review): 'multi:softmax' also requires 'num_class' in
                # the parameters — confirm callers supply it.
                params['objective'] = 'binary:logistic' if len(np.unique(df[y])) == 2 else 'multi:softmax'
            # Bookkeeping keys are not xgboost training parameters.
            num_rounds = params.pop('num_boost_round')
            pos = params.pop('pos')
            dtrain = xgb.DMatrix(df[list(targets)], label=df[y])
            model = xgb.train(params, dtrain, num_rounds)
            name_remove = [remove_continuous_discrete_prefix(x) for x in targets]
            name = 'discrete_' + '_'.join(name_remove)
            self.trans_ls.append(('xgb', name, targets, model, pos))

    def _fit_lightgbm(self, df, y, targets_list, parameter):
        for targets in targets_list:
            params = copy.deepcopy(parameter)
            if 'num_threads' not in params:
                params['num_threads'] = self.nthread
            if 'objective' not in params:
                params['objective'] = 'binary' if len(np.unique(df[y])) == 2 else 'multiclass'
            # Bug fix: the round count previously (and wrongly) came from
            # params['num_threads'].
            num_rounds = params.pop('num_boost_round')
            pos = params.pop('pos')
            dtrain = lgb.Dataset(df[list(targets)], label=df[y])
            model = lgb.train(params, dtrain, num_rounds)
            name_remove = [remove_continuous_discrete_prefix(x) for x in targets]
            name = 'discrete_' + '_'.join(name_remove)
            self.trans_ls.append(('lgb', name, targets, model, pos))

    def transform(self, df):
        """Append one leaf-index column per (model, tree) and return the result."""
        result = df.copy(deep=True)
        trans_results = [result]
        for method, name, targets, model, pos in self.trans_ls:
            if method == 'xgb':
                tree_infos: pd.DataFrame = model.trees_to_dataframe()
            elif method == 'lgb':
                # Normalize the lightgbm dump to xgboost's dataframe layout.
                tree_infos = tree_to_dataframe_for_lightgbm(model).get()
            else:
                raise Exception('Not Implemented Yet')
            trans_results.append(self._boost_transform(result[list(targets)], method, name, pos, tree_infos))
        return pd.concat(trans_results, axis=1)

    @staticmethod
    def _transform_byeval(row, leaf_condition):
        # Evaluate each leaf's recorded split conditions against the row; the
        # first leaf whose condition holds is the row's leaf id.
        for key in leaf_condition.keys():
            if eval(leaf_condition[key]):
                return key
        return np.NaN

    def _boost_transform(self, df, method, name, pos, tree_infos):
        # Bug fix: iterate distinct *tree* ids (the 'Tree' column); the
        # original enumerated the 'Node' column.
        tree_ids = tree_infos['Tree'].drop_duplicates().tolist()
        tree_ids.sort()
        for tree_id in tree_ids:
            tree_info = tree_infos[tree_infos['Tree'] == tree_id][['Node', 'Feature', 'Split', 'Yes', 'No', 'Missing']].copy(deep=True)
            # Child references look like '<tree>-<node>'; strip the tree prefix.
            for col in ('Yes', 'No', 'Missing'):
                tree_info[col] = tree_info[col].apply(lambda v: str(v).replace(str(tree_id) + '-', ''))
            leaf_nodes = tree_info[tree_info['Feature'] == 'Leaf']['Node'].drop_duplicates().tolist()
            encoder_dict = {leaf: get_booster_leaf_condition(leaf, [], tree_info) for leaf in leaf_nodes}
            if not encoder_dict:
                continue
            feature_name = '_'.join([name, method, 'tree_' + str(tree_id), pos])
            df.columns = [str(col) for col in list(df.columns)]
            add_feature = pd.DataFrame(df.apply(self._transform_byeval, leaf_condition=encoder_dict, axis=1), columns=[feature_name])
            df = pd.concat([df, add_feature], axis=1)
        return df
|
class AnomalyScoreEncoder(EncoderBase):
    """Adds inlier/outlier predictions from IsolationForest or LOF as features.

    Bug fixes relative to the original:
    * ``__init__`` inverted the ``nthread`` logic and stored the *uncalled*
      ``cpu_count`` function object (same defect as BoostTreeEncoder).
    * ``LocalOutlierFactor`` only exposes ``predict`` for new data when
      constructed with ``novelty=True``; the original construction made
      ``transform`` raise AttributeError.
    """

    def __init__(self, nthread=None):
        super(AnomalyScoreEncoder, self).__init__()
        # Default to all cores when no thread count is given.
        self.nthread = nthread if nthread else cpu_count()

    def fit(self, df, y, targets_list, config):
        """
        :param df: training dataframe
        :param y: label column name (unused; kept for interface symmetry)
        :param targets_list: list of feature-name groups; one model per group
        :param config: list of (method, parameter); method 'IsolationForest' or 'LOF'
        """
        self.reset()
        for method, parameter in config:
            if method == 'IsolationForest':
                self._fit_isolationForest(df, y, targets_list, parameter)
            if method == 'LOF':
                self._fit_LOF(df, y, targets_list, parameter)

    def transform(self, df):
        """Append one prediction column per fitted detector and return the copy."""
        result = df.copy(deep=True)
        for method, name, targets, model in self.trans_ls:
            result[name + '_' + method] = model.predict(df[targets])
        return result

    def _fit_isolationForest(self, df, y, targets_list, parameter):
        for targets in targets_list:
            model = IsolationForest(n_jobs=self.nthread)
            model.fit(X=df[targets])
            name_remove = [remove_continuous_discrete_prefix(x) for x in targets]
            name = 'discrete_' + '_'.join(name_remove)
            self.trans_ls.append(('IsolationForest', name, targets, model))

    def _fit_LOF(self, df, y, targets_list, parameter):
        for targets in targets_list:
            # novelty=True is required for LocalOutlierFactor.predict() to
            # exist for out-of-sample data.
            model = LocalOutlierFactor(n_jobs=self.nthread, novelty=True)
            model.fit(X=df[targets])
            name_remove = [remove_continuous_discrete_prefix(x) for x in targets]
            name = 'discrete_' + '_'.join(name_remove)
            self.trans_ls.append(('LOF', name, targets, model))
|
class GroupbyEncoder(EncoderBase):
    """Adds group-level aggregates of target columns (e.g. mean per group)
    as new features via a left join."""

    def __init__(self):
        super(GroupbyEncoder, self).__init__()

    def fit(self, df, targets, groupby_op_list):
        """
        :param df: dataframe used to compute the aggregates
        :param targets: columns to aggregate
        :param groupby_op_list: iterable of (groupby_columns, operations, param)
            triples; ``param`` is currently unused
        """
        self.reset()
        for target in targets:
            for groupby, operations, param in groupby_op_list:
                for operation in operations:
                    aggregated = self._fit_one(df, target, groupby, operation)
                    column = target + '_groupby_' + '_'.join(groupby) + '_op_' + operation
                    self.trans_ls.append((groupby, aggregated.rename(columns={target: column})))

    def transform(self, df):
        """Left-join every stored aggregate table onto a copy of ``df``."""
        result = df.copy(deep=True)
        for groupby, aggregated in self.trans_ls:
            result = result.merge(aggregated, on=groupby, how='left')
        return result

    def _fit_one(self, df, target, groupby_vars, operation):
        # One aggregate table: groupby keys + the aggregated target column.
        return df.groupby(groupby_vars, as_index=False).agg({target: operation})
|
def get_interval(x, sorted_intervals):
    """Return the index of the half-open bin ``[b_i, b_{i+1})`` containing ``x``.

    Sentinel return codes: -1 for NaN, -2 for +inf, -3 for -inf, and -4 when
    ``x`` lies below the first boundary.  Values at or above the last boundary
    fall into the final (unbounded) bin.

    Bug fix: the original appended ``np.inf`` to the *caller's* list on every
    call, so repeated calls (e.g. via ``Series.map``) grew the boundary list
    and shifted the index of the top bin.  The boundaries are now extended on
    a local copy; the input list is left untouched.

    :param x: scalar value to locate
    :param sorted_intervals: ascending bin boundaries (not modified)
    :return: bin index, or a negative sentinel code
    """
    if pd.isnull(x):
        return -1
    if x == np.inf:
        return -2
    if x == -np.inf:
        return -3
    bounds = list(sorted_intervals) + [np.inf]  # local copy: never mutate input
    if x < bounds[0] or x >= bounds[-1]:
        return -4
    interval = 0
    while interval < len(bounds) - 1:
        if bounds[interval] <= x < bounds[interval + 1]:
            return interval
        interval += 1
|
def encode_label(x):
    """Map the values of a Series to dense integer labels ``0..k-1``, assigned
    in lexicographic order of the values' string representations.

    :param x: pandas Series (not modified)
    :return: Series of integer labels
    """
    values = x.copy(deep=True)
    distinct = sorted({str(v) for v in values.astype(str).unique()})
    label_of = dict(zip(distinct, range(len(distinct))))
    return values.map(lambda v: label_of[str(v)])
|
def get_uniform_interval(minimum, maximum, nbins):
    """Return ``nbins + 1`` equally spaced boundaries from minimum to maximum.

    The exact ``minimum`` and ``maximum`` are used as the first and last
    boundaries, so no floating-point drift occurs at the endpoints.
    """
    width = float(maximum - minimum) / nbins
    inner = [minimum + width * step for step in range(1, nbins)]
    return [minimum] + inner + [maximum]
|
def get_quantile_interval(data, nbins):
    """Return ``nbins + 1`` quantile boundaries (including min and max) of ``data``.

    :param data: pandas Series
    :param nbins: number of bins
    """
    probs = get_uniform_interval(0, 1, nbins)
    return data.quantile(probs).tolist()
|
def to_str(x):
    """Stringify ``x``, mapping missing values (None/NaN/NaT) to '#NA#'."""
    return '#NA#' if pd.isnull(x) else str(x)
|
def get_booster_leaf_condition(leaf_node, conditions, tree_info: pd.DataFrame):
    """Walk from a leaf up to the root of a single tree, collecting the split
    conditions (as Python expressions over ``row[...]``) that route a row to
    that leaf, and return them joined with ``and``.

    :param leaf_node: node id of the leaf (or the current node while recursing)
    :param conditions: accumulated condition strings (pass ``[]`` at top level)
    :param tree_info: one tree's rows with columns Node/Feature/Split/Yes/No/Missing
    :return: a string usable with ``eval`` against a mapping named ``row``

    Bug fix: when the node was the parent's "No" child and missing values are
    routed that way, the original emitted ``row[...] == np.NaN`` — always
    False, because NaN never compares equal.  Both branches now emit
    ``is_missing(row[...])``, consistent with the "Yes" branch.
    """
    start_node_info = tree_info[tree_info['Node'] == leaf_node]
    if start_node_info['Feature'].tolist()[0] == 'Leaf':
        # Fresh walk: ignore whatever was passed in for the starting leaf.
        conditions = []
    # Determine whether this node is its parent's "Yes" (<= split) child.
    if str(leaf_node) in tree_info['Yes'].drop_duplicates().tolist():
        father_node_info = tree_info[tree_info['Yes'] == str(leaf_node)]
        fathers_left = True
    else:
        father_node_info = tree_info[tree_info['No'] == str(leaf_node)]
        fathers_left = False
    father_node_id = father_node_info['Node'].tolist()[0]
    split_value = father_node_info['Split'].tolist()[0]
    split_feature = father_node_info['Feature'].tolist()[0]
    if fathers_left:
        add_condition = ["row['" + str(split_feature) + "'] <= " + str(split_value)]
        if father_node_info['Yes'].tolist()[0] == father_node_info['Missing'].tolist()[0]:
            # Missing values are routed down this branch too.
            add_condition.append("is_missing(row['" + str(split_feature) + "'])")
    else:
        add_condition = ["row['" + str(split_feature) + "'] > " + str(split_value)]
        if father_node_info['No'].tolist()[0] == father_node_info['Missing'].tolist()[0]:
            # Bug fix: was "row[...] == np.NaN" (always False).
            add_condition.append("is_missing(row['" + str(split_feature) + "'])")
    add_condition = '(' + ' or '.join(add_condition) + ')'
    conditions.append(add_condition)
    if father_node_info['Node'].tolist()[0] == 0:
        # Reached the root: the collected conditions fully describe the path.
        return ' and '.join(conditions)
    return get_booster_leaf_condition(father_node_id, conditions, tree_info)
|
class tree_to_dataframe_for_lightgbm(object):
    """Converts a fitted LightGBM model's JSON dump into a dataframe with the
    same column layout as ``xgboost.Booster.trees_to_dataframe`` (columns
    Node/Feature/Split/Yes/No/Missing plus Tree), so both model types can
    share ``BoostTreeEncoder._boost_transform``.
    """
    def __init__(self, model):
        # model: a fitted lightgbm Booster; dump_model() yields its JSON tree
        # structure and feature-name list.
        self.json_model = model.dump_model()
        self.features = self.json_model['feature_names']
    def get_root_nodes_count(self, tree, max_id):
        """Return the largest split_index found in the (sub)tree, recursively.

        LightGBM numbers leaves separately from split nodes, so the caller
        uses ``max + 1`` as an offset when renumbering leaf ids.
        """
        tree_node_id = tree.get('split_index')
        if tree_node_id:
            if (tree_node_id > max_id):
                max_id = tree_node_id
        if tree.get('left_child'):
            left = self.get_root_nodes_count(tree.get('left_child'), max_id)
            if (left > max_id):
                max_id = left
        else:
            # No left child: use an empty list as a falsy placeholder.
            left = []
        if tree.get('right_child'):
            right = self.get_root_nodes_count(tree.get('right_child'), max_id)
            if (right > max_id):
                max_id = right
        else:
            right = []
        if ((not left) and (not right)):
            # Both children absent (leaf): keep the running maximum.
            max_id = max_id
        return max_id
    def get(self):
        """Build and return the concatenated per-tree dataframe."""
        tree_dataframe = []
        for tree in self.json_model['tree_info']:
            tree_id = tree['tree_index']
            tree = tree['tree_structure']
            # Offset added to each leaf_index so leaf ids don't collide with
            # split-node ids within the same tree.
            root_nodes_count = (self.get_root_nodes_count(tree, 0) + 1)
            tree_df = self._lightGBM_trans(tree, pd.DataFrame(), tree_id, root_nodes_count).sort_values('Node').reset_index(drop=True)
            tree_df['Tree'] = tree_id
            tree_dataframe.append(tree_df)
        return pd.concat(tree_dataframe, axis=0)
    def _lightGBM_trans(self, tree, tree_dataFrame, tree_id, root_nodes_count):
        """Depth-first walk emitting one dataframe row per node (split or leaf)."""
        tree_node_id = tree.get('split_index')
        threshold = tree.get('threshold')
        default_left = tree.get('default_left')
        if (tree_node_id is not None):
            # Split node: record the feature name and threshold.
            data = {'Node': tree_node_id, 'Feature': self.features[tree.get('split_feature')], 'Split': threshold}
            yes_id = tree.get('left_child').get('split_index')
            if (yes_id is None):
                # Left child is a leaf: renumber it with the leaf offset.
                yes_id = (tree.get('left_child').get('leaf_index') + root_nodes_count)
            tree_dataFrame = self._lightGBM_trans(tree.get('left_child'), tree_dataFrame, tree_id, root_nodes_count)
            no_id = tree.get('right_child').get('split_index')
            if (no_id is None):
                no_id = (tree.get('right_child').get('leaf_index') + root_nodes_count)
            tree_dataFrame = self._lightGBM_trans(tree.get('right_child'), tree_dataFrame, tree_id, root_nodes_count)
            # Missing values follow the default direction recorded by LightGBM.
            if default_left:
                missing_id = yes_id
            else:
                missing_id = no_id
            # '_'.join of a one-element list is just str(id); kept as-is.
            (data['Yes'], data['No'], data['Missing']) = ('_'.join([str(yes_id)]), '_'.join([str(no_id)]), '_'.join([str(missing_id)]))
        else:
            # Leaf node: no split metadata; id shifted past all split ids.
            data = {'Node': (root_nodes_count + tree.get('leaf_index')), 'Feature': 'Leaf', 'Split': None, 'Yes': None, 'No': None, 'Missing': None}
        row = pd.DataFrame.from_dict(data, orient='index').T
        tree_dataFrame = pd.concat([tree_dataFrame, row])
        return tree_dataFrame
|
class StandardizeEncoder(EncoderBase):
    """Adds z-score standardized copies of the given columns."""

    def __init__(self):
        super(StandardizeEncoder, self).__init__()

    def fit(self, df, targets):
        """Record per-column mean/std from ``df`` for later standardization."""
        self.reset()
        for target in targets:
            new_name = 'continuous_' + remove_continuous_discrete_prefix(target) + '_standardized'
            self.trans_ls.append((target, df[target].mean(), df[target].std(), new_name))

    def transform(self, df):
        """Return a copy of ``df`` with one standardized column per fitted target."""
        result = df.copy(deep=True)
        for target, mean, std, new_name in self.trans_ls:
            result[new_name] = (result[target] - mean) / std
        return result
|
class InteractionEncoder():
    """Adds multiplicative interaction ("cross") features of orders 2, 3 and/or 4."""

    def __init__(self):
        self.level = list()
        self.targets = None

    def fit(self, targets, level='all'):
        """Record the columns to cross and the interaction orders to build.

        :param targets: columns to combine
        :param level: 'all' (orders 2, 3 and 4) or an explicit list of orders
        """
        self.level = [2, 3, 4] if level == 'all' else level
        self.targets = targets

    def transform(self, df):
        """Return a copy of ``df`` with one product column per ordered tuple of
        targets, for every configured order (orders outside 2-4 are ignored,
        matching the original behavior)."""
        result = df.copy(deep=True)
        for order in self.level:
            if order not in (2, 3, 4):
                continue
            # Build every ordered tuple of length ``order`` (with repetition),
            # in the same sequence as the original nested loops.
            tuples = [[t] for t in self.targets]
            for _ in range(order - 1):
                tuples = [combo + [t] for combo in tuples for t in self.targets]
            for combo in tuples:
                stripped = [remove_continuous_discrete_prefix(t) for t in combo]
                new_name = 'continuous_' + '_'.join(stripped) + '_cross'
                product = result[combo[0]]
                for t in combo[1:]:
                    product = product * result[t]
                result[new_name] = product
        return result
|
class DimReducEncoder():
    """Dimensionality-reduction feature encoder (PCA / t-SNE).

    ``fit`` trains one reducer per (target, config entry) pair; ``transform``
    appends the reduced components as new columns via ``pd.concat``.

    NOTE(review): t-SNE has no ``transform`` method, so at transform time the
    ``embedding_`` computed during ``fit`` is reused — this is only valid when
    ``transform`` is called on the same rows that were fitted; confirm callers.
    """

    def __init__(self):
        # One (method, fitted_encoder, name_suffix, n_components, target)
        # tuple per fitted reducer.
        self.result = list()

    def fit(self, df, targets, config):
        # Re-fitting replaces previous state instead of accumulating it,
        # consistent with the other encoders' reset-on-fit behavior.
        self.result = list()
        for target in targets:
            for (method, parameter) in config:
                # Work on a copy so the caller's config dicts are not mutated
                # (the original popped 'pos' out of the caller's dict).
                params = dict(parameter)
                pos = params.pop('pos', '') or ''
                if method == 'pca':
                    n_comp = params['n_components']
                    encoder = PCA(n_comp)
                    encoder.fit(df[target])
                    self.result.append((method, encoder, pos, n_comp, target))
                elif method == 'tsne':
                    encoder = TSNE(**params)
                    encoder.fit(df[target])
                    self.result.append((method, encoder, pos, params.get('n_components'), target))

    def transform(self, df):
        result = df.copy(deep=True)
        for (method, encoder, pos, n_comp, target) in self.result:
            if method == 'pca':
                new_names = [('pca_' + str(x)) + pos for x in range(n_comp)]
                result = pd.concat([result, pd.DataFrame(encoder.transform(df[target]), columns=new_names)], axis=1)
            elif method == 'tsne':
                # Reuses the embedding computed at fit time (see class note).
                new_names = [('tsne_' + str(x)) + pos for x in range(n_comp)]
                result = pd.concat([result, pd.DataFrame(encoder.embedding_, columns=new_names)], axis=1)
        return result
|
def is_missing(v):
    """Return True if ``v`` is a missing value (None, NaN, NaT).

    The previous implementation compared ``v == np.NaN``, which is always
    False because NaN never compares equal to anything, including itself.
    ``pd.isna`` handles None, float NaN and pandas NaT correctly.
    """
    return pd.isna(v)
|
@dataclass
class TabDataOpt():
    """Options bundle describing how tabular data feeds the model.

    Validation in ``__post_init__`` only *logs* errors (it does not raise),
    matching the original behavior.
    """
    label: str = field(default='label', metadata={'help': "\n The name for the `y` variable.\n The default name is 'label'.\n "})
    dis_vars_entity: list = field(default=None, metadata={'help': '\n A list of variables that are meant to be fed into a normal entity embedding layer (without using vicinity information). \n Default is None.\n '})
    dis_vars_vic: list = field(default=None, metadata={'help': '\n A list of variables that are meant to be fed into a vicino entity embedding layer (using vicinity information). \n Default is None. \n '})
    conti_vars: list = field(default=None, metadata={'help': '\n A list of continuous variables to be fed directly to densely connected layers. \n Default is None.\n '})
    num_dim: int = field(default=10, metadata={'help': '\n The default number of dimensions for entity embeddings. \n We assume the output dimension number is the same so that it can be fed into neural networks.\n Default is 10.\n Cannot be smaller than 1.\n '})
    centroids: dict = field(default=None, metadata={'help': '\n The centroids to use in conjunction with vicino embedding.\n The format is a dict that has keys corresponding to the dis_var_vic and values with centroids corresponds to numpy arrays. \n Default is None. \n '})

    def __post_init__(self):
        # The help text and error message say "larger than 0", so reject
        # num_dim < 1 (the old check ``<= 1`` also rejected the legal value 1).
        if (int(self.num_dim) < 1):
            logging.error(('The number of dimension specified is %f. \n The constructor only accepts integer value larger than 0.' % self.num_dim))
        self.num_dim = int(self.num_dim)
        if ((self.dis_vars_entity is None) and (self.dis_vars_vic is None) and (self.conti_vars is None)):
            logging.error('All the variables specify are None.\n Must specify at least one of the dis_vars_entity, dis_vars_vic or conti_vars')
        if (((self.dis_vars_vic is None) and (self.centroids is not None)) or ((self.dis_vars_vic is not None) and (self.centroids is None))):
            logging.error('\n If specify to use vicino embedding both dis_vars_vic and centroids should be non-empty.')
        # Only compare lengths when both are provided; the old code called
        # len() on the None defaults and crashed on construction.
        if ((self.dis_vars_vic is not None) and (self.centroids is not None) and (len(self.dis_vars_vic) != len(self.centroids))):
            logging.error(('The number of variables for vicino entity embeddings is %d, while the number of centoirds is %d. \n These two must be equal. \n ' % (len(self.dis_vars_vic), len(self.centroids))))
|
def permutate_selector(train_df, eval_df, y, variables=None, metric='acc', **kwargs):
    """Score variable importance by permutation loss with a logistic model.

    Fits a baseline logistic regression, then — in parallel via ray — refits
    once per variable with that variable's values permuted, recording the
    eval-set score each time.

    :param train_df: training data set
    :param eval_df: eval data set
    :param y: name of the target variable
    :param variables: variables to consider; if None, all independent columns
        except ``y`` are used
    :param metric: scoring metric; only 'acc' (accuracy) is supported, any
        other value yields a score of None
    :param kwargs: extra arguments forwarded to LogisticRegression
    :returns: dict mapping 'origin' and each permuted variable to its score
    """
    def _fit_and_predict(train_df, eval_df, y, variables, metric, start=None, **kwargs):
        # Plain (non-remote) helper so it can be called both locally and
        # from inside the remote task below.
        if start is None:
            clf = LogisticRegression(**kwargs)
        else:
            # NOTE(review): sklearn's ``warm_start`` expects a bool; the
            # original passed the previous run's coefficient array here —
            # confirm the intended warm-start semantics.
            clf = LogisticRegression(warm_start=True, **kwargs)
        clf.fit(train_df[variables], train_df[y])
        y_pred = clf.predict(eval_df[variables])
        score = accuracy_score(eval_df[y], y_pred) if metric == 'acc' else None
        return (score, clf.coef_)

    # ``@ray.remote`` takes the function directly; ``@ray.remote()`` with no
    # arguments (as originally written) raises at definition time.
    @ray.remote
    def fit_permute_and_predict(train_df, eval_df, y, variables, metric, start, permute_var, **kwargs):
        # Objects fetched from the ray object store are read-only; copy
        # before permuting a column in place.
        train_df = train_df.copy()
        train_df[permute_var] = np.random.permutation(train_df[permute_var])
        (score, _) = _fit_and_predict(train_df, eval_df, y, variables, metric, start, **kwargs)
        return (permute_var, score)

    ray.init()
    ind_col = get_ind_col(train_df)
    if variables is not None:
        var_to_use = [x for x in ind_col if x in variables]
    else:
        var_to_use = [x for x in ind_col if x != y]
    result_dict = dict()
    # Baseline score on unpermuted data (local call, not a remote task).
    (score, warm_start) = _fit_and_predict(train_df, eval_df, y, var_to_use, metric, None, **kwargs)
    result_dict['origin'] = score
    train_df_id = ray.put(train_df)
    eval_df_id = ray.put(eval_df)
    var_to_use_id = ray.put(var_to_use)
    start_id = ray.put(warm_start)
    # The original call dropped the ``metric`` argument here, shifting every
    # later positional parameter by one.
    result = [fit_permute_and_predict.remote(train_df_id, eval_df_id, y, var_to_use_id, metric, start_id, permute_var, **kwargs) for permute_var in var_to_use]
    result_list = ray.get(result)
    for (var, score) in result_list:
        result_dict[var] = score
    ray.shutdown()
    return result_dict
|
def tree_selector(train_df, eval_df, y, opt, metric='error', type='lgb'):
    """Rank variables by a gradient-boosted tree's built-in importances.

    Trains once to find the best iteration count, retrains with that count,
    then reads the fitted model's feature importances.

    :param train_df: training dataset
    :param eval_df: evaluation dataset
    :param y: target variable name
    :param opt: training options for the tree model
    :param metric: metric used to pick the best number of trees
        (currently only 'error')
    :param type: 'lgb' or 'xgb'
    :returns: dict mapping feature name -> importance
    """
    fitters = {'lgb': LGBFitter, 'xgb': XGBFitter}
    if type not in fitters:
        raise NotImplementedError()
    trainer = fitters[type](y, metric)
    # First pass: discover the best round via early stopping.
    trainer.train(train_df, eval_df, opt)
    opt_copy = copy.deepcopy(opt)
    opt_copy['num_round'] = trainer.best_round
    # Second pass: retrain with the fixed best round count.
    trainer.train(train_df, eval_df, opt_copy)
    # NOTE(review): sklearn-style estimators expose ``feature_importances_``
    # (plural); confirm what ``trainer.clf`` actually is.
    importance = trainer.clf.feature_importance_
    names = train_df.drop(columns=y).columns
    return dict(zip(names, importance))
|
def shap_selector(train_df, eval_df, y, opt, type='lgb', metric='error'):
    """Build a ``shap.TreeExplainer`` over a tuned boosted-tree model.

    The base tree model is trained twice: once to find the best iteration
    count, then again with that count fixed.

    :param train_df: training dataset
    :param eval_df: eval dataset
    :param y: the target variable name
    :param opt: training arguments for the boosting model
    :param type: 'lgb', 'xgb' or 'catboost' — the tree family to explain
    :param metric: metric used to pick the best tree count
        (currently only 'error')
    :returns: shap explainer over the fitted model
    """
    fitters = {'lgb': LGBFitter, 'xgb': XGBFitter, 'catboost': CATFitter}
    if type not in fitters:
        raise NotImplementedError()
    trainer = fitters[type](y, metric)
    opt_copy = copy.deepcopy(opt)
    # First pass finds the best round; second pass retrains with it fixed.
    trainer.train(train_df, eval_df, opt_copy)
    opt_copy['num_round'] = trainer.best_round
    trainer.train(train_df, eval_df, opt_copy)
    return shap.TreeExplainer(trainer.clf)
|
def vif_selector(data_df, y):
    """Compute the variance inflation factor (VIF) for each explanatory variable.

    Each variable is regressed (OLS) on the remaining columns in parallel via
    ray; VIF = 1 / (1 - R^2), with infinity for a perfect fit.

    :param data_df: the dataset
    :param y: the target variable (excluded from the set of variables scored)
    :returns: dict mapping variable name -> VIF
    """
    ray.init()
    ex_var = [var for var in data_df.columns if (var != y)]
    data_df_id = ray.put(data_df)

    # ``@ray.remote`` takes the function directly; ``@ray.remote()`` with no
    # arguments (as originally written) raises at definition time.
    @ray.remote
    def ls(data_df, target_var):
        # statsmodels OLS signature is (endog, exog): regress the target
        # variable on the other columns (the original had the two swapped).
        # NOTE(review): the regressors still include ``y`` itself; confirm
        # whether it should also be dropped for a pure VIF computation.
        model = sm.OLS(data_df[target_var], data_df.drop(columns=target_var))
        result = model.fit()
        rs = result.rsquared  # the results attribute is ``rsquared``, not ``rsquare``
        if rs == 1:
            vif = np.inf
        else:
            vif = 1.0 / (1.0 - rs)
        return (target_var, vif)

    result = [ls.remote(data_df_id, target_var) for target_var in ex_var]
    result_list = ray.get(result)
    return_result = dict()
    for (k, v) in result_list:
        return_result[k] = v
    ray.shutdown()
    return return_result
|
@click.group()
def cli():
    'FitsMap --- Convert FITS files and catalogs into LeafletJS maps.'
    # Root click command group; the dir/files/serve subcommands attach to it.
    pass
|
@cli.command()
@click.argument('directory', type=str)
@click.option('--out_dir', default='.', help=HELP_OUT_DIR)
@click.option('--min_zoom', default=0, help=HELP_MIN_ZOOM)
@click.option('--title', default='FitsMap', help=HELP_TITLE)
@click.option('--task_procs', default=0, help=HELP_TASK_PROCS)
@click.option('--procs_per_task', default=0, help=HELP_PROCS_PER_TASK)
@click.option('--catalog_delim', default=None, help=HELP_CATALOG_DELIM)
@click.option('--cat_wcs_fits_file', default=None, help=HELP_CAT_WCS_FITS_FILE)
@click.option('--image_engine', default='PIL', help=HELP_IMAGE_ENGINE)
def dir(directory, out_dir, min_zoom, title, task_procs, procs_per_task, catalog_delim, cat_wcs_fits_file, image_engine):
    'Convert a directory to a map.\n\n CLI interface: dir command.\n\n DIRECTORY the relative path to the directory to convert i.e. ./files/\n '
    # Thin CLI wrapper: all real work happens in convert.dir_to_map.
    convert.dir_to_map(directory, out_dir=out_dir, min_zoom=min_zoom, title=title, task_procs=task_procs, procs_per_task=procs_per_task, catalog_delim=catalog_delim, cat_wcs_fits_file=cat_wcs_fits_file, image_engine=image_engine)
|
@cli.command()
@click.argument('files', type=str)
@click.option('--out_dir', default='.', help=HELP_OUT_DIR)
@click.option('--min_zoom', default=0, help=HELP_MIN_ZOOM)
@click.option('--title', default='FitsMap', help=HELP_TITLE)
@click.option('--task_procs', default=0, help=HELP_TASK_PROCS)
@click.option('--procs_per_task', default=0, help=HELP_PROCS_PER_TASK)
@click.option('--catalog_delim', default=None, help=HELP_CATALOG_DELIM)
@click.option('--cat_wcs_fits_file', default=None, help=HELP_CAT_WCS_FITS_FILE)
@click.option('--image_engine', default='PIL', help=HELP_IMAGE_ENGINE)
def files(files, out_dir, min_zoom, title, task_procs, procs_per_task, catalog_delim, cat_wcs_fits_file, image_engine):
    'Convert a files to a map.\n\n CLI interface: files command.\n\n FILES should be a comma seperated list of files i.e. a.fits,b.fits,c.cat\n '
    # Thin CLI wrapper: splits the comma-separated list and delegates.
    convert.files_to_map(files.split(','), out_dir=out_dir, min_zoom=min_zoom, title=title, task_procs=task_procs, procs_per_task=procs_per_task, catalog_delim=catalog_delim, cat_wcs_fits_file=cat_wcs_fits_file, image_engine=image_engine)
|
def __server(out_dir: str, port: int) -> Callable[[], None]:
    """Return a thunk that serves ``out_dir`` over HTTP on ``port`` forever.

    The original return annotation said ``-> None`` although a callable is
    returned (it is invoked later by the ``serve`` command's thread pool).
    """
    def f():
        server = http.server.HTTPServer(('', port), functools.partial(http.server.SimpleHTTPRequestHandler, directory=out_dir))
        server.serve_forever()
    return f
|
def __opener(address: str) -> Callable[[], None]:
    """Return a thunk that opens ``address`` in the default web browser.

    The original return annotation said ``-> None`` although a callable is
    returned (it is invoked later by the ``serve`` command's thread pool).
    """
    def f():
        print('Opening up FitsMap in browser')
        webbrowser.open(address)
    return f
|
@cli.command()
@click.option('--out_dir', default='.', help=HELP_OUT_DIR)
@click.option('--port', default=8000, help=HELP_OUT_DIR)
@click.option('--open_browser', default=True, help=HELP_OUT_DIR)
def serve(out_dir: str, port: int, open_browser: bool):
    """Spin up a web server to serve a fitsmap (catalogs require one).

    NOTE(review): the ``--port`` and ``--open_browser`` options reuse
    ``HELP_OUT_DIR`` as their help text — looks like a copy/paste slip.

    Args:
        out_dir (str): output location of the fitsmap
        port (int): port to listen on
        open_browser (bool): also open the served map in a browser
    """
    map_address = f'http://localhost:{port}'
    print(f'Starting web server in {out_dir} and serving at {map_address}')
    if open_browser:
        tasks = [__server(out_dir, port), __opener(map_address)]
    else:
        print(f'Open browser and got to {map_address} to see FitsMap')
        tasks = [__server(out_dir, port)]
    # Run the server (and optionally the browser opener) concurrently.
    with ThreadPoolExecutor(max_workers=2) as pool:
        pool.map(lambda task: task(), tasks)
|
class KDBush():
    'Python port of https://github.com/mourner/kdbush'

    def __init__(self, points, get_x: Callable=(lambda p: p[0]), get_y: Callable=(lambda p: p[1]), node_size: int=64, array_dtype=np.float64):
        self.points = points
        self.node_size = node_size
        n = len(points)
        # 16-bit indices are enough when there are fewer than 2**16 points.
        index_dtype = np.uint16 if n < 65536 else np.uint32
        self.ids = np.zeros([n], dtype=index_dtype)
        # Interleaved coordinates: [x0, y0, x1, y1, ...].
        self.coords = np.zeros([n * 2], dtype=array_dtype)
        for i, point in enumerate(points):
            self.ids[i] = i
            self.coords[2 * i] = get_x(point)
            self.coords[2 * i + 1] = get_y(point)
        # kd-sort ids/coords in place so range/within queries work.
        _sort(self.ids, self.coords, self.node_size, 0, n - 1, 0)

    def range(self, min_x: int, min_y: int, max_x: int, max_y: int):
        """Ids of points inside the axis-aligned query box (inclusive)."""
        return _range(self.ids, self.coords, min_x, min_y, max_x, max_y, self.node_size)

    def within(self, x: int, y: int, r: int):
        """Ids of points within radius ``r`` of (x, y)."""
        return _within(self.ids, self.coords, x, y, r, self.node_size)
|
def _sort(ids: np.ndarray, coords: np.ndarray, node_size: int, left: int, right: int, axis: int) -> None:
    """Recursively kd-sort ``ids``/``coords`` in place, alternating axis."""
    if right - left <= node_size:
        return  # leaf: small enough to scan linearly at query time
    mid = (left + right) >> 1
    # Put the median element (on the current axis) at position ``mid``.
    _select(ids, coords, mid, left, right, axis)
    _sort(ids, coords, node_size, left, mid - 1, 1 - axis)
    _sort(ids, coords, node_size, mid + 1, right, 1 - axis)
|
def _select(ids: np.ndarray, coords: np.ndarray, k: int, left: int, right: int, axis: int) -> None:
    """Floyd-Rivest selection: partition ``[left, right]`` in place so the
    element at index ``k`` is the median along ``axis``.

    Port of kdbush's ``select``. Two sub-expressions of the sampling bounds
    were garbled in the original and are restored from the kdbush source:
      sd       = 0.5 * sqrt(z*s*(n-s)/n) * (m - n/2 < 0 ? -1 : 1)
      newRight = min(right, floor(k + (n - m) * s / n + sd))
    (the original had an always-false ``2 < 0`` test and ``(m - n)``).
    """
    while right > left:
        if right - left > 600:
            # Narrow the window around k using a sampled estimate.
            n = right - left + 1
            m = k - left + 1
            z = np.log(n)
            s = 0.5 * np.exp(2 * z / 3)
            sd = 0.5 * np.sqrt(z * s * (n - s) / n) * ((-1) if (m - (n / 2)) < 0 else 1)
            new_left = max(left, int(np.floor(k - (m * s) / n + sd)))
            new_right = min(right, int(np.floor(k + ((n - m) * s) / n + sd)))
            _select(ids, coords, k, new_left, new_right, axis)
        # Hoare-style partition around the pivot value at index k.
        t = coords[2 * k + axis]
        i = left
        j = right
        _swap_item(ids, coords, left, k)
        if coords[2 * right + axis] > t:
            _swap_item(ids, coords, left, right)
        while i < j:
            _swap_item(ids, coords, i, j)
            i += 1
            j -= 1
            while coords[2 * i + axis] < t:
                i += 1
            while coords[2 * j + axis] > t:
                j -= 1
        # Move the pivot into its final slot.
        if coords[2 * left + axis] == t:
            _swap_item(ids, coords, left, j)
        else:
            j += 1
            _swap_item(ids, coords, j, right)
        # Shrink the interval toward k.
        if j <= k:
            left = j + 1
        if k <= j:
            right = j - 1
|
def _swap_item(ids: np.ndarray, coords: np.ndarray, i: int, j: int) -> None:
    """Swap points i and j in the id array and the interleaved coord array."""
    _swap(ids, i, j)
    for offset in (0, 1):  # x then y component
        _swap(coords, 2 * i + offset, 2 * j + offset)
|
def _swap(arr: np.ndarray, i: int, j: int) -> None:
    """Exchange ``arr[i]`` and ``arr[j]`` in place."""
    arr[i], arr[j] = arr[j], arr[i]
|
def _range(ids: np.ndarray, coords: np.ndarray, min_x: int, min_y: int, max_x: int, max_y: int, node_size: int) -> List[int]:
    """Collect ids of all points inside the query box (inclusive bounds).

    Iterative kd-tree walk driven by an explicit (left, right, axis) stack.
    """
    found = []
    todo = [(0, len(ids) - 1, 0)]
    while todo:
        lo, hi, ax = todo.pop()
        if hi - lo <= node_size:
            # Leaf node: scan every point linearly.
            for i in range(lo, hi + 1):
                px = coords[2 * i]
                py = coords[2 * i + 1]
                if min_x <= px <= max_x and min_y <= py <= max_y:
                    found.append(ids[i])
            continue
        mid = (lo + hi) >> 1
        px = coords[2 * mid]
        py = coords[2 * mid + 1]
        if min_x <= px <= max_x and min_y <= py <= max_y:
            found.append(ids[mid])
        # Descend only into halves that can intersect the query box.
        if (min_x <= px) if ax == 0 else (min_y <= py):
            todo.append((lo, mid - 1, 1 - ax))
        if (max_x >= px) if ax == 0 else (max_y >= py):
            todo.append((mid + 1, hi, 1 - ax))
    return found
|
def _within(ids: np.ndarray, coords: np.ndarray, qx: int, qy: int, r: int, node_size: int) -> List[int]:
    """Collect ids of all points within distance ``r`` of (qx, qy).

    Mirrors ``_range`` but with a circular query region.
    NOTE(review): the leaf test uses strict ``<`` while the node test uses
    ``<=`` — preserved as-is from the original.
    """
    found = []
    todo = [(0, len(ids) - 1, 0)]
    r2 = r * r
    while todo:
        lo, hi, ax = todo.pop()
        if hi - lo <= node_size:
            # Leaf node: scan every point linearly.
            for i in range(lo, hi + 1):
                if __sq_dist(coords[2 * i], coords[2 * i + 1], qx, qy) < r2:
                    found.append(ids[i])
            continue
        mid = (lo + hi) >> 1
        px = coords[2 * mid]
        py = coords[2 * mid + 1]
        if __sq_dist(px, py, qx, qy) <= r2:
            found.append(ids[mid])
        # Descend only into halves that can intersect the query circle.
        if ((qx - r) <= px) if ax == 0 else ((qy - r) <= py):
            todo.append((lo, mid - 1, 1 - ax))
        if ((qx + r) >= px) if ax == 0 else ((qy + r) >= py):
            todo.append((mid + 1, hi, 1 - ax))
    return found
|
def __sq_dist(ax: float, ay: float, bx: float, by: float) -> float:
    """Squared Euclidean distance between points a and b."""
    dx = ax - bx
    dy = ay - by
    return dx * dx + dy * dy
|
class OutputManager():
    'Manages all FitsMap console output for tasks.'
    # Queue value used to mark a task as finished.
    SENTINEL = (- 1)
    # Class-level singleton handle; set by __init__.
    __instance = None
    @staticmethod
    def pbar_disabled():
        # NOTE(review): os.getenv returns a *string*; bool('False') is True,
        # so any non-empty value (including 'False') disables the bars. The
        # env-var name 'DISBALE_TQDM' also looks misspelled — confirm against
        # the disbale_tqdm/enable_tqdm helpers elsewhere in this file.
        return bool(os.getenv('DISBALE_TQDM', False))
    # Decorator applied at class-definition time (hence no @staticmethod):
    # after running the wrapped call, drain any pending UI updates on the
    # singleton instance.
    def check_for_updates(func):
        def f(*args, **kwargs):
            func(*args, **kwargs)
            if (OutputManager.__instance and (not OutputManager.pbar_disabled())):
                OutputManager.__instance.check_for_updates()
        return f
    @staticmethod
    @check_for_updates
    def write(pbar_ref: Tuple[(int, queue.Queue)], message: str) -> None:
        # Queue a one-off message display on the referenced bar.
        (idx, q) = pbar_ref
        def write(pbar):
            pbar.clear()
            pbar.display(message)
        q.put([idx, write])
    @staticmethod
    @check_for_updates
    def update(pbar_ref: Tuple[(int, queue.Queue)], value: int) -> None:
        # Queue a progress increment of ``value`` units on the referenced bar.
        (idx, q) = pbar_ref
        q.put([idx, (lambda pbar: pbar.update(value))])
    @staticmethod
    @check_for_updates
    def update_done(pbar_ref: Tuple[(int, queue.Queue)]) -> None:
        # Mark the task finished by pushing a SENTINEL-sized update.
        # NOTE(review): this enqueues a *callable* wrapping SENTINEL, but
        # check_for_updates compares the dequeued item itself to SENTINEL,
        # so tasks may never actually be flagged done — confirm.
        OutputManager.update(pbar_ref, OutputManager.SENTINEL)
    @staticmethod
    @check_for_updates
    def set_description(pbar_ref: Tuple[(int, queue.Queue)], desc: str) -> None:
        # Queue a description change for the referenced bar.
        (idx, q) = pbar_ref
        def write(pbar):
            pbar.clear()
            pbar.set_description(desc)
        q.put([idx, write])
    @staticmethod
    @check_for_updates
    def set_units_total(pbar_ref: Tuple[(int, queue.Queue)], unit: str, total: int) -> None:
        # Queue a unit label and total reset for the referenced bar.
        (idx, q) = pbar_ref
        def setup(pbar):
            pbar.unit = unit
            pbar.reset(total=total)
        q.put([idx, setup])
    def __init__(self):
        # idx -> tqdm bar, and idx -> "still running" flag.
        self.progress_bars = dict()
        self.in_progress = dict()
        # All worker updates funnel through this queue to the owning thread.
        self.q = queue.Queue()
        self.idx = count()
        OutputManager.__instance = self
    def make_bar(self) -> Tuple[(int, queue.Queue)]:
        # Generator: each next() allocates a fresh bar and yields the
        # (index, shared queue) reference that the static helpers consume.
        for idx in self.idx:
            self.progress_bars[idx] = tqdm(position=idx, disable=OutputManager.pbar_disabled(), leave=True)
            if (not OutputManager.pbar_disabled()):
                self.progress_bars[idx].display('Preparing...')
            self.in_progress[idx] = True
            (yield tuple([idx, self.q]))
    def check_for_updates(self):
        # Apply at most one queued update callable to its bar.
        if (not self.q.empty()):
            (idx, f) = self.q.get(block=True)
            running = (not (f == OutputManager.SENTINEL))
            self.in_progress[idx] = running
            if (running and (not OutputManager.pbar_disabled())):
                f(self.progress_bars[idx])
    def close_up(self):
        # Close every bar in index order.
        list(map((lambda key: self.progress_bars[key].close()), sorted(self.progress_bars.keys())))
    @property
    def jobs_running(self):
        # True while any tracked task has not reported completion.
        return any(self.in_progress.values())
|
class PaddedArray():
    # Wraps a 2-D (or 3-D, channels-last) array and exposes a virtually
    # padded view of shape (h + pad[0], w + pad[1]): slices past the real
    # data come back NaN-filled instead of raising. Picklable via __reduce__.
    def __init__(self, array: np.ndarray, pad: Tuple[(int, int)], tile_size=(256, 256)):
        self.array = array
        self.pad = pad
        shape = [(array.shape[0] + pad[0]), (array.shape[1] + pad[1])]
        empty_tile_shape = list(tile_size)
        if (len(array.shape) == 3):
            # Channels-last 3-D data: carry the channel count through.
            shape += [array.shape[2]]
            empty_tile_shape += [array.shape[2]]
        # Returned verbatim for slices entirely outside the backing array.
        self.empty_tile = np.full(empty_tile_shape, np.nan, dtype=np.float32)
        self.shape = tuple(shape)
    def __get_internal_array(self, ys: slice, xs: slice) -> np.ndarray:
        # Fast path: the requested slice lies fully inside the backing array.
        return self.array[(ys, xs)]
    def __get_mixed(self, ys: slice, xs: slice) -> np.ndarray:
        # Slice straddles an edge: take what exists, NaN-pad the remainder.
        (start_y, stop_y) = (ys.start, ys.stop)
        (start_x, stop_x) = (xs.start, xs.stop)
        pad_y = max(0, (stop_y - self.array.shape[0]))
        pad_x = max(0, (stop_x - self.array.shape[1]))
        padding = [[0, pad_y], [0, pad_x]]
        if (len(self.array.shape) == 3):
            padding += [[0, 0]]  # never pad the channel axis
        slice_ys = slice(start_y, min(self.array.shape[0], stop_y))
        slice_xs = slice(start_x, min(self.array.shape[1], stop_x))
        return np.pad(self.array[(slice_ys, slice_xs)], padding, mode='constant', constant_values=np.nan)
    def __getitem__(self, axis_slices: Tuple[(slice, slice)]) -> np.ndarray:
        if (type(axis_slices) is not tuple):
            # Single-axis indexing: take the full width on the second axis.
            axis_slices = [axis_slices, slice(None)]
        (ys, xs) = axis_slices
        (start_y, stop_y) = ((ys.start or 0), (ys.stop or self.shape[0]))
        (start_x, stop_x) = ((xs.start or 0), (xs.stop or self.shape[1]))
        slice_ys = slice(start_y, stop_y)
        slice_xs = slice(start_x, stop_x)
        # NOTE(review): strict '<' means a slice stopping exactly at the
        # array edge takes the padded path, and 'start > shape' (not >=)
        # decides the all-empty case — confirm these boundaries are intended.
        if ((stop_y < self.array.shape[0]) and (stop_x < self.array.shape[1])):
            return self.__get_internal_array(slice_ys, slice_xs)
        elif ((start_y > self.array.shape[0]) or (start_x > self.array.shape[1])):
            # NOTE(review): empty_tile has tile_size shape regardless of the
            # requested slice extent — callers appear to always request
            # tile-sized slices; verify.
            return self.empty_tile
        else:
            return self.__get_mixed(slice_ys, slice_xs)
    def __reduce__(self) -> Union[(str, Tuple[(Any, ...)])]:
        # Pickle support: rebuild from the backing array and pad only
        # (tile_size falls back to its default).
        return (PaddedArray, (self.array, self.pad))
|
def default_map(i):
    """Identity mapping — default when no custom map function is supplied."""
    return i
|
def default_udpate_f():
    # No-op progress callback used as the Supercluster default.
    # NOTE(review): name looks like a typo for "update", but it is referenced
    # by callers as-is, so it is kept unchanged.
    pass
|
def default_get_x(p):
    """Default x-coordinate accessor for cluster point dicts."""
    return p['x']
|
def default_get_y(p):
    """Default y-coordinate accessor for cluster point dicts."""
    return p['y']
|
class Supercluster():
    # Python port of mapbox's supercluster point-clustering library.
    # Builds one KDBush spatial index per zoom level; clusters are merged
    # bottom-up from max_zoom to min_zoom. Internally all coordinates live in
    # projected [0, 1] space (web mercator, or a linear scale when
    # alternate_CRS is set).
    def __init__(self, min_zoom: int=0, max_zoom: int=16, min_points: int=2, radius: float=40, extent: int=512, node_size: int=64, log: bool=False, generate_id: bool=False, reduce: Callable=None, map: Callable=default_map, alternate_CRS: Tuple[(int, int)]=(), update_f: Callable=default_udpate_f):
        self.min_zoom = min_zoom
        self.max_zoom = max_zoom
        # A cluster forms only when at least this many points coincide.
        self.min_points = min_points
        # Cluster radius in pixels at the given tile extent.
        self.radius = radius
        self.extent = extent
        self.node_size = node_size
        self.log = log
        self.generate_id = generate_id
        # Optional property-aggregation callbacks (see _cluster/_map).
        self.reduce = reduce
        self.map = map
        # One KDBush index per zoom level (max_zoom + 1 holds raw points).
        self.trees = np.zeros([(max_zoom + 2)], dtype=object)
        # When set, (x_scale, y_scale) replaces the mercator projection.
        self.alternate_CRS = alternate_CRS
        # Progress callback invoked once per zoom level when log is True.
        self.update_f = update_f
    def load(self, points) -> 'Supercluster':
        # Index the raw points, then cluster level by level downward.
        self.points = points
        clusters = []
        for i in range(len(points)):
            if points[i].get('geometry', []):
                clusters.append(self.create_point_cluster(points[i], i))
            if self.log:
                self.update_f()
        self.trees[(self.max_zoom + 1)] = KDBush(points=clusters, node_size=self.node_size, array_dtype=np.float32, get_x=default_get_x, get_y=default_get_y)
        if self.log:
            self.update_f()
        for z in range(self.max_zoom, (self.min_zoom - 1), (- 1)):
            # Each level clusters the level above it.
            clusters = self._cluster(clusters, z)
            self.trees[z] = KDBush(points=clusters, node_size=self.node_size, array_dtype=np.float32, get_x=default_get_x, get_y=default_get_y)
            if self.log:
                self.update_f()
        return self
    def get_clusters(self, bbox, zoom):
        # Return GeoJSON-ish features inside bbox at the given zoom.
        if self.alternate_CRS:
            (min_lng, min_lat, max_lng, max_lat) = bbox
        else:
            # Normalize longitudes to [-180, 180] and clamp latitudes.
            min_lng = (((((bbox[0] + 180) % 360) + 360) % 360) - 180)
            min_lat = max((- 90), min(90, bbox[1]))
            max_lng = (180 if (bbox[2] == 180) else (((((bbox[2] + 180) % 360) + 360) % 360) - 180))
            max_lat = max((- 90), min(90, bbox[3]))
        if ((bbox[2] - bbox[0]) >= 360):
            min_lng = (- 180)
            max_lng = 180
        elif (min_lng > max_lng):
            # Query crosses the antimeridian: split into two and merge.
            easternHem = self.get_clusters([min_lng, min_lat, 180, max_lat], zoom)
            westernHem = self.get_clusters([(- 180), min_lat, max_lng, max_lat], zoom)
            return (easternHem + westernHem)
        tree = self.trees[self._limit_zoom(zoom)]
        if self.alternate_CRS:
            ids = tree.range(self.lng_x(min_lng), self.lat_y(min_lat), self.lng_x(max_lng), self.lat_y(max_lat))
        else:
            # Mercator y is inverted relative to latitude, hence the swap.
            ids = tree.range(self.lng_x(min_lng), self.lat_y(max_lat), self.lng_x(max_lng), self.lat_y(min_lat))
        clusters = []
        for id in ids:
            c = tree.points[id]
            clusters.append((self.get_cluster_JSON(c) if c.get('num_points', 0) else self.points[c['index']]))
        return clusters
    def get_children(self, cluster_id):
        # Return the direct children of a cluster at the zoom it was created.
        origin_id = self._get_origin_id(cluster_id)
        origin_zoom = self._get_origin_zoom(cluster_id)
        err_msg = 'No cluster with the specified id'
        index = self.trees[origin_zoom]
        if (not index):
            raise Exception(err_msg)
        origin = index.points[origin_id]
        if (not origin):
            raise Exception(err_msg)
        r = (self.radius / (self.extent * (2 ** (origin_zoom - 1))))
        ids = index.within(origin['x'], origin['y'], r)
        children = []
        for id in ids:
            c = index.points[id]
            # NOTE(review): raw point entries are created with key 'parentId'
            # (see create_point_cluster) but 'parent_id' is read here — a
            # point entry would raise KeyError; confirm the intended key.
            if (c['parent_id'] == cluster_id):
                children.append((self.get_cluster_JSON(c) if ('num_points' in c) else self.points[c['index']]))
        if (len(children) == 0):
            raise Exception(err_msg)
        return children
    def get_leaves(self, cluster_id, limit, offset):
        # Return up to ``limit`` leaf points of a cluster, skipping ``offset``.
        limit = (limit if limit else 10)
        offset = (offset if offset else 0)
        leaves = []
        self._append_leaves(leaves, cluster_id, limit, offset, 0)
        return leaves
    def get_tile(self, z, x, y):
        # Build one vector tile worth of features, including a buffer strip
        # from the wrapped copies at the antimeridian (x == 0 / x == z2-1).
        tree = self.trees[self._limit_zoom(z)]
        z2 = (2 ** z)
        p = (self.radius / self.extent)
        top = ((y - p) / z2)
        bottom = (((y + 1) + p) / z2)
        tile = self._add_tile_features(tree.range(((x - p) / z2), top, (((x + 1) + p) / z2), bottom), tree.points, x, y, z2, dict(features=[]))
        if (x == 0):
            tile = self._add_tile_features(tree.range((1 - (p / z2)), top, 1, bottom), tree.points, z2, y, z2, tile)
        if (x == (z2 - 1)):
            tile = self._add_tile_features(tree.range(0, top, (p / z2), bottom), tree.points, (- 1), y, z2, tile)
        return (tile if len(tile['features']) else None)
    def get_cluster_expansion_zoom(self, cluster_id):
        # Walk down single-child chains until the cluster splits.
        expansion_zoom = (self._get_origin_zoom(cluster_id) - 1)
        while (expansion_zoom <= self.max_zoom):
            children = self.get_children(cluster_id)
            expansion_zoom += 1
            if (len(children) != 1):
                break
            cluster_id = children[0]['properties']['cluster_id']
        return expansion_zoom
    def _append_leaves(self, result, cluster_id, limit, offset, skipped):
        # Depth-first leaf collection honoring limit/offset; returns the
        # running count of skipped leaves.
        children = self.get_children(cluster_id)
        for child in children:
            props = child['properties']
            if (props and ('cluster' in props)):
                if ((skipped + props['point_count']) <= offset):
                    # Whole subtree falls before the requested window.
                    skipped += props['point_count']
                else:
                    skipped = self._append_leaves(result, props['cluster_id'], limit, offset, skipped)
            elif (skipped < offset):
                skipped += 1
            else:
                result.append(child)
            if (len(result) == limit):
                break
        return skipped
    def _add_tile_features(self, ids, points, x, y, z2, tile):
        # Convert the given point/cluster entries to tile-local features.
        for i in ids:
            c = points[i]
            is_cluster = ('num_points' in c)
            if is_cluster:
                tags = self.get_cluster_properties(c)
                px = self.x_lng(c['x'])
                py = self.y_lat(c['y'])
            else:
                p = self.points[c['index']]
                tags = p.get('tags', None)
                px = p['geometry']['coordinates'][0]
                py = p['geometry']['coordinates'][1]
            # Geometry is in tile-local integer coordinates.
            f = dict(type=1, geometry=[round((self.extent * ((px * z2) - x))), round((self.extent * ((py * z2) - y)))], properties={'global_x': px, 'global_y': py, **tags})
            if is_cluster:
                id = c['id']
            elif self.generate_id:
                id = c['index']
            elif ('id' in self.points[c['index']]):
                id = self.points[c['index']]['id']
            else:
                id = None
            if (id is not None):
                f['id'] = id
            tile['features'].append(f)
        return tile
    def _limit_zoom(self, z):
        # Clamp to the range of zoom levels that actually have trees.
        return max(self.min_zoom, min(z, (self.max_zoom + 1)))
    def _cluster(self, points, zoom: int):
        # Merge points within ``radius`` (in projected units) at this zoom.
        clusters = []
        r = (self.radius / (self.extent * (2 ** zoom)))
        for i in range(len(points)):
            p = points[i]
            # Skip points already absorbed into a cluster at this zoom.
            if (p['zoom'] <= zoom):
                continue
            p['zoom'] = zoom
            # Query neighbors in the (finer) tree one level down.
            tree = self.trees[(zoom + 1)]
            neighbor_ids = tree.within(p['x'], p['y'], r)
            num_points_origin = p.get('num_points', 1)
            num_points = num_points_origin
            for neighbor_id in neighbor_ids:
                b = tree.points[neighbor_id]
                if (b['zoom'] > zoom):
                    num_points += b.get('num_points', 1)
            if (num_points >= self.min_points):
                # Form a cluster at the weighted centroid of its members.
                wx = (p['x'] * num_points_origin)
                wy = (p['y'] * num_points_origin)
                if (self.reduce and (num_points_origin > 1)):
                    cluster_properties = self._map(p, True)
                else:
                    cluster_properties = None
                # Encode (index, zoom) into the id; decoded by
                # _get_origin_id/_get_origin_zoom.
                id = (((i << 5) + (zoom + 1)) + len(self.points))
                for neighbor_id in neighbor_ids:
                    b = tree.points[neighbor_id]
                    if (b['zoom'] <= zoom):
                        continue
                    b['zoom'] = zoom
                    num_points_2 = b.get('num_points', 1)
                    wx += (b['x'] * num_points_2)
                    wy += (b['y'] * num_points_2)
                    b['parent_id'] = id
                    if self.reduce:
                        if (cluster_properties is None):
                            cluster_properties = self._map(p, True)
                        cluster_properties = self.reduce(cluster_properties, self._map(b))
                p['parent_id'] = id
                clusters.append(self.create_cluster((wx / num_points), (wy / num_points), id, num_points, cluster_properties))
            else:
                # Too few neighbors: keep the point (and its neighbors) as-is.
                clusters.append(p)
                if (num_points > 1):
                    for neighbor_id in neighbor_ids:
                        b = tree.points[neighbor_id]
                        if (b['zoom'] <= zoom):
                            continue
                        b['zoom'] = zoom
                        clusters.append(b)
        return clusters
    def _get_origin_id(self, clusterId):
        # Inverse of the id encoding in _cluster: recover the point index.
        return ((clusterId - len(self.points)) >> 5)
    def _get_origin_zoom(self, clusterId):
        # Recover (zoom + 1) from the low 5 bits of the encoded id.
        return ((clusterId - len(self.points)) % 32)
    def _map(self, point, clone: bool=False):
        # Map a point's original properties through self.map; optionally copy
        # so reduce callbacks can mutate safely.
        if ('num_points' in point):
            return (dict(**point['properties']) if clone else point['properties'])
        original = self.points[point['index']]['properties']
        result = self.map(original)
        # NOTE(review): the JS original compares by identity (result !==
        # original); ``==`` here is value equality — confirm equivalence.
        return (dict(**result) if (clone and (result == original)) else result)
    def create_cluster(self, x, y, id, num_points, properties):
        # Internal cluster record; zoom=inf marks "not yet assigned".
        return dict(x=np.asarray(x, dtype=np.float32), y=np.asarray(y, dtype=np.float32), zoom=np.inf, id=id, parent_id=(- 1), num_points=num_points, properties=properties)
    def create_point_cluster(self, p, id):
        # Internal record for a raw input point, projected into [0, 1] space.
        # NOTE(review): uses key 'parentId' while cluster records and readers
        # use 'parent_id' — see get_children; confirm intended key.
        (x, y) = p['geometry']['coordinates']
        return dict(x=np.asarray(self.lng_x(x), dtype=np.float32), y=np.asarray(self.lat_y(y), dtype=np.float32), zoom=np.inf, index=id, parentId=(- 1), tags=p['tags'])
    def get_cluster_JSON(self, cluster):
        # Public GeoJSON Feature representation of an internal cluster.
        return dict(type='Feature', id=cluster['id'], properties=self.get_cluster_properties(cluster), geometry=dict(type='Point', coordinates=[self.x_lng(cluster['x']), self.y_lat(cluster['y'])]))
    def get_cluster_properties(self, cluster):
        # Cluster metadata plus a human-friendly abbreviated count.
        count = cluster['num_points']
        if (count >= 1000000):
            abbrev = f'{round((count / 1000000))}M'
        elif (count >= 10000):
            abbrev = f'{round((count / 1000))}k'
        elif (count > 1000):
            abbrev = f'{round(((count / 100) / 10))}k'
        else:
            abbrev = count
        props = (cluster['properties'] if cluster['properties'] else {})
        return dict(cluster=True, cluster_id=cluster['id'], point_count=count, point_count_abbreviated=abbrev, **props)
    def lng_x(self, lng):
        # Longitude -> projected x in [0, 1] (or linear scale for alt CRS).
        if self.alternate_CRS:
            return (lng / self.alternate_CRS[0])
        else:
            return ((lng / 360) + 0.5)
    def lat_y(self, lat):
        # Latitude -> projected y in [0, 1] (spherical mercator).
        if self.alternate_CRS:
            return (lat / self.alternate_CRS[1])
        else:
            sin = math.sin(((lat * math.pi) / 180))
            if (sin in [(- 1), 1]):
                # Poles map to the clamped extremes.
                y = (- sin)
            else:
                y = (0.5 - ((0.25 * math.log(((1 + sin) / (1 - sin)))) / math.pi))
            return min(max(y, 0), 1)
    def x_lng(self, x):
        # Projected x in [0, 1] -> longitude (inverse of lng_x).
        if self.alternate_CRS:
            return (x * self.alternate_CRS[0])
        else:
            return ((x - 0.5) * 360)
    def y_lat(self, y):
        # Projected y in [0, 1] -> latitude (inverse spherical mercator).
        if self.alternate_CRS:
            return (y * self.alternate_CRS[1])
        else:
            y2 = (((180 - (y * 360)) * math.pi) / 180)
            return (((360 * math.atan(math.exp(y2))) / math.pi) - 90)
|
class MockTQDM():
    """No-op stand-in for a ``tqdm`` progress bar, used in tests."""
    unit = ''

    def update(self, n: int=1):
        """Ignore a progress increment."""
        pass

    def clear(self):
        """Ignore a clear request."""
        pass

    def display(self, message):
        """Ignore a message display."""
        pass

    def set_description(self, desc):
        """Ignore a description change."""
        pass

    def reset(self, total=None):
        """Ignore a reset.

        The original signature was ``reset(total)`` — missing ``self`` — so
        ``bar.reset(total=...)`` (as called by OutputManager.set_units_total)
        raised a TypeError. The ``total=None`` default matches tqdm's API.
        """
        pass
|
class MockWCS():
    'Mock WCS object for testing'

    def __init__(self, include_cd: bool):
        if include_cd:
            # CD-matrix style header: identity transform.
            self.cd = np.array([[1, 0], [0, 1]])
        else:
            # CRPIX/CRVAL style header.
            self.crpix = np.array([1, 1])
            self.crval = np.array([1, 1])

    def all_pix2world(self, *args, **kwargs):
        """Always return the same three (1, 1) world coordinates."""
        return np.ones((3, 2), dtype=np.float64)

    @property
    def wcs(self):
        # Astropy exposes the low-level WCS via ``.wcs``; mimic by
        # returning ourselves.
        return self
|
def setup(with_data=False):
    """Build the testing directory structure.

    Creates TEST_PATH if needed; when ``with_data`` is True, copies all
    fixture files from DATA_DIR into it and unpacks any ``*.tar.xz``
    archives in place.

    The original used ``list(map(...))``/``any(map(...))`` purely for side
    effects and shadowed the archive filename with the file handle; plain
    loops make the intent explicit.
    """
    if not os.path.exists(TEST_PATH):
        os.mkdir(TEST_PATH)
    if with_data:
        # Copy every fixture file into the test directory.
        for fname in os.listdir(DATA_DIR):
            shutil.copy(os.path.join(DATA_DIR, fname), os.path.join(TEST_PATH, fname))
        # Unpack compressed fixtures in place.
        for fname in os.listdir(TEST_PATH):
            if fname.endswith('tar.xz'):
                with tarfile.open(os.path.join(TEST_PATH, fname)) as archive:
                    # NOTE(review): extractall on an untrusted archive can
                    # escape TEST_PATH; fixtures are assumed trusted here.
                    archive.extractall(TEST_PATH)
|
def tear_down(include_ray=False):
    'Tears down testing structure: removes TEST_PATH entirely and, when requested, shuts down ray.'
    if os.path.exists(TEST_PATH):
        shutil.rmtree(TEST_PATH)
    if include_ray:
        ray.shutdown()
|
def disbale_tqdm():
    # Disable tqdm progress bars for tests by setting the env var flag.
    # NOTE(review): the function name (and likely the env-var constant) are
    # misspelled 'disbale'; callers depend on the name, so it is kept as-is.
    os.environ[TQDM_ENV_VAR] = 'True'
|
def enable_tqdm():
    """Re-enable tqdm progress bars for tests.

    Setting the variable to the *string* 'False' (as the original did) does
    not work: the reader (``OutputManager.pbar_disabled``) truth-tests the
    raw string, and any non-empty string — including 'False' — is truthy.
    Removing the variable restores the enabled default.
    """
    os.environ.pop(TQDM_ENV_VAR, None)
|
def cat_to_json(fname):
    """Parse a catalog file whose rows sit between a header and footer line.

    Lines 1..n-2 are stripped, concatenated and wrapped in ``[...]`` to form
    a JSON array. Returns ``(rows, header_line)`` where ``header_line`` is
    the first line verbatim.
    """
    with open(fname, 'r') as handle:
        lines = handle.readlines()
    body = ''.join(line.strip() for line in lines[1:(- 1)])
    return (json.loads(('[' + body) + ']'), lines[0])
|
def __stable_idx_answer(shape, zoom, tile_size=256):
    """Yield (zoom, y, x, slice_y, slice_x) for every tile covering ``shape``.

    Reference implementation used to validate the production tile indexer.
    Row indices count down (image row 0 is the top tile at the highest y).
    NOTE(review): raises StopIteration from a non-generator when the image
    is smaller than one tile — preserved as-is from the original.
    """
    frac0 = shape[0] / tile_size
    frac1 = shape[1] / tile_size
    if frac0 < 1 or frac1 < 1:
        raise StopIteration()
    n_tiles0 = int(np.ceil(frac0))
    n_tiles1 = int(np.ceil(frac1))
    edges0 = [i * tile_size for i in range(n_tiles0 + 1)]
    edges1 = [i * tile_size for i in range(n_tiles1 + 1)]

    def consecutive_slices(edges):
        # Adjacent edge pairs become half-open pixel slices.
        return [slice(a, b) for a, b in zip(edges[:(- 1)], edges[1:])]

    rows = zip(range(n_tiles0 - 1, (- 1), (- 1)), consecutive_slices(edges0))
    cols = enumerate(consecutive_slices(edges1))

    def to_record(row_col):
        (y, slice_y), (x, slice_x) = row_col
        return (zoom, y, x, slice_y, slice_x)

    return map(to_record, product(rows, cols))
|
def covert_idx_to_hashable_tuple(idx):
    'Converts idxs to hashable type for set, slice is not hashable'
    # Stringify the two slice members; the leading ints are already hashable.
    z, y, x, slice_y, slice_x = idx
    return (z, y, x, str(slice_y), str(slice_x))
|
def get_slice_idx_generator_solution(zoom: int):
    """Reference tile-index list for a fixed 4305x9791 image at ``zoom``.

    The data returned by this can be big at high zoom levels.
    TODO: Find particular cases to test for.
    """
    fixed_shape = (4305, 9791)
    return list(__stable_idx_answer(fixed_shape, zoom))
|
def compare_file_directories(dir1, dir2) -> bool:
    """Recursively compare two directory trees.

    Returns True when both trees contain the same file/subdirectory names
    and every paired file's contents match (images are compared with a
    small numeric tolerance); otherwise prints the mismatches and returns
    False.
    """
    # os.DirEntry helpers used throughout the nested comparisons.
    is_file = (lambda x: x.is_file())
    is_dir = (lambda x: x.is_dir())
    get_name = (lambda x: x.name)
    get_path = (lambda x: x.path)
    def get_file_extension(fname):
        # Extension including the leading dot, e.g. '.png'.
        return os.path.splitext(fname)[1]
    def compare_file_contents(file1, file2) -> bool:
        # Compare one pair of files; images numerically, everything else
        # line-by-line.
        f_ext = get_file_extension(file1)
        if (f_ext in ['.png', '.tiff', '.ico']):
            arr1 = np.array(Image.open(file1))
            arr2 = np.array(Image.open(file2))
            # atol=5 allows small per-channel encoder differences to pass.
            same = np.isclose(arr1, arr2, rtol=1e-05, atol=5, equal_nan=True)
            if (not same.all()):
                (ys, xs, cs) = np.where((~ same))
                print(f'Found {len(ys)} differences in {file1}')
                print(f'First difference at {ys[0]}, {xs[0]}, {cs[0]}')
                print(f'First difference is {arr1[(ys[0], xs[0], cs[0])]} vs {arr2[(ys[0], xs[0], cs[0])]}')
                return False
            return same.all()
        else:
            # Binary mode for cbor/pbf payloads, text mode otherwise.
            mode = ('r' + ('b' * int((f_ext in ['.cbor', '.pbf']))))
            with open(file1, mode) as f1, open(file2, mode) as f2:
                try:
                    return (f1.readlines() == f2.readlines())
                except:
                    # NOTE(review): bare except -- on a read/decode error this
                    # prints and implicitly returns None, which all() treats
                    # as False downstream; consider narrowing the exception.
                    print(file1, file2, mode)
    def compare_subdirs(sub_dir1, sub_dir2) -> bool:
        # Compare names + contents of files first, then recurse into
        # subdirectories; print the offending pairs on mismatch.
        dir1_entries = list(os.scandir(sub_dir1))
        dir2_entries = list(os.scandir(sub_dir2))
        dir1_files = list(sorted(filter(is_file, dir1_entries), key=(lambda x: x.name)))
        dir2_files = list(sorted(filter(is_file, dir2_entries), key=(lambda x: x.name)))
        count_and_names_same = (list(map(get_name, dir1_files)) == list(map(get_name, dir2_files)))
        if count_and_names_same:
            file_pairs = list(zip(map(get_path, dir1_files), map(get_path, dir2_files)))
            files_comps = list(starmap(compare_file_contents, zip(map(get_path, dir1_files), map(get_path, dir2_files))))
            files_same = all(files_comps)
            if files_same:
                dir1_subdirs = list(sorted(filter(is_dir, dir1_entries), key=(lambda x: x.name)))
                dir2_subdirs = list(sorted(filter(is_dir, dir2_entries), key=(lambda x: x.name)))
                count_and_names_same = (list(map(get_name, dir1_subdirs)) == list(map(get_name, dir2_subdirs)))
                if count_and_names_same:
                    subdir_pairs = list(zip(map(get_path, dir1_subdirs), map(get_path, dir2_subdirs)))
                    subdir_comp = list(starmap(compare_subdirs, subdir_pairs))
                    subdirs_same = all(subdir_comp)
                    return subdirs_same
                else:
                    list(map((lambda x: print(x[1], "don't match")), filter((lambda x: (not x[0])), zip(subdir_comp, subdir_pairs))))
                    return False
            else:
                list(map((lambda x: print(x[1], "don't match")), filter((lambda x: (not x[0])), zip(files_comps, file_pairs))))
                return False
        else:
            # Name sets differ: report the symmetric difference.
            missing_files = set(list(map((lambda f: f.name), dir1_files))).symmetric_difference(set(list(map((lambda f: f.name), dir2_files))))
            print(missing_files)
            return False
    return compare_subdirs(dir1, dir2)
|
def get_version():
    """Read the fitsmap version string from ``../__version__.py``.

    Returns the first line of that file, stripped, with any double quotes
    removed.
    """
    module_dir = os.path.dirname(os.path.realpath(__file__))
    version_file = os.path.join(module_dir, '../__version__.py')
    with open(version_file, 'r') as f:
        return f.readline().strip().replace('"', '')
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_layer_name_to_dict_image():
    """Image layers (no .columns sidecar) get a .png tile directory."""
    min_zoom, max_native_zoom = 0, 2
    layer_name = 'test'
    actual_dict = c.layer_name_to_dict(
        '.', max_native_zoom + 5, min_zoom, max_native_zoom, layer_name, ''
    )
    expected_dict = {
        'directory': layer_name + '/{z}/{y}/{x}.png',
        'name': layer_name,
        'min_zoom': min_zoom,
        'max_zoom': max_native_zoom + 5,
        'max_native_zoom': max_native_zoom,
    }
    assert expected_dict == actual_dict
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_layer_name_to_dict_catalog():
    """Catalog layers (with a .columns sidecar) get a .pbf tile directory."""
    helpers.setup()
    out_dir = helpers.DATA_DIR
    min_zoom, max_native_zoom = 0, 2
    layer_name = 'test'
    layer_color = '#4C72B0'
    column_spec = 'a,b,c'
    # A catalog layer is detected via its sidecar .columns file.
    with open(os.path.join(out_dir, f'{layer_name}.columns'), 'w') as f:
        f.write(column_spec)
    actual_dict = c.layer_name_to_dict(
        out_dir, max_native_zoom + 5, min_zoom, max_native_zoom, layer_name, layer_color
    )
    expected_dict = {
        'directory': layer_name + '/{z}/{y}/{x}.pbf',
        'name': layer_name,
        'min_zoom': min_zoom,
        'max_zoom': max_native_zoom + 5,
        'max_native_zoom': max_native_zoom,
        'color': layer_color,
        'columns': [f'"{col}"' for col in column_spec.split(',')],
    }
    helpers.tear_down()
    assert expected_dict == actual_dict
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_img_layer_dict_to_str():
    """Image layer dict renders to a Leaflet tileLayer JS declaration."""
    # max_zoom in the dict is the native zoom plus 5 extra levels of
    # client-side upsampling.
    min_zoom = 0
    max_zoom = 2
    name = 'test'
    layer_dict = dict(directory=(name + '/{z}/{y}/{x}.png'), name=name, min_zoom=min_zoom, max_zoom=(max_zoom + 5), max_native_zoom=max_zoom)
    actual_str = c.img_layer_dict_to_str(layer_dict)
    # Expected JS assembled piecewise to mirror the generator's output.
    expected_str = ''.join([('const ' + layer_dict['name']), ((' = L.tileLayer("' + layer_dict['directory']) + '"'), ', { ', (('attribution:"' + "<a href='https://github.com/ryanhausen/fitsmap'>FitsMap</a>") + '", '), (('minZoom: ' + str(layer_dict['min_zoom'])) + ', '), (('maxZoom: ' + str(layer_dict['max_zoom'])) + ', '), (('maxNativeZoom: ' + str(layer_dict['max_native_zoom'])) + ' '), '});'])
    assert (expected_str == actual_str)
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_cat_layer_dict_to_str():
    """Catalog layer dict renders to a tiledMarkers grid-layer declaration."""
    min_zoom = 0
    max_zoom = 2
    name = 'test'
    columns = 'a,b,c'
    layer_dict = dict(directory=(name + '/{z}/{y}/{x}.png'), name=name, min_zoom=min_zoom, max_zoom=(max_zoom + 5), max_native_zoom=max_zoom, color='red', columns=[f'"{c}"' for c in columns.split(',')])
    # float('inf') rows-per-column renders as the JS literal "Infinity".
    actual_str = c.cat_layer_dict_to_str(layer_dict, float('inf'))
    expected_str = ''.join([('const ' + layer_dict['name']), ' = L.gridLayer.tiledMarkers(', '{ ', (('tileURL:"' + layer_dict['directory']) + '", '), 'radius: 10, ', (('color: "' + layer_dict['color']) + '", '), 'fillOpacity: 0.2, ', 'strokeOpacity: 1.0, ', f'rowsPerColumn: Infinity, ', f"catalogColumns: [{','.join(layer_dict['columns'])}], ", (('minZoom: ' + str(layer_dict['min_zoom'])) + ', '), (('maxZoom: ' + str(layer_dict['max_zoom'])) + ', '), (('maxNativeZoom: ' + str(layer_dict['max_native_zoom'])) + ' '), '});'])
    # NOTE(review): tear_down is called here without a matching setup;
    # harmless (it only removes TEST_PATH if present) but likely vestigial.
    helpers.tear_down()
    assert (expected_str == actual_str)
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_leaflet_layer_control_declaration():
    """Image and catalog layers both appear in the L.control.layers call."""
    name = 'test'
    common = dict(name=name, min_zoom=0, max_zoom=7, max_native_zoom=2)
    img_layer_dict = dict(directory=(name + '/{z}/{y}/{x}.png'), **common)
    cat_layer_dict = dict(directory=(name + '/{z}/{y}/{x}.pbf'), color='red', **common)
    actual = c.leaflet_layer_control_declaration([img_layer_dict], [cat_layer_dict])
    expected = '\n'.join(['const layerControl = L.control.layers(', ' {"test":test},', ' {"test":test}', ').addTo(map);'])
    assert (expected == actual)
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_get_colors():
    """The color iterator yields the seaborn-style palette in order."""
    expected = ['#4C72B0', '#DD8452', '#55A868', '#C44E52', '#8172B3', '#937860', '#DA8BC3', '#8C8C8C', '#CCB974', '#64B5CD']
    color_iter = c.get_colors()
    actual = [next(color_iter) for _ in expected]
    assert (expected == actual)
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_leaflet_crs_js():
    """CRS transformation scale is 1/2**max_native_zoom."""
    max_zoom = 2
    layer_dict = {
        'directory': 'test/{z}/{y}/{x}.png',
        'name': 'test',
        'min_zoom': 0,
        'max_zoom': max_zoom + 5,
        'max_native_zoom': max_zoom,
    }
    actual = c.leaflet_crs_js([layer_dict])
    expected = '\n'.join(['L.CRS.FitsMap = L.extend({}, L.CRS.Simple, {', f' transformation: new L.Transformation(1/{int((2 ** max_zoom))}, 0, -1/{int((2 ** max_zoom))}, 256)', '});'])
    assert (actual == expected)
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_extract_cd_matrix_as_string_with_cd():
    """A WCS carrying a CD matrix serializes to the identity-matrix string."""
    mock_wcs = helpers.MockWCS(include_cd=True)
    assert c.extract_cd_matrix_as_string(mock_wcs) == '[[1, 0], [0, 1]]'
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_extract_cd_matrix_as_string_without_cd():
    """A WCS without a CD matrix serializes to an all-zero matrix string."""
    mock_wcs = helpers.MockWCS(include_cd=False)
    assert c.extract_cd_matrix_as_string(mock_wcs) == '[[0.0, 0.0], [0.0, 0.0]]'
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_leaflet_map_js():
    """The map declaration references the first layer by name."""
    layer_dict = {
        'directory': 'test/{z}/{y}/{x}.png',
        'name': 'test',
        'min_zoom': 0,
        'max_zoom': 7,
        'max_native_zoom': 2,
    }
    actual_map_js = c.leaflet_map_js([layer_dict])
    expected_map_js = '\n'.join(['const map = L.map("map", {', ' crs: L.CRS.FitsMap,', ' minZoom: 0,', ' preferCanvas: true,', ' layers: [test]', '});'])
    assert (expected_map_js == actual_map_js)
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_build_conditional_css():
    """build_conditional_css emits preload <link> tags for bundled CSS."""
    helpers.setup()
    actual_css = c.build_conditional_css(helpers.TEST_PATH)
    # The expected list is one remote stylesheet plus the css/ files that
    # build_conditional_css copies into the output directory.
    expected_css = '\n'.join([' <link rel=\'preload\' href=\'https://unpkg.com/leaflet-search@2.9.8/dist/leaflet-search.src.css\' as=\'style\' onload=\'this.rel="stylesheet"\'/>', ' <link rel=\'preload\' href=\'css/MarkerCluster.Default.min.css\' as=\'style\' onload=\'this.rel="stylesheet"\'/>', ' <link rel=\'preload\' href=\'css/MarkerCluster.min.css\' as=\'style\' onload=\'this.rel="stylesheet"\'/>', ' <link rel=\'preload\' href=\'css/MarkerPopup.min.css\' as=\'style\' onload=\'this.rel="stylesheet"\'/>', ' <link rel=\'preload\' href=\'css/TileNearestNeighbor.min.css\' as=\'style\' onload=\'this.rel="stylesheet"\'/>'])
    helpers.tear_down()
    assert (expected_css == actual_css)
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_build_conditional_js():
    """build_conditional_js emits deferred <script> tags for bundled JS."""
    helpers.setup()
    # Second argument (True) appears to toggle the catalog-support scripts
    # (cbor/pbf/tiledMarkers) -- TODO confirm against cartographer.
    acutal_js = c.build_conditional_js(helpers.TEST_PATH, True)
    expected_js = '\n'.join([" <script defer src='https://cdnjs.cloudflare.com/ajax/libs/leaflet-search/3.0.2/leaflet-search.src.min.js'></script>", " <script defer src='js/customSearch.min.js'></script>", " <script defer src='js/tiledMarkers.min.js'></script>", " <script defer src='js/urlCoords.js'></script>", " <script defer src='js/index.js'></script>", " <script defer src='https://unpkg.com/cbor-web@8.1.0/dist/cbor.js'></script>", " <script defer src='https://unpkg.com/pbf@3.0.5/dist/pbf.js'></script>", " <script defer src='js/l.ellipse.min.js'></script>", " <script defer src='js/vector-tile.min.js'></script>"])
    helpers.tear_down()
    assert (expected_js == acutal_js)
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_build_index_js():
    """build_index_js assembles the full index.js from layer dicts.

    Covers image layer, catalog layer, CRS, map, layer control, search,
    and the event-handler boilerplate in one expected string.
    """
    img_layer_dict = [dict(name='img', directory='img/{z}/{y}/{x}.png', min_zoom=0, max_zoom=8, max_native_zoom=3)]
    cat_layer_dict = [dict(name='cat', directory='cat/{z}/{y}/{x}.pbf', min_zoom=0, max_zoom=8, max_native_zoom=2, color='blue', columns=['"a"', '"b"', '"c"'])]
    # np.inf rows-per-column renders as the JS literal "Infinity".
    rows_per_column = np.inf
    max_xy = (2048, 2048)
    expected_js = '\n'.join(['// Image layers ================================================================', ((('const img = L.tileLayer("img/{z}/{y}/{x}.png", { attribution:"' + c.LAYER_ATTRIBUTION) + '", ') + 'minZoom: 0, maxZoom: 8, maxNativeZoom: 3 });'), '', '// Marker layers ===============================================================', 'const cat = L.gridLayer.tiledMarkers({ tileURL:"cat/{z}/{y}/{x}.pbf", radius: 10, color: "blue", fillOpacity: 0.2, strokeOpacity: 1.0, rowsPerColumn: Infinity, catalogColumns: ["a","b","c"], minZoom: 0, maxZoom: 8, maxNativeZoom: 2 });', '', '// Basic map setup =============================================================', 'L.CRS.FitsMap = L.extend({}, L.CRS.Simple, {', ' transformation: new L.Transformation(1/8, 0, -1/8, 256)', '});', '', 'const map = L.map("map", {', ' crs: L.CRS.FitsMap,', ' minZoom: 0,', ' preferCanvas: true,', f" layers: [img]", '});', '', 'const layerControl = L.control.layers(', ' {"img":img},', ' {"cat":cat}', ').addTo(map);', '', '// Search ======================================================================', 'const catalogPaths = [', ' "catalog_assets/cat/",', '];', '', 'const searchControl = buildCustomSearch(catalogPaths, 2);', 'map.addControl(searchControl);', '', '// Map event setup =============================================================', 'img.on("load", () => {', ' document.getElementById("loading-screen").style.visibility = "hidden";', ' document.getElementById("map").style.visibility = "visible";', '});', '', 'map.on("moveend", updateLocationBar);', 'map.on("zoomend", updateLocationBar);', '', 'if (urlParam("zoom")==null) {', f' map.fitBounds(L.latLngBounds([[0, 0], [{max_xy[0]}, {max_xy[1]}]]));', '} else {', ' panFromUrl(map);', '}'])
    actual_js = c.build_index_js(img_layer_dict, cat_layer_dict, rows_per_column, max_xy)
    assert (expected_js == actual_js)
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_move_support_images():
    """move_support_images copies the favicon and loading logo into place."""
    helpers.setup()
    moved_images = c.move_support_images(helpers.TEST_PATH)
    helpers.tear_down()
    assert moved_images == ['favicon.ico', 'loading-logo.svg']
|
@pytest.mark.unit
@pytest.mark.cartographer
def test_build_html():
    """build_html stitches title, extra JS, and extra CSS into the page shell."""
    title = 'test_title'
    extra_js = 'test_extra_js'
    extra_css = 'test_extra_css'
    actual_html = c.build_html(title, extra_js, extra_css)
    # NOTE(review): the expected_html literal below appears to have been
    # wrapped across two physical lines inside a quoted string (likely a
    # source-extraction artifact of an extremely long line) -- reproduced
    # verbatim; verify against the original file before editing.
    expected_html = '\n'.join(['<!DOCTYPE html>', '<html lang="en">', '<head>', ' <title>{}</title>'.format(title), ' <meta charset="utf-8" />', ' <meta name="viewport" content="width=device-width, initial-scale=1.0">', ' <link rel="shortcut icon" type="image/x-icon" href="imgs/favicon.ico" />', ' <link rel="preload" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet/1.7.1/leaflet.min.css" integrity="sha512-1xoFisiGdy9nvho8EgXuXvnpR5GAMSjFwp40gSRE3NwdUdIMIKuPa7bqoUhLD0O/5tPNhteAsE5XyyMi5reQVA==" crossorigin="anonymous" referrerpolicy="no-referrer" as="style" onload="this.rel=\'stylesheet\'"/>', extra_css, ' <script defer src="https://cdnjs.cloudflare.com/ajax/libs/leaflet/1.7.1/leaflet.min.js" integrity="sha512-SeiQaaDh73yrb56sTW/RgVdi/mMqNeM2oBwubFHagc5BkixSpP1fvqF47mKzPGWYSSy4RwbBunrJBQ4Co8fRWA==" crossorigin="anonymous" referrerpolicy="no-referrer"></script>', extra_js, ' <style>', ' /* Map */', ' html,body{height:100%;padding:0;margin:0;font-family:Helvetica,Arial,sans-serif}#map{width:100%;height:100%;visibility:hidden}', ' /* Loading Page */', ' .overlay{background:#fff;height:100vh;width:100%;position:absolute}.brand{position:absolute;top:100px;left:50%;transform:translateX(-50%)}.brand img{width:100%;height:auto}.loadingtext{position:absolute;top:50%;left:50%;transform:translate(-50%,-50%);font-weight:700;font-size:xx-large}.loading{position:absolute;top:50%;left:50%;-webkit-transform:translate(-50%,-50%);-moz-transform:translate(-50%,-50%);-ms-transform:translate(-50%,-50%);-o-transform:translate(-50%,-50%);transform:translate(-50%,-50%);border-bottom:16px solid #0044aaff;border-top:16px solid #0044aaff;border-left:16px solid #80b3ffff;border-right:16px solid #80b3ffff;width:250px;height:250px;-webkit-border-radius:50%;-moz-border-radius:50%;border-radius:50%;-webkit-animation:rotate 1s ease-in-out infinite;-o-animation:rotate 1s ease-in-out infinite;animation:rotate 1s ease-in-out infinite}', ' @keyframes rotate{0%{-webkit-transform:translate(-50%,-50%) 
rotate(0deg);-moz-transform:translate(-50%,-50%) rotate(0deg);-ms-transform:translate(-50%,-50%) rotate(0deg);-o-transform:translate(-50%,-50%) rotate(0deg);transform:translate(-50%,-50%) rotate(0deg)}100%{-webkit-transform:translate(-50%,-50%) rotate(360deg);-moz-transform:translate(-50%,-50%) rotate(360deg);-ms-transform:translate(-50%,-50%) rotate(360deg);-o-transform:translate(-50%,-50%) rotate(360deg);transform:translate(-50%,-50%) rotate(360deg)}}', ' </style>', '</head>', '<body>', ' <div id="loading-screen" class="overlay">', ' <div class="brand"><img src="imgs/loading-logo.svg" /></div>', ' <div class="loading"></div>', ' <div class="loadingtext">Loading...</div>', ' </div>', ' <div id="map"></div>', '</body>', f'<!--Made with fitsmap v{helpers.get_version()}-->', '</html>\n'])
    assert (expected_html == actual_html)
|
@pytest.mark.integration
@pytest.mark.cartographer
def test_chart_no_wcs():
    """Integration test for cartographer.chart without WCS information.

    Builds a fake output tree (layer/marker zoom dirs, js/css dirs, a
    .columns sidecar), runs chart(), and diffs the generated index.html
    against the checked-in test_index.html fixture after substituting its
    VERSION placeholder.
    """
    helpers.setup(with_data=True)
    out_dir = helpers.TEST_PATH
    title = 'test'
    map_layer_names = 'test_layer'
    marker_file_names = 'test_marker'
    wcs = None
    columns = 'a,b,c'
    with open(os.path.join(out_dir, f'{marker_file_names}.columns'), 'w') as f:
        f.write(columns)
    # Fake zoom directories: 3 levels for the image layer, 2 for markers.
    list(map((lambda r: os.makedirs(os.path.join(out_dir, map_layer_names, str(r)))), range(3)))
    list(map((lambda r: os.makedirs(os.path.join(out_dir, marker_file_names, str(r)))), range(2)))
    os.mkdir(os.path.join(out_dir, 'js'))
    os.mkdir(os.path.join(out_dir, 'css'))
    c.chart(out_dir, title, [map_layer_names], [marker_file_names], wcs, float('inf'), [100, 100])
    version = helpers.get_version()
    # Substitute the VERSION placeholder in the fixture before diffing.
    raw_path = os.path.join(out_dir, 'test_index.html')
    with open(raw_path, 'r') as f:
        converted = list(map((lambda l: l.replace('VERSION', version)), f.readlines()))
    with open(raw_path, 'w') as f:
        f.writelines(converted)
    actual_html = os.path.join(out_dir, 'index.html')
    expected_html = os.path.join(out_dir, 'test_index.html')
    files_match = filecmp.cmp(expected_html, actual_html)
    helpers.tear_down()
    assert files_match
|
@pytest.mark.integration
@pytest.mark.cartographer
def test_chart_with_wcs():
    """Integration test for cartographer.chart with WCS from a FITS file.

    Same structure as test_chart_no_wcs, but passes a real WCS built from
    the bundled test_image.fits and diffs against the test_index_wcs.html
    fixture instead.
    """
    helpers.setup(with_data=True)
    out_dir = helpers.TEST_PATH
    title = 'test'
    map_layer_names = 'test_layer'
    marker_file_names = 'test_marker'
    wcs = WCS(os.path.join(out_dir, 'test_image.fits'))
    columns = 'a,b,c'
    with open(os.path.join(out_dir, f'{marker_file_names}.columns'), 'w') as f:
        f.write(columns)
    # Fake zoom directories: 3 levels for the image layer, 2 for markers.
    list(map((lambda r: os.makedirs(os.path.join(out_dir, map_layer_names, str(r)))), range(3)))
    list(map((lambda r: os.makedirs(os.path.join(out_dir, marker_file_names, str(r)))), range(2)))
    os.mkdir(os.path.join(out_dir, 'js'))
    os.mkdir(os.path.join(out_dir, 'css'))
    c.chart(out_dir, title, [map_layer_names], [marker_file_names], wcs, float('inf'), [100, 100])
    version = helpers.get_version()
    # Substitute the VERSION placeholder in the fixture before diffing.
    raw_path = os.path.join(out_dir, 'test_index_wcs.html')
    with open(raw_path, 'r') as f:
        converted = list(map((lambda l: l.replace('VERSION', version)), f.readlines()))
    with open(raw_path, 'w') as f:
        f.writelines(converted)
    actual_html = os.path.join(out_dir, 'index.html')
    expected_html = os.path.join(out_dir, 'test_index_wcs.html')
    files_match = filecmp.cmp(expected_html, actual_html)
    helpers.tear_down()
    assert files_match
|
@pytest.mark.unit
@pytest.mark.convert
def test_build_path():
    """build_path assembles out_dir/z/y/x.png."""
    z, y, x = 1, 2, 3
    out_dir = helpers.TEST_PATH
    actual_img_name = convert.build_path(z, y, x, out_dir)
    expected_img_name = os.path.join(out_dir, '1', '2', '3.png')
    assert expected_img_name == actual_img_name
|
@pytest.mark.unit
@pytest.mark.convert
def test_slice_idx_generator_z0():
    """Test convert.slice_idx_generator at zoom level 0.

    The shape (4305, 9791) previously broke iterative schemes that didn't
    properly separate tiles.
    """
    shape, zoom, tile_size = (4305, 9791), 0, 256
    actual_idxs = convert.slice_idx_generator(shape, zoom, tile_size)
    expected_idxs = helpers.get_slice_idx_generator_solution(zoom)
    as_set = lambda idxs: set(map(helpers.covert_idx_to_hashable_tuple, idxs))
    assert as_set(actual_idxs) == as_set(expected_idxs)
|
@pytest.mark.unit
@pytest.mark.convert
def test_slice_idx_generator_z1():
    """Test convert.slice_idx_generator at zoom level 1.

    The shape (4305, 9791) previously broke iterative schemes that didn't
    properly separate tiles.
    """
    shape, zoom, tile_size = (4305, 9791), 1, 256
    actual_idxs = convert.slice_idx_generator(shape, zoom, tile_size)
    expected_idxs = helpers.get_slice_idx_generator_solution(zoom)
    as_set = lambda idxs: set(map(helpers.covert_idx_to_hashable_tuple, idxs))
    assert as_set(actual_idxs) == as_set(expected_idxs)
|
@pytest.mark.unit
@pytest.mark.convert
def test_slice_idx_generator_z2():
    """Test convert.slice_idx_generator at zoom level 2.

    The shape (4305, 9791) previously broke iterative schemes that didn't
    properly separate tiles.
    """
    shape, zoom, tile_size = (4305, 9791), 2, 256
    actual_idxs = convert.slice_idx_generator(shape, zoom, tile_size)
    expected_idxs = helpers.get_slice_idx_generator_solution(zoom)
    as_set = lambda idxs: set(map(helpers.covert_idx_to_hashable_tuple, idxs))
    assert as_set(actual_idxs) == as_set(expected_idxs)
|
@pytest.mark.unit
@pytest.mark.convert
def test_slice_idx_generator_z3():
    """Test convert.slice_idx_generator at zoom level 3.

    The shape (4305, 9791) previously broke iterative schemes that didn't
    properly separate tiles.
    """
    shape, zoom, tile_size = (4305, 9791), 3, 256
    actual_idxs = convert.slice_idx_generator(shape, zoom, tile_size)
    expected_idxs = helpers.get_slice_idx_generator_solution(zoom)
    as_set = lambda idxs: set(map(helpers.covert_idx_to_hashable_tuple, idxs))
    assert as_set(actual_idxs) == as_set(expected_idxs)
|
@pytest.mark.unit
@pytest.mark.convert
def test_slice_idx_generator_z4():
    """Test convert.slice_idx_generator at zoom level 4.

    The shape (4305, 9791) previously broke iterative schemes that didn't
    properly separate tiles.
    """
    shape, zoom, tile_size = (4305, 9791), 4, 256
    actual_idxs = convert.slice_idx_generator(shape, zoom, tile_size)
    expected_idxs = helpers.get_slice_idx_generator_solution(zoom)
    as_set = lambda idxs: set(map(helpers.covert_idx_to_hashable_tuple, idxs))
    assert as_set(actual_idxs) == as_set(expected_idxs)
|
@pytest.mark.unit
@pytest.mark.convert
def test_slice_idx_generator_z5():
    """Test convert.slice_idx_generator at zoom level 5.

    The shape (4305, 9791) previously broke iterative schemes that didn't
    properly separate tiles.
    """
    shape, zoom, tile_size = (4305, 9791), 5, 256
    actual_idxs = convert.slice_idx_generator(shape, zoom, tile_size)
    expected_idxs = helpers.get_slice_idx_generator_solution(zoom)
    as_set = lambda idxs: set(map(helpers.covert_idx_to_hashable_tuple, idxs))
    assert as_set(actual_idxs) == as_set(expected_idxs)
|
@pytest.mark.unit
@pytest.mark.convert
def test_slice_idx_generator_raises():
    """slice_idx_generator raises StopIteration when the image is smaller
    than a single tile in either dimension."""
    with pytest.raises(StopIteration) as excinfo:
        convert.slice_idx_generator((250, 250), 5, 256)
    assert excinfo
|
@pytest.mark.unit
@pytest.mark.convert
def test_balance_array_2d():
    """balance_array pads a 2D array up to (32, 32), filling with NaNs."""
    in_shape = (10, 20)
    expected_shape = (32, 32)
    padded = convert.balance_array(np.zeros(in_shape))
    expected_num_nans = np.prod(expected_shape) - np.prod(in_shape)
    assert padded.shape == expected_shape
    assert np.isnan(padded[:]).sum() == expected_num_nans
|
@pytest.mark.unit
@pytest.mark.convert
def test_balance_array_3d():
    """balance_array pads a 3D array up to (32, 32, 3), filling with NaNs."""
    in_shape = (10, 20, 3)
    expected_shape = (32, 32, 3)
    padded = convert.balance_array(np.zeros(in_shape))
    expected_num_nans = np.prod(expected_shape) - np.prod(in_shape)
    assert padded.shape == expected_shape
    assert np.isnan(padded[:]).sum() == expected_num_nans
|
@pytest.mark.unit
@pytest.mark.convert
def test_get_array_fits():
    """get_array on a FITS file returns the image padded with NaNs."""
    helpers.setup()
    src = np.zeros((3, 3), dtype=np.float32)
    fits_path = os.path.join(helpers.TEST_PATH, 'test.fits')
    fits.PrimaryHDU(data=src).writeto(fits_path)
    # 3x3 source gets one row/column of NaN padding (to 4x4).
    expected_array = np.pad(src, [[0, 1], [0, 1]], mode='constant', constant_values=np.nan)
    actual_array = convert.get_array(fits_path)
    helpers.tear_down()
    np.testing.assert_equal(expected_array, actual_array[:])
|
@pytest.mark.unit
@pytest.mark.convert
def test_get_array_fits_fails():
    """get_array raises ValueError for a 1D FITS image."""
    helpers.setup()
    one_d = np.zeros(3, dtype=np.float32)
    fits_path = os.path.join(helpers.TEST_PATH, 'test.fits')
    fits.PrimaryHDU(data=one_d).writeto(fits_path)
    with pytest.raises(ValueError) as excinfo:
        convert.get_array(fits_path)
    helpers.tear_down()
    assert 'FitsMap only supports 2D' in str(excinfo.value)
|
@pytest.mark.unit
@pytest.mark.convert
def test_get_array_png():
    """get_array on a PNG returns the image flipped vertically."""
    helpers.setup()
    expected_array = camera()
    png_path = os.path.join(helpers.TEST_PATH, 'test.png')
    Image.fromarray(expected_array).save(png_path)
    actual_array = convert.get_array(png_path)
    helpers.tear_down()
    # The returned .array is upside-down relative to PIL's row order.
    np.testing.assert_equal(expected_array, np.flipud(actual_array.array))
|
@pytest.mark.unit
@pytest.mark.convert
def test_filter_on_extension_without_predicate():
    """filter_on_extension keeps only files whose extension is listed."""
    candidate_files = ['file_one.fits', 'file_two.fits', 'file_three.exclude']
    kept_files = convert.filter_on_extension(candidate_files, ['fits'])
    assert kept_files == candidate_files[:2]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.