| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
| xgboost | xgboost-master/demo/CLI/regression/mapfeat.py |
#!/usr/bin/env python3
# Convert the UCI "Computer Hardware" dataset (machine.data) into LibSVM format,
# using PRP as the label and one-hot encoding the vendor as extra indicator features.
fo = open('machine.txt', 'w')
cnt = 6
fmap = {}
for l in open('machine.data'):
arr = l.split(',')
fo.write(arr[8])
for i in range(0, 6):
fo.write(' %d:%s' % (i, arr[i + 2]))
if arr[0] not in fmap:
fmap[arr[0]] = cnt
cnt += 1
fo.write(' %d:1' % fmap[arr[0]])
fo.write('\n')
fo.close()
# create feature map for machine data
fo = open('featmap.txt', 'w')
# list from machine.names
names = [
'vendor', 'MYCT', 'MMIN', 'MMAX', 'CACH', 'CHMIN', 'CHMAX', 'PRP', 'ERP'
]
for i in range(0, 6):
fo.write('%d\t%s\tint\n' % (i, names[i + 1]))
for v, k in sorted(fmap.items(), key=lambda x: x[1]):
fo.write('%d\tvendor=%s\ti\n' % (k, v))
fo.close()
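For reference, the `featmap.txt` written above follows XGBoost's feature-map convention of one tab-separated `<id> <name> <type>` line per feature, where `int` marks integer features and `i` marks binary indicators. A sketch of the expected output, assuming the usual vendor order in `machine.data` (illustrative values):

```text
0	MYCT	int
1	MMIN	int
2	MMAX	int
3	CACH	int
4	CHMIN	int
5	CHMAX	int
6	vendor=adviser	i
7	vendor=amdahl	i
```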
| 726 | 20.382353 | 76 | py |
| xgboost | xgboost-master/demo/CLI/regression/mknfold.py |
#!/usr/bin/env python3
# Randomly split a LibSVM file into train/test files for fold k out of nfold.
import random
import sys
if len(sys.argv) < 3:
print('Usage: <filename> <k> [nfold = 5]')
exit(0)
random.seed(10)
k = int(sys.argv[2])
if len(sys.argv) > 3:
nfold = int(sys.argv[3])
else:
nfold = 5
fi = open(sys.argv[1], 'r')
ftr = open(sys.argv[1] + '.train', 'w')
fte = open(sys.argv[1] + '.test', 'w')
for l in fi:
if random.randint(1, nfold) == k:
fte.write(l)
else:
ftr.write(l)
fi.close()
ftr.close()
fte.close()
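Since a line is assigned to the test split exactly when `random.randint(1, nfold)` equals `k`, the split sizes are only correct in expectation: for a file of N lines,

```latex
P(\text{line} \in \text{test}) = \frac{1}{\text{nfold}},
\qquad
\mathbb{E}\big[\lvert \text{test} \rvert\big] = \frac{N}{\text{nfold}}
```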
| 487 | 15.266667 | 45 | py |
| xgboost | xgboost-master/demo/guide-python/predict_first_ntree.py |
"""
Demo for prediction using number of trees
=========================================
"""
import os
import numpy as np
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
train = os.path.join(CURRENT_DIR, "../data/agaricus.txt.train")
test = os.path.join(CURRENT_DIR, "../data/agaricus.txt.test")
def native_interface():
# load data and do training
dtrain = xgb.DMatrix(train + "?format=libsvm")
dtest = xgb.DMatrix(test + "?format=libsvm")
param = {"max_depth": 2, "eta": 1, "objective": "binary:logistic"}
watchlist = [(dtest, "eval"), (dtrain, "train")]
num_round = 3
bst = xgb.train(param, dtrain, num_round, watchlist)
print("start testing prediction from first n trees")
# predict using the first tree only
label = dtest.get_label()
ypred1 = bst.predict(dtest, iteration_range=(0, 1))
# by default, we predict using all the trees
ypred2 = bst.predict(dtest)
print("error of ypred1=%f" % (np.sum((ypred1 > 0.5) != label) / float(len(label))))
print("error of ypred2=%f" % (np.sum((ypred2 > 0.5) != label) / float(len(label))))
def sklearn_interface():
X_train, y_train = load_svmlight_file(train)
X_test, y_test = load_svmlight_file(test)
clf = xgb.XGBClassifier(n_estimators=3, max_depth=2, eta=1)
clf.fit(X_train, y_train, eval_set=[(X_test, y_test)])
assert clf.n_classes_ == 2
print("start testing prediction from first n trees")
# predict using the first tree only
ypred1 = clf.predict(X_test, iteration_range=(0, 1))
# by default, we predict using all the trees
ypred2 = clf.predict(X_test)
print(
"error of ypred1=%f" % (np.sum((ypred1 > 0.5) != y_test) / float(len(y_test)))
)
print(
"error of ypred2=%f" % (np.sum((ypred2 > 0.5) != y_test) / float(len(y_test)))
)
if __name__ == "__main__":
native_interface()
sklearn_interface()
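The demo works because boosted predictions are additive in the trees. With `iteration_range=(0, k)` and the `binary:logistic` objective, only the first k trees contribute to the margin before the logistic transform (b denotes the margin-space base score):

```latex
\hat{y}^{(k)}(x) = \sigma\Big(b + \sum_{i=0}^{k-1} f_i(x)\Big),
\qquad
\sigma(m) = \frac{1}{1 + e^{-m}}
```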
| 1,942 | 30.852459 | 87 | py |
| xgboost | xgboost-master/demo/guide-python/external_memory.py |
"""
Experimental support for external memory
========================================
This is similar to the one in `quantile_data_iterator.py`, but for external memory
instead of Quantile DMatrix. The feature is not ready for production use yet.
.. versionadded:: 1.5.0
See :doc:`the tutorial </tutorials/external_memory>` for more details.
"""
import os
import tempfile
from typing import Callable, List, Tuple
import numpy as np
from sklearn.datasets import make_regression
import xgboost
def make_batches(
n_samples_per_batch: int, n_features: int, n_batches: int, tmpdir: str,
) -> List[Tuple[str, str]]:
files: List[Tuple[str, str]] = []
rng = np.random.RandomState(1994)
for i in range(n_batches):
X, y = make_regression(n_samples_per_batch, n_features, random_state=rng)
X_path = os.path.join(tmpdir, "X-" + str(i) + ".npy")
y_path = os.path.join(tmpdir, "y-" + str(i) + ".npy")
np.save(X_path, X)
np.save(y_path, y)
files.append((X_path, y_path))
return files
class Iterator(xgboost.DataIter):
"""A custom iterator for loading files in batches."""
def __init__(self, file_paths: List[Tuple[str, str]]):
self._file_paths = file_paths
self._it = 0
# XGBoost will generate some cache files under current directory with the prefix
# "cache"
super().__init__(cache_prefix=os.path.join(".", "cache"))
def load_file(self) -> Tuple[np.ndarray, np.ndarray]:
X_path, y_path = self._file_paths[self._it]
X = np.load(X_path)
y = np.load(y_path)
assert X.shape[0] == y.shape[0]
return X, y
def next(self, input_data: Callable) -> int:
"""Advance the iterator by 1 step and pass the data to XGBoost. This function is
called by XGBoost during the construction of ``DMatrix``
"""
if self._it == len(self._file_paths):
# return 0 to let XGBoost know this is the end of iteration
return 0
# input_data is a function passed in by XGBoost that has a signature similar to
# the ``DMatrix`` constructor.
X, y = self.load_file()
input_data(data=X, label=y)
self._it += 1
return 1
def reset(self) -> None:
"""Reset the iterator to its beginning"""
self._it = 0
def main(tmpdir: str) -> xgboost.Booster:
# generate some random data for demo
files = make_batches(1024, 17, 31, tmpdir)
it = Iterator(files)
# For non-data arguments, specify them here once instead of passing them through the
# `next` method.
missing = np.NaN
Xy = xgboost.DMatrix(it, missing=missing, enable_categorical=False)
# ``approx`` is also supported, but less efficient due to sketching. GPU behaves
# differently than CPU tree methods as it uses a hybrid approach. See tutorial in
# doc for details.
booster = xgboost.train(
{"tree_method": "hist", "max_depth": 4},
Xy,
evals=[(Xy, "Train")],
num_boost_round=10,
)
return booster
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tmpdir:
main(tmpdir)
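As a rough illustration of the contract an ``Iterator`` must satisfy, the sketch below is a hypothetical driver loop (not XGBoost's actual internals) that exercises ``reset``/``next`` the same way the library does while constructing the ``DMatrix``:

```python
def drive(it: xgboost.DataIter) -> int:
    """Count batches by walking the reset/next protocol once."""
    it.reset()
    n = 0
    # ``next`` forwards one batch through the callback and returns 1,
    # or returns 0 once the data is exhausted.
    while it.next(lambda **kwargs: None) == 1:
        n += 1
    it.reset()  # XGBoost may traverse the data several times
    return n
```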
| 3,179 | 30.8 | 89 | py |
| xgboost | xgboost-master/demo/guide-python/quantile_data_iterator.py |
"""
Demo for using data iterator with Quantile DMatrix
==================================================
.. versionadded:: 1.2.0
The demo defines a customized iterator for passing batches of data into
:py:class:`xgboost.QuantileDMatrix` and uses this ``QuantileDMatrix`` for
training. The feature is primarily designed to reduce the required GPU
memory for training in a distributed environment.
After going through the demo, one might ask why we don't use a more native
Python iterator. That's because XGBoost requires a `reset` function, while
using `itertools.tee` might incur significant memory usage according to:
https://docs.python.org/3/library/itertools.html#itertools.tee.
"""
import cupy
import numpy
import xgboost
COLS = 64
ROWS_PER_BATCH = 1000  # data is split by rows
BATCHES = 32
class IterForDMatrixDemo(xgboost.core.DataIter):
"""A data iterator for XGBoost DMatrix.
`reset` and `next` are required for any data iterator; other functions here
are utilities for demonstration purposes.
"""
def __init__(self):
"""Generate some random data for demostration.
Actual data can be anything that is currently supported by XGBoost.
"""
self.rows = ROWS_PER_BATCH
self.cols = COLS
rng = cupy.random.RandomState(1994)
self._data = [rng.randn(self.rows, self.cols)] * BATCHES
self._labels = [rng.randn(self.rows)] * BATCHES
self._weights = [rng.uniform(size=self.rows)] * BATCHES
self.it = 0 # set iterator to 0
super().__init__()
def as_array(self):
return cupy.concatenate(self._data)
def as_array_labels(self):
return cupy.concatenate(self._labels)
def as_array_weights(self):
return cupy.concatenate(self._weights)
def data(self):
"""Utility function for obtaining current batch of data."""
return self._data[self.it]
def labels(self):
"""Utility function for obtaining current batch of label."""
return self._labels[self.it]
def weights(self):
return self._weights[self.it]
def reset(self):
"""Reset the iterator"""
self.it = 0
def next(self, input_data):
"""Yield next batch of data."""
if self.it == len(self._data):
# Return 0 when there's no more batch.
return 0
input_data(data=self.data(), label=self.labels(), weight=self.weights())
self.it += 1
return 1
def main():
rounds = 100
it = IterForDMatrixDemo()
# Use iterator, must be `QuantileDMatrix`.
# In this demo, the input batches are created using cupy, and the data processing
# (quantile sketching) will be performed on GPU. If data is loaded with CPU based
# data structures like numpy or pandas, then the processing step will be performed
# on CPU instead.
m_with_it = xgboost.QuantileDMatrix(it)
# Use regular DMatrix.
m = xgboost.DMatrix(
it.as_array(), it.as_array_labels(), weight=it.as_array_weights()
)
assert m_with_it.num_col() == m.num_col()
assert m_with_it.num_row() == m.num_row()
# Tree method must be `hist`.
reg_with_it = xgboost.train(
{"tree_method": "hist", "device": "cuda"}, m_with_it, num_boost_round=rounds
)
predict_with_it = reg_with_it.predict(m_with_it)
reg = xgboost.train(
{"tree_method": "hist", "device": "cuda"}, m, num_boost_round=rounds
)
predict = reg.predict(m)
numpy.testing.assert_allclose(predict_with_it, predict, rtol=1e-6)
if __name__ == "__main__":
main()
| 3,624 | 28.713115 | 86 | py |
| xgboost | xgboost-master/demo/guide-python/callbacks.py |
'''
Demo for using and defining callback functions
==============================================
.. versionadded:: 1.3.0
'''
import argparse
import os
import tempfile
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
import xgboost as xgb
class Plotting(xgb.callback.TrainingCallback):
'''Plot the evaluation results during training. Only for demonstration purposes, as
it's quite slow to draw.
'''
def __init__(self, rounds):
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.rounds = rounds
self.lines = {}
self.fig.show()
self.x = np.linspace(0, self.rounds, self.rounds)
plt.ion()
def _get_key(self, data, metric):
return f'{data}-{metric}'
def after_iteration(self, model, epoch, evals_log):
'''Update the plot.'''
if not self.lines:
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
self.lines[key], = self.ax.plot(self.x, expanded, label=key)
self.ax.legend()
else:
# https://pythonspot.com/matplotlib-update-plot/
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
self.lines[key].set_ydata(expanded)
self.fig.canvas.draw()
# False to indicate training should not stop.
return False
def custom_callback():
'''Demo for defining a custom callback function that plots evaluation result during
training.'''
X, y = load_breast_cancer(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
D_train = xgb.DMatrix(X_train, y_train)
D_valid = xgb.DMatrix(X_valid, y_valid)
num_boost_round = 100
plotting = Plotting(num_boost_round)
# Pass it to the `callbacks` parameter as a list.
xgb.train(
{
'objective': 'binary:logistic',
'eval_metric': ['error', 'rmse'],
'tree_method': 'hist',
"device": "cuda",
},
D_train,
evals=[(D_train, 'Train'), (D_valid, 'Valid')],
num_boost_round=num_boost_round,
callbacks=[plotting])
def check_point_callback():
# Only for demo; set a larger value (like 100) in practice, as checkpointing is
# quite slow.
rounds = 2
def check(as_pickle):
for i in range(0, 10, rounds):
if i == 0:
continue
if as_pickle:
path = os.path.join(tmpdir, 'model_' + str(i) + '.pkl')
else:
path = os.path.join(tmpdir, 'model_' + str(i) + '.json')
assert os.path.exists(path)
X, y = load_breast_cancer(return_X_y=True)
m = xgb.DMatrix(X, y)
# Check point to a temporary directory for demo
with tempfile.TemporaryDirectory() as tmpdir:
# Use callback class from xgboost.callback
# Feel free to subclass/customize it to suit your need.
check_point = xgb.callback.TrainingCheckPoint(directory=tmpdir,
iterations=rounds,
name='model')
xgb.train({'objective': 'binary:logistic'}, m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point])
check(False)
# This version of checkpoint saves everything including parameters and
# model. See: doc/tutorials/saving_model.rst
check_point = xgb.callback.TrainingCheckPoint(directory=tmpdir,
iterations=rounds,
as_pickle=True,
name='model')
xgb.train({'objective': 'binary:logistic'}, m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point])
check(True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--plot', default=1, type=int)
args = parser.parse_args()
check_point_callback()
if args.plot:
custom_callback()
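For comparison with the `Plotting` class above, the smallest useful callback only needs `after_iteration`. A minimal sketch (the `PrintLast` name is made up for illustration) that logs the latest value of every tracked metric:

```python
class PrintLast(xgb.callback.TrainingCallback):
    """Print the most recent value of each metric after every iteration."""
    def after_iteration(self, model, epoch, evals_log):
        for data, metrics in evals_log.items():
            for name, log in metrics.items():
                print(f"[{epoch}] {data}-{name}: {log[-1]:.5f}")
        return False  # returning True would stop training early
```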
| 4,631 | 33.311111 | 92 | py |
| xgboost | xgboost-master/demo/guide-python/generalized_linear_model.py |
"""
Demo for GLM
============
"""
import os
import xgboost as xgb
##
# this script demonstrates how to fit a generalized linear model in xgboost
# basically, we use a linear model instead of trees as the booster
##
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.train?format=libsvm")
)
dtest = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.test?format=libsvm")
)
# change booster to gblinear, so that we are fitting a linear model
# alpha is the L1 regularizer
# lambda is the L2 regularizer
# you can also set lambda_bias which is L2 regularizer on the bias term
param = {
"objective": "binary:logistic",
"booster": "gblinear",
"alpha": 0.0001,
"lambda": 1,
}
# normally, you do not need to set eta (step_size)
# XGBoost uses a parallel coordinate descent algorithm (shotgun);
# parallelization can affect convergence in certain cases
# setting eta to a smaller value, e.g. 0.5, can make the optimization more stable
# param['eta'] = 1
##
# the rest of settings are the same
##
watchlist = [(dtest, "eval"), (dtrain, "train")]
num_round = 4
bst = xgb.train(param, dtrain, num_round, watchlist)
preds = bst.predict(dtest)
labels = dtest.get_label()
print(
"error=%f"
% (
sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i])
/ float(len(preds))
)
)
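The same linear booster is available through the scikit-learn wrapper, where `alpha` and `lambda` are spelled `reg_alpha` and `reg_lambda`. A minimal sketch mirroring `param` above:

```python
clf = xgb.XGBClassifier(
    booster="gblinear",
    objective="binary:logistic",
    reg_alpha=0.0001,  # L1 regularizer, `alpha` above
    reg_lambda=1,      # L2 regularizer, `lambda` above
    n_estimators=num_round,
)
```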
| 1,425 | 26.423077 | 80 | py |
| xgboost | xgboost-master/demo/guide-python/learning_to_rank.py |
"""
Getting started with learning to rank
=====================================
.. versionadded:: 2.0.0
This is a demonstration of using XGBoost for learning to rank tasks using the
MSLR_10k_letor dataset. For more information about the dataset, please visit its
`description page <https://www.microsoft.com/en-us/research/project/mslr/>`_.
This is a two-part demo: the first part contains a basic example of using XGBoost
to train on relevance degree, and the second part simulates click data and enables
position-debiasing training.
For an overview of learning to rank in XGBoost, please see
:doc:`Learning to Rank </tutorials/learning_to_rank>`.
"""
from __future__ import annotations
import argparse
import json
import os
import pickle as pkl
import numpy as np
import pandas as pd
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
from xgboost.testing.data import RelDataCV, simulate_clicks, sort_ltr_samples
def load_mlsr_10k(data_path: str, cache_path: str) -> RelDataCV:
"""Load the MSLR10k dataset from data_path and cache a pickle object in cache_path.
Returns
-------
A list of tuples [(X, y, qid), ...].
"""
root_path = os.path.expanduser(args.data)
cacheroot_path = os.path.expanduser(args.cache)
cache_path = os.path.join(cacheroot_path, "MSLR_10K_LETOR.pkl")
# Use only the Fold1 for demo:
# Train, Valid, Test
# {S1,S2,S3}, S4, S5
fold = 1
if not os.path.exists(cache_path):
fold_path = os.path.join(root_path, f"Fold{fold}")
train_path = os.path.join(fold_path, "train.txt")
valid_path = os.path.join(fold_path, "vali.txt")
test_path = os.path.join(fold_path, "test.txt")
X_train, y_train, qid_train = load_svmlight_file(
train_path, query_id=True, dtype=np.float32
)
y_train = y_train.astype(np.int32)
qid_train = qid_train.astype(np.int32)
X_valid, y_valid, qid_valid = load_svmlight_file(
valid_path, query_id=True, dtype=np.float32
)
y_valid = y_valid.astype(np.int32)
qid_valid = qid_valid.astype(np.int32)
X_test, y_test, qid_test = load_svmlight_file(
test_path, query_id=True, dtype=np.float32
)
y_test = y_test.astype(np.int32)
qid_test = qid_test.astype(np.int32)
data = RelDataCV(
train=(X_train, y_train, qid_train),
test=(X_test, y_test, qid_test),
max_rel=4,
)
with open(cache_path, "wb") as fd:
pkl.dump(data, fd)
with open(cache_path, "rb") as fd:
data = pkl.load(fd)
return data
def ranking_demo(args: argparse.Namespace) -> None:
"""Demonstration for learning to rank with relevance degree."""
data = load_mlsr_10k(args.data, args.cache)
# Sort data according to query index
X_train, y_train, qid_train = data.train
sorted_idx = np.argsort(qid_train)
X_train = X_train[sorted_idx]
y_train = y_train[sorted_idx]
qid_train = qid_train[sorted_idx]
X_test, y_test, qid_test = data.test
sorted_idx = np.argsort(qid_test)
X_test = X_test[sorted_idx]
y_test = y_test[sorted_idx]
qid_test = qid_test[sorted_idx]
ranker = xgb.XGBRanker(
tree_method="hist",
device="cuda",
lambdarank_pair_method="topk",
lambdarank_num_pair_per_sample=13,
eval_metric=["ndcg@1", "ndcg@8"],
)
ranker.fit(
X_train,
y_train,
qid=qid_train,
eval_set=[(X_test, y_test)],
eval_qid=[qid_test],
verbose=True,
)
def click_data_demo(args: argparse.Namespace) -> None:
"""Demonstration for learning to rank with click data."""
data = load_mlsr_10k(args.data, args.cache)
train, test = simulate_clicks(data)
assert test is not None
assert train.X.shape[0] == train.click.size
assert test.X.shape[0] == test.click.size
assert test.score.dtype == np.float32
assert test.click.dtype == np.int32
X_train, clicks_train, y_train, qid_train = sort_ltr_samples(
train.X,
train.y,
train.qid,
train.click,
train.pos,
)
X_test, clicks_test, y_test, qid_test = sort_ltr_samples(
test.X,
test.y,
test.qid,
test.click,
test.pos,
)
class ShowPosition(xgb.callback.TrainingCallback):
def after_iteration(
self,
model: xgb.Booster,
epoch: int,
evals_log: xgb.callback.TrainingCallback.EvalsLog,
) -> bool:
config = json.loads(model.save_config())
ti_plus = np.array(config["learner"]["objective"]["ti+"])
tj_minus = np.array(config["learner"]["objective"]["tj-"])
df = pd.DataFrame({"ti+": ti_plus, "tj-": tj_minus})
print(df)
return False
ranker = xgb.XGBRanker(
n_estimators=512,
tree_method="hist",
device="cuda",
learning_rate=0.01,
reg_lambda=1.5,
subsample=0.8,
sampling_method="gradient_based",
# LTR specific parameters
objective="rank:ndcg",
# - Enable bias estimation
lambdarank_unbiased=True,
# - normalization (1 / (norm + 1))
lambdarank_bias_norm=1,
# - Focus on the top 12 documents
lambdarank_num_pair_per_sample=12,
lambdarank_pair_method="topk",
ndcg_exp_gain=True,
eval_metric=["ndcg@1", "ndcg@3", "ndcg@5", "ndcg@10"],
callbacks=[ShowPosition()],
)
ranker.fit(
X_train,
clicks_train,
qid=qid_train,
eval_set=[(X_test, y_test), (X_test, clicks_test)],
eval_qid=[qid_test, qid_test],
verbose=True,
)
ranker.predict(X_test)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Demonstration of learning to rank using XGBoost."
)
parser.add_argument(
"--data",
type=str,
help="Root directory of the MSLR-WEB10K data.",
required=True,
)
parser.add_argument(
"--cache",
type=str,
help="Directory for caching processed data.",
required=True,
)
args = parser.parse_args()
ranking_demo(args)
click_data_demo(args)
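The native interface accepts query IDs as well; a minimal sketch (rows must already be sorted by `qid`, as done in `ranking_demo`):

```python
Xy = xgb.DMatrix(X_train, y_train, qid=qid_train)
booster = xgb.train(
    {"objective": "rank:ndcg", "eval_metric": "ndcg@8"},
    Xy,
    num_boost_round=16,
)
```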
| 6,345 | 28.516279 | 87 | py |
| xgboost | xgboost-master/demo/guide-python/continuation.py |
"""
Demo for training continuation
==============================
"""
import os
import pickle
import tempfile
from sklearn.datasets import load_breast_cancer
import xgboost
def training_continuation(tmpdir: str, use_pickle: bool) -> None:
"""Basic training continuation."""
# Train 128 iterations in 1 session
X, y = load_breast_cancer(return_X_y=True)
clf = xgboost.XGBClassifier(n_estimators=128)
clf.fit(X, y, eval_set=[(X, y)], eval_metric="logloss")
print("Total boosted rounds:", clf.get_booster().num_boosted_rounds())
# Train 128 iterations in 2 sessions, with the first one running for 32 iterations
# and the second one for 96 iterations
clf = xgboost.XGBClassifier(n_estimators=32)
clf.fit(X, y, eval_set=[(X, y)], eval_metric="logloss")
assert clf.get_booster().num_boosted_rounds() == 32
# load back the model, this could be a checkpoint
if use_pickle:
path = os.path.join(tmpdir, "model-first-32.pkl")
with open(path, "wb") as fd:
pickle.dump(clf, fd)
with open(path, "rb") as fd:
loaded = pickle.load(fd)
else:
path = os.path.join(tmpdir, "model-first-32.json")
clf.save_model(path)
loaded = xgboost.XGBClassifier()
loaded.load_model(path)
clf = xgboost.XGBClassifier(n_estimators=128 - 32)
clf.fit(X, y, eval_set=[(X, y)], eval_metric="logloss", xgb_model=loaded)
print("Total boosted rounds:", clf.get_booster().num_boosted_rounds())
assert clf.get_booster().num_boosted_rounds() == 128
def training_continuation_early_stop(tmpdir: str, use_pickle: bool) -> None:
"""Training continuation with early stopping."""
early_stopping_rounds = 5
early_stop = xgboost.callback.EarlyStopping(
rounds=early_stopping_rounds, save_best=True
)
n_estimators = 512
X, y = load_breast_cancer(return_X_y=True)
clf = xgboost.XGBClassifier(n_estimators=n_estimators)
clf.fit(X, y, eval_set=[(X, y)], eval_metric="logloss", callbacks=[early_stop])
print("Total boosted rounds:", clf.get_booster().num_boosted_rounds())
best = clf.best_iteration
# Train 512 iterations in 2 sessions, with the first one running for 128 iterations
# and the second one running until early stopping kicks in.
clf = xgboost.XGBClassifier(n_estimators=128)
# Reinitialize the early stop callback
early_stop = xgboost.callback.EarlyStopping(
rounds=early_stopping_rounds, save_best=True
)
clf.fit(X, y, eval_set=[(X, y)], eval_metric="logloss", callbacks=[early_stop])
assert clf.get_booster().num_boosted_rounds() == 128
# load back the model, this could be a checkpoint
if use_pickle:
path = os.path.join(tmpdir, "model-first-128.pkl")
with open(path, "wb") as fd:
pickle.dump(clf, fd)
with open(path, "rb") as fd:
loaded = pickle.load(fd)
else:
path = os.path.join(tmpdir, "model-first-128.json")
clf.save_model(path)
loaded = xgboost.XGBClassifier()
loaded.load_model(path)
early_stop = xgboost.callback.EarlyStopping(
rounds=early_stopping_rounds, save_best=True
)
clf = xgboost.XGBClassifier(n_estimators=n_estimators - 128)
clf.fit(
X,
y,
eval_set=[(X, y)],
eval_metric="logloss",
callbacks=[early_stop],
xgb_model=loaded,
)
print("Total boosted rounds:", clf.get_booster().num_boosted_rounds())
assert clf.best_iteration == best
if __name__ == "__main__":
with tempfile.TemporaryDirectory() as tmpdir:
training_continuation_early_stop(tmpdir, False)
training_continuation_early_stop(tmpdir, True)
training_continuation(tmpdir, True)
training_continuation(tmpdir, False)
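Training continuation is not specific to the sklearn wrapper; the native interface takes the previous booster through `xgb_model` as well. A minimal, self-contained sketch:

```python
from sklearn.datasets import load_breast_cancer
import xgboost
X, y = load_breast_cancer(return_X_y=True)
dtrain = xgboost.DMatrix(X, label=y)
params = {"objective": "binary:logistic"}
bst = xgboost.train(params, dtrain, num_boost_round=32)
# Continue boosting on top of the existing model.
bst = xgboost.train(params, dtrain, num_boost_round=96, xgb_model=bst)
assert bst.num_boosted_rounds() == 32 + 96
```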
| 3,807 | 33.306306 | 88 | py |
| xgboost | xgboost-master/demo/guide-python/custom_rmsle.py |
"""
Demo for defining a custom regression objective and metric
==========================================================
Demo for defining a customized metric and objective. Note that for simplicity,
weights are not used in the following example. In this script, we implement the
Squared Log Error (SLE) objective and RMSLE metric as customized functions, then
compare them with the native implementations in XGBoost.
See :doc:`/tutorials/custom_metric_obj` for a step-by-step walkthrough, with other
details.
The `SLE` objective reduces the impact of outliers in the training dataset, hence we
also compare its performance with the standard squared error.
"""
import argparse
from time import time
from typing import Dict, List, Tuple
import matplotlib
import numpy as np
from matplotlib import pyplot as plt
import xgboost as xgb
# shape of generated data.
kRows = 4096
kCols = 16
kOutlier = 10000 # mean of generated outliers
kNumberOfOutliers = 64
kRatio = 0.7
kSeed = 1994
kBoostRound = 20
np.random.seed(seed=kSeed)
def generate_data() -> Tuple[xgb.DMatrix, xgb.DMatrix]:
'''Generate data containing outliers.'''
x = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
y += np.abs(np.min(y))
# Create outliers
for i in range(0, kNumberOfOutliers):
ind = np.random.randint(0, len(y)-1)
y[ind] += np.random.randint(0, kOutlier)
train_portion = int(kRows * kRatio)
# rmsle requires all label be greater than -1.
assert np.all(y > -1.0)
train_x: np.ndarray = x[: train_portion]
train_y: np.ndarray = y[: train_portion]
dtrain = xgb.DMatrix(train_x, label=train_y)
test_x = x[train_portion:]
test_y = y[train_portion:]
dtest = xgb.DMatrix(test_x, label=test_y)
return dtrain, dtest
def native_rmse(dtrain: xgb.DMatrix,
dtest: xgb.DMatrix) -> Dict[str, Dict[str, List[float]]]:
'''Train using native implementation of Root Mean Squared Loss.'''
print('Squared Error')
squared_error = {
'objective': 'reg:squarederror',
'eval_metric': 'rmse',
'tree_method': 'hist',
'seed': kSeed
}
start = time()
results: Dict[str, Dict[str, List[float]]] = {}
xgb.train(squared_error,
dtrain=dtrain,
num_boost_round=kBoostRound,
evals=[(dtrain, 'dtrain'), (dtest, 'dtest')],
evals_result=results)
print('Finished Squared Error in:', time() - start, '\n')
return results
def native_rmsle(dtrain: xgb.DMatrix,
dtest: xgb.DMatrix) -> Dict[str, Dict[str, List[float]]]:
'''Train using native implementation of Squared Log Error.'''
print('Squared Log Error')
results: Dict[str, Dict[str, List[float]]] = {}
squared_log_error = {
'objective': 'reg:squaredlogerror',
'eval_metric': 'rmsle',
'tree_method': 'hist',
'seed': kSeed
}
start = time()
xgb.train(squared_log_error,
dtrain=dtrain,
num_boost_round=kBoostRound,
evals=[(dtrain, 'dtrain'), (dtest, 'dtest')],
evals_result=results)
print('Finished Squared Log Error in:', time() - start)
return results
def py_rmsle(dtrain: xgb.DMatrix, dtest: xgb.DMatrix) -> Dict:
'''Train using Python implementation of Squared Log Error.'''
def gradient(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
'''Compute the gradient squared log error.'''
y = dtrain.get_label()
return (np.log1p(predt) - np.log1p(y)) / (predt + 1)
def hessian(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
'''Compute the hessian for squared log error.'''
y = dtrain.get_label()
return ((-np.log1p(predt) + np.log1p(y) + 1) /
np.power(predt + 1, 2))
def squared_log(predt: np.ndarray,
dtrain: xgb.DMatrix) -> Tuple[np.ndarray, np.ndarray]:
r'''Squared Log Error objective. A simplified version of RMSLE used as
the objective function (raw string so ``\f`` in ``\frac`` stays literal).
:math:`\frac{1}{2}[\log(pred + 1) - \log(label + 1)]^2`
'''
predt[predt < -1] = -1 + 1e-6
grad = gradient(predt, dtrain)
hess = hessian(predt, dtrain)
return grad, hess
def rmsle(predt: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, float]:
r'''Root mean squared log error metric.
:math:`\sqrt{\frac{1}{N}\sum_i[\log(pred_i + 1) - \log(label_i + 1)]^2}`
'''
y = dtrain.get_label()
predt[predt < -1] = -1 + 1e-6
elements = np.power(np.log1p(y) - np.log1p(predt), 2)
return 'PyRMSLE', float(np.sqrt(np.sum(elements) / len(y)))
results: Dict[str, Dict[str, List[float]]] = {}
xgb.train({'tree_method': 'hist', 'seed': kSeed,
'disable_default_eval_metric': 1},
dtrain=dtrain,
num_boost_round=kBoostRound,
obj=squared_log,
custom_metric=rmsle,
evals=[(dtrain, 'dtrain'), (dtest, 'dtest')],
evals_result=results)
return results
def plot_history(rmse_evals, rmsle_evals, py_rmsle_evals):
fig, axs = plt.subplots(3, 1)
ax0: matplotlib.axes.Axes = axs[0]
ax1: matplotlib.axes.Axes = axs[1]
ax2: matplotlib.axes.Axes = axs[2]
x = np.arange(0, kBoostRound, 1)
ax0.plot(x, rmse_evals['dtrain']['rmse'], label='train-RMSE')
ax0.plot(x, rmse_evals['dtest']['rmse'], label='test-RMSE')
ax0.legend()
ax1.plot(x, rmsle_evals['dtrain']['rmsle'], label='train-native-RMSLE')
ax1.plot(x, rmsle_evals['dtest']['rmsle'], label='test-native-RMSLE')
ax1.legend()
ax2.plot(x, py_rmsle_evals['dtrain']['PyRMSLE'], label='train-PyRMSLE')
ax2.plot(x, py_rmsle_evals['dtest']['PyRMSLE'], label='test-PyRMSLE')
ax2.legend()
def main(args):
dtrain, dtest = generate_data()
rmse_evals = native_rmse(dtrain, dtest)
rmsle_evals = native_rmsle(dtrain, dtest)
py_rmsle_evals = py_rmsle(dtrain, dtest)
if args.plot != 0:
plot_history(rmse_evals, rmsle_evals, py_rmsle_evals)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Arguments for custom RMSLE objective function demo.')
parser.add_argument(
'--plot',
type=int,
default=1,
help='Set to 0 to disable plotting the evaluation history.')
args = parser.parse_args()
main(args)
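The `gradient` and `hessian` helpers above follow directly from differentiating the SLE objective with respect to the prediction p (writing y for the label):

```latex
L(p, y) = \frac{1}{2}\big[\log(p + 1) - \log(y + 1)\big]^2
\frac{\partial L}{\partial p} = \frac{\log(p + 1) - \log(y + 1)}{p + 1}
\frac{\partial^2 L}{\partial p^2} = \frac{1 - \log(p + 1) + \log(y + 1)}{(p + 1)^2}
```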
| 6,450 | 31.094527 | 86 | py |
| xgboost | xgboost-master/demo/guide-python/cat_in_the_dat.py |
"""
Train XGBoost with cat_in_the_dat dataset
=========================================
A simple demo for categorical data support using dataset from Kaggle categorical data
tutorial.
The excellent tutorial is at:
https://www.kaggle.com/shahules/an-overview-of-encoding-techniques
And the data can be found at:
https://www.kaggle.com/shahules/an-overview-of-encoding-techniques/data
Also, see the tutorial for using XGBoost with categorical data:
:doc:`/tutorials/categorical`.
.. versionadded:: 1.6.0
"""
from __future__ import annotations
import os
from tempfile import TemporaryDirectory
from time import time
import pandas as pd
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
import xgboost as xgb
def load_cat_in_the_dat() -> tuple[pd.DataFrame, pd.Series]:
"""Assuming you have already downloaded the data into `input` directory."""
df_train = pd.read_csv("./input/cat-in-the-dat/train.csv")
print(
"train data set has got {} rows and {} columns".format(
df_train.shape[0], df_train.shape[1]
)
)
X = df_train.drop(["target"], axis=1)
y = df_train["target"]
for i in range(0, 5):
X["bin_" + str(i)] = X["bin_" + str(i)].astype("category")
for i in range(0, 5):
X["nom_" + str(i)] = X["nom_" + str(i)].astype("category")
for i in range(5, 10):
X["nom_" + str(i)] = X["nom_" + str(i)].apply(int, base=16)
for i in range(0, 6):
X["ord_" + str(i)] = X["ord_" + str(i)].astype("category")
print(
"train data set has got {} rows and {} columns".format(X.shape[0], X.shape[1])
)
return X, y
params = {
"tree_method": "hist",
"device": "cuda",
"n_estimators": 32,
"colsample_bylevel": 0.7,
}
def categorical_model(X: pd.DataFrame, y: pd.Series, output_dir: str) -> None:
"""Train using builtin categorical data support from XGBoost"""
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=1994, test_size=0.2
)
# Specify `enable_categorical` to True.
clf = xgb.XGBClassifier(
**params,
eval_metric="auc",
enable_categorical=True,
max_cat_to_onehot=1, # We use optimal partitioning exclusively
)
clf.fit(X_train, y_train, eval_set=[(X_test, y_test), (X_train, y_train)])
clf.save_model(os.path.join(output_dir, "categorical.json"))
y_score = clf.predict_proba(X_test)[:, 1] # proba of positive samples
auc = roc_auc_score(y_test, y_score)
print("AUC of using builtin categorical data support:", auc)
def onehot_encoding_model(X: pd.DataFrame, y: pd.Series, output_dir: str) -> None:
"""Train using one-hot encoded data."""
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42, test_size=0.2
)
# Specify `enable_categorical` to False as we are using encoded data.
clf = xgb.XGBClassifier(**params, eval_metric="auc", enable_categorical=False)
clf.fit(
X_train,
y_train,
eval_set=[(X_test, y_test), (X_train, y_train)],
)
clf.save_model(os.path.join(output_dir, "one-hot.json"))
y_score = clf.predict_proba(X_test)[:, 1] # proba of positive samples
auc = roc_auc_score(y_test, y_score)
print("AUC of using onehot encoding:", auc)
if __name__ == "__main__":
X, y = load_cat_in_the_dat()
with TemporaryDirectory() as tmpdir:
start = time()
categorical_model(X, y, tmpdir)
end = time()
print("Duration:categorical", end - start)
X = pd.get_dummies(X)
start = time()
onehot_encoding_model(X, y, tmpdir)
end = time()
print("Duration:onehot", end - start)
| 3,744 | 28.722222 | 86 | py |
| xgboost | xgboost-master/demo/guide-python/sklearn_examples.py |
'''
Collection of examples for using sklearn interface
==================================================
For an introduction to XGBoost's scikit-learn estimator interface, see
:doc:`/python/sklearn_estimator`.
Created on 1 Apr 2015
@author: Jamie Hall
'''
import pickle
import numpy as np
from sklearn.datasets import fetch_california_housing, load_digits, load_iris
from sklearn.metrics import confusion_matrix, mean_squared_error
from sklearn.model_selection import GridSearchCV, KFold, train_test_split
import xgboost as xgb
rng = np.random.RandomState(31337)
print("Zeros and Ones from the Digits dataset: binary classification")
digits = load_digits(n_class=2)
y = digits['target']
X = digits['data']
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBClassifier(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(confusion_matrix(actuals, predictions))
print("Iris: multiclass classification")
iris = load_iris()
y = iris['target']
X = iris['data']
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBClassifier(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(confusion_matrix(actuals, predictions))
print("California Housing: regression")
X, y = fetch_california_housing(return_X_y=True)
kf = KFold(n_splits=2, shuffle=True, random_state=rng)
for train_index, test_index in kf.split(X):
xgb_model = xgb.XGBRegressor(n_jobs=1).fit(X[train_index], y[train_index])
predictions = xgb_model.predict(X[test_index])
actuals = y[test_index]
print(mean_squared_error(actuals, predictions))
print("Parameter optimization")
xgb_model = xgb.XGBRegressor(n_jobs=1)
clf = GridSearchCV(xgb_model,
{'max_depth': [2, 4],
'n_estimators': [50, 100]}, verbose=1, n_jobs=1, cv=3)
clf.fit(X, y)
print(clf.best_score_)
print(clf.best_params_)
# The sklearn API models are picklable
print("Pickling sklearn API models")
# must open in binary format to pickle
pickle.dump(clf, open("best_calif.pkl", "wb"))
clf2 = pickle.load(open("best_calif.pkl", "rb"))
print(np.allclose(clf.predict(X), clf2.predict(X)))
# Early-stopping
X = digits['data']
y = digits['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = xgb.XGBClassifier(n_jobs=1)
clf.fit(X_train, y_train, early_stopping_rounds=10, eval_metric="auc",
eval_set=[(X_test, y_test)])
| 2,644 | 32.910256 | 79 | py |
| xgboost | xgboost-master/demo/guide-python/update_process.py |
"""
Demo for using `process_type` with `prune` and `refresh`
========================================================
Modifying existing trees is not a well-established use of XGBoost, so feel free to
experiment.
"""
import numpy as np
from sklearn.datasets import fetch_california_housing
import xgboost as xgb
def main():
n_rounds = 32
X, y = fetch_california_housing(return_X_y=True)
# Train a model first
X_train = X[: X.shape[0] // 2]
y_train = y[: y.shape[0] // 2]
Xy = xgb.DMatrix(X_train, y_train)
evals_result: xgb.callback.EvaluationMonitor.EvalsLog = {}
booster = xgb.train(
{"tree_method": "hist", "max_depth": 6, "device": "cuda"},
Xy,
num_boost_round=n_rounds,
evals=[(Xy, "Train")],
evals_result=evals_result,
)
SHAP = booster.predict(Xy, pred_contribs=True)
# Refresh the leaf value and tree statistic
X_refresh = X[X.shape[0] // 2 :]
y_refresh = y[y.shape[0] // 2 :]
Xy_refresh = xgb.DMatrix(X_refresh, y_refresh)
# The model will adapt to the other half of the data by changing the leaf values (no
# change in the split conditions) when refresh_leaf is set to True.
refresh_result: xgb.callback.EvaluationMonitor.EvalsLog = {}
refreshed = xgb.train(
{"process_type": "update", "updater": "refresh", "refresh_leaf": True},
Xy_refresh,
num_boost_round=n_rounds,
xgb_model=booster,
evals=[(Xy, "Original"), (Xy_refresh, "Train")],
evals_result=refresh_result,
)
# Refresh the model without changing the leaf values; tree statistics including
# cover and weight are still refreshed.
refresh_result: xgb.callback.EvaluationMonitor.EvalsLog = {}
refreshed = xgb.train(
{"process_type": "update", "updater": "refresh", "refresh_leaf": False},
Xy_refresh,
num_boost_round=n_rounds,
xgb_model=booster,
evals=[(Xy, "Original"), (Xy_refresh, "Train")],
evals_result=refresh_result,
)
# Without refreshing the leaf values, the resulting trees should be the same as the
# original model except for the accumulated statistics. The rtol is for floating
# point error in prediction.
np.testing.assert_allclose(
refresh_result["Original"]["rmse"], evals_result["Train"]["rmse"], rtol=1e-5
)
# But the SHAP values change, as the cover in tree nodes has changed.
refreshed_SHAP = refreshed.predict(Xy, pred_contribs=True)
assert not np.allclose(SHAP, refreshed_SHAP, rtol=1e-3)
# Prune the trees with smaller max_depth
X_update = X_train
y_update = y_train
Xy_update = xgb.DMatrix(X_update, y_update)
prune_result: xgb.callback.EvaluationMonitor.EvalsLog = {}
pruned = xgb.train(
{"process_type": "update", "updater": "prune", "max_depth": 2},
Xy_update,
num_boost_round=n_rounds,
xgb_model=booster,
evals=[(Xy, "Original"), (Xy_update, "Train")],
evals_result=prune_result,
)
# Have a smaller model, but similar accuracy.
np.testing.assert_allclose(
np.array(prune_result["Original"]["rmse"]),
np.array(prune_result["Train"]["rmse"]),
atol=1e-5,
)
if __name__ == "__main__":
main()
| 3,247 | 32.833333 | 89 | py |
| xgboost | xgboost-master/demo/guide-python/feature_weights.py |
"""
Demo for using feature weight to change column sampling
=======================================================
.. versionadded:: 1.3.0
"""
import argparse
import numpy as np
from matplotlib import pyplot as plt
import xgboost
def main(args: argparse.Namespace) -> None:
rng = np.random.RandomState(1994)
kRows = 4196
kCols = 10
X = rng.randn(kRows, kCols)
y = rng.randn(kRows)
fw = np.ones(shape=(kCols,))
for i in range(kCols):
fw[i] *= float(i)
dtrain = xgboost.DMatrix(X, y)
dtrain.set_info(feature_weights=fw)
# Perform column sampling for each node split evaluation, the sampling process is
# weighted by feature weights.
bst = xgboost.train(
{"tree_method": "hist", "colsample_bynode": 0.2},
dtrain,
num_boost_round=10,
evals=[(dtrain, "d")],
)
feature_map = bst.get_fscore()
# feature zero has 0 weight
assert feature_map.get("f0", None) is None
assert max(feature_map.values()) == feature_map.get("f9")
if args.plot:
xgboost.plot_importance(bst)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--plot",
type=int,
default=1,
help="Set to 0 to disable plotting the evaluation history.",
)
args = parser.parse_args()
main(args)
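Under weighted column sampling, the probability of a feature entering a candidate set is its normalized weight, which is why `f0` (weight 0) never shows up in the importance map while `f9` dominates:

```latex
p_i = \frac{w_i}{\sum_j w_j}, \qquad p_0 = 0
```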
| 1,383 | 22.066667 | 85 | py |
| xgboost | xgboost-master/demo/guide-python/gamma_regression.py |
"""
Demo for gamma regression
=========================
"""
import numpy as np
import xgboost as xgb
# this script demonstrates how to fit a gamma regression model (with log link
# function) in xgboost. Before running the demo you need to generate the autoclaims
# dataset by running gen_autoclaims.R located in xgboost/demo/data.
data = np.genfromtxt('../data/autoclaims.csv', delimiter=',')
dtrain = xgb.DMatrix(data[0:4741, 0:34], data[0:4741, 34])
dtest = xgb.DMatrix(data[4741:6773, 0:34], data[4741:6773, 34])
# for gamma regression, we need to set the objective to 'reg:gamma'; it is also
# suggested to set base_score to a value between 1 and 5 if the number of iterations
# is small
param = {'objective':'reg:gamma', 'booster':'gbtree', 'base_score':3}
# the rest of settings are the same
watchlist = [(dtest, 'eval'), (dtrain, 'train')]
num_round = 30
# training and evaluation
bst = xgb.train(param, dtrain, num_round, watchlist)
preds = bst.predict(dtest)
labels = dtest.get_label()
print('test deviance=%f' % (2 * np.sum((labels - preds) / preds - np.log(labels) + np.log(preds))))
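The printed figure is the gamma deviance. Writing \mu_i for the predictions and y_i for the labels, the final `print` computes:

```latex
D = 2 \sum_i \left[ \frac{y_i - \mu_i}{\mu_i} - \log\frac{y_i}{\mu_i} \right]
```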
| 1,098 | 35.633333 | 99 | py |
| xgboost | xgboost-master/demo/guide-python/categorical.py |
"""
Getting started with categorical data
=====================================
Experimental support for categorical data.
Previously, users needed to run an encoder themselves before passing the data into
XGBoost, which created a sparse matrix and potentially increased memory usage. This
demo showcases the experimental categorical data support; more advanced features are
planned.
Also, see :doc:`the tutorial </tutorials/categorical>` for using XGBoost with
categorical data.
.. versionadded:: 1.5.0
"""
from typing import Tuple
import numpy as np
import pandas as pd
import xgboost as xgb
def make_categorical(
n_samples: int, n_features: int, n_categories: int, onehot: bool
) -> Tuple[pd.DataFrame, pd.Series]:
"""Make some random data for demo."""
rng = np.random.RandomState(1994)
pd_dict = {}
for i in range(n_features + 1):
c = rng.randint(low=0, high=n_categories, size=n_samples)
pd_dict[str(i)] = pd.Series(c, dtype=np.int64)
df = pd.DataFrame(pd_dict)
label = df.iloc[:, 0]
df = df.iloc[:, 1:]
for i in range(0, n_features):
label += df.iloc[:, i]
label += 1
df = df.astype("category")
categories = np.arange(0, n_categories)
for col in df.columns:
df[col] = df[col].cat.set_categories(categories)
if onehot:
return pd.get_dummies(df), label
return df, label
def main() -> None:
# Use builtin categorical data support
# For scikit-learn interface, the input data must be pandas DataFrame or cudf
# DataFrame with categorical features
X, y = make_categorical(100, 10, 4, False)
# Specify `enable_categorical` to True, also we use onehot encoding based split
# here for demonstration. For details see the document of `max_cat_to_onehot`.
reg = xgb.XGBRegressor(
tree_method="hist", enable_categorical=True, max_cat_to_onehot=5, device="cuda"
)
reg.fit(X, y, eval_set=[(X, y)])
# Pass in already encoded data
X_enc, y_enc = make_categorical(100, 10, 4, True)
reg_enc = xgb.XGBRegressor(tree_method="hist", device="cuda")
reg_enc.fit(X_enc, y_enc, eval_set=[(X_enc, y_enc)])
reg_results = np.array(reg.evals_result()["validation_0"]["rmse"])
reg_enc_results = np.array(reg_enc.evals_result()["validation_0"]["rmse"])
# Check that they have same results
np.testing.assert_allclose(reg_results, reg_enc_results)
# Convert to DMatrix for SHAP value
booster: xgb.Booster = reg.get_booster()
m = xgb.DMatrix(X, enable_categorical=True) # specify categorical data support.
SHAP = booster.predict(m, pred_contribs=True)
margin = booster.predict(m, output_margin=True)
np.testing.assert_allclose(
np.sum(SHAP, axis=len(SHAP.shape) - 1), margin, rtol=1e-3
)
if __name__ == "__main__":
main()
| 2,833 | 31.204545 | 88 | py |
| xgboost | xgboost-master/demo/guide-python/boost_from_prediction.py |
"""
Demo for boosting from prediction
=================================
"""
import os
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.train?format=libsvm")
)
dtest = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.test?format=libsvm")
)
watchlist = [(dtest, "eval"), (dtrain, "train")]
###
# advanced: start from an initial base prediction
#
print("start running example to start from an initial prediction")
# specify parameters via map, definitions are the same as the C++ version
param = {"max_depth": 2, "eta": 1, "objective": "binary:logistic"}
# train xgboost for 1 round
bst = xgb.train(param, dtrain, 1, watchlist)
# Note: we need the margin value instead of transformed prediction in
# set_base_margin
# do predict with output_margin=True, will always give you margin values
# before logistic transformation
ptrain = bst.predict(dtrain, output_margin=True)
ptest = bst.predict(dtest, output_margin=True)
dtrain.set_base_margin(ptrain)
dtest.set_base_margin(ptest)
print("this is result of running from initial prediction")
bst = xgb.train(param, dtrain, 1, watchlist)
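For `binary:logistic`, the margin and the transformed prediction are linked by the logistic function, so the margins stored via `set_base_margin` can be sanity-checked against regular predictions. A quick sketch using `bst` and `dtest` from above:

```python
import numpy as np
prob = bst.predict(dtest)                        # transformed prediction
margin = bst.predict(dtest, output_margin=True)  # raw margin
np.testing.assert_allclose(prob, 1.0 / (1.0 + np.exp(-margin)), rtol=1e-6)
```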
| 1,174 | 31.638889 | 73 | py |
| xgboost | xgboost-master/demo/guide-python/evals_result.py |
"""
This script demonstrates how to access the eval metrics
======================================================
"""
import os
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.train?format=libsvm")
)
dtest = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.test?format=libsvm")
)
param = [
("max_depth", 2),
("objective", "binary:logistic"),
("eval_metric", "logloss"),
("eval_metric", "error"),
]
num_round = 2
watchlist = [(dtest, "eval"), (dtrain, "train")]
evals_result = {}
bst = xgb.train(param, dtrain, num_round, watchlist, evals_result=evals_result)
print("Access logloss metric directly from evals_result:")
print(evals_result["eval"]["logloss"])
print("")
print("Access metrics through a loop:")
for e_name, e_mtrs in evals_result.items():
print("- {}".format(e_name))
for e_mtr_name, e_mtr_vals in e_mtrs.items():
print(" - {}".format(e_mtr_name))
print(" - {}".format(e_mtr_vals))
print("")
print("Access complete dictionary:")
print(evals_result)
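For reference, the populated `evals_result` is a nested dict keyed first by watchlist names and then by metric names; its shape (with illustrative values) is:

```python
# {
#     "eval":  {"logloss": [0.48, 0.24], "error": [0.04, 0.02]},
#     "train": {"logloss": [0.46, 0.23], "error": [0.04, 0.02]},
# }
```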
| 1,120 | 24.477273 | 79 | py |
| xgboost | xgboost-master/demo/guide-python/sklearn_parallel.py |
"""
Demo for using xgboost with sklearn
===================================
"""
import multiprocessing
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import GridSearchCV
import xgboost as xgb
if __name__ == "__main__":
print("Parallel Parameter optimization")
X, y = fetch_california_housing(return_X_y=True)
xgb_model = xgb.XGBRegressor(
n_jobs=multiprocessing.cpu_count() // 2, tree_method="hist"
)
clf = GridSearchCV(
xgb_model,
{"max_depth": [2, 4, 6], "n_estimators": [50, 100, 200]},
verbose=1,
n_jobs=2,
)
clf.fit(X, y)
print(clf.best_score_)
print(clf.best_params_)
| 689 | 24.555556 | 67 | py |
| xgboost | xgboost-master/demo/guide-python/predict_leaf_indices.py |
"""
Demo for obtaining leaf index
=============================
"""
import os
import xgboost as xgb
# load data and do training
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.train?format=libsvm")
)
dtest = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.test?format=libsvm")
)
param = {"max_depth": 2, "eta": 1, "objective": "binary:logistic"}
watchlist = [(dtest, "eval"), (dtrain, "train")]
num_round = 3
bst = xgb.train(param, dtrain, num_round, watchlist)
print("start testing predict the leaf indices")
# predict using first 2 tree
leafindex = bst.predict(
dtest, iteration_range=(0, 2), pred_leaf=True, strict_shape=True
)
print(leafindex.shape)
print(leafindex)
# predict all trees
leafindex = bst.predict(dtest, pred_leaf=True)
print(leafindex.shape)
| 850 | 25.59375 | 73 | py |
| xgboost | xgboost-master/demo/guide-python/spark_estimator_examples.py |
"""
Collection of examples for using xgboost.spark estimator interface
==================================================================
@author: Weichen Xu
"""
import sklearn.datasets
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, RegressionEvaluator
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
from pyspark.sql.functions import rand
from sklearn.model_selection import train_test_split
from xgboost.spark import SparkXGBClassifier, SparkXGBRegressor
spark = SparkSession.builder.master("local[*]").getOrCreate()
def create_spark_df(X, y):
return spark.createDataFrame(
spark.sparkContext.parallelize(
[(Vectors.dense(features), float(label)) for features, label in zip(X, y)]
),
["features", "label"],
)
# load diabetes dataset (regression dataset)
diabetes_X, diabetes_y = sklearn.datasets.load_diabetes(return_X_y=True)
diabetes_X_train, diabetes_X_test, diabetes_y_train, diabetes_y_test = train_test_split(
diabetes_X, diabetes_y, test_size=0.3, shuffle=True
)
diabetes_train_spark_df = create_spark_df(diabetes_X_train, diabetes_y_train)
diabetes_test_spark_df = create_spark_df(diabetes_X_test, diabetes_y_test)
# train xgboost regressor model
xgb_regressor = SparkXGBRegressor(max_depth=5)
xgb_regressor_model = xgb_regressor.fit(diabetes_train_spark_df)
transformed_diabetes_test_spark_df = xgb_regressor_model.transform(
diabetes_test_spark_df
)
regressor_evaluator = RegressionEvaluator(metricName="rmse")
print(
f"regressor rmse={regressor_evaluator.evaluate(transformed_diabetes_test_spark_df)}"
)
diabetes_train_spark_df2 = diabetes_train_spark_df.withColumn(
"validationIndicatorCol", rand(1) > 0.7
)
# train xgboost regressor model with validation dataset
xgb_regressor2 = SparkXGBRegressor(
max_depth=5, validation_indicator_col="validationIndicatorCol"
)
xgb_regressor_model2 = xgb_regressor2.fit(diabetes_train_spark_df2)
transformed_diabetes_test_spark_df2 = xgb_regressor_model2.transform(
diabetes_test_spark_df
)
print(
f"regressor2 rmse={regressor_evaluator.evaluate(transformed_diabetes_test_spark_df2)}"
)
# load iris dataset (classification dataset)
iris_X, iris_y = sklearn.datasets.load_iris(return_X_y=True)
iris_X_train, iris_X_test, iris_y_train, iris_y_test = train_test_split(
iris_X, iris_y, test_size=0.3, shuffle=True
)
iris_train_spark_df = create_spark_df(iris_X_train, iris_y_train)
iris_test_spark_df = create_spark_df(iris_X_test, iris_y_test)
# train xgboost classifier model
xgb_classifier = SparkXGBClassifier(max_depth=5)
xgb_classifier_model = xgb_classifier.fit(iris_train_spark_df)
transformed_iris_test_spark_df = xgb_classifier_model.transform(iris_test_spark_df)
classifier_evaluator = MulticlassClassificationEvaluator(metricName="f1")
print(f"classifier f1={classifier_evaluator.evaluate(transformed_iris_test_spark_df)}")
iris_train_spark_df2 = iris_train_spark_df.withColumn(
"validationIndicatorCol", rand(1) > 0.7
)
# train xgboost classifier model with validation dataset
xgb_classifier2 = SparkXGBClassifier(
max_depth=5, validation_indicator_col="validationIndicatorCol"
)
xgb_classifier_model2 = xgb_classifier2.fit(iris_train_spark_df2)
transformed_iris_test_spark_df2 = xgb_classifier_model2.transform(iris_test_spark_df)
print(
f"classifier2 f1={classifier_evaluator.evaluate(transformed_iris_test_spark_df2)}"
)
spark.stop()
| 3,454 | 34.255102 | 90 | py |
| xgboost | xgboost-master/demo/guide-python/individual_trees.py |
"""
Demo for prediction using individual trees and model slices
===========================================================
"""
import os
import numpy as np
from scipy.special import logit
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
train = os.path.join(CURRENT_DIR, "../data/agaricus.txt.train")
test = os.path.join(CURRENT_DIR, "../data/agaricus.txt.test")
def individual_tree() -> None:
"""Get prediction from each individual tree and combine them together."""
X_train, y_train = load_svmlight_file(train)
X_test, y_test = load_svmlight_file(test)
Xy_train = xgb.QuantileDMatrix(X_train, y_train)
n_rounds = 4
# Specify the base score, otherwise xgboost will estimate one from the training
# data.
base_score = 0.5
params = {
"max_depth": 2,
"eta": 1,
"objective": "reg:logistic",
"tree_method": "hist",
"base_score": base_score,
}
booster = xgb.train(params, Xy_train, num_boost_round=n_rounds)
# Use logit to invert the base score back to a raw leaf value (margin)
scores = np.full((X_test.shape[0],), logit(base_score))
for i in range(n_rounds):
# - Use output_margin to get raw leaf values
# - Use iteration_range to get prediction for only one tree
# - Use previous prediction as base margin for the model
Xy_test = xgb.DMatrix(X_test, base_margin=scores)
if i == n_rounds - 1:
# last round, get the transformed prediction
scores = booster.predict(
Xy_test, iteration_range=(i, i + 1), output_margin=False
)
else:
# get raw leaf value for accumulation
scores = booster.predict(
Xy_test, iteration_range=(i, i + 1), output_margin=True
)
full = booster.predict(xgb.DMatrix(X_test), output_margin=False)
np.testing.assert_allclose(scores, full)
def model_slices() -> None:
"""Inference with each individual using model slices."""
X_train, y_train = load_svmlight_file(train)
X_test, y_test = load_svmlight_file(test)
Xy_train = xgb.QuantileDMatrix(X_train, y_train)
n_rounds = 4
# Specify the base score, otherwise xgboost will estimate one from the training
# data.
base_score = 0.5
params = {
"max_depth": 2,
"eta": 1,
"objective": "reg:logistic",
"tree_method": "hist",
"base_score": base_score,
}
booster = xgb.train(params, Xy_train, num_boost_round=n_rounds)
trees = [booster[t] for t in range(n_rounds)]
# Use logit to invert the base score back to a raw leaf value (margin)
scores = np.full((X_test.shape[0],), logit(base_score))
for i, t in enumerate(trees):
# Feed previous scores into base margin.
Xy_test = xgb.DMatrix(X_test, base_margin=scores)
if i == n_rounds - 1:
# last round, get the transformed prediction
scores = t.predict(Xy_test, output_margin=False)
else:
# get raw leaf value for accumulation
scores = t.predict(Xy_test, output_margin=True)
full = booster.predict(xgb.DMatrix(X_test), output_margin=False)
np.testing.assert_allclose(scores, full)
if __name__ == "__main__":
individual_tree()
model_slices()
| 3,371 | 32.72 | 83 | py |
| xgboost | xgboost-master/demo/guide-python/basic_walkthrough.py |
"""
Getting started with XGBoost
============================
This is a simple example of using the native XGBoost interface; there are other
interfaces in the Python package, like the scikit-learn interface and the Dask interface.
See :doc:`/python/python_intro` and :doc:`/tutorials/index` for other references.
"""
import os
import pickle
import numpy as np
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
# Make sure the demo knows where to load the data.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
XGBOOST_ROOT_DIR = os.path.dirname(os.path.dirname(CURRENT_DIR))
DEMO_DIR = os.path.join(XGBOOST_ROOT_DIR, "demo")
# X is a scipy csr matrix; XGBoost supports many other input types
X, y = load_svmlight_file(os.path.join(DEMO_DIR, "data", "agaricus.txt.train"))
dtrain = xgb.DMatrix(X, y)
# validation set
X_test, y_test = load_svmlight_file(os.path.join(DEMO_DIR, "data", "agaricus.txt.test"))
dtest = xgb.DMatrix(X_test, y_test)
# specify parameters via map, definitions are the same as the C++ version
param = {"max_depth": 2, "eta": 1, "objective": "binary:logistic"}
# specify validations set to watch performance
watchlist = [(dtest, "eval"), (dtrain, "train")]
# number of boosting rounds
num_round = 2
bst = xgb.train(param, dtrain, num_boost_round=num_round, evals=watchlist)
# run prediction
preds = bst.predict(dtest)
labels = dtest.get_label()
print(
"error=%f"
% (
sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i])
/ float(len(preds))
)
)
bst.save_model("model-0.json")
# dump model
bst.dump_model("dump.raw.txt")
# dump model with feature map
bst.dump_model("dump.nice.txt", os.path.join(DEMO_DIR, "data/featmap.txt"))
# save dmatrix into binary buffer
dtest.save_binary("dtest.dmatrix")
# save model
bst.save_model("model-1.json")
# load model and data in
bst2 = xgb.Booster(model_file="model-1.json")
dtest2 = xgb.DMatrix("dtest.dmatrix")
preds2 = bst2.predict(dtest2)
# assert they are the same
assert np.sum(np.abs(preds2 - preds)) == 0
# alternatively, you can pickle the booster
pks = pickle.dumps(bst2)
# load model and data in
bst3 = pickle.loads(pks)
preds3 = bst3.predict(dtest2)
# assert they are the same
assert np.sum(np.abs(preds3 - preds)) == 0
| 2,257 | 29.106667 | 88 | py |
| xgboost | xgboost-master/demo/guide-python/sklearn_evals_result.py |
"""
Demo for accessing the xgboost eval metrics by using sklearn interface
======================================================================
"""
import numpy as np
from sklearn.datasets import make_hastie_10_2
import xgboost as xgb
X, y = make_hastie_10_2(n_samples=2000, random_state=42)
# Map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:1600], X[1600:]
y_train, y_test = y[:1600], y[1600:]
param_dist = {'objective':'binary:logistic', 'n_estimators':2}
clf = xgb.XGBModel(**param_dist)
# Or you can use: clf = xgb.XGBClassifier(**param_dist)
clf.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_test, y_test)],
eval_metric='logloss',
verbose=True)
# Load evals result by calling the evals_result() function
evals_result = clf.evals_result()
print('Access logloss metric directly from validation_0:')
print(evals_result['validation_0']['logloss'])
print('')
print('Access metrics through a loop:')
for e_name, e_mtrs in evals_result.items():
print('- {}'.format(e_name))
for e_mtr_name, e_mtr_vals in e_mtrs.items():
print(' - {}'.format(e_mtr_name))
print(' - {}'.format(e_mtr_vals))
print('')
print('Access complete dict:')
print(evals_result)
| 1,278
| 26.804348
| 70
|
py
|
xgboost
|
xgboost-master/demo/guide-python/quantile_regression.py
|
"""
Quantile Regression
===================
.. versionadded:: 2.0.0
The script is inspired by this awesome example in sklearn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
"""
import argparse
from typing import Dict
import numpy as np
from sklearn.model_selection import train_test_split
import xgboost as xgb
def f(x: np.ndarray) -> np.ndarray:
"""The function to predict."""
return x * np.sin(x)
def quantile_loss(args: argparse.Namespace) -> None:
"""Train a quantile regression model."""
rng = np.random.RandomState(1994)
    # Generate a synthetic dataset for the demo; the generating process is taken
    # from the sklearn example.
X = np.atleast_2d(rng.uniform(0, 10.0, size=1000)).T
expected_y = f(X).ravel()
sigma = 0.5 + X.ravel() / 10.0
noise = rng.lognormal(sigma=sigma) - np.exp(sigma**2.0 / 2.0)
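    # The subtracted term, exp(sigma^2 / 2), is the mean of a LogNormal(0, sigma)
    # variable, so the noise is centered at zero.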
y = expected_y + noise
    # Train on the 0.05, 0.5, and 0.95 quantiles. The model is similar to
    # multi-class and multi-target models.
alpha = np.array([0.05, 0.5, 0.95])
evals_result: Dict[str, Dict] = {}
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
    # We will be using the `hist` tree method; `QuantileDMatrix` can be used to
    # save memory.
    # Do not use the `exact` tree method for quantile regression, otherwise the
    # performance might drop.
    Xy = xgb.QuantileDMatrix(X_train, y_train)
# use Xy as a reference
Xy_test = xgb.QuantileDMatrix(X_test, y_test, ref=Xy)
booster = xgb.train(
{
# Use the quantile objective function.
"objective": "reg:quantileerror",
"tree_method": "hist",
"quantile_alpha": alpha,
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
# The evaluation result is a weighted average across multiple quantiles.
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
scores = booster.inplace_predict(xx)
# dim 1 is the quantiles
assert scores.shape[0] == xx.shape[0]
assert scores.shape[1] == alpha.shape[0]
y_lower = scores[:, 0] # alpha=0.05
y_med = scores[:, 1] # alpha=0.5, median
y_upper = scores[:, 2] # alpha=0.95
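    # A minimal sketch (not part of the original demo): the pinball (quantile)
    # loss is the natural way to score a level-``q`` quantile prediction.
    def pinball_loss(y_true: np.ndarray, y_hat: np.ndarray, q: float) -> float:
        diff = y_true - y_hat
        return float(np.mean(np.maximum(q * diff, (q - 1) * diff)))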
# Train a mse model for comparison
booster = xgb.train(
{
"objective": "reg:squarederror",
"tree_method": "hist",
# Let's try not to overfit.
"learning_rate": 0.04,
"max_depth": 5,
},
Xy,
num_boost_round=32,
early_stopping_rounds=2,
evals=[(Xy, "Train"), (Xy_test, "Test")],
evals_result=evals_result,
)
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
y_pred = booster.inplace_predict(xx)
if args.plot:
from matplotlib import pyplot as plt
fig = plt.figure(figsize=(10, 10))
plt.plot(xx, f(xx), "g:", linewidth=3, label=r"$f(x) = x\,\sin(x)$")
plt.plot(X_test, y_test, "b.", markersize=10, label="Test observations")
plt.plot(xx, y_med, "r-", label="Predicted median")
plt.plot(xx, y_pred, "m-", label="Predicted mean")
plt.plot(xx, y_upper, "k-")
plt.plot(xx, y_lower, "k-")
plt.fill_between(
xx.ravel(), y_lower, y_upper, alpha=0.4, label="Predicted 90% interval"
)
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
plt.ylim(-10, 25)
plt.legend(loc="upper left")
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--plot",
action="store_true",
help="Specify it to enable plotting the outputs.",
)
args = parser.parse_args()
quantile_loss(args)
| 3,920
| 30.368
| 91
|
py
|
xgboost
|
xgboost-master/demo/guide-python/cross_validation.py
|
"""
Demo for using cross validation
===============================
"""
import os
import numpy as np
import xgboost as xgb
# load data and do training
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.train?format=libsvm")
)
param = {"max_depth": 2, "eta": 1, "objective": "binary:logistic"}
num_round = 2
print("running cross validation")
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value+std_value
# std_value is standard deviation of the metric
xgb.cv(
param,
dtrain,
num_round,
nfold=5,
metrics={"error"},
seed=0,
callbacks=[xgb.callback.EvaluationMonitor(show_stdv=True)],
)
print("running cross validation, disable standard deviation display")
# do cross validation, this will print result out as
# [iteration] metric_name:mean_value
res = xgb.cv(
param,
dtrain,
num_boost_round=10,
nfold=5,
metrics={"error"},
seed=0,
callbacks=[
xgb.callback.EvaluationMonitor(show_stdv=False),
xgb.callback.EarlyStopping(3),
],
)
print(res)
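# `res` is a pandas DataFrame (when pandas is installed) with one row per
# boosting round and columns such as "test-error-mean" and "test-error-std"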
print("running cross validation, with preprocessing function")
# define the preprocessing function
# it returns the preprocessed training data, test data, and parameters
# we can use this to do weight rescaling, etc.
# as an example, we try to set scale_pos_weight
def fpreproc(dtrain, dtest, param):
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label == 1)
param["scale_pos_weight"] = ratio
return (dtrain, dtest, param)
# do cross validation, for each fold
# the dtrain, dtest, param will be passed into fpreproc
# then the return value of fpreproc will be used to generate
# results of that fold
xgb.cv(param, dtrain, num_round, nfold=5, metrics={"auc"}, seed=0, fpreproc=fpreproc)
###
# you can also do cross validation with a customized loss function
# See custom_objective.py
##
print("running cross validation, with customized loss function")
def logregobj(preds, dtrain):
    """Custom objective: gradient and hessian of the binary logistic loss."""
    labels = dtrain.get_label()
    preds = 1.0 / (1.0 + np.exp(-preds))  # transform raw margin into probability
    grad = preds - labels
    hess = preds * (1.0 - preds)
    return grad, hess
def evalerror(preds, dtrain):
    """Custom metric: classification error; with a custom objective, `preds` is
    the raw margin, so threshold at 0."""
    labels = dtrain.get_label()
    return "error", float(sum(labels != (preds > 0.0))) / len(labels)
param = {"max_depth": 2, "eta": 1}
# train with customized objective
xgb.cv(param, dtrain, num_round, nfold=5, seed=0, obj=logregobj, feval=evalerror)
| 2,481
| 25.978261
| 85
|
py
|
xgboost
|
xgboost-master/demo/guide-python/multioutput_regression.py
|
"""
A demo for multi-output regression
==================================
The demo is adapted from scikit-learn:
https://scikit-learn.org/stable/auto_examples/ensemble/plot_random_forest_regression_multioutput.html#sphx-glr-auto-examples-ensemble-plot-random-forest-regression-multioutput-py
See :doc:`/tutorials/multioutput` for more information.
.. note::
The feature is experimental. For the `multi_output_tree` strategy, many features are
missing.
"""
import argparse
from typing import Dict, List, Tuple
import numpy as np
from matplotlib import pyplot as plt
import xgboost as xgb
def plot_predt(y: np.ndarray, y_predt: np.ndarray, name: str) -> None:
s = 25
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, edgecolor="black", label="data")
plt.scatter(
y_predt[:, 0], y_predt[:, 1], c="cornflowerblue", s=s, edgecolor="black"
)
plt.xlim([-1, 2])
plt.ylim([-1, 2])
plt.show()
def gen_circle() -> Tuple[np.ndarray, np.ndarray]:
"Generate a sample dataset that y is a 2 dim circle."
rng = np.random.RandomState(1994)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += 0.5 - rng.rand(20, 2)
y = y - y.min()
y = y / y.max()
return X, y
def rmse_model(plot_result: bool, strategy: str) -> None:
"""Draw a circle with 2-dim coordinate as target variables."""
X, y = gen_circle()
# Train a regressor on it
reg = xgb.XGBRegressor(
tree_method="hist",
n_estimators=128,
n_jobs=16,
max_depth=8,
multi_strategy=strategy,
subsample=0.6,
)
reg.fit(X, y, eval_set=[(X, y)])
y_predt = reg.predict(X)
if plot_result:
plot_predt(y, y_predt, "multi")
def custom_rmse_model(plot_result: bool, strategy: str) -> None:
"""Train using Python implementation of Squared Error."""
    # Due to its experimental status, the custom objective doesn't support
    # matrices as gradient and hessian; this will change in a future release.
def gradient(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
"""Compute the gradient squared error."""
y = dtrain.get_label().reshape(predt.shape)
return (predt - y).reshape(y.size)
def hessian(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray:
"""Compute the hessian for squared error."""
return np.ones(predt.shape).reshape(predt.size)
    def squared_error(
        predt: np.ndarray, dtrain: xgb.DMatrix
    ) -> Tuple[np.ndarray, np.ndarray]:
grad = gradient(predt, dtrain)
hess = hessian(predt, dtrain)
return grad, hess
    def rmse(predt: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, float]:
        """Custom metric: root mean squared error over all targets."""
        y = dtrain.get_label().reshape(predt.shape)
        v = np.sqrt(np.mean(np.power(y - predt, 2)))
        return "PyRMSE", v
X, y = gen_circle()
Xy = xgb.DMatrix(X, y)
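    # Note: for multi-target data, `y` has shape (n_samples, n_targets), while
    # `DMatrix.get_label()` returns it flattened, hence the reshapes above.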
results: Dict[str, Dict[str, List[float]]] = {}
# Make sure the `num_target` is passed to XGBoost when custom objective is used.
# When builtin objective is used, XGBoost can figure out the number of targets
# automatically.
booster = xgb.train(
{
"tree_method": "hist",
"num_target": y.shape[1],
"multi_strategy": strategy,
},
dtrain=Xy,
num_boost_round=128,
        obj=squared_error,
evals=[(Xy, "Train")],
evals_result=results,
custom_metric=rmse,
)
y_predt = booster.inplace_predict(X)
if plot_result:
plot_predt(y, y_predt, "multi")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--plot", choices=[0, 1], type=int, default=1)
args = parser.parse_args()
# Train with builtin RMSE objective
# - One model per output.
rmse_model(args.plot == 1, "one_output_per_tree")
    # - One model for all outputs; this is still a work in progress and many
    #   features are missing.
rmse_model(args.plot == 1, "multi_output_tree")
# Train with custom objective.
# - One model per output.
custom_rmse_model(args.plot == 1, "one_output_per_tree")
    # - One model for all outputs; this is still a work in progress and many
    #   features are missing.
custom_rmse_model(args.plot == 1, "multi_output_tree")
| 4,360
| 30.832117
| 178
|
py
|
xgboost
|
xgboost-master/demo/guide-python/custom_softmax.py
|
'''
Demo for creating customized multi-class objective function
===========================================================
This demo only applies to XGBoost versions newer than 1.0.0, as earlier versions
return the transformed prediction for the multi-class objective function. More
details in the comments.
See :doc:`/tutorials/custom_metric_obj` for detailed tutorial and notes.
'''
import argparse
import numpy as np
from matplotlib import pyplot as plt
import xgboost as xgb
np.random.seed(1994)
kRows = 100
kCols = 10
kClasses = 4 # number of classes
kRounds = 10 # number of boosting rounds.
# Generate some random data for demo.
X = np.random.randn(kRows, kCols)
y = np.random.randint(0, 4, size=kRows)
m = xgb.DMatrix(X, y)
def softmax(x):
'''Softmax function with x as input vector.'''
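    # Note: a numerically stabler variant subtracts x.max() before the exp; the
    # naive form is kept here and the overflow risk is discussed below.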
e = np.exp(x)
return e / np.sum(e)
def softprob_obj(predt: np.ndarray, data: xgb.DMatrix):
'''Loss function. Computing the gradient and approximated hessian (diagonal).
Reimplements the `multi:softprob` inside XGBoost.
'''
labels = data.get_label()
if data.get_weight().size == 0:
# Use 1 as weight if we don't have custom weight.
weights = np.ones((kRows, 1), dtype=float)
else:
weights = data.get_weight()
# The prediction is of shape (rows, classes), each element in a row
# represents a raw prediction (leaf weight, hasn't gone through softmax
# yet). In XGBoost 1.0.0, the prediction is transformed by a softmax
# function, fixed in later versions.
assert predt.shape == (kRows, kClasses)
grad = np.zeros((kRows, kClasses), dtype=float)
hess = np.zeros((kRows, kClasses), dtype=float)
eps = 1e-6
    # compute the gradient and hessian; the slow Python loops are only suitable
    # for a demo. The implementation in the native XGBoost core is also more
    # robust to numeric overflow, as nothing is done here to mitigate the `exp`
    # in `softmax`.
for r in range(predt.shape[0]):
target = labels[r]
p = softmax(predt[r, :])
for c in range(predt.shape[1]):
            assert 0 <= target < kClasses
g = p[c] - 1.0 if c == target else p[c]
g = g * weights[r]
h = max((2.0 * p[c] * (1.0 - p[c]) * weights[r]).item(), eps)
grad[r, c] = g
hess[r, c] = h
# Right now (XGBoost 1.0.0), reshaping is necessary
grad = grad.reshape((kRows * kClasses, 1))
hess = hess.reshape((kRows * kClasses, 1))
return grad, hess
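def softprob_obj_vectorized(predt: np.ndarray, data: xgb.DMatrix):
    '''A vectorized sketch of the same objective (illustrative, not used below).
    Assumes unit sample weights.
    '''
    labels = data.get_label().astype(int)
    # subtract the row-wise max before exp for numerical stability
    e = np.exp(predt - predt.max(axis=1, keepdims=True))
    p = e / e.sum(axis=1, keepdims=True)
    grad = p.copy()
    # gradient of softmax cross-entropy: p - one_hot(label)
    grad[np.arange(predt.shape[0]), labels] -= 1.0
    hess = np.maximum(2.0 * p * (1.0 - p), 1e-6)
    return grad.reshape((-1, 1)), hess.reshape((-1, 1))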
def predict(booster: xgb.Booster, X):
'''A customized prediction function that converts raw prediction to
target class.
'''
    # `output_margin=True` means we want the raw prediction computed from the
    # tree leaf weights.
predt = booster.predict(X, output_margin=True)
out = np.zeros(kRows)
for r in range(predt.shape[0]):
        # the class with maximum probability (not strictly a probability since
        # it hasn't gone through softmax and doesn't sum to 1, but the argmax
        # result is the same).
i = np.argmax(predt[r])
out[r] = i
return out
def merror(predt: np.ndarray, dtrain: xgb.DMatrix):
y = dtrain.get_label()
    # As with the custom objective, `predt` is the untransformed leaf weight when a
    # custom objective is provided.
    # With the `custom_metric` parameter of the train function, a custom metric
    # receives the raw input only when a custom objective is also used; otherwise
    # it receives the transformed prediction.
assert predt.shape == (kRows, kClasses)
out = np.zeros(kRows)
for r in range(predt.shape[0]):
i = np.argmax(predt[r])
out[r] = i
assert y.shape == out.shape
errors = np.zeros(kRows)
errors[y != out] = 1.0
return 'PyMError', np.sum(errors) / kRows
def plot_history(custom_results, native_results):
fig, axs = plt.subplots(2, 1)
ax0 = axs[0]
ax1 = axs[1]
pymerror = custom_results['train']['PyMError']
merror = native_results['train']['merror']
x = np.arange(0, kRounds, 1)
ax0.plot(x, pymerror, label='Custom objective')
ax0.legend()
ax1.plot(x, merror, label='multi:softmax')
ax1.legend()
plt.show()
def main(args):
custom_results = {}
# Use our custom objective function
booster_custom = xgb.train({'num_class': kClasses,
'disable_default_eval_metric': True},
m,
num_boost_round=kRounds,
obj=softprob_obj,
custom_metric=merror,
evals_result=custom_results,
evals=[(m, 'train')])
predt_custom = predict(booster_custom, m)
native_results = {}
# Use the same objective function defined in XGBoost.
booster_native = xgb.train({'num_class': kClasses,
"objective": "multi:softmax",
'eval_metric': 'merror'},
m,
num_boost_round=kRounds,
evals_result=native_results,
evals=[(m, 'train')])
predt_native = booster_native.predict(m)
# We are reimplementing the loss function in XGBoost, so it should
# be the same for normal cases.
assert np.all(predt_custom == predt_native)
np.testing.assert_allclose(custom_results['train']['PyMError'],
native_results['train']['merror'])
if args.plot != 0:
plot_history(custom_results, native_results)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Arguments for custom softmax objective function demo.')
parser.add_argument(
'--plot',
type=int,
default=1,
help='Set to 0 to disable plotting the evaluation history.')
args = parser.parse_args()
main(args)
| 6,043
| 31.494624
| 89
|
py
|
xgboost
|
xgboost-master/demo/rank/rank.py
|
#!/usr/bin/python
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
from xgboost import DMatrix
# This script demonstrates how to do ranking with xgboost.train
x_train, y_train = load_svmlight_file("mq2008.train")
x_valid, y_valid = load_svmlight_file("mq2008.vali")
x_test, y_test = load_svmlight_file("mq2008.test")
group_train = []
with open("mq2008.train.group", "r") as f:
data = f.readlines()
for line in data:
group_train.append(int(line.split("\n")[0]))
group_valid = []
with open("mq2008.vali.group", "r") as f:
data = f.readlines()
for line in data:
group_valid.append(int(line.split("\n")[0]))
group_test = []
with open("mq2008.test.group", "r") as f:
data = f.readlines()
for line in data:
group_test.append(int(line.split("\n")[0]))
train_dmatrix = DMatrix(x_train, y_train)
valid_dmatrix = DMatrix(x_valid, y_valid)
test_dmatrix = DMatrix(x_test)
train_dmatrix.set_group(group_train)
valid_dmatrix.set_group(group_valid)
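# note: per-document scores do not depend on group information, so the test
# DMatrix can be used for prediction without calling `set_group`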
params = {'objective': 'rank:ndcg', 'eta': 0.1, 'gamma': 1.0,
'min_child_weight': 0.1, 'max_depth': 6}
xgb_model = xgb.train(params, train_dmatrix, num_boost_round=4,
evals=[(valid_dmatrix, 'validation')])
pred = xgb_model.predict(test_dmatrix)
| 1,288
| 29.690476
| 63
|
py
|
xgboost
|
xgboost-master/demo/rank/trans_data.py
|
import sys
def save_data(group_data,output_feature,output_group):
if len(group_data) == 0:
return
output_group.write(str(len(group_data))+"\n")
for data in group_data:
# only include nonzero features
feats = [ p for p in data[2:] if float(p.split(':')[1]) != 0.0 ]
output_feature.write(data[0] + " " + " ".join(feats) + "\n")
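# Input (RankSVM) line format: "<label> qid:<gid> <fid>:<val> ...", optionally
# ending in a "#" comment. The feature file keeps "<label> <fid>:<val> ..." with
# zero-valued features dropped; the group file records one group size per line.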
if __name__ == "__main__":
if len(sys.argv) != 4:
print ("Usage: python trans_data.py [Ranksvm Format Input] [Output Feature File] [Output Group File]")
sys.exit(0)
fi = open(sys.argv[1])
output_feature = open(sys.argv[2],"w")
output_group = open(sys.argv[3],"w")
group_data = []
group = ""
for line in fi:
if not line:
break
if "#" in line:
line = line[:line.index("#")]
splits = line.strip().split(" ")
if splits[1] != group:
save_data(group_data,output_feature,output_group)
group_data = []
group = splits[1]
group_data.append(splits)
save_data(group_data,output_feature,output_group)
fi.close()
output_feature.close()
output_group.close()
| 1,177
| 27.047619
| 110
|
py
|
xgboost
|
xgboost-master/demo/rank/rank_sklearn.py
|
#!/usr/bin/python
from sklearn.datasets import load_svmlight_file
import xgboost as xgb
# This script demonstrates how to do ranking with XGBRanker
x_train, y_train = load_svmlight_file("mq2008.train")
x_valid, y_valid = load_svmlight_file("mq2008.vali")
x_test, y_test = load_svmlight_file("mq2008.test")
group_train = []
with open("mq2008.train.group", "r") as f:
data = f.readlines()
for line in data:
group_train.append(int(line.split("\n")[0]))
group_valid = []
with open("mq2008.vali.group", "r") as f:
data = f.readlines()
for line in data:
group_valid.append(int(line.split("\n")[0]))
group_test = []
with open("mq2008.test.group", "r") as f:
data = f.readlines()
for line in data:
group_test.append(int(line.split("\n")[0]))
params = {'objective': 'rank:ndcg', 'learning_rate': 0.1,
'gamma': 1.0, 'min_child_weight': 0.1,
'max_depth': 6, 'n_estimators': 4}
model = xgb.sklearn.XGBRanker(**params)
model.fit(x_train, y_train, group_train, verbose=True,
eval_set=[(x_valid, y_valid)], eval_group=[group_valid])
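# Newer XGBoost versions also accept per-row query ids via the `qid` argument of
# `fit` as an alternative to group sizes.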
pred = model.predict(x_test)
| 1,131
| 30.444444
| 66
|
py
|
xgboost
|
xgboost-master/demo/kaggle-higgs/higgs-pred.py
|
#!/usr/bin/python
# make prediction
import numpy as np
import xgboost as xgb
# path to where the data lies
dpath = 'data'
modelfile = 'higgs.model'
outfile = 'higgs.pred.csv'
# label the top 15% of events as positive ('s')
threshold_ratio = 0.15
# load in training data, directly use numpy
dtest = np.loadtxt( dpath+'/test.csv', delimiter=',', skiprows=1 )
data = dtest[:,1:31]
idx = dtest[:,0]
print ('finish loading from csv ')
xgmat = xgb.DMatrix( data, missing = -999.0 )
bst = xgb.Booster({'nthread':16}, model_file = modelfile)
ypred = bst.predict( xgmat )
res = [ ( int(idx[i]), ypred[i] ) for i in range(len(ypred)) ]
rorder = {}
for k, v in sorted( res, key = lambda x:-x[1] ):
rorder[ k ] = len(rorder) + 1
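# `rorder` maps each event id to its rank, with rank 1 given to the highest
# predicted score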
# write out predictions
ntop = int(threshold_ratio * len(rorder))
fo = open(outfile, 'w')
nhit = 0
ntot = 0
fo.write('EventId,RankOrder,Class\n')
for k, v in res:
if rorder[k] <= ntop:
lb = 's'
nhit += 1
else:
lb = 'b'
# change output rank order to follow Kaggle convention
fo.write('%s,%d,%s\n' % ( k, len(rorder)+1-rorder[k], lb ) )
ntot += 1
fo.close()
print ('finished writing into prediction file')
| 1,164
| 22.77551
| 66
|
py
|
xgboost
|
xgboost-master/demo/kaggle-higgs/speedtest.py
|
#!/usr/bin/python
# this is the example script to use xgboost to train
import time
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
import xgboost as xgb
test_size = 550000
# path to where the data lies
dpath = 'data'
# load in training data, directly use numpy
dtrain = np.loadtxt( dpath+'/training.csv', delimiter=',', skiprows=1, converters={32: lambda x:int(x=='s') } )
print ('finish loading from csv ')
label = dtrain[:,32]
data = dtrain[:,1:31]
# rescale weight to make it same as test set
weight = dtrain[:,31] * float(test_size) / len(label)
sum_wpos = sum( weight[i] for i in range(len(label)) if label[i] == 1.0 )
sum_wneg = sum( weight[i] for i in range(len(label)) if label[i] == 0.0 )
# print weight statistics
print ('weight statistics: wpos=%g, wneg=%g, ratio=%g' % ( sum_wpos, sum_wneg, sum_wneg/sum_wpos ))
# construct xgboost.DMatrix from numpy array, treat -999.0 as missing value
xgmat = xgb.DMatrix( data, label=label, missing = -999.0, weight=weight )
# setup parameters for xgboost
param = {}
# use logistic regression loss
param['objective'] = 'binary:logitraw'
# scale weight of positive examples
param['scale_pos_weight'] = sum_wneg/sum_wpos
param['eta'] = 0.1
param['max_depth'] = 6
param['eval_metric'] = 'auc'
param['nthread'] = 4
plst = list(param.items()) + [('eval_metric', 'ams@0.15')]
watchlist = [ (xgmat,'train') ]
# boost 10 trees
num_round = 10
print ('loading data end, start to boost trees')
print ("training GBM from sklearn")
tmp = time.time()
gbm = GradientBoostingClassifier(n_estimators=num_round, max_depth=6, verbose=2)
gbm.fit(data, label)
print ("sklearn.GBM costs: %s seconds" % str(time.time() - tmp))
print ("training xgboost")
threads = [1, 2, 4, 16]
for i in threads:
param['nthread'] = i
tmp = time.time()
    plst = list(param.items()) + [('eval_metric', 'ams@0.15')]
    bst = xgb.train(plst, xgmat, num_round, watchlist)
print ("XGBoost with %d thread costs: %s seconds" % (i, str(time.time() - tmp)))
print ('finish training')
| 2,051
| 30.090909
| 111
|
py
|
xgboost
|
xgboost-master/demo/kaggle-higgs/higgs-cv.py
|
#!/usr/bin/python
import numpy as np
import xgboost as xgb
### load data and do training
train = np.loadtxt('./data/training.csv', delimiter=',', skiprows=1, converters={32: lambda x:int(x=='s'.encode('utf-8')) } )
label = train[:,32]
data = train[:,1:31]
weight = train[:,31]
dtrain = xgb.DMatrix( data, label=label, missing = -999.0, weight=weight )
param = {'max_depth':6, 'eta':0.1, 'objective':'binary:logitraw', 'nthread':4}
num_round = 120
print ('running cross validation, with preprocessing function')
# define the preprocessing function
# it returns the preprocessed training data, test data, and parameters
# we can use this to do weight rescaling, etc.
# as an example, we try to set scale_pos_weight
def fpreproc(dtrain, dtest, param):
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label==1)
param['scale_pos_weight'] = ratio
wtrain = dtrain.get_weight()
wtest = dtest.get_weight()
sum_weight = sum(wtrain) + sum(wtest)
wtrain *= sum_weight / sum(wtrain)
wtest *= sum_weight / sum(wtest)
dtrain.set_weight(wtrain)
dtest.set_weight(wtest)
return (dtrain, dtest, param)
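# The AMS metric depends on absolute event weights, so each fold's train and
# test weights are rescaled to carry the total weight of the full dataset.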
# do cross validation, for each fold
# the dtrain, dtest, param will be passed into fpreproc
# then the return value of fpreproc will be used to generate
# results of that fold
xgb.cv(param, dtrain, num_round, nfold=5,
metrics={'ams@0.15', 'auc'}, seed = 0, fpreproc = fpreproc)
| 1,436
| 35.846154
| 125
|
py
|
xgboost
|
xgboost-master/demo/kaggle-higgs/higgs-numpy.py
|
#!/usr/bin/python
# this is the example script to use xgboost to train
import numpy as np
import xgboost as xgb
test_size = 550000
# path to where the data lies
dpath = 'data'
# load in training data, directly use numpy
dtrain = np.loadtxt( dpath+'/training.csv', delimiter=',', skiprows=1, converters={32: lambda x:int(x=='s'.encode('utf-8')) } )
print ('finish loading from csv ')
label = dtrain[:,32]
data = dtrain[:,1:31]
# rescale weight to make it same as test set
weight = dtrain[:,31] * float(test_size) / len(label)
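# the AMS metric uses absolute event weights, hence the scaling to the full
# test-set size above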
sum_wpos = sum( weight[i] for i in range(len(label)) if label[i] == 1.0 )
sum_wneg = sum( weight[i] for i in range(len(label)) if label[i] == 0.0 )
# print weight statistics
print ('weight statistics: wpos=%g, wneg=%g, ratio=%g' % ( sum_wpos, sum_wneg, sum_wneg/sum_wpos ))
# construct xgboost.DMatrix from numpy array, treat -999.0 as missing value
xgmat = xgb.DMatrix( data, label=label, missing = -999.0, weight=weight )
# setup parameters for xgboost
param = {}
# use logistic regression loss, use raw prediction before logistic transformation
# since we only need the rank
param['objective'] = 'binary:logitraw'
# scale weight of positive examples
param['scale_pos_weight'] = sum_wneg/sum_wpos
param['eta'] = 0.1
param['max_depth'] = 6
param['eval_metric'] = 'auc'
param['nthread'] = 16
# you could pass `param` in directly, but a dict cannot hold two 'eval_metric'
# entries, so convert to a list of pairs to watch multiple metrics
plst = list(param.items())+[('eval_metric', 'ams@0.15')]
watchlist = [ (xgmat,'train') ]
# boost 120 trees
num_round = 120
print ('loading data end, start to boost trees')
bst = xgb.train(plst, xgmat, num_round, watchlist)
# save out model
bst.save_model('higgs.model')
print ('finish training')
| 1,714
| 30.759259
| 127
|
py
|
xgboost
|
xgboost-master/demo/json-model/json_parser.py
|
"""Demonstration for parsing JSON/UBJSON tree model file generated by XGBoost.
"""
import argparse
import json
from dataclasses import dataclass
from enum import IntEnum, unique
from typing import Any, Dict, List, Sequence, Union
import numpy as np
try:
import ubjson
except ImportError:
ubjson = None
ParamT = Dict[str, str]
def to_integers(data: Union[bytes, List[int]]) -> List[int]:
"""Convert a sequence of bytes to a list of Python integer"""
return [v for v in data]
@unique
class SplitType(IntEnum):
numerical = 0
categorical = 1
@dataclass
class Node:
# properties
left: int
right: int
parent: int
split_idx: int
split_cond: float
default_left: bool
split_type: SplitType
categories: List[int]
# statistic
base_weight: float
loss_chg: float
sum_hess: float
class Tree:
"""A tree built by XGBoost."""
def __init__(self, tree_id: int, nodes: Sequence[Node]) -> None:
self.tree_id = tree_id
self.nodes = nodes
def loss_change(self, node_id: int) -> float:
"""Loss gain of a node."""
return self.nodes[node_id].loss_chg
def sum_hessian(self, node_id: int) -> float:
"""Sum Hessian of a node."""
return self.nodes[node_id].sum_hess
def base_weight(self, node_id: int) -> float:
"""Base weight of a node."""
return self.nodes[node_id].base_weight
def split_index(self, node_id: int) -> int:
"""Split feature index of node."""
return self.nodes[node_id].split_idx
def split_condition(self, node_id: int) -> float:
"""Split value of a node."""
return self.nodes[node_id].split_cond
def split_categories(self, node_id: int) -> List[int]:
"""Categories in a node."""
return self.nodes[node_id].categories
def is_categorical(self, node_id: int) -> bool:
"""Whether a node has categorical split."""
return self.nodes[node_id].split_type == SplitType.categorical
def is_numerical(self, node_id: int) -> bool:
return not self.is_categorical(node_id)
def parent(self, node_id: int) -> int:
"""Parent ID of a node."""
return self.nodes[node_id].parent
def left_child(self, node_id: int) -> int:
"""Left child ID of a node."""
return self.nodes[node_id].left
def right_child(self, node_id: int) -> int:
"""Right child ID of a node."""
return self.nodes[node_id].right
def is_leaf(self, node_id: int) -> bool:
"""Whether a node is leaf."""
return self.nodes[node_id].left == -1
def is_deleted(self, node_id: int) -> bool:
"""Whether a node is deleted."""
return self.split_index(node_id) == np.iinfo(np.uint32).max
def __str__(self) -> str:
stack = [0]
nodes = []
while stack:
node: Dict[str, Union[float, int, List[int]]] = {}
nid = stack.pop()
node["node id"] = nid
node["gain"] = self.loss_change(nid)
node["cover"] = self.sum_hessian(nid)
nodes.append(node)
if not self.is_leaf(nid) and not self.is_deleted(nid):
left = self.left_child(nid)
right = self.right_child(nid)
stack.append(left)
stack.append(right)
categories = self.split_categories(nid)
if categories:
assert self.is_categorical(nid)
node["categories"] = categories
else:
assert self.is_numerical(nid)
node["condition"] = self.split_condition(nid)
if self.is_leaf(nid):
node["weight"] = self.split_condition(nid)
string = "\n".join(map(lambda x: " " + str(x), nodes))
return string
class Model:
"""Gradient boosted tree model."""
def __init__(self, model: dict) -> None:
"""Construct the Model from a JSON object.
        Parameters
        ----------
        model : A dictionary loaded by json representing an XGBoost boosted tree model.
"""
# Basic properties of a model
self.learner_model_shape: ParamT = model["learner"]["learner_model_param"]
self.num_output_group = int(self.learner_model_shape["num_class"])
self.num_feature = int(self.learner_model_shape["num_feature"])
self.base_score = float(self.learner_model_shape["base_score"])
        # A field encoding which output group a tree belongs to
self.tree_info = model["learner"]["gradient_booster"]["model"]["tree_info"]
model_shape: ParamT = model["learner"]["gradient_booster"]["model"][
"gbtree_model_param"
]
# JSON representation of trees
j_trees = model["learner"]["gradient_booster"]["model"]["trees"]
# Load the trees
self.num_trees = int(model_shape["num_trees"])
trees: List[Tree] = []
for i in range(self.num_trees):
tree: Dict[str, Any] = j_trees[i]
tree_id = int(tree["id"])
assert tree_id == i, (tree_id, i)
# - properties
left_children: List[int] = tree["left_children"]
right_children: List[int] = tree["right_children"]
parents: List[int] = tree["parents"]
split_conditions: List[float] = tree["split_conditions"]
split_indices: List[int] = tree["split_indices"]
# when ubjson is used, this is a byte array with each element as uint8
default_left = to_integers(tree["default_left"])
# - categorical features
# when ubjson is used, this is a byte array with each element as uint8
split_types = to_integers(tree["split_type"])
            # categories for each node are stored in a CSR style layout, with the
            # segments as begin pointers and `categories` as the values.
cat_segments: List[int] = tree["categories_segments"]
cat_sizes: List[int] = tree["categories_sizes"]
# node index for categorical nodes
cat_nodes: List[int] = tree["categories_nodes"]
assert len(cat_segments) == len(cat_sizes) == len(cat_nodes)
cats = tree["categories"]
assert len(left_children) == len(split_types)
            # The storage for categories is only defined for categorical nodes, to
            # avoid unnecessary overhead for numerical splits; we track the
            # categorical nodes that have been processed using a counter.
cat_cnt = 0
if cat_nodes:
last_cat_node = cat_nodes[cat_cnt]
else:
last_cat_node = -1
node_categories: List[List[int]] = []
for node_id in range(len(left_children)):
if node_id == last_cat_node:
beg = cat_segments[cat_cnt]
size = cat_sizes[cat_cnt]
end = beg + size
node_cats = cats[beg:end]
# categories are unique for each node
assert len(set(node_cats)) == len(node_cats)
cat_cnt += 1
if cat_cnt == len(cat_nodes):
last_cat_node = -1 # continue to process the rest of the nodes
else:
last_cat_node = cat_nodes[cat_cnt]
assert node_cats
node_categories.append(node_cats)
else:
                    # append an empty list; the node is either a numerical node or a leaf.
node_categories.append([])
# - stats
base_weights: List[float] = tree["base_weights"]
loss_changes: List[float] = tree["loss_changes"]
sum_hessian: List[float] = tree["sum_hessian"]
# Construct a list of nodes that have complete information
nodes: List[Node] = [
Node(
left_children[node_id],
right_children[node_id],
parents[node_id],
split_indices[node_id],
split_conditions[node_id],
default_left[node_id] == 1, # to boolean
SplitType(split_types[node_id]),
node_categories[node_id],
base_weights[node_id],
loss_changes[node_id],
sum_hessian[node_id],
)
for node_id in range(len(left_children))
]
pytree = Tree(tree_id, nodes)
trees.append(pytree)
self.trees = trees
def print_model(self) -> None:
for i, tree in enumerate(self.trees):
print("\ntree_id:", i)
print(tree)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Demonstration for loading XGBoost JSON/UBJSON model."
)
parser.add_argument(
"--model", type=str, required=True, help="Path to .json/.ubj model file."
)
args = parser.parse_args()
if args.model.endswith("json"):
# use json format
with open(args.model, "r") as fd:
model = json.load(fd)
elif args.model.endswith("ubj"):
if ubjson is None:
raise ImportError("ubjson is not installed.")
# use ubjson format
with open(args.model, "rb") as bfd:
model = ubjson.load(bfd)
else:
raise ValueError(
"Unexpected file extension. Supported file extension are json and ubj."
)
model = Model(model)
model.print_model()
| 9,711
| 33.810036
| 87
|
py
|
xgboost
|
xgboost-master/demo/rmm_plugin/rmm_mgpu_with_dask.py
|
import dask
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from sklearn.datasets import make_classification
import xgboost as xgb
def main(client):
# Optionally force XGBoost to use RMM for all GPU memory allocation, see ./README.md
# xgb.set_config(use_rmm=True)
X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
    # In practice one should prefer loading the data with dask collections
    # instead of using `from_array`.
X = dask.array.from_array(X)
y = dask.array.from_array(y)
dtrain = xgb.dask.DaskDMatrix(client, X, label=y)
params = {'max_depth': 8, 'eta': 0.01, 'objective': 'multi:softprob', 'num_class': 3,
'tree_method': 'gpu_hist', 'eval_metric': 'merror'}
output = xgb.dask.train(client, params, dtrain, num_boost_round=100,
evals=[(dtrain, 'train')])
bst = output['booster']
history = output['history']
for i, e in enumerate(history['train']['merror']):
print(f'[{i}] train-merror: {e}')
if __name__ == '__main__':
# To use RMM pool allocator with a GPU Dask cluster, just add rmm_pool_size option to
# LocalCUDACluster constructor.
with LocalCUDACluster(rmm_pool_size='2GB') as cluster:
with Client(cluster) as client:
main(client)
| 1,334
| 36.083333
| 90
|
py
|
xgboost
|
xgboost-master/demo/rmm_plugin/rmm_singlegpu.py
|
import rmm
from sklearn.datasets import make_classification
import xgboost as xgb
# Initialize RMM pool allocator
rmm.reinitialize(pool_allocator=True)
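# A pool allocator reserves a large slab of GPU memory up front and serves
# allocations from it, avoiding repeated cudaMalloc/cudaFree calls.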
# Optionally force XGBoost to use RMM for all GPU memory allocation, see ./README.md
# xgb.set_config(use_rmm=True)
X, y = make_classification(n_samples=10000, n_informative=5, n_classes=3)
dtrain = xgb.DMatrix(X, label=y)
params = {
"max_depth": 8,
"eta": 0.01,
"objective": "multi:softprob",
"num_class": 3,
"tree_method": "gpu_hist",
}
# XGBoost will automatically use the RMM pool allocator
bst = xgb.train(params, dtrain, num_boost_round=100, evals=[(dtrain, "train")])
| 651
| 27.347826
| 84
|
py
|
xgboost
|
xgboost-master/dev/query_contributors.py
|
"""Query list of all contributors and reviewers in a release"""
import json
import re
import sys
import requests
from sh.contrib import git
if len(sys.argv) != 5:
print(f'Usage: {sys.argv[0]} [starting commit/tag] [ending commit/tag] [GitHub username] ' +
'[GitHub password]')
sys.exit(1)
from_commit = sys.argv[1]
to_commit = sys.argv[2]
username = sys.argv[3]
password = sys.argv[4]
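# note: GitHub's REST API no longer accepts account passwords for basic auth;
# pass a personal access token as the password instead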
contributors = set()
reviewers = set()
def paginate_request(url, callback):
r = requests.get(url, auth=(username, password))
assert r.status_code == requests.codes.ok, f'Code: {r.status_code}, Text: {r.text}'
callback(json.loads(r.text))
while 'next' in r.links:
r = requests.get(r.links['next']['url'], auth=(username, password))
callback(json.loads(r.text))
for line in git.log(f'{from_commit}..{to_commit}', '--pretty=format:%s', '--reverse', '--first-parent'):
    m = re.search(r'\(#([0-9]+)\)$', line.rstrip())
if m:
pr_id = m.group(1)
print(f'PR #{pr_id}')
def process_commit_list(commit_list):
try:
contributors.update([commit['author']['login'] for commit in commit_list])
except TypeError:
prompt = (f'Error fetching contributors for PR #{pr_id}. Enter it manually, ' +
'as a space-separated list: ')
contributors.update(str(input(prompt)).split(' '))
def process_review_list(review_list):
reviewers.update([x['user']['login'] for x in review_list])
def process_comment_list(comment_list):
reviewers.update([x['user']['login'] for x in comment_list])
paginate_request(f'https://api.github.com/repos/dmlc/xgboost/pulls/{pr_id}/commits',
process_commit_list)
paginate_request(f'https://api.github.com/repos/dmlc/xgboost/pulls/{pr_id}/reviews',
process_review_list)
paginate_request(f'https://api.github.com/repos/dmlc/xgboost/issues/{pr_id}/comments',
process_comment_list)
print('Contributors: ', end='')
for x in sorted(contributors):
r = requests.get(f'https://api.github.com/users/{x}', auth=(username, password))
assert r.status_code == requests.codes.ok, f'Code: {r.status_code}, Text: {r.text}'
user_info = json.loads(r.text)
if user_info['name'] is None:
print(f"@{x}, ", end='')
else:
print(f"{user_info['name']} (@{x}), ", end='')
print('\nReviewers: ', end='')
for x in sorted(reviewers):
r = requests.get(f'https://api.github.com/users/{x}', auth=(username, password))
assert r.status_code == requests.codes.ok, f'Code: {r.status_code}, Text: {r.text}'
user_info = json.loads(r.text)
if user_info['name'] is None:
print(f"@{x}, ", end='')
else:
print(f"{user_info['name']} (@{x}), ", end='')
print('')
| 2,905
| 37.236842
| 104
|
py
|
xgboost
|
xgboost-master/dev/release-artifacts.py
|
"""Simple script for managing Python, R, and source release packages.
tqdm, sh are required to run this script.
"""
import argparse
import os
import shutil
import subprocess
import tarfile
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from urllib.request import urlretrieve
import tqdm
from packaging import version
from sh.contrib import git
# The package building is managed by Jenkins CI.
PREFIX = "https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds/release_"
ROOT = Path(__file__).absolute().parent.parent
DIST = ROOT / "python-package" / "dist"
pbar = None
class DirectoryExcursion:
def __init__(self, path: Union[os.PathLike, str]) -> None:
self.path = path
self.curdir = os.path.normpath(os.path.abspath(os.path.curdir))
def __enter__(self) -> None:
os.chdir(self.path)
def __exit__(self, *args: Any) -> None:
os.chdir(self.curdir)
def show_progress(block_num, block_size, total_size):
"Show file download progress."
global pbar
if pbar is None:
pbar = tqdm.tqdm(total=total_size / 1024, unit="kB")
downloaded = block_num * block_size
if downloaded < total_size:
upper = (total_size - downloaded) / 1024
pbar.update(min(block_size / 1024, upper))
else:
pbar.close()
pbar = None
def retrieve(url, filename=None):
print(f"{url} -> {filename}")
return urlretrieve(url, filename, reporthook=show_progress)
def latest_hash() -> str:
"Get latest commit hash."
ret = subprocess.run(["git", "rev-parse", "HEAD"], capture_output=True)
assert ret.returncode == 0, "Failed to get latest commit hash."
commit_hash = ret.stdout.decode("utf-8").strip()
return commit_hash
def download_wheels(
platforms: List[str],
dir_URL: str,
src_filename_prefix: str,
target_filename_prefix: str,
outdir: str,
) -> List[str]:
"""Download all binary wheels. dir_URL is the URL for remote directory storing the
release wheels.
"""
filenames = []
outdir = os.path.join(outdir, "dist")
if not os.path.exists(outdir):
os.mkdir(outdir)
for platform in platforms:
src_wheel = src_filename_prefix + platform + ".whl"
url = dir_URL + src_wheel
target_wheel = target_filename_prefix + platform + ".whl"
filename = os.path.join(outdir, target_wheel)
filenames.append(filename)
retrieve(url=url, filename=filename)
ret = subprocess.run(["twine", "check", filename], capture_output=True)
assert ret.returncode == 0, "Failed twine check"
stderr = ret.stderr.decode("utf-8")
stdout = ret.stdout.decode("utf-8")
assert stderr.find("warning") == -1, "Unresolved warnings:\n" + stderr
assert stdout.find("warning") == -1, "Unresolved warnings:\n" + stdout
return filenames
def make_pysrc_wheel(release: str, outdir: str) -> None:
"""Make Python source distribution."""
dist = os.path.join(outdir, "dist")
if not os.path.exists(dist):
os.mkdir(dist)
with DirectoryExcursion(os.path.join(ROOT, "python-package")):
subprocess.check_call(["python", "-m", "build", "--sdist"])
src = os.path.join(DIST, f"xgboost-{release}.tar.gz")
subprocess.check_call(["twine", "check", src])
shutil.move(src, os.path.join(dist, f"xgboost-{release}.tar.gz"))
def download_py_packages(
branch: str, major: int, minor: int, commit_hash: str, outdir: str
) -> None:
platforms = [
"win_amd64",
"manylinux2014_x86_64",
"manylinux2014_aarch64",
"macosx_10_15_x86_64.macosx_11_0_x86_64.macosx_12_0_x86_64",
"macosx_12_0_arm64",
]
branch = branch.split("_")[1] # release_x.y.z
dir_URL = PREFIX + branch + "/"
src_filename_prefix = "xgboost-" + args.release + "%2B" + commit_hash + "-py3-none-"
target_filename_prefix = "xgboost-" + args.release + "-py3-none-"
if not os.path.exists(DIST):
os.mkdir(DIST)
filenames = download_wheels(
platforms, dir_URL, src_filename_prefix, target_filename_prefix, outdir
)
print("List of downloaded wheels:", filenames)
print(
"""
The following steps should be done manually:
- Upload pypi package by `python3 -m twine upload dist/<Package Name>` for all wheels.
- Check the uploaded files on `https://pypi.org/project/xgboost/<VERSION>/#files` and
`pip install xgboost==<VERSION>` """
)
def download_r_packages(
release: str, branch: str, rc: str, commit: str, outdir: str
) -> Tuple[Dict[str, str], List[str]]:
platforms = ["win64", "linux"]
dirname = os.path.join(outdir, "r-packages")
if not os.path.exists(dirname):
os.mkdir(dirname)
filenames = []
branch = branch.split("_")[1] # release_x.y.z
urls = {}
for plat in platforms:
url = f"{PREFIX}{branch}/xgboost_r_gpu_{plat}_{commit}.tar.gz"
if not rc:
filename = f"xgboost_r_gpu_{plat}_{release}.tar.gz"
else:
filename = f"xgboost_r_gpu_{plat}_{release}-{rc}.tar.gz"
target = os.path.join(dirname, filename)
retrieve(url=url, filename=target)
filenames.append(target)
urls[plat] = url
print("Finished downloading R packages:", filenames)
hashes = []
with DirectoryExcursion(os.path.join(outdir, "r-packages")):
for f in filenames:
ret = subprocess.run(["sha256sum", os.path.basename(f)], capture_output=True)
h = ret.stdout.decode().strip()
hashes.append(h)
return urls, hashes
def check_path():
root = os.path.abspath(os.path.curdir)
assert os.path.basename(root) == "xgboost", "Must be run on project root."
def make_src_package(release: str, outdir: str) -> Tuple[str, str]:
tarname = f"xgboost-{release}.tar.gz"
tarpath = os.path.join(outdir, tarname)
if os.path.exists(tarpath):
os.remove(tarpath)
with tempfile.TemporaryDirectory() as tmpdir_str:
tmpdir = Path(tmpdir_str)
shutil.copytree(os.path.curdir, tmpdir / "xgboost")
with DirectoryExcursion(tmpdir / "xgboost"):
ret = subprocess.run(
["git", "submodule", "foreach", "--quiet", "echo $sm_path"],
capture_output=True,
)
submodules = ret.stdout.decode().strip().split()
for mod in submodules:
mod_path = os.path.join(os.path.abspath(os.path.curdir), mod, ".git")
os.remove(mod_path)
shutil.rmtree(".git")
with tarfile.open(tarpath, "x:gz") as tar:
src = tmpdir / "xgboost"
tar.add(src, arcname="xgboost")
with DirectoryExcursion(os.path.dirname(tarpath)):
ret = subprocess.run(["sha256sum", tarname], capture_output=True)
h = ret.stdout.decode().strip()
return tarname, h
def release_note(
release: str,
artifact_hashes: List[str],
r_urls: Dict[str, str],
tarname: str,
outdir: str,
) -> None:
"""Generate a note for GitHub release description."""
r_gpu_linux_url = r_urls["linux"]
r_gpu_win64_url = r_urls["win64"]
src_tarball = (
f"https://github.com/dmlc/xgboost/releases/download/v{release}/{tarname}"
)
hash_note = "\n".join(artifact_hashes)
end_note = f"""
### Additional artifacts:
You can verify the downloaded packages by running the following command on your Unix shell:
``` sh
echo "<hash> <artifact>" | shasum -a 256 --check
```
```
{hash_note}
```
**Experimental binary packages for R with CUDA enabled**
* xgboost_r_gpu_linux_{release}.tar.gz: [Download]({r_gpu_linux_url})
* xgboost_r_gpu_win64_{release}.tar.gz: [Download]({r_gpu_win64_url})
**Source tarball**
* xgboost.tar.gz: [Download]({src_tarball})"""
print(end_note)
with open(os.path.join(outdir, "end_note.md"), "w") as fd:
fd.write(end_note)
def main(args: argparse.Namespace) -> None:
check_path()
rel = version.parse(args.release)
assert isinstance(rel, version.Version)
major = rel.major
minor = rel.minor
patch = rel.micro
print("Release:", rel)
if not rel.is_prerelease:
        # Stable (non-RC) release
rc: Optional[str] = None
rc_ver: Optional[int] = None
else:
# RC release
assert rel.pre is not None
rc, rc_ver = rel.pre
assert rc == "rc"
release = str(major) + "." + str(minor) + "." + str(patch)
if args.branch is not None:
branch = args.branch
else:
branch = "release_" + str(major) + "." + str(minor) + ".0"
git.clean("-xdf")
git.checkout(branch)
git.pull("origin", branch)
git.submodule("update")
commit_hash = latest_hash()
if not os.path.exists(args.outdir):
os.mkdir(args.outdir)
# source tarball
hashes: List[str] = []
tarname, h = make_src_package(release, args.outdir)
hashes.append(h)
# CUDA R packages
urls, hr = download_r_packages(
release,
branch,
"" if rc is None else rc + str(rc_ver),
commit_hash,
args.outdir,
)
hashes.extend(hr)
# Python source wheel
make_pysrc_wheel(release, args.outdir)
# Python binary wheels
download_py_packages(branch, major, minor, commit_hash, args.outdir)
# Write end note
release_note(release, hashes, urls, tarname, args.outdir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--release",
type=str,
required=True,
help="Version tag, e.g. '1.3.2', or '1.5.0rc1'",
)
parser.add_argument(
"--branch",
type=str,
default=None,
help=(
"Optional branch. Usually patch releases reuse the same branch of the"
" major release, but there can be exception."
),
)
parser.add_argument(
"--outdir",
type=str,
default=None,
required=True,
help="Directory to store the generated packages.",
)
args = parser.parse_args()
main(args)
| 10,231
| 28.744186
| 91
|
py
|
xgboost
|
xgboost-master/dev/prepare_jvm_release.py
|
import argparse
import errno
import glob
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import zipfile
from contextlib import contextmanager
from urllib.request import urlretrieve
def normpath(path):
"""Normalize UNIX path to a native path."""
normalized = os.path.join(*path.split("/"))
if os.path.isabs(path):
return os.path.abspath("/") + normalized
else:
return normalized
def cp(source, target):
source = normpath(source)
target = normpath(target)
print("cp {0} {1}".format(source, target))
shutil.copy(source, target)
def maybe_makedirs(path):
path = normpath(path)
print("mkdir -p " + path)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
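# note: on Python 3.2+ this is roughly equivalent to
# os.makedirs(path, exist_ok=True)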
@contextmanager
def cd(path):
path = normpath(path)
cwd = os.getcwd()
os.chdir(path)
print("cd " + path)
try:
yield path
finally:
os.chdir(cwd)
def run(command, **kwargs):
print(command)
subprocess.check_call(command, shell=True, **kwargs)
def get_current_git_tag():
out = subprocess.check_output(["git", "tag", "--points-at", "HEAD"])
return out.decode().split("\n")[0]
def get_current_commit_hash():
out = subprocess.check_output(["git", "rev-parse", "HEAD"])
return out.decode().split("\n")[0]
def get_current_git_branch():
out = subprocess.check_output(["git", "log", "-n", "1", "--pretty=%d", "HEAD"])
m = re.search(r"release_[0-9\.]+", out.decode())
if not m:
raise ValueError("Expected branch name of form release_xxx")
return m.group(0)
def retrieve(url, filename=None):
print(f"{url} -> {filename}")
return urlretrieve(url, filename)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--release-version", type=str, required=True,
help="Version of the release being prepared")
args = parser.parse_args()
if sys.platform != "darwin" or platform.machine() != "x86_64":
raise NotImplementedError("Please run this script using an Intel Mac")
version = args.release_version
expected_git_tag = "v" + version
current_git_tag = get_current_git_tag()
if current_git_tag != expected_git_tag:
if not current_git_tag:
raise ValueError(f"Expected git tag {expected_git_tag} but current HEAD has no tag. "
f"Run: git checkout {expected_git_tag}")
raise ValueError(f"Expected git tag {expected_git_tag} but current HEAD is at tag "
f"{current_git_tag}. Run: git checkout {expected_git_tag}")
commit_hash = get_current_commit_hash()
git_branch = get_current_git_branch()
print(f"Using commit {commit_hash} of branch {git_branch}, git tag {current_git_tag}")
with cd("jvm-packages/"):
print("====copying pure-Python tracker====")
for use_cuda in [True, False]:
xgboost4j = "xgboost4j-gpu" if use_cuda else "xgboost4j"
cp("../python-package/xgboost/tracker.py", f"{xgboost4j}/src/main/resources")
print("====copying resources for testing====")
with cd("../demo/CLI/regression"):
run(f"{sys.executable} mapfeat.py")
run(f"{sys.executable} mknfold.py machine.txt 1")
for use_cuda in [True, False]:
xgboost4j = "xgboost4j-gpu" if use_cuda else "xgboost4j"
xgboost4j_spark = "xgboost4j-spark-gpu" if use_cuda else "xgboost4j-spark"
maybe_makedirs(f"{xgboost4j}/src/test/resources")
maybe_makedirs(f"{xgboost4j_spark}/src/test/resources")
for file in glob.glob("../demo/data/agaricus.*"):
cp(file, f"{xgboost4j}/src/test/resources")
cp(file, f"{xgboost4j_spark}/src/test/resources")
for file in glob.glob("../demo/CLI/regression/machine.txt.t*"):
cp(file, f"{xgboost4j_spark}/src/test/resources")
print("====Creating directories to hold native binaries====")
for os_ident, arch in [("linux", "x86_64"), ("windows", "x86_64"), ("macos", "x86_64")]:
output_dir = f"xgboost4j/src/main/resources/lib/{os_ident}/{arch}"
maybe_makedirs(output_dir)
for os_ident, arch in [("linux", "x86_64")]:
output_dir = f"xgboost4j-gpu/src/main/resources/lib/{os_ident}/{arch}"
maybe_makedirs(output_dir)
print("====Downloading native binaries from CI====")
nightly_bucket_prefix = "https://s3-us-west-2.amazonaws.com/xgboost-nightly-builds"
maven_repo_prefix = "https://s3-us-west-2.amazonaws.com/xgboost-maven-repo/release/ml/dmlc"
retrieve(url=f"{nightly_bucket_prefix}/{git_branch}/xgboost4j_{commit_hash}.dll",
filename="xgboost4j/src/main/resources/lib/windows/x86_64/xgboost4j.dll")
with tempfile.TemporaryDirectory() as tempdir:
# libxgboost4j.so for Linux x86_64, CPU only
zip_path = os.path.join(tempdir, "xgboost4j_2.12.jar")
extract_dir = os.path.join(tempdir, "xgboost4j")
retrieve(url=f"{maven_repo_prefix}/xgboost4j_2.12/{version}/"
f"xgboost4j_2.12-{version}.jar",
filename=zip_path)
os.mkdir(extract_dir)
with zipfile.ZipFile(zip_path, "r") as t:
t.extractall(extract_dir)
cp(os.path.join(extract_dir, "lib", "linux", "x86_64", "libxgboost4j.so"),
"xgboost4j/src/main/resources/lib/linux/x86_64/libxgboost4j.so")
# libxgboost4j.so for Linux x86_64, GPU support
zip_path = os.path.join(tempdir, "xgboost4j-gpu_2.12.jar")
extract_dir = os.path.join(tempdir, "xgboost4j-gpu")
retrieve(url=f"{maven_repo_prefix}/xgboost4j-gpu_2.12/{version}/"
f"xgboost4j-gpu_2.12-{version}.jar",
filename=zip_path)
os.mkdir(extract_dir)
with zipfile.ZipFile(zip_path, "r") as t:
t.extractall(extract_dir)
cp(os.path.join(extract_dir, "lib", "linux", "x86_64", "libxgboost4j.so"),
"xgboost4j-gpu/src/main/resources/lib/linux/x86_64/libxgboost4j.so")
print("====Next Steps====")
print("1. Gain upload right to Maven Central repo.")
print("1-1. Sign up for a JIRA account at Sonatype: ")
print("1-2. File a JIRA ticket: "
"https://issues.sonatype.org/secure/CreateIssue.jspa?issuetype=21&pid=10134. Example: "
"https://issues.sonatype.org/browse/OSSRH-67724")
print("2. Store the Sonatype credentials in .m2/settings.xml. See insturctions in "
"https://central.sonatype.org/publish/publish-maven/")
print("3. Now on a Mac machine, run:")
print(" GPG_TTY=$(tty) mvn deploy -Prelease -DskipTests")
print("4. Log into https://oss.sonatype.org/. On the left menu panel, click Staging "
"Repositories. Visit the URL https://oss.sonatype.org/content/repositories/mldmlc-1085 "
"to inspect the staged JAR files. Finally, press Release button to publish the "
"artifacts to the Maven Central repository.")
if __name__ == "__main__":
main()
| 7,263
| 40.508571
| 99
|
py
|
xgboost
|
xgboost-master/python-package/hatch_build.py
|
"""
Custom hook to customize the behavior of Hatchling.
Here, we customize the tag of the generated wheels.
"""
import sysconfig
from typing import Any, Dict
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
def get_tag() -> str:
"""Get appropriate wheel tag according to system"""
tag_platform = sysconfig.get_platform().replace("-", "_").replace(".", "_")
return f"py3-none-{tag_platform}"
class CustomBuildHook(BuildHookInterface):
"""A custom build hook"""
def initialize(self, version: str, build_data: Dict[str, Any]) -> None:
"""This step ccurs immediately before each build."""
build_data["tag"] = get_tag()
| 681
| 28.652174
| 79
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/rabit.py
|
"""Compatibility shim for xgboost.rabit; to be removed in 2.0"""
import logging
import warnings
from enum import IntEnum, unique
from typing import Any, Callable, List, Optional, TypeVar
import numpy as np
from . import collective
LOGGER = logging.getLogger("[xgboost.rabit]")
def _deprecation_warning() -> str:
return (
"The xgboost.rabit submodule is marked as deprecated in 1.7 and will be removed "
"in 2.0. Please use xgboost.collective instead."
)
def init(args: Optional[List[bytes]] = None) -> None:
"""Initialize the rabit library with arguments"""
warnings.warn(_deprecation_warning(), FutureWarning)
parsed = {}
if args:
for arg in args:
kv = arg.decode().split("=")
if len(kv) == 2:
parsed[kv[0]] = kv[1]
collective.init(**parsed)
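# For example (illustrative): init([b"dmlc_tracker_uri=127.0.0.1"]) is parsed
# into collective.init(dmlc_tracker_uri="127.0.0.1").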
def finalize() -> None:
"""Finalize the process, notify tracker everything is done."""
collective.finalize()
def get_rank() -> int:
"""Get rank of current process.
Returns
-------
rank : int
Rank of current process.
"""
return collective.get_rank()
def get_world_size() -> int:
"""Get total number workers.
Returns
-------
n : int
        Total number of processes.
"""
return collective.get_world_size()
def is_distributed() -> int:
"""If rabit is distributed."""
return collective.is_distributed()
def tracker_print(msg: Any) -> None:
"""Print message to the tracker.
    This function can be used to communicate progress information to the
    tracker.
Parameters
----------
msg : str
The message to be printed to tracker.
"""
collective.communicator_print(msg)
def get_processor_name() -> bytes:
"""Get the processor name.
Returns
-------
name : str
        the name of the processor (host)
"""
return collective.get_processor_name().encode()
T = TypeVar("T") # pylint:disable=invalid-name
def broadcast(data: T, root: int) -> T:
"""Broadcast object from one node to all other nodes.
Parameters
----------
data : any type that can be pickled
        Input data; if the current rank does not equal root, this can be None.
root : int
Rank of the node to broadcast data from.
Returns
-------
    object : same type as ``data``
the result of broadcast.
"""
return collective.broadcast(data, root)
@unique
class Op(IntEnum):
"""Supported operations for rabit."""
MAX = 0
MIN = 1
SUM = 2
OR = 3
def allreduce( # pylint:disable=invalid-name
data: np.ndarray, op: Op, prepare_fun: Optional[Callable[[np.ndarray], None]] = None
) -> np.ndarray:
"""Perform allreduce, return the result.
Parameters
----------
data :
Input data.
op :
        Reduction operator; can be MAX, MIN, SUM, or OR.
prepare_fun :
Lazy preprocessing function, if it is not None, prepare_fun(data)
will be called by the function before performing allreduce, to initialize the data
If the result of Allreduce can be recovered directly,
then prepare_fun will NOT be called
Returns
-------
result :
The result of allreduce, have same shape as data
Notes
-----
This function is not thread-safe.
"""
if prepare_fun is None:
return collective.allreduce(data, collective.Op(op))
raise ValueError("preprocessing function is no longer supported")
def version_number() -> int:
"""Returns version number of current stored model.
This means how many calls to CheckPoint we made so far.
Returns
-------
version : int
Version number of currently stored model
"""
return 0
class RabitContext:
"""A context controlling rabit initialization and finalization."""
def __init__(self, args: Optional[List[bytes]] = None) -> None:
if args is None:
args = []
self.args = args
def __enter__(self) -> None:
init(self.args)
assert is_distributed()
LOGGER.warning(_deprecation_warning())
LOGGER.debug("-------------- rabit say hello ------------------")
def __exit__(self, *args: List) -> None:
finalize()
LOGGER.debug("--------------- rabit say bye ------------------")
| 4,310
| 24.358824
| 90
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/libpath.py
|
# coding: utf-8
"""Find the path to xgboost dynamic library files."""
import os
import platform
import sys
from typing import List
class XGBoostLibraryNotFound(Exception):
"""Error thrown by when xgboost is not found"""
def find_lib_path() -> List[str]:
"""Find the path to xgboost dynamic library files.
Returns
-------
lib_path
List of all found library path to xgboost
"""
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
dll_path = [
# normal, after installation `lib` is copied into Python package tree.
os.path.join(curr_path, "lib"),
# editable installation, no copying is performed.
os.path.join(curr_path, os.path.pardir, os.path.pardir, "lib"),
# use libxgboost from a system prefix, if available. This should be the last
# option.
os.path.join(sys.base_prefix, "lib"),
]
if sys.platform == "win32":
if platform.architecture()[0] == "64bit":
dll_path.append(os.path.join(curr_path, "../../windows/x64/Release/"))
# hack for pip installation when copy all parent source
# directory here
dll_path.append(os.path.join(curr_path, "./windows/x64/Release/"))
else:
dll_path.append(os.path.join(curr_path, "../../windows/Release/"))
# hack for pip installation when copy all parent source
# directory here
dll_path.append(os.path.join(curr_path, "./windows/Release/"))
dll_path = [os.path.join(p, "xgboost.dll") for p in dll_path]
elif sys.platform.startswith(("linux", "freebsd", "emscripten")):
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
elif sys.platform == "darwin":
dll_path = [os.path.join(p, "libxgboost.dylib") for p in dll_path]
elif sys.platform == "cygwin":
dll_path = [os.path.join(p, "cygxgboost.dll") for p in dll_path]
if platform.system() == "OS400":
dll_path = [os.path.join(p, "libxgboost.so") for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
# XGBOOST_BUILD_DOC is defined by sphinx conf.
if not lib_path and not os.environ.get("XGBOOST_BUILD_DOC", False):
link = "https://xgboost.readthedocs.io/en/stable/install.html"
msg = (
"Cannot find XGBoost Library in the candidate path. "
+ "List of candidates:\n- "
+ ("\n- ".join(dll_path))
+ "\nXGBoost Python package path: "
+ curr_path
+ "\nsys.base_prefix: "
+ sys.base_prefix
+ "\nSee: "
+ link
+ " for installing XGBoost."
)
raise XGBoostLibraryNotFound(msg)
return lib_path
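# Hedged usage sketch (illustrative, not part of the original module): locating
# and hand-loading the shared library, mirroring what xgboost.core does at
# import time.
def _example_load_library() -> None:
    import ctypes

    path = find_lib_path()[0]
    lib = ctypes.cdll.LoadLibrary(path)
    print(f"loaded {path}: {lib}")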
| 2,791
| 37.246575
| 85
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/tracker.py
|
# pylint: disable=too-many-instance-attributes, too-many-arguments, too-many-branches
"""
This script is a variant of dmlc-core/dmlc_tracker/tracker.py,
which is a specialized version for xgboost tasks.
"""
import argparse
import logging
import socket
import struct
import sys
from threading import Thread
from typing import Dict, List, Optional, Set, Tuple, Union
_RingMap = Dict[int, Tuple[int, int]]
_TreeMap = Dict[int, List[int]]
class ExSocket:
"""
Extension of socket to handle recv and send of special data
"""
def __init__(self, sock: socket.socket) -> None:
self.sock = sock
def recvall(self, nbytes: int) -> bytes:
"""Receive number of bytes."""
res = []
nread = 0
while nread < nbytes:
chunk = self.sock.recv(min(nbytes - nread, 1024))
nread += len(chunk)
res.append(chunk)
return b"".join(res)
def recvint(self) -> int:
"""Receive an integer of 32 bytes"""
return struct.unpack("@i", self.recvall(4))[0]
def sendint(self, value: int) -> None:
"""Send an integer of 32 bytes"""
self.sock.sendall(struct.pack("@i", value))
def sendstr(self, value: str) -> None:
"""Send a Python string"""
self.sendint(len(value))
self.sock.sendall(value.encode())
def recvstr(self) -> str:
"""Receive a Python string"""
slen = self.recvint()
return self.recvall(slen).decode()
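# Hedged protocol note for the helpers above: integers travel as 4-byte
# native-endian values ("@i"), and strings are length-prefixed, e.g.
# sendstr("hi") writes the integer 2 followed by b"hi"; recvstr() reverses
# that framing.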
# magic number used to verify existence of data
MAGIC_NUM = 0xFF99
def get_some_ip(host: str) -> str:
"""Get ip from host"""
return socket.getaddrinfo(host, None)[0][4][0]
def get_family(addr: str) -> int:
"""Get network family from address."""
return socket.getaddrinfo(addr, None)[0][0]
class WorkerEntry:
"""Hanlder to each worker."""
def __init__(self, sock: socket.socket, s_addr: Tuple[str, int]):
worker = ExSocket(sock)
self.sock = worker
self.host = get_some_ip(s_addr[0])
magic = worker.recvint()
assert magic == MAGIC_NUM, f"invalid magic number={magic} from {self.host}"
worker.sendint(MAGIC_NUM)
self.rank = worker.recvint()
self.world_size = worker.recvint()
self.task_id = worker.recvstr()
self.cmd = worker.recvstr()
self.wait_accept = 0
self.port: Optional[int] = None
def print(self, use_logger: bool) -> None:
"""Execute the print command from worker."""
msg = self.sock.recvstr()
# On dask we use print to avoid setting global verbosity.
if use_logger:
logging.info(msg.strip())
else:
print(msg.strip(), flush=True)
def decide_rank(self, job_map: Dict[str, int]) -> int:
"""Get the rank of current entry."""
if self.rank >= 0:
return self.rank
if self.task_id != "NULL" and self.task_id in job_map:
return job_map[self.task_id]
return -1
def assign_rank(
self,
rank: int,
wait_conn: Dict[int, "WorkerEntry"],
tree_map: _TreeMap,
parent_map: Dict[int, int],
ring_map: _RingMap,
) -> List[int]:
"""Assign the rank for current entry."""
self.rank = rank
nnset = set(tree_map[rank])
rprev, next_rank = ring_map[rank]
self.sock.sendint(rank)
# send parent rank
self.sock.sendint(parent_map[rank])
# send world size
self.sock.sendint(len(tree_map))
self.sock.sendint(len(nnset))
# send the rprev and next link
for r in nnset:
self.sock.sendint(r)
# send prev link
if rprev not in (-1, rank):
nnset.add(rprev)
self.sock.sendint(rprev)
else:
self.sock.sendint(-1)
# send next link
if next_rank not in (-1, rank):
nnset.add(next_rank)
self.sock.sendint(next_rank)
else:
self.sock.sendint(-1)
return self._get_remote(wait_conn, nnset)
def _get_remote(
self, wait_conn: Dict[int, "WorkerEntry"], nnset: Set[int]
) -> List[int]:
while True:
ngood = self.sock.recvint()
goodset = set()
for _ in range(ngood):
goodset.add(self.sock.recvint())
assert goodset.issubset(nnset)
badset = nnset - goodset
conset = []
for r in badset:
if r in wait_conn:
conset.append(r)
self.sock.sendint(len(conset))
self.sock.sendint(len(badset) - len(conset))
for r in conset:
self.sock.sendstr(wait_conn[r].host)
port = wait_conn[r].port
assert port is not None
# send port of this node to other workers so that they can call connect
self.sock.sendint(port)
self.sock.sendint(r)
nerr = self.sock.recvint()
if nerr != 0:
continue
self.port = self.sock.recvint()
rmset = []
# all connections were successfully set up
for r in conset:
wait_conn[r].wait_accept -= 1
if wait_conn[r].wait_accept == 0:
rmset.append(r)
for r in rmset:
wait_conn.pop(r, None)
self.wait_accept = len(badset) - len(conset)
return rmset
class RabitTracker:
"""
tracker for rabit
"""
def __init__(
self,
host_ip: str,
n_workers: int,
port: int = 0,
use_logger: bool = False,
sortby: str = "host",
) -> None:
"""A Python implementation of RABIT tracker.
Parameters
----------
use_logger:
Use logging.info for tracker print command. When set to False, Python print
function is used instead.
sortby:
How to sort the workers for rank assignment. The default is host, but users
can set the `DMLC_TASK_ID` via RABIT initialization arguments and obtain
deterministic rank assignment. Available options are:
- host
- task
"""
sock = socket.socket(get_family(host_ip), socket.SOCK_STREAM)
sock.bind((host_ip, port))
self.port = sock.getsockname()[1]
sock.listen(256)
self.sock = sock
self.host_ip = host_ip
self.thread: Optional[Thread] = None
self.n_workers = n_workers
self._use_logger = use_logger
self._sortby = sortby
logging.info("start listen on %s:%d", host_ip, self.port)
def __del__(self) -> None:
if hasattr(self, "sock"):
self.sock.close()
@staticmethod
def _get_neighbor(rank: int, n_workers: int) -> List[int]:
rank = rank + 1
ret = []
if rank > 1:
ret.append(rank // 2 - 1)
if rank * 2 - 1 < n_workers:
ret.append(rank * 2 - 1)
if rank * 2 < n_workers:
ret.append(rank * 2)
return ret
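# Hedged worked example for the 1-based heap layout above: with n_workers=7,
# _get_neighbor(0, 7) -> [1, 2], _get_neighbor(1, 7) -> [0, 3, 4], and
# _get_neighbor(3, 7) -> [1].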
def worker_envs(self) -> Dict[str, Union[str, int]]:
"""
Get environment variables for workers; these can be passed in as
args or envs.
"""
return {"DMLC_TRACKER_URI": self.host_ip, "DMLC_TRACKER_PORT": self.port}
def _get_tree(self, n_workers: int) -> Tuple[_TreeMap, Dict[int, int]]:
tree_map: _TreeMap = {}
parent_map: Dict[int, int] = {}
for r in range(n_workers):
tree_map[r] = self._get_neighbor(r, n_workers)
parent_map[r] = (r + 1) // 2 - 1
return tree_map, parent_map
def find_share_ring(
self, tree_map: _TreeMap, parent_map: Dict[int, int], rank: int
) -> List[int]:
"""
Get a ring structure that tends to share nodes with the tree.
Returns a list starting from rank.
"""
nset = set(tree_map[rank])
cset = nset - {parent_map[rank]}
if not cset:
return [rank]
rlst = [rank]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst
def get_ring(self, tree_map: _TreeMap, parent_map: Dict[int, int]) -> _RingMap:
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map: _RingMap = {}
n_workers = len(tree_map)
for r in range(n_workers):
rprev = (r + n_workers - 1) % n_workers
rnext = (r + 1) % n_workers
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map
def get_link_map(self, n_workers: int) -> Tuple[_TreeMap, Dict[int, int], _RingMap]:
"""
Get the link map. This is a bit hacky and calls for a better
algorithm to place similar nodes together.
"""
tree_map, parent_map = self._get_tree(n_workers)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0: 0}
k = 0
for i in range(n_workers - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_: _RingMap = {}
tree_map_: _TreeMap = {}
parent_map_: Dict[int, int] = {}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, tree_nodes in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in tree_nodes]
for k, parent in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[parent]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_
def _sort_pending(self, pending: List[WorkerEntry]) -> List[WorkerEntry]:
if self._sortby == "host":
pending.sort(key=lambda s: s.host)
elif self._sortby == "task":
pending.sort(key=lambda s: s.task_id)
return pending
def accept_workers(self, n_workers: int) -> None:
"""Wait for all workers to connect to the tracker."""
# set of nodes that finishes the job
shutdown: Dict[int, WorkerEntry] = {}
# set of nodes that is waiting for connections
wait_conn: Dict[int, WorkerEntry] = {}
# maps job id to rank
job_map: Dict[str, int] = {}
# list of workers that is pending to be assigned rank
pending: List[WorkerEntry] = []
# lazy initialize tree_map
tree_map = None
while len(shutdown) != n_workers:
fd, s_addr = self.sock.accept()
s = WorkerEntry(fd, s_addr)
if s.cmd == "print":
s.print(self._use_logger)
continue
if s.cmd == "shutdown":
assert s.rank >= 0 and s.rank not in shutdown
assert s.rank not in wait_conn
shutdown[s.rank] = s
logging.debug("Received %s signal from %d", s.cmd, s.rank)
continue
assert s.cmd in ("start", "recover")
# lazily initialize the workers
if tree_map is None:
assert s.cmd == "start"
if s.world_size > 0:
n_workers = s.world_size
tree_map, parent_map, ring_map = self.get_link_map(n_workers)
# set of nodes that is pending for getting up
todo_nodes = list(range(n_workers))
else:
assert s.world_size in (-1, n_workers)
if s.cmd == "recover":
assert s.rank >= 0
rank = s.decide_rank(job_map)
# batch assignment of ranks
if rank == -1:
assert todo_nodes
pending.append(s)
if len(pending) == len(todo_nodes):
pending = self._sort_pending(pending)
for s in pending:
rank = todo_nodes.pop(0)
if s.task_id != "NULL":
job_map[s.task_id] = rank
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
if s.wait_accept > 0:
wait_conn[rank] = s
logging.debug(
"Received %s signal from %s; assign rank %d",
s.cmd,
s.host,
s.rank,
)
if not todo_nodes:
logging.info("@tracker All of %d nodes getting started", n_workers)
else:
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
logging.debug("Received %s signal from %d", s.cmd, s.rank)
if s.wait_accept > 0:
wait_conn[rank] = s
logging.info("@tracker All nodes finishes job")
def start(self, n_workers: int) -> None:
"""Strat the tracker, it will wait for `n_workers` to connect."""
def run() -> None:
self.accept_workers(n_workers)
self.thread = Thread(target=run, args=(), daemon=True)
self.thread.start()
def join(self) -> None:
"""Wait for the tracker to finish."""
while self.thread is not None and self.thread.is_alive():
self.thread.join(100)
def alive(self) -> bool:
"""Wether the tracker thread is alive"""
return self.thread is not None and self.thread.is_alive()
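# Hedged usage sketch (illustrative, not part of the original module): start a
# tracker for two local workers on an OS-chosen port and print the environment
# variables workers use to connect; join() would block until both shut down.
def _example_start_tracker() -> None:
    tracker = RabitTracker(host_ip="127.0.0.1", n_workers=2, port=0)
    print(tracker.worker_envs())
    tracker.start(2)
    assert tracker.alive()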
def get_host_ip(host_ip: Optional[str] = None) -> str:
"""Get the IP address of current host. If `host_ip` is not none then it will be
returned as it's
"""
if host_ip is None or host_ip == "auto":
host_ip = "ip"
if host_ip == "dns":
host_ip = socket.getfqdn()
elif host_ip == "ip":
from socket import gaierror
try:
host_ip = socket.gethostbyname(socket.getfqdn())
except gaierror:
logging.debug(
"gethostbyname(socket.getfqdn()) failed... trying on hostname()"
)
host_ip = socket.gethostbyname(socket.gethostname())
if host_ip.startswith("127."):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# doesn't have to be reachable
s.connect(("10.255.255.255", 1))
host_ip = s.getsockname()[0]
assert host_ip is not None
return host_ip
def start_rabit_tracker(args: argparse.Namespace) -> None:
"""Standalone function to start rabit tracker.
Parameters
----------
args: arguments to start the rabit tracker.
"""
envs = {"DMLC_NUM_WORKER": args.num_workers, "DMLC_NUM_SERVER": args.num_servers}
rabit = RabitTracker(
host_ip=get_host_ip(args.host_ip), n_workers=args.num_workers, use_logger=True
)
envs.update(rabit.worker_envs())
rabit.start(args.num_workers)
sys.stdout.write("DMLC_TRACKER_ENV_START\n")
# simply write configuration to stdout
for k, v in envs.items():
sys.stdout.write(f"{k}={v}\n")
sys.stdout.write("DMLC_TRACKER_ENV_END\n")
sys.stdout.flush()
rabit.join()
def main() -> None:
"""Main function if tracker is executed in standalone mode."""
parser = argparse.ArgumentParser(description="Rabit Tracker start.")
parser.add_argument(
"--num-workers",
required=True,
type=int,
help="Number of worker process to be launched.",
)
parser.add_argument(
"--num-servers",
default=0,
type=int,
help="Number of server process to be launched. Only used in PS jobs.",
)
parser.add_argument(
"--host-ip",
default=None,
type=str,
help=(
"Host IP addressed, this is only needed "
+ "if the host IP cannot be automatically guessed."
),
)
parser.add_argument(
"--log-level",
default="INFO",
type=str,
choices=["INFO", "DEBUG"],
help="Logging level of the logger.",
)
args = parser.parse_args()
fmt = "%(asctime)s %(levelname)s %(message)s"
if args.log_level == "INFO":
level = logging.INFO
elif args.log_level == "DEBUG":
level = logging.DEBUG
else:
raise RuntimeError(f"Unknown logging level {args.log_level}")
logging.basicConfig(format=fmt, level=level)
if args.num_servers == 0:
start_rabit_tracker(args)
else:
raise RuntimeError("Do not yet support start ps tracker in standalone mode.")
if __name__ == "__main__":
main()
| 16,918
| 32.109589
| 88
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/core.py
|
# pylint: disable=too-many-arguments, too-many-branches, invalid-name
# pylint: disable=too-many-lines, too-many-locals
"""Core XGBoost Library."""
import copy
import ctypes
import importlib.util
import json
import os
import re
import sys
import warnings
from abc import ABC, abstractmethod
from collections.abc import Mapping
from enum import IntEnum, unique
from functools import wraps
from inspect import Parameter, signature
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
import numpy as np
import scipy.sparse
from ._typing import (
_T,
ArrayLike,
BoosterParam,
CFloatPtr,
CNumeric,
CNumericPtr,
CStrPptr,
CStrPtr,
CTypeT,
CupyT,
DataType,
FeatureInfo,
FeatureNames,
FeatureTypes,
ModelIn,
NumpyOrCupy,
c_bst_ulong,
)
from .compat import PANDAS_INSTALLED, DataFrame, py_str
from .libpath import find_lib_path
class XGBoostError(ValueError):
"""Error thrown by xgboost trainer."""
@overload
def from_pystr_to_cstr(data: str) -> bytes:
...
@overload
def from_pystr_to_cstr(data: List[str]) -> ctypes.Array:
...
def from_pystr_to_cstr(data: Union[str, List[str]]) -> Union[bytes, ctypes.Array]:
"""Convert a Python str or list of Python str to C pointer
Parameters
----------
data
str or list of str
"""
if isinstance(data, str):
return bytes(data, "utf-8")
if isinstance(data, list):
data_as_bytes: List[bytes] = [bytes(d, "utf-8") for d in data]
pointers: ctypes.Array[ctypes.c_char_p] = (
ctypes.c_char_p * len(data_as_bytes)
)(*data_as_bytes)
return pointers
raise TypeError()
def from_cstr_to_pystr(data: CStrPptr, length: c_bst_ulong) -> List[str]:
"""Revert C pointer to Python str
Parameters
----------
data :
pointer to data
length :
pointer to length of data
"""
res = []
for i in range(length.value):
try:
res.append(str(cast(bytes, data[i]).decode("ascii")))
except UnicodeDecodeError:
res.append(str(cast(bytes, data[i]).decode("utf-8")))
return res
def make_jcargs(**kwargs: Any) -> bytes:
"Make JSON-based arguments for C functions."
return from_pystr_to_cstr(json.dumps(kwargs))
def _parse_eval_str(result: str) -> List[Tuple[str, float]]:
"""Parse an eval result string from the booster."""
splited = result.split()[1:]
# split up `test-error:0.1234`
metric_score_str = [tuple(s.split(":")) for s in splited]
# convert to float
metric_score = [(n, float(s)) for n, s in metric_score_str]
return metric_score
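# Hedged worked example for the parser above:
# _parse_eval_str("[0]\ttrain-rmse:1.50\ttest-rmse:2.00")
# -> [("train-rmse", 1.5), ("test-rmse", 2.0)]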
IterRange = TypeVar("IterRange", Optional[Tuple[int, int]], Tuple[int, int])
def _expect(expectations: Sequence[Type], got: Type) -> str:
"""Translate input error into string.
Parameters
----------
expectations :
a list of expected value.
got :
actual input
Returns
-------
msg: str
"""
msg = "Expecting "
for t in range(len(expectations) - 1):
msg += str(expectations[t])
msg += " or "
msg += str(expectations[-1])
msg += ". Got " + str(got)
return msg
def _log_callback(msg: bytes) -> None:
"""Redirect logs from native library into Python console"""
smsg = py_str(msg)
if smsg.find("WARNING:") != -1:
warnings.warn(smsg, UserWarning)
return
print(smsg)
def _get_log_callback_func() -> Callable:
"""Wrap log_callback() method in ctypes callback type"""
c_callback = ctypes.CFUNCTYPE(None, ctypes.c_char_p)
return c_callback(_log_callback)
def _lib_version(lib: ctypes.CDLL) -> Tuple[int, int, int]:
"""Get the XGBoost version from native shared object."""
major = ctypes.c_int()
minor = ctypes.c_int()
patch = ctypes.c_int()
lib.XGBoostVersion(ctypes.byref(major), ctypes.byref(minor), ctypes.byref(patch))
return major.value, minor.value, patch.value
def _py_version() -> str:
"""Get the XGBoost version from Python version file."""
VERSION_FILE = os.path.join(os.path.dirname(__file__), "VERSION")
with open(VERSION_FILE, encoding="ascii") as f:
return f.read().strip()
def _load_lib() -> ctypes.CDLL:
"""Load xgboost Library."""
lib_paths = find_lib_path()
if not lib_paths:
# This happens only when building document.
return None # type: ignore
try:
pathBackup = os.environ["PATH"].split(os.pathsep)
except KeyError:
pathBackup = []
lib_success = False
os_error_list = []
for lib_path in lib_paths:
try:
# needed when the lib is linked with non-system-available
# dependencies
os.environ["PATH"] = os.pathsep.join(
pathBackup + [os.path.dirname(lib_path)]
)
lib = ctypes.cdll.LoadLibrary(lib_path)
setattr(lib, "path", os.path.normpath(lib_path))
lib_success = True
except OSError as e:
os_error_list.append(str(e))
continue
finally:
os.environ["PATH"] = os.pathsep.join(pathBackup)
if not lib_success:
libname = os.path.basename(lib_paths[0])
raise XGBoostError(
f"""
XGBoost Library ({libname}) could not be loaded.
Likely causes:
* OpenMP runtime is not installed
- vcomp140.dll or libgomp-1.dll for Windows
- libomp.dylib for Mac OSX
- libgomp.so for Linux and other UNIX-like OSes
Mac OSX users: Run `brew install libomp` to install OpenMP runtime.
* You are running 32-bit Python on a 64-bit OS
Error message(s): {os_error_list}
"""
)
lib.XGBGetLastError.restype = ctypes.c_char_p
lib.callback = _get_log_callback_func() # type: ignore
if lib.XGBRegisterLogCallback(lib.callback) != 0:
raise XGBoostError(lib.XGBGetLastError())
def parse(ver: str) -> Tuple[int, int, int]:
"""Avoid dependency on packaging (PEP 440)."""
# 2.0.0-dev or 2.0.0
major, minor, patch = ver.split("-")[0].split(".")
return int(major), int(minor), int(patch)
libver = _lib_version(lib)
pyver = parse(_py_version())
# verify that we are loading the correct binary.
if pyver != libver:
pyver_str = ".".join((str(v) for v in pyver))
libver_str = ".".join((str(v) for v in libver))
msg = (
"Mismatched version between the Python package and the native shared "
f"""object. Python package version: {pyver_str}. Shared object """
f"""version: {libver_str}. Shared object is loaded from: {lib.path}.
Likely cause:
* XGBoost is first installed with anaconda then upgraded with pip. To fix it """
"please remove one of the installations."
)
raise ValueError(msg)
return lib
# load the XGBoost library globally
_LIB = _load_lib()
def _check_call(ret: int) -> None:
"""Check the return value of C API call
This function will raise exception when error occurs.
Wrap every API call with this function
Parameters
----------
ret :
return value from API calls
"""
if ret != 0:
raise XGBoostError(py_str(_LIB.XGBGetLastError()))
def _check_distributed_params(kwargs: Dict[str, Any]) -> None:
"""Validate parameters in distributed environments."""
device = kwargs.get("device", None)
if device and not isinstance(device, str):
msg = "Invalid type for the `device` parameter"
msg += _expect((str,), type(device))
raise TypeError(msg)
if device and device.find(":") != -1:
raise ValueError(
"Distributed training doesn't support selecting device ordinal as GPUs are"
" managed by the distributed framework. use `device=cuda` or `device=gpu`"
" instead."
)
if kwargs.get("booster", None) == "gblinear":
raise NotImplementedError(
f"booster `{kwargs['booster']}` is not supported for distributed training."
)
def build_info() -> dict:
"""Build information of XGBoost. The returned value format is not stable. Also,
please note that build time dependency is not the same as runtime dependency. For
instance, it's possible to build XGBoost with an older CUDA version but run it
with the latest one.
.. versionadded:: 1.6.0
"""
j_info = ctypes.c_char_p()
_check_call(_LIB.XGBuildInfo(ctypes.byref(j_info)))
assert j_info.value is not None
res = json.loads(j_info.value.decode()) # pylint: disable=no-member
res["libxgboost"] = _LIB.path
return res
def _numpy2ctypes_type(dtype: Type[np.number]) -> Type[CNumeric]:
_NUMPY_TO_CTYPES_MAPPING: Dict[Type[np.number], Type[CNumeric]] = {
np.float32: ctypes.c_float,
np.float64: ctypes.c_double,
np.uint32: ctypes.c_uint,
np.uint64: ctypes.c_uint64,
np.int32: ctypes.c_int32,
np.int64: ctypes.c_int64,
}
if np.intc is not np.int32: # Windows
_NUMPY_TO_CTYPES_MAPPING[np.intc] = _NUMPY_TO_CTYPES_MAPPING[np.int32]
if dtype not in _NUMPY_TO_CTYPES_MAPPING:
raise TypeError(
f"Supported types: {_NUMPY_TO_CTYPES_MAPPING.keys()}, got: {dtype}"
)
return _NUMPY_TO_CTYPES_MAPPING[dtype]
def _cuda_array_interface(data: DataType) -> bytes:
assert (
data.dtype.hasobject is False
), "Input data contains `object` dtype. Expecting numeric data."
interface = data.__cuda_array_interface__
if "mask" in interface:
interface["mask"] = interface["mask"].__cuda_array_interface__
interface_str = bytes(json.dumps(interface), "utf-8")
return interface_str
def ctypes2numpy(cptr: CNumericPtr, length: int, dtype: Type[np.number]) -> np.ndarray:
"""Convert a ctypes pointer array to a numpy array."""
ctype: Type[CNumeric] = _numpy2ctypes_type(dtype)
if not isinstance(cptr, ctypes.POINTER(ctype)):
raise RuntimeError(f"expected {ctype} pointer")
res = np.zeros(length, dtype=dtype)
if not ctypes.memmove(res.ctypes.data, cptr, length * res.strides[0]):
raise RuntimeError("memmove failed")
return res
def ctypes2cupy(cptr: CNumericPtr, length: int, dtype: Type[np.number]) -> CupyT:
"""Convert a ctypes pointer array to a cupy array."""
# pylint: disable=import-error
import cupy
from cupy.cuda.memory import MemoryPointer, UnownedMemory
CUPY_TO_CTYPES_MAPPING: Dict[Type[np.number], Type[CNumeric]] = {
cupy.float32: ctypes.c_float,
cupy.uint32: ctypes.c_uint,
}
if dtype not in CUPY_TO_CTYPES_MAPPING:
raise RuntimeError(f"Supported types: {CUPY_TO_CTYPES_MAPPING.keys()}")
addr = ctypes.cast(cptr, ctypes.c_void_p).value
# pylint: disable=c-extension-no-member,no-member
device = cupy.cuda.runtime.pointerGetAttributes(addr).device
# The owner field is just used to keep the memory alive with ref count. As
# unowned's lifetime is scoped within this function, we don't need that.
unownd = UnownedMemory(
addr, length * ctypes.sizeof(CUPY_TO_CTYPES_MAPPING[dtype]), owner=None
)
memptr = MemoryPointer(unownd, 0)
# pylint: disable=unexpected-keyword-arg
mem = cupy.ndarray((length,), dtype=dtype, memptr=memptr)
assert mem.device.id == device
arr = cupy.array(mem, copy=True)
return arr
def ctypes2buffer(cptr: CStrPtr, length: int) -> bytearray:
"""Convert ctypes pointer to buffer type."""
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise RuntimeError("expected char pointer")
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError("memmove failed")
return res
def c_str(string: str) -> ctypes.c_char_p:
"""Convert a python string to cstring."""
return ctypes.c_char_p(string.encode("utf-8"))
def c_array(
ctype: Type[CTypeT], values: ArrayLike
) -> Union[ctypes.Array, ctypes._Pointer]:
"""Convert a python array to c array."""
if isinstance(values, np.ndarray) and values.dtype.itemsize == ctypes.sizeof(ctype):
return values.ctypes.data_as(ctypes.POINTER(ctype))
return (ctype * len(values))(*values)
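# Hedged note on the fast path above: a numpy array whose itemsize matches the
# requested ctype is handed over zero-copy through its ctypes pointer, while
# any other sequence is copied element-wise into a fresh ctypes array.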
def from_array_interface(interface: dict) -> NumpyOrCupy:
"""Convert array interface to numpy or cupy array"""
class Array: # pylint: disable=too-few-public-methods
"""Wrapper type for communicating with numpy and cupy."""
_interface: Optional[dict] = None
@property
def __array_interface__(self) -> Optional[dict]:
return self._interface
@__array_interface__.setter
def __array_interface__(self, interface: dict) -> None:
self._interface = copy.copy(interface)
# converts some fields to tuple as required by numpy
self._interface["shape"] = tuple(self._interface["shape"])
self._interface["data"] = tuple(self._interface["data"])
if self._interface.get("strides", None) is not None:
self._interface["strides"] = tuple(self._interface["strides"])
@property
def __cuda_array_interface__(self) -> Optional[dict]:
return self.__array_interface__
@__cuda_array_interface__.setter
def __cuda_array_interface__(self, interface: dict) -> None:
self.__array_interface__ = interface
arr = Array()
if "stream" in interface:
# CUDA stream is presented, this is a __cuda_array_interface__.
spec = importlib.util.find_spec("cupy")
if spec is None:
raise ImportError("`cupy` is required for handling CUDA buffer.")
import cupy as cp # pylint: disable=import-error
arr.__cuda_array_interface__ = interface
out = cp.array(arr, copy=True)
else:
arr.__array_interface__ = interface
out = np.array(arr, copy=True)
return out
def _prediction_output(
shape: CNumericPtr, dims: c_bst_ulong, predts: CFloatPtr, is_cuda: bool
) -> NumpyOrCupy:
arr_shape = ctypes2numpy(shape, dims.value, np.uint64)
length = int(np.prod(arr_shape))
if is_cuda:
arr_predict = ctypes2cupy(predts, length, np.float32)
else:
arr_predict = ctypes2numpy(predts, length, np.float32)
arr_predict = arr_predict.reshape(arr_shape)
return arr_predict
class DataIter(ABC): # pylint: disable=too-many-instance-attributes
"""The interface for user defined data iterator.
Parameters
----------
cache_prefix :
Prefix to the cache files, only used in external memory. It can be either an
URI or a file path.
release_data :
Whether the iterator should release the data during reset. Set it to True if the
data transformation (converting data to np.float32 type) is expensive.
"""
def __init__(
self, cache_prefix: Optional[str] = None, release_data: bool = True
) -> None:
self.cache_prefix = cache_prefix
self._handle = _ProxyDMatrix()
self._exception: Optional[Exception] = None
self._enable_categorical = False
self._allow_host = True
self._release = release_data
# Stage data in Python until reset or next is called to avoid data being freed.
self._temporary_data: Optional[Tuple[Any, Any, Any, Any]] = None
self._input_id: int = 0
def get_callbacks(
self, allow_host: bool, enable_categorical: bool
) -> Tuple[Callable, Callable]:
"""Get callback functions for iterating in C."""
assert hasattr(self, "cache_prefix"), "__init__ is not called."
self._reset_callback = ctypes.CFUNCTYPE(None, ctypes.c_void_p)(
self._reset_wrapper
)
self._next_callback = ctypes.CFUNCTYPE(
ctypes.c_int,
ctypes.c_void_p,
)(self._next_wrapper)
self._allow_host = allow_host
self._enable_categorical = enable_categorical
return self._reset_callback, self._next_callback
@property
def proxy(self) -> "_ProxyDMatrix":
"""Handle of DMatrix proxy."""
return self._handle
def _handle_exception(self, fn: Callable, dft_ret: _T) -> _T:
if self._exception is not None:
return dft_ret
try:
return fn()
except Exception as e: # pylint: disable=broad-except
# Defer the exception in order to return 0 and stop the iteration.
# An exception inside a ctypes callback function has no effect except
# for printing to stderr (doesn't stop the execution).
tb = sys.exc_info()[2]
# On dask, the worker is restarted and somehow the information is
# lost.
self._exception = e.with_traceback(tb)
return dft_ret
def reraise(self) -> None:
"""Reraise the exception thrown during iteration."""
self._temporary_data = None
if self._exception is not None:
# pylint 2.7.0 believes `self._exception` can be None even with `assert
# isinstance`
exc = self._exception
self._exception = None
raise exc # pylint: disable=raising-bad-type
def __del__(self) -> None:
assert self._temporary_data is None
assert self._exception is None
def _reset_wrapper(self, this: None) -> None: # pylint: disable=unused-argument
"""A wrapper for user defined `reset` function."""
# free the data
if self._release:
self._temporary_data = None
self._handle_exception(self.reset, None)
def _next_wrapper(self, this: None) -> int: # pylint: disable=unused-argument
"""A wrapper for user defined `next` function.
`this` is not used in Python. ctypes can handle `self` of a Python
member function automatically when converting it to a C function
pointer.
"""
@require_keyword_args(True)
def input_data(
data: Any,
*,
feature_names: Optional[FeatureNames] = None,
feature_types: Optional[FeatureTypes] = None,
**kwargs: Any,
) -> None:
from .data import _proxy_transform, dispatch_proxy_set_data
# Reduce the amount of transformation that's needed for QuantileDMatrix.
if self._temporary_data is not None and id(data) == self._input_id:
new, cat_codes, feature_names, feature_types = self._temporary_data
else:
new, cat_codes, feature_names, feature_types = _proxy_transform(
data,
feature_names,
feature_types,
self._enable_categorical,
)
# Stage the data, meta info are copied inside C++ MetaInfo.
self._temporary_data = (new, cat_codes, feature_names, feature_types)
dispatch_proxy_set_data(self.proxy, new, cat_codes, self._allow_host)
self.proxy.set_info(
feature_names=feature_names,
feature_types=feature_types,
**kwargs,
)
self._input_id = id(data)
# pylint: disable=not-callable
return self._handle_exception(lambda: self.next(input_data), 0)
@abstractmethod
def reset(self) -> None:
"""Reset the data iterator. Prototype for user defined function."""
raise NotImplementedError()
@abstractmethod
def next(self, input_data: Callable) -> int:
"""Set the next batch of data.
Parameters
----------
input_data:
A function with the same data fields (e.g. `data`, `label`) as
`xgboost.DMatrix`.
Returns
-------
0 if there's no more batch, otherwise 1.
"""
raise NotImplementedError()
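# Hedged sketch (illustrative, not part of the original module): a minimal
# DataIter feeding two in-memory numpy batches, mirroring the pattern used by
# the external-memory and QuantileDMatrix demos.
class _ExampleNumpyIter(DataIter):
    def __init__(self, batches: List[Tuple[np.ndarray, np.ndarray]]) -> None:
        self._batches = batches
        self._it = 0
        super().__init__(cache_prefix=None)

    def next(self, input_data: Callable) -> int:
        if self._it == len(self._batches):
            return 0  # no batch left, stop iterating
        X, y = self._batches[self._it]
        input_data(data=X, label=y)
        self._it += 1
        return 1

    def reset(self) -> None:
        self._it = 0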
# Notice for `require_keyword_args`
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# Sylvain Marie
# License: BSD 3 clause
def require_keyword_args(
error: bool,
) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
"""Decorator for methods that issues warnings for positional arguments
Using the keyword-only argument syntax in pep 3102, arguments after the
* will issue a warning or error when passed as a positional argument.
Modified from sklearn utils.validation.
Parameters
----------
error :
Whether to throw an error or raise a warning.
"""
def throw_if(func: Callable[..., _T]) -> Callable[..., _T]:
"""Throw an error/warning if there are positional arguments after the asterisk.
Parameters
----------
func :
function to check arguments on.
"""
sig = signature(func)
kwonly_args = []
all_args = []
for name, param in sig.parameters.items():
if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
all_args.append(name)
elif param.kind == Parameter.KEYWORD_ONLY:
kwonly_args.append(name)
@wraps(func)
def inner_f(*args: Any, **kwargs: Any) -> _T:
extra_args = len(args) - len(all_args)
if extra_args > 0:
# ignore first 'self' argument for instance methods
args_msg = [
f"{name}"
for name, _ in zip(kwonly_args[:extra_args], args[-extra_args:])
]
# pylint: disable=consider-using-f-string
msg = "Pass `{}` as keyword args.".format(", ".join(args_msg))
if error:
raise TypeError(msg)
warnings.warn(msg, FutureWarning)
for k, arg in zip(sig.parameters, args):
kwargs[k] = arg
return func(**kwargs)
return inner_f
return throw_if
_deprecate_positional_args = require_keyword_args(False)
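# Hedged sketch of the decorator's behaviour (illustrative only): with
# error=True a positional use of a keyword-only parameter raises TypeError,
# while error=False only warns and forwards the arguments.
@require_keyword_args(True)
def _example_required(a: int, *, b: int = 0) -> int:
    return a + b
# _example_required(1, b=2) -> 3
# _example_required(1, 2)   -> TypeError("Pass `b` as keyword args.")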
@unique
class DataSplitMode(IntEnum):
"""Supported data split mode for DMatrix."""
ROW = 0
COL = 1
class DMatrix: # pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Data Matrix used in XGBoost.
DMatrix is an internal data structure that is used by XGBoost, which is optimized
for both memory efficiency and training speed. You can construct DMatrix from
multiple different sources of data.
"""
@_deprecate_positional_args
def __init__(
self,
data: DataType,
label: Optional[ArrayLike] = None,
*,
weight: Optional[ArrayLike] = None,
base_margin: Optional[ArrayLike] = None,
missing: Optional[float] = None,
silent: bool = False,
feature_names: Optional[FeatureNames] = None,
feature_types: Optional[FeatureTypes] = None,
nthread: Optional[int] = None,
group: Optional[ArrayLike] = None,
qid: Optional[ArrayLike] = None,
label_lower_bound: Optional[ArrayLike] = None,
label_upper_bound: Optional[ArrayLike] = None,
feature_weights: Optional[ArrayLike] = None,
enable_categorical: bool = False,
data_split_mode: DataSplitMode = DataSplitMode.ROW,
) -> None:
"""Parameters
----------
data :
Data source of DMatrix. See :ref:`py-data` for a list of supported input
types.
label :
Label of the training data.
weight :
Weight for each instance.
.. note::
For ranking task, weights are per-group. In ranking task, one weight
is assigned to each group (not each data point). This is because we
only care about the relative ordering of data points within each group,
so it doesn't make sense to assign weights to individual data points.
base_margin :
Base margin used for boosting from existing model.
missing :
Value in the input data that should be treated as missing. If
None, defaults to np.nan.
silent :
Whether print messages during construction
feature_names :
Set names for features.
feature_types :
Set types for features. When `enable_categorical` is set to `True`, string
"c" represents categorical data type while "q" represents numerical feature
type. For categorical features, the input is assumed to be preprocessed and
encoded by the users. The encoding can be done via
:py:class:`sklearn.preprocessing.OrdinalEncoder` or pandas dataframe
`.cat.codes` method. This is useful when users want to specify categorical
features without having to construct a dataframe as input.
nthread :
Number of threads to use for loading data when parallelization is
applicable. If -1, uses maximum threads available on the system.
group :
Group size for all ranking group.
qid :
Query ID for data samples, used for ranking.
label_lower_bound :
Lower bound for survival training.
label_upper_bound :
Upper bound for survival training.
feature_weights :
Set feature weights for column sampling.
enable_categorical :
.. versionadded:: 1.3.0
.. note:: This parameter is experimental
Experimental support of specializing for categorical features. Do not set
to True unless you are interested in development. Also, JSON/UBJSON
serialization format is required.
"""
if group is not None and qid is not None:
raise ValueError("Either one of `group` or `qid` should be None.")
self.missing = missing if missing is not None else np.nan
self.nthread = nthread if nthread is not None else -1
self.silent = silent
# force into void_p, macOS needs to pass things in as void_p
if data is None:
self.handle: Optional[ctypes.c_void_p] = None
return
from .data import _is_iter, dispatch_data_backend
if _is_iter(data):
self._init_from_iter(data, enable_categorical)
assert self.handle is not None
return
handle, feature_names, feature_types = dispatch_data_backend(
data,
missing=self.missing,
threads=self.nthread,
feature_names=feature_names,
feature_types=feature_types,
enable_categorical=enable_categorical,
data_split_mode=data_split_mode,
)
assert handle is not None
self.handle = handle
self.set_info(
label=label,
weight=weight,
base_margin=base_margin,
group=group,
qid=qid,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound,
feature_weights=feature_weights,
)
if feature_names is not None:
self.feature_names = feature_names
if feature_types is not None:
self.feature_types = feature_types
def _init_from_iter(self, iterator: DataIter, enable_categorical: bool) -> None:
it = iterator
args = {
"missing": self.missing,
"nthread": self.nthread,
"cache_prefix": it.cache_prefix if it.cache_prefix else "",
}
args_cstr = from_pystr_to_cstr(json.dumps(args))
handle = ctypes.c_void_p()
reset_callback, next_callback = it.get_callbacks(True, enable_categorical)
ret = _LIB.XGDMatrixCreateFromCallback(
None,
it.proxy.handle,
reset_callback,
next_callback,
args_cstr,
ctypes.byref(handle),
)
it.reraise()
# delay check_call to throw intermediate exception first
_check_call(ret)
self.handle = handle
def __del__(self) -> None:
if hasattr(self, "handle") and self.handle:
_check_call(_LIB.XGDMatrixFree(self.handle))
self.handle = None
@_deprecate_positional_args
def set_info(
self,
*,
label: Optional[ArrayLike] = None,
weight: Optional[ArrayLike] = None,
base_margin: Optional[ArrayLike] = None,
group: Optional[ArrayLike] = None,
qid: Optional[ArrayLike] = None,
label_lower_bound: Optional[ArrayLike] = None,
label_upper_bound: Optional[ArrayLike] = None,
feature_names: Optional[FeatureNames] = None,
feature_types: Optional[FeatureTypes] = None,
feature_weights: Optional[ArrayLike] = None,
) -> None:
"""Set meta info for DMatrix. See doc string for :py:obj:`xgboost.DMatrix`."""
from .data import dispatch_meta_backend
if label is not None:
self.set_label(label)
if weight is not None:
self.set_weight(weight)
if base_margin is not None:
self.set_base_margin(base_margin)
if group is not None:
self.set_group(group)
if qid is not None:
self.set_uint_info("qid", qid)
if label_lower_bound is not None:
self.set_float_info("label_lower_bound", label_lower_bound)
if label_upper_bound is not None:
self.set_float_info("label_upper_bound", label_upper_bound)
if feature_names is not None:
self.feature_names = feature_names
if feature_types is not None:
self.feature_types = feature_types
if feature_weights is not None:
dispatch_meta_backend(
matrix=self, data=feature_weights, name="feature_weights"
)
def get_float_info(self, field: str) -> np.ndarray:
"""Get float property from the DMatrix.
Parameters
----------
field: str
The field name of the information
Returns
-------
info : array
a numpy array of float information of the data
"""
length = c_bst_ulong()
ret = ctypes.POINTER(ctypes.c_float)()
_check_call(
_LIB.XGDMatrixGetFloatInfo(
self.handle, c_str(field), ctypes.byref(length), ctypes.byref(ret)
)
)
return ctypes2numpy(ret, length.value, np.float32)
def get_uint_info(self, field: str) -> np.ndarray:
"""Get unsigned integer property from the DMatrix.
Parameters
----------
field: str
The field name of the information
Returns
-------
info : array
a numpy array of unsigned integer information of the data
"""
length = c_bst_ulong()
ret = ctypes.POINTER(ctypes.c_uint)()
_check_call(
_LIB.XGDMatrixGetUIntInfo(
self.handle, c_str(field), ctypes.byref(length), ctypes.byref(ret)
)
)
return ctypes2numpy(ret, length.value, np.uint32)
def set_float_info(self, field: str, data: ArrayLike) -> None:
"""Set float type property into the DMatrix.
Parameters
----------
field: str
The field name of the information
data: numpy array
The array of data to be set
"""
from .data import dispatch_meta_backend
dispatch_meta_backend(self, data, field, "float")
def set_float_info_npy2d(self, field: str, data: ArrayLike) -> None:
"""Set float type property into the DMatrix
for numpy 2d array input
Parameters
----------
field: str
The field name of the information
data: numpy array
The array of data to be set
"""
from .data import dispatch_meta_backend
dispatch_meta_backend(self, data, field, "float")
def set_uint_info(self, field: str, data: ArrayLike) -> None:
"""Set uint type property into the DMatrix.
Parameters
----------
field: str
The field name of the information
data: numpy array
The array of data to be set
"""
from .data import dispatch_meta_backend
dispatch_meta_backend(self, data, field, "uint32")
def save_binary(self, fname: Union[str, os.PathLike], silent: bool = True) -> None:
"""Save DMatrix to an XGBoost buffer. Saved binary can be later loaded
by providing the path to :py:func:`xgboost.DMatrix` as input.
Parameters
----------
fname : string or os.PathLike
Name of the output buffer file.
silent : bool (optional; default: True)
If set, the output is suppressed.
"""
fname = os.fspath(os.path.expanduser(fname))
_check_call(
_LIB.XGDMatrixSaveBinary(self.handle, c_str(fname), ctypes.c_int(silent))
)
def set_label(self, label: ArrayLike) -> None:
"""Set label of dmatrix
Parameters
----------
label: array like
The label information to be set into DMatrix
"""
from .data import dispatch_meta_backend
dispatch_meta_backend(self, label, "label", "float")
def set_weight(self, weight: ArrayLike) -> None:
"""Set weight of each instance.
Parameters
----------
weight : array like
Weight for each data point
.. note:: For ranking task, weights are per-group.
In ranking task, one weight is assigned to each group (not each
data point). This is because we only care about the relative
ordering of data points within each group, so it doesn't make
sense to assign weights to individual data points.
"""
from .data import dispatch_meta_backend
dispatch_meta_backend(self, weight, "weight", "float")
def set_base_margin(self, margin: ArrayLike) -> None:
"""Set base margin of booster to start from.
This can be used to specify the prediction value of an existing model as
the base_margin. Note that the raw margin is required rather than the
transformed prediction: for logistic regression, supply values before the
logistic transformation. See also example/demo.py.
Parameters
----------
margin: array like
Prediction margin of each datapoint
"""
from .data import dispatch_meta_backend
dispatch_meta_backend(self, margin, "base_margin", "float")
def set_group(self, group: ArrayLike) -> None:
"""Set group size of DMatrix (used for ranking).
Parameters
----------
group : array like
Group size of each group
"""
from .data import dispatch_meta_backend
dispatch_meta_backend(self, group, "group", "uint32")
def get_label(self) -> np.ndarray:
"""Get the label of the DMatrix.
Returns
-------
label : array
"""
return self.get_float_info("label")
def get_weight(self) -> np.ndarray:
"""Get the weight of the DMatrix.
Returns
-------
weight : array
"""
return self.get_float_info("weight")
def get_base_margin(self) -> np.ndarray:
"""Get the base margin of the DMatrix.
Returns
-------
base_margin
"""
return self.get_float_info("base_margin")
def get_group(self) -> np.ndarray:
"""Get the group of the DMatrix.
Returns
-------
group
"""
group_ptr = self.get_uint_info("group_ptr")
return np.diff(group_ptr)
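# Hedged worked example: a group_ptr of [0, 3, 7] denotes two query groups,
# and np.diff gives their sizes [3, 4].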
def get_data(self) -> scipy.sparse.csr_matrix:
"""Get the predictors from DMatrix as a CSR matrix. This getter is mostly for
testing purposes. If this is a quantized DMatrix then quantized values are
returned instead of input values.
.. versionadded:: 1.7.0
"""
indptr = np.empty(self.num_row() + 1, dtype=np.uint64)
indices = np.empty(self.num_nonmissing(), dtype=np.uint32)
data = np.empty(self.num_nonmissing(), dtype=np.float32)
c_indptr = indptr.ctypes.data_as(ctypes.POINTER(c_bst_ulong))
c_indices = indices.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
c_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
config = from_pystr_to_cstr(json.dumps({}))
_check_call(
_LIB.XGDMatrixGetDataAsCSR(self.handle, config, c_indptr, c_indices, c_data)
)
ret = scipy.sparse.csr_matrix(
(data, indices, indptr), shape=(self.num_row(), self.num_col())
)
return ret
def get_quantile_cut(self) -> Tuple[np.ndarray, np.ndarray]:
"""Get quantile cuts for quantization."""
n_features = self.num_col()
c_sindptr = ctypes.c_char_p()
c_sdata = ctypes.c_char_p()
config = make_jcargs()
_check_call(
_LIB.XGDMatrixGetQuantileCut(
self.handle, config, ctypes.byref(c_sindptr), ctypes.byref(c_sdata)
)
)
assert c_sindptr.value is not None
assert c_sdata.value is not None
i_indptr = json.loads(c_sindptr.value)
indptr = from_array_interface(i_indptr)
assert indptr.size == n_features + 1
assert indptr.dtype == np.uint64
i_data = json.loads(c_sdata.value)
data = from_array_interface(i_data)
assert data.size == indptr[-1]
assert data.dtype == np.float32
return indptr, data
def num_row(self) -> int:
"""Get the number of rows in the DMatrix."""
ret = c_bst_ulong()
_check_call(_LIB.XGDMatrixNumRow(self.handle, ctypes.byref(ret)))
return ret.value
def num_col(self) -> int:
"""Get the number of columns (features) in the DMatrix."""
ret = c_bst_ulong()
_check_call(_LIB.XGDMatrixNumCol(self.handle, ctypes.byref(ret)))
return ret.value
def num_nonmissing(self) -> int:
"""Get the number of non-missing values in the DMatrix.
.. versionadded:: 1.7.0
"""
ret = c_bst_ulong()
_check_call(_LIB.XGDMatrixNumNonMissing(self.handle, ctypes.byref(ret)))
return ret.value
def slice(
self, rindex: Union[List[int], np.ndarray], allow_groups: bool = False
) -> "DMatrix":
"""Slice the DMatrix and return a new DMatrix that only contains `rindex`.
Parameters
----------
rindex
List of indices to be selected.
allow_groups
Allow slicing of a matrix with a groups attribute
Returns
-------
res
A new DMatrix containing only selected indices.
"""
from .data import _maybe_np_slice
res = DMatrix(None)
res.handle = ctypes.c_void_p()
rindex = _maybe_np_slice(rindex, dtype=np.int32)
_check_call(
_LIB.XGDMatrixSliceDMatrixEx(
self.handle,
c_array(ctypes.c_int, rindex),
c_bst_ulong(len(rindex)),
ctypes.byref(res.handle),
ctypes.c_int(1 if allow_groups else 0),
)
)
return res
@property
def feature_names(self) -> Optional[FeatureNames]:
"""Get feature names (column labels).
Returns
-------
feature_names : list or None
"""
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
_check_call(
_LIB.XGDMatrixGetStrFeatureInfo(
self.handle,
c_str("feature_name"),
ctypes.byref(length),
ctypes.byref(sarr),
)
)
feature_names = from_cstr_to_pystr(sarr, length)
if not feature_names:
return None
return feature_names
@feature_names.setter
def feature_names(self, feature_names: Optional[FeatureNames]) -> None:
"""Set feature names (column labels).
Parameters
----------
feature_names : list or None
Labels for features. None will reset existing feature names
"""
if feature_names is not None:
# validate feature name
try:
if not isinstance(feature_names, str):
feature_names = list(feature_names)
else:
feature_names = [feature_names]
except TypeError:
feature_names = [cast(str, feature_names)]
if len(feature_names) != len(set(feature_names)):
raise ValueError("feature_names must be unique")
if len(feature_names) != self.num_col() and self.num_col() != 0:
msg = (
"feature_names must have the same length as data, ",
f"expected {self.num_col()}, got {len(feature_names)}",
)
raise ValueError(msg)
# prohibit the use of symbols that may break parsing, e.g. [, ], <
if not all(
isinstance(f, str) and not any(x in f for x in ["[", "]", "<"])
for f in feature_names
):
raise ValueError(
"feature_names must be string, and may not contain [, ] or <"
)
feature_names_bytes = [bytes(f, encoding="utf-8") for f in feature_names]
c_feature_names = (ctypes.c_char_p * len(feature_names_bytes))(
*feature_names_bytes
)
_check_call(
_LIB.XGDMatrixSetStrFeatureInfo(
self.handle,
c_str("feature_name"),
c_feature_names,
c_bst_ulong(len(feature_names)),
)
)
else:
# reset feature_types also
_check_call(
_LIB.XGDMatrixSetStrFeatureInfo(
self.handle, c_str("feature_name"), None, c_bst_ulong(0)
)
)
self.feature_types = None
@property
def feature_types(self) -> Optional[FeatureTypes]:
"""Get feature types (column types).
Returns
-------
feature_types : list or None
"""
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
_check_call(
_LIB.XGDMatrixGetStrFeatureInfo(
self.handle,
c_str("feature_type"),
ctypes.byref(length),
ctypes.byref(sarr),
)
)
res = from_cstr_to_pystr(sarr, length)
if not res:
return None
return res
@feature_types.setter
def feature_types(self, feature_types: Optional[Union[List[str], str]]) -> None:
"""Set feature types (column types).
This is for displaying the results and categorical data support. See
:py:class:`DMatrix` for details.
Parameters
----------
feature_types :
Labels for features. None will reset existing feature names
"""
# For compatibility reasons this function wraps a single str input into a list.
# But we should not promote such usage since, beyond visualization, the field
# is also used for specifying the categorical data type.
if feature_types is not None:
if not isinstance(feature_types, (list, str)):
raise TypeError("feature_types must be string or list of strings")
if isinstance(feature_types, str):
# single string will be applied to all columns
feature_types = [feature_types] * self.num_col()
try:
if not isinstance(feature_types, str):
feature_types = list(feature_types)
else:
feature_types = [feature_types]
except TypeError:
feature_types = [cast(str, feature_types)]
feature_types_bytes = [bytes(f, encoding="utf-8") for f in feature_types]
c_feature_types = (ctypes.c_char_p * len(feature_types_bytes))(
*feature_types_bytes
)
_check_call(
_LIB.XGDMatrixSetStrFeatureInfo(
self.handle,
c_str("feature_type"),
c_feature_types,
c_bst_ulong(len(feature_types)),
)
)
if len(feature_types) != self.num_col() and self.num_col() != 0:
msg = "feature_types must have the same length as data"
raise ValueError(msg)
else:
# Reset.
_check_call(
_LIB.XGDMatrixSetStrFeatureInfo(
self.handle, c_str("feature_type"), None, c_bst_ulong(0)
)
)
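# Hedged usage sketch (illustrative, not part of the original module): building
# a DMatrix from a dense numpy matrix with labels and reading back its shape.
def _example_dmatrix() -> None:
    rng = np.random.default_rng(0)
    X = rng.random((8, 3), dtype=np.float32)
    y = np.arange(8, dtype=np.float32)
    dtrain = DMatrix(X, label=y, feature_names=["f0", "f1", "f2"])
    assert (dtrain.num_row(), dtrain.num_col()) == (8, 3)
    assert dtrain.get_label().shape == (8,)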
class _ProxyDMatrix(DMatrix):
"""A placeholder class when DMatrix cannot be constructed (QuantileDMatrix,
inplace_predict).
"""
def __init__(self) -> None: # pylint: disable=super-init-not-called
self.handle = ctypes.c_void_p()
_check_call(_LIB.XGProxyDMatrixCreate(ctypes.byref(self.handle)))
def _set_data_from_cuda_interface(self, data: DataType) -> None:
"""Set data from CUDA array interface."""
interface = data.__cuda_array_interface__
interface_str = bytes(json.dumps(interface), "utf-8")
_check_call(
_LIB.XGProxyDMatrixSetDataCudaArrayInterface(self.handle, interface_str)
)
def _set_data_from_cuda_columnar(self, data: DataType, cat_codes: list) -> None:
"""Set data from CUDA columnar format."""
from .data import _cudf_array_interfaces
interfaces_str = _cudf_array_interfaces(data, cat_codes)
_check_call(_LIB.XGProxyDMatrixSetDataCudaColumnar(self.handle, interfaces_str))
def _set_data_from_array(self, data: np.ndarray) -> None:
"""Set data from numpy array."""
from .data import _array_interface
_check_call(
_LIB.XGProxyDMatrixSetDataDense(self.handle, _array_interface(data))
)
def _set_data_from_csr(self, csr: scipy.sparse.csr_matrix) -> None:
"""Set data from scipy csr"""
from .data import _array_interface
_LIB.XGProxyDMatrixSetDataCSR(
self.handle,
_array_interface(csr.indptr),
_array_interface(csr.indices),
_array_interface(csr.data),
ctypes.c_size_t(csr.shape[1]),
)
class QuantileDMatrix(DMatrix):
"""A DMatrix variant that generates quantilized data directly from input for the
``hist`` tree method. This DMatrix is primarily designed to save memory in training
by avoiding intermediate storage. Set ``max_bin`` to control the number of bins
during quantisation, which should be consistent with the training parameter
``max_bin``. When ``QuantileDMatrix`` is used as a validation/test dataset,
``ref`` should be another ``QuantileDMatrix`` (or a ``DMatrix``, though that is
not recommended as it defeats the purpose of saving memory) constructed from
the training dataset. See
:py:obj:`xgboost.DMatrix` for documents on meta info.
.. note::
Do not use ``QuantileDMatrix`` as validation/test dataset without supplying a
reference (the training dataset) ``QuantileDMatrix`` using ``ref`` as some
information may be lost in quantisation.
.. versionadded:: 1.7.0
Parameters
----------
max_bin :
The number of histogram bins, which should be consistent with the training parameter
``max_bin``.
ref :
The training dataset that provides quantile information, needed when creating
validation/test dataset with ``QuantileDMatrix``. Supplying the training DMatrix
as a reference means that the same quantisation applied to the training data is
applied to the validation/test data.
"""
@_deprecate_positional_args
def __init__( # pylint: disable=super-init-not-called
self,
data: DataType,
label: Optional[ArrayLike] = None,
*,
weight: Optional[ArrayLike] = None,
base_margin: Optional[ArrayLike] = None,
missing: Optional[float] = None,
silent: bool = False,
feature_names: Optional[FeatureNames] = None,
feature_types: Optional[FeatureTypes] = None,
nthread: Optional[int] = None,
max_bin: Optional[int] = None,
ref: Optional[DMatrix] = None,
group: Optional[ArrayLike] = None,
qid: Optional[ArrayLike] = None,
label_lower_bound: Optional[ArrayLike] = None,
label_upper_bound: Optional[ArrayLike] = None,
feature_weights: Optional[ArrayLike] = None,
enable_categorical: bool = False,
data_split_mode: DataSplitMode = DataSplitMode.ROW,
) -> None:
self.max_bin = max_bin
self.missing = missing if missing is not None else np.nan
self.nthread = nthread if nthread is not None else -1
self._silent = silent # unused, kept for compatibility
if isinstance(data, ctypes.c_void_p):
self.handle = data
return
if qid is not None and group is not None:
raise ValueError(
"Only one of the eval_qid or eval_group for each evaluation "
"dataset should be provided."
)
if isinstance(data, DataIter):
if any(
info is not None
for info in (
label,
weight,
base_margin,
feature_names,
feature_types,
group,
qid,
label_lower_bound,
label_upper_bound,
feature_weights,
)
):
raise ValueError(
"If data iterator is used as input, data like label should be "
"specified as batch argument."
)
self._init(
data,
ref=ref,
label=label,
weight=weight,
base_margin=base_margin,
group=group,
qid=qid,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound,
feature_weights=feature_weights,
feature_names=feature_names,
feature_types=feature_types,
enable_categorical=enable_categorical,
)
def _init(
self,
data: DataType,
ref: Optional[DMatrix],
enable_categorical: bool,
**meta: Any,
) -> None:
from .data import (
SingleBatchInternalIter,
_is_dlpack,
_is_iter,
_transform_dlpack,
)
if _is_dlpack(data):
# We specialize for dlpack because cupy will take the memory from it so
# it can't be transformed twice.
data = _transform_dlpack(data)
if _is_iter(data):
it = data
else:
it = SingleBatchInternalIter(data=data, **meta)
handle = ctypes.c_void_p()
reset_callback, next_callback = it.get_callbacks(True, enable_categorical)
if it.cache_prefix is not None:
raise ValueError(
"QuantileDMatrix doesn't cache data, remove the cache_prefix "
"in iterator to fix this error."
)
config = make_jcargs(
nthread=self.nthread, missing=self.missing, max_bin=self.max_bin
)
ret = _LIB.XGQuantileDMatrixCreateFromCallback(
None,
it.proxy.handle,
ref.handle if ref is not None else ref,
reset_callback,
next_callback,
config,
ctypes.byref(handle),
)
it.reraise()
# delay check_call to throw intermediate exception first
_check_call(ret)
self.handle = handle
class DeviceQuantileDMatrix(QuantileDMatrix):
"""Use `QuantileDMatrix` instead.
.. deprecated:: 1.7.0
.. versionadded:: 1.1.0
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
warnings.warn("Please use `QuantileDMatrix` instead.", FutureWarning)
super().__init__(*args, **kwargs)
Objective = Callable[[np.ndarray, DMatrix], Tuple[np.ndarray, np.ndarray]]
Metric = Callable[[np.ndarray, DMatrix], Tuple[str, float]]
def _configure_metrics(params: BoosterParam) -> BoosterParam:
if (
isinstance(params, dict)
and "eval_metric" in params
and isinstance(params["eval_metric"], list)
):
eval_metrics = params["eval_metric"]
params.pop("eval_metric", None)
params_list = list(params.items())
for eval_metric in eval_metrics:
params_list += [("eval_metric", eval_metric)]
return params_list
return params
class Booster:
# pylint: disable=too-many-public-methods
"""A Booster of XGBoost.
Booster is the model of xgboost, containing low-level routines for
training, prediction and evaluation.
"""
def __init__(
self,
params: Optional[BoosterParam] = None,
cache: Optional[Sequence[DMatrix]] = None,
model_file: Optional[Union["Booster", bytearray, os.PathLike, str]] = None,
) -> None:
# pylint: disable=invalid-name
"""
Parameters
----------
params :
Parameters for boosters.
cache :
List of cache items.
model_file :
Path to the model file if it's string or PathLike.
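A minimal construction sketch (``dtrain`` is an illustrative :py:class:`DMatrix`):
.. code-block:: python
    bst = Booster({"max_depth": 2, "objective": "binary:logistic"}, [dtrain])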
"""
cache = cache if cache is not None else []
for d in cache:
if not isinstance(d, DMatrix):
raise TypeError(f"invalid cache item: {type(d).__name__}", cache)
dmats = c_array(ctypes.c_void_p, [d.handle for d in cache])
self.handle: Optional[ctypes.c_void_p] = ctypes.c_void_p()
_check_call(
_LIB.XGBoosterCreate(
dmats, c_bst_ulong(len(cache)), ctypes.byref(self.handle)
)
)
for d in cache:
# Validate feature only after the feature names are saved into booster.
self._assign_dmatrix_features(d)
if isinstance(model_file, Booster):
assert self.handle is not None
# We use the pickle interface for getting memory snapshot from
# another model, and load the snapshot with this booster.
state = model_file.__getstate__()
handle = state["handle"]
del state["handle"]
ptr = (ctypes.c_char * len(handle)).from_buffer(handle)
length = c_bst_ulong(len(handle))
_check_call(_LIB.XGBoosterUnserializeFromBuffer(self.handle, ptr, length))
self.__dict__.update(state)
elif isinstance(model_file, (str, os.PathLike, bytearray)):
self.load_model(model_file)
elif model_file is None:
pass
else:
raise TypeError("Unknown type:", model_file)
params = params or {}
params_processed = _configure_metrics(params.copy())
params_processed = self._configure_constraints(params_processed)
if isinstance(params_processed, list):
params_processed.append(("validate_parameters", True))
else:
params_processed["validate_parameters"] = True
self.set_param(params_processed or {})
def _transform_monotone_constrains(
self, value: Union[Dict[str, int], str, Tuple[int, ...]]
) -> Union[Tuple[int, ...], str]:
if isinstance(value, str):
return value
if isinstance(value, tuple):
return value
constrained_features = set(value.keys())
feature_names = self.feature_names or []
if not constrained_features.issubset(set(feature_names)):
raise ValueError(
"Constrained features are not a subset of training data feature names"
)
return tuple(value.get(name, 0) for name in feature_names)
def _transform_interaction_constraints(
self, value: Union[Sequence[Sequence[str]], str]
) -> Union[str, List[List[int]]]:
if isinstance(value, str):
return value
feature_idx_mapping = {
name: idx for idx, name in enumerate(self.feature_names or [])
}
try:
result = []
for constraint in value:
result.append(
[feature_idx_mapping[feature_name] for feature_name in constraint]
)
return result
except KeyError as e:
raise ValueError(
"Constrained features are not a subset of training data feature names"
) from e
def _configure_constraints(self, params: BoosterParam) -> BoosterParam:
if isinstance(params, dict):
# we must use list in the internal code as there can be multiple metrics
# with the same parameter name `eval_metric` (same key for dictionary).
params = list(params.items())
for idx, param in enumerate(params):
name, value = param
if value is None:
continue
if name == "monotone_constraints":
params[idx] = (name, self._transform_monotone_constrains(value))
elif name == "interaction_constraints":
params[idx] = (name, self._transform_interaction_constraints(value))
return params
def __del__(self) -> None:
if hasattr(self, "handle") and self.handle is not None:
_check_call(_LIB.XGBoosterFree(self.handle))
self.handle = None
def __getstate__(self) -> Dict:
# can't pickle ctypes pointers, put model content in bytearray
this = self.__dict__.copy()
handle = this["handle"]
if handle is not None:
length = c_bst_ulong()
cptr = ctypes.POINTER(ctypes.c_char)()
_check_call(
_LIB.XGBoosterSerializeToBuffer(
self.handle, ctypes.byref(length), ctypes.byref(cptr)
)
)
buf = ctypes2buffer(cptr, length.value)
this["handle"] = buf
return this
def __setstate__(self, state: Dict) -> None:
# reconstruct handle from raw data
handle = state["handle"]
if handle is not None:
buf = handle
dmats = c_array(ctypes.c_void_p, [])
handle = ctypes.c_void_p()
_check_call(
_LIB.XGBoosterCreate(dmats, c_bst_ulong(0), ctypes.byref(handle))
)
length = c_bst_ulong(len(buf))
ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
_check_call(_LIB.XGBoosterUnserializeFromBuffer(handle, ptr, length))
state["handle"] = handle
self.__dict__.update(state)
def __getitem__(self, val: Union[int, tuple, slice]) -> "Booster":
"""Get a slice of the tree-based model.
.. versionadded:: 1.3.0
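A minimal usage sketch (``booster`` is an illustrative trained model):
.. code-block:: python
    # Trees from boosting rounds [10, 20), half-open interval.
    sliced = booster[10:20]
    # An integer index selects a single round.
    first = booster[0]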
"""
if isinstance(val, int):
val = slice(val, val + 1)
if isinstance(val, tuple):
raise ValueError("Only supports slicing through 1 dimension.")
if not isinstance(val, slice):
msg = _expect((int, slice), type(val))
raise TypeError(msg)
if isinstance(val.start, type(Ellipsis)) or val.start is None:
start = 0
else:
start = val.start
if isinstance(val.stop, type(Ellipsis)) or val.stop is None:
stop = 0
else:
stop = val.stop
if stop < start:
raise ValueError("Invalid slice", val)
step = val.step if val.step is not None else 1
c_start = ctypes.c_int(start)
c_stop = ctypes.c_int(stop)
c_step = ctypes.c_int(step)
sliced_handle = ctypes.c_void_p()
status = _LIB.XGBoosterSlice(
self.handle, c_start, c_stop, c_step, ctypes.byref(sliced_handle)
)
if status == -2:
raise IndexError("Layer index out of range")
_check_call(status)
sliced = Booster()
_check_call(_LIB.XGBoosterFree(sliced.handle))
sliced.handle = sliced_handle
return sliced
def __iter__(self) -> Generator["Booster", None, None]:
"""Iterator method for getting individual trees.
.. versionadded:: 2.0.0
"""
for i in range(0, self.num_boosted_rounds()):
yield self[i]
def save_config(self) -> str:
"""Output internal parameter configuration of Booster as a JSON
string.
.. versionadded:: 1.0.0
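A minimal round-trip sketch (``booster`` is an illustrative trained model):
.. code-block:: python
    import json
    config = json.loads(booster.save_config())
    # The configuration can be restored with `load_config`.
    booster.load_config(json.dumps(config))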
"""
json_string = ctypes.c_char_p()
length = c_bst_ulong()
_check_call(
_LIB.XGBoosterSaveJsonConfig(
self.handle, ctypes.byref(length), ctypes.byref(json_string)
)
)
assert json_string.value is not None
result = json_string.value.decode() # pylint: disable=no-member
return result
def load_config(self, config: str) -> None:
"""Load configuration returned by `save_config`.
.. versionadded:: 1.0.0
"""
assert isinstance(config, str)
_check_call(_LIB.XGBoosterLoadJsonConfig(self.handle, c_str(config)))
def __copy__(self) -> "Booster":
return self.__deepcopy__(None)
def __deepcopy__(self, _: Any) -> "Booster":
"""Return a copy of booster."""
return Booster(model_file=self)
def copy(self) -> "Booster":
"""Copy the booster object.
Returns
-------
booster :
A copied booster model
"""
return copy.copy(self)
def attr(self, key: str) -> Optional[str]:
"""Get attribute string from the Booster.
Parameters
----------
key :
The key to get attribute from.
Returns
-------
value :
The attribute value of the key; returns None if the attribute does not exist.
"""
ret = ctypes.c_char_p()
success = ctypes.c_int()
_check_call(
_LIB.XGBoosterGetAttr(
self.handle, c_str(key), ctypes.byref(ret), ctypes.byref(success)
)
)
if success.value != 0:
value = ret.value
assert value
return py_str(value)
return None
def attributes(self) -> Dict[str, Optional[str]]:
"""Get attributes stored in the Booster as a dictionary.
Returns
-------
result : dictionary of attribute_name: attribute_value pairs of strings.
Returns an empty dict if there are no attributes.
"""
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
_check_call(
_LIB.XGBoosterGetAttrNames(
self.handle, ctypes.byref(length), ctypes.byref(sarr)
)
)
attr_names = from_cstr_to_pystr(sarr, length)
return {n: self.attr(n) for n in attr_names}
def set_attr(self, **kwargs: Optional[str]) -> None:
"""Set the attribute of the Booster.
Parameters
----------
**kwargs
The attributes to set. Setting a value to None deletes an attribute.
"""
for key, value in kwargs.items():
c_value = None
if value is not None:
c_value = c_str(str(value))
_check_call(_LIB.XGBoosterSetAttr(self.handle, c_str(key), c_value))
def _get_feature_info(self, field: str) -> Optional[FeatureInfo]:
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
if not hasattr(self, "handle") or self.handle is None:
return None
_check_call(
_LIB.XGBoosterGetStrFeatureInfo(
self.handle,
c_str(field),
ctypes.byref(length),
ctypes.byref(sarr),
)
)
feature_info = from_cstr_to_pystr(sarr, length)
return feature_info if feature_info else None
def _set_feature_info(self, features: Optional[FeatureInfo], field: str) -> None:
if features is not None:
assert isinstance(features, list)
feature_info_bytes = [bytes(f, encoding="utf-8") for f in features]
c_feature_info = (ctypes.c_char_p * len(feature_info_bytes))(
*feature_info_bytes
)
_check_call(
_LIB.XGBoosterSetStrFeatureInfo(
self.handle,
c_str(field),
c_feature_info,
c_bst_ulong(len(features)),
)
)
else:
_check_call(
_LIB.XGBoosterSetStrFeatureInfo(
self.handle, c_str(field), None, c_bst_ulong(0)
)
)
@property
def feature_types(self) -> Optional[FeatureTypes]:
"""Feature types for this booster. Can be directly set by input data or by
assignment. See :py:class:`DMatrix` for details.
"""
return self._get_feature_info("feature_type")
@feature_types.setter
def feature_types(self, features: Optional[FeatureTypes]) -> None:
self._set_feature_info(features, "feature_type")
@property
def feature_names(self) -> Optional[FeatureNames]:
"""Feature names for this booster. Can be directly set by input data or by
assignment.
"""
return self._get_feature_info("feature_name")
@feature_names.setter
def feature_names(self, features: Optional[FeatureNames]) -> None:
self._set_feature_info(features, "feature_name")
def set_param(
self,
params: Union[Dict, Iterable[Tuple[str, Any]], str],
value: Optional[str] = None,
) -> None:
"""Set parameters into the Booster.
Parameters
----------
params :
list of key, value pairs, dict of key to value, or simply str key
value :
value of the specified parameter, when params is str key
"""
if isinstance(params, Mapping):
params = params.items()
elif isinstance(params, str) and value is not None:
params = [(params, value)]
for key, val in cast(Iterable[Tuple[str, str]], params):
if isinstance(val, np.ndarray):
val = val.tolist()
if val is not None:
_check_call(
_LIB.XGBoosterSetParam(self.handle, c_str(key), c_str(str(val)))
)
def update(
self, dtrain: DMatrix, iteration: int, fobj: Optional[Objective] = None
) -> None:
"""Update for one iteration, with objective function calculated
internally. This function should not be called directly by users.
Parameters
----------
dtrain :
Training data.
iteration :
Current iteration number.
fobj :
Customized objective function.
"""
if not isinstance(dtrain, DMatrix):
raise TypeError(f"invalid training matrix: {type(dtrain).__name__}")
self._assign_dmatrix_features(dtrain)
if fobj is None:
_check_call(
_LIB.XGBoosterUpdateOneIter(
self.handle, ctypes.c_int(iteration), dtrain.handle
)
)
else:
pred = self.predict(dtrain, output_margin=True, training=True)
grad, hess = fobj(pred, dtrain)
self.boost(dtrain, grad, hess)
def boost(self, dtrain: DMatrix, grad: np.ndarray, hess: np.ndarray) -> None:
"""Boost the booster for one iteration, with customized gradient
statistics. Like :py:func:`xgboost.Booster.update`, this
function should not be called directly by users.
Parameters
----------
dtrain :
The training DMatrix.
grad :
The first order gradients.
hess :
The second order gradients.
"""
if len(grad) != len(hess):
raise ValueError(f"grad / hess length mismatch: {len(grad)} / {len(hess)}")
if not isinstance(dtrain, DMatrix):
raise TypeError(f"invalid training matrix: {type(dtrain).__name__}")
self._assign_dmatrix_features(dtrain)
_check_call(
_LIB.XGBoosterBoostOneIter(
self.handle,
dtrain.handle,
c_array(ctypes.c_float, grad),
c_array(ctypes.c_float, hess),
c_bst_ulong(len(grad)),
)
)
def eval_set(
self,
evals: Sequence[Tuple[DMatrix, str]],
iteration: int = 0,
feval: Optional[Metric] = None,
output_margin: bool = True,
) -> str:
# pylint: disable=invalid-name
"""Evaluate a set of data.
Parameters
----------
evals :
List of items to be evaluated.
iteration :
Current iteration.
feval :
Custom evaluation function.
output_margin :
Whether to predict with the raw untransformed margin when computing ``feval``.
Returns
-------
result: str
Evaluation result string.
"""
for d in evals:
if not isinstance(d[0], DMatrix):
raise TypeError(f"expected DMatrix, got {type(d[0]).__name__}")
if not isinstance(d[1], str):
raise TypeError(f"expected string, got {type(d[1]).__name__}")
self._assign_dmatrix_features(d[0])
dmats = c_array(ctypes.c_void_p, [d[0].handle for d in evals])
evnames = c_array(ctypes.c_char_p, [c_str(d[1]) for d in evals])
msg = ctypes.c_char_p()
_check_call(
_LIB.XGBoosterEvalOneIter(
self.handle,
ctypes.c_int(iteration),
dmats,
evnames,
c_bst_ulong(len(evals)),
ctypes.byref(msg),
)
)
assert msg.value is not None
res = msg.value.decode() # pylint: disable=no-member
if feval is not None:
for dmat, evname in evals:
feval_ret = feval(
self.predict(dmat, training=False, output_margin=output_margin),
dmat,
)
if isinstance(feval_ret, list):
for name, val in feval_ret:
# pylint: disable=consider-using-f-string
res += "\t%s-%s:%f" % (evname, name, val)
else:
name, val = feval_ret
# pylint: disable=consider-using-f-string
res += "\t%s-%s:%f" % (evname, name, val)
return res
def eval(self, data: DMatrix, name: str = "eval", iteration: int = 0) -> str:
"""Evaluate the model on mat.
Parameters
----------
data :
The dmatrix storing the input.
name :
The name of the dataset.
iteration :
The current iteration number.
Returns
-------
result: str
Evaluation result string.
"""
self._assign_dmatrix_features(data)
return self.eval_set([(data, name)], iteration)
# pylint: disable=too-many-function-args
def predict(
self,
data: DMatrix,
output_margin: bool = False,
pred_leaf: bool = False,
pred_contribs: bool = False,
approx_contribs: bool = False,
pred_interactions: bool = False,
validate_features: bool = True,
training: bool = False,
iteration_range: Tuple[int, int] = (0, 0),
strict_shape: bool = False,
) -> np.ndarray:
"""Predict with data. The full model will be used unless `iteration_range` is specified,
meaning user have to either slice the model or use the ``best_iteration``
attribute to get prediction from best model returned from early stopping.
.. note::
See :doc:`Prediction </prediction>` for issues like thread safety and a
summary of outputs from this function.
Parameters
----------
data :
The dmatrix storing the input.
output_margin :
Whether to output the raw untransformed margin value.
pred_leaf :
When this option is on, the output will be a matrix of (nsample,
ntrees) with each record indicating the predicted leaf index of
each sample in each tree. Note that the leaf index of a tree is
unique per tree, so you may find leaf 1 in both tree 1 and tree 0.
pred_contribs :
When this is True the output will be a matrix of size (nsample,
nfeats + 1) with each record indicating the feature contributions
(SHAP values) for that prediction. The sum of all feature
contributions is equal to the raw untransformed margin value of the
prediction. Note the final column is the bias term.
approx_contribs :
Approximate the contributions of each feature. Used when ``pred_contribs`` or
``pred_interactions`` is set to True. Changing the default of this parameter
(False) is not recommended.
pred_interactions :
When this is True the output will be a matrix of size (nsample,
nfeats + 1, nfeats + 1) indicating the SHAP interaction values for
each pair of features. The sum of each row (or column) of the
interaction values equals the corresponding SHAP value (from
pred_contribs), and the sum of the entire matrix equals the raw
untransformed margin value of the prediction. Note the last row and
column correspond to the bias term.
validate_features :
When this is True, validate that the Booster's and data's
feature_names are identical. Otherwise, it is assumed that the
feature_names are the same.
training :
Whether the prediction value is used for training. This can affect the `dart`
booster, which performs dropouts during training iterations but uses all trees
for inference. If you want to obtain results with dropouts, set this parameter
to `True`. Also, the parameter is set to true when obtaining predictions for a
custom objective function.
.. versionadded:: 1.0.0
iteration_range :
Specifies which layer of trees are used in prediction. For example, if a
random forest is trained with 100 rounds. Specifying `iteration_range=(10,
20)`, then only the forests built during [10, 20) (half open set) rounds are
used in this prediction.
.. versionadded:: 1.4.0
strict_shape :
When set to True, output shape is invariant to whether classification is used.
For both value and margin prediction, the output shape is (n_samples,
n_groups), n_groups == 1 when multi-class is not used. Default to False, in
which case the output shape can be (n_samples, ) if multi-class is not used.
.. versionadded:: 1.4.0
Returns
-------
prediction : numpy array
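A minimal sketch of ``iteration_range`` (``bst`` and ``dtest`` are illustrative
placeholders):
.. code-block:: python
    # Predict using only the first 10 boosting rounds.
    preds = bst.predict(dtest, iteration_range=(0, 10))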
"""
if not isinstance(data, DMatrix):
raise TypeError("Expecting data to be a DMatrix object, got: ", type(data))
if validate_features:
fn = data.feature_names
self._validate_features(fn)
args = {
"type": 0,
"training": training,
"iteration_begin": iteration_range[0],
"iteration_end": iteration_range[1],
"strict_shape": strict_shape,
}
def assign_type(t: int) -> None:
if args["type"] != 0:
raise ValueError("One type of prediction at a time.")
args["type"] = t
if output_margin:
assign_type(1)
if pred_contribs:
assign_type(2 if not approx_contribs else 3)
if pred_interactions:
assign_type(4 if not approx_contribs else 5)
if pred_leaf:
assign_type(6)
preds = ctypes.POINTER(ctypes.c_float)()
shape = ctypes.POINTER(c_bst_ulong)()
dims = c_bst_ulong()
_check_call(
_LIB.XGBoosterPredictFromDMatrix(
self.handle,
data.handle,
from_pystr_to_cstr(json.dumps(args)),
ctypes.byref(shape),
ctypes.byref(dims),
ctypes.byref(preds),
)
)
return _prediction_output(shape, dims, preds, False)
# pylint: disable=too-many-statements
def inplace_predict(
self,
data: DataType,
iteration_range: Tuple[int, int] = (0, 0),
predict_type: str = "value",
missing: float = np.nan,
validate_features: bool = True,
base_margin: Any = None,
strict_shape: bool = False,
) -> NumpyOrCupy:
"""Run prediction in-place when possible, Unlike :py:meth:`predict` method,
inplace prediction does not cache the prediction result.
Calling only ``inplace_predict`` in multiple threads is safe and lock
free. But the safety does not hold when used in conjunction with other
methods. E.g. you can't train the booster in one thread and perform
prediction in the other.
.. note::
If the device ordinal of the input data doesn't match the one configured for
the booster, data will be copied to the booster device.
.. code-block:: python
booster.set_param({"device": "cuda:0"})
booster.inplace_predict(cupy_array)
booster.set_param({"device": "cpu"})
booster.inplace_predict(numpy_array)
.. versionadded:: 1.1.0
Parameters
----------
data :
The input data.
iteration_range :
See :py:meth:`predict` for details.
predict_type :
* `value` Output model prediction values.
* `margin` Output the raw untransformed margin value.
missing :
See :py:obj:`xgboost.DMatrix` for details.
validate_features:
See :py:meth:`xgboost.Booster.predict` for details.
base_margin:
See :py:obj:`xgboost.DMatrix` for details.
.. versionadded:: 1.4.0
strict_shape:
See :py:meth:`xgboost.Booster.predict` for details.
.. versionadded:: 1.4.0
Returns
-------
prediction : numpy.ndarray/cupy.ndarray
The prediction result. When input data is on GPU, prediction result is
stored in a cupy array.
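A minimal usage sketch (``bst`` is an illustrative trained model; the random
input array is a placeholder):
.. code-block:: python
    import numpy as np
    X = np.random.rand(16, bst.num_features())
    preds = bst.inplace_predict(X)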
"""
preds = ctypes.POINTER(ctypes.c_float)()
# once caching is supported, we can pass id(data) as cache id.
args = make_jcargs(
type=1 if predict_type == "margin" else 0,
training=False,
iteration_begin=iteration_range[0],
iteration_end=iteration_range[1],
missing=missing,
strict_shape=strict_shape,
cache_id=0,
)
shape = ctypes.POINTER(c_bst_ulong)()
dims = c_bst_ulong()
if base_margin is not None:
proxy: Optional[_ProxyDMatrix] = _ProxyDMatrix()
assert proxy is not None
proxy.set_info(base_margin=base_margin)
p_handle = proxy.handle
else:
proxy = None
p_handle = ctypes.c_void_p()
assert proxy is None or isinstance(proxy, _ProxyDMatrix)
from .data import (
_array_interface,
_is_cudf_df,
_is_cupy_array,
_is_list,
_is_pandas_df,
_is_pandas_series,
_is_tuple,
_transform_pandas_df,
)
enable_categorical = True
if _is_pandas_series(data):
import pandas as pd
data = pd.DataFrame(data)
if _is_pandas_df(data):
data, fns, _ = _transform_pandas_df(data, enable_categorical)
if validate_features:
self._validate_features(fns)
if _is_list(data) or _is_tuple(data):
data = np.array(data)
if validate_features:
if not hasattr(data, "shape"):
raise TypeError(
"`shape` attribute is required when `validate_features` is True."
)
if len(data.shape) != 1 and self.num_features() != data.shape[1]:
raise ValueError(
f"Feature shape mismatch, expected: {self.num_features()}, "
f"got {data.shape[1]}"
)
if isinstance(data, np.ndarray):
from .data import _ensure_np_dtype
data, _ = _ensure_np_dtype(data, data.dtype)
_check_call(
_LIB.XGBoosterPredictFromDense(
self.handle,
_array_interface(data),
args,
p_handle,
ctypes.byref(shape),
ctypes.byref(dims),
ctypes.byref(preds),
)
)
return _prediction_output(shape, dims, preds, False)
if isinstance(data, scipy.sparse.csr_matrix):
from .data import transform_scipy_sparse
data = transform_scipy_sparse(data, True)
_check_call(
_LIB.XGBoosterPredictFromCSR(
self.handle,
_array_interface(data.indptr),
_array_interface(data.indices),
_array_interface(data.data),
c_bst_ulong(data.shape[1]),
args,
p_handle,
ctypes.byref(shape),
ctypes.byref(dims),
ctypes.byref(preds),
)
)
return _prediction_output(shape, dims, preds, False)
if _is_cupy_array(data):
from .data import _transform_cupy_array
data = _transform_cupy_array(data)
interface_str = _cuda_array_interface(data)
_check_call(
_LIB.XGBoosterPredictFromCudaArray(
self.handle,
interface_str,
args,
p_handle,
ctypes.byref(shape),
ctypes.byref(dims),
ctypes.byref(preds),
)
)
return _prediction_output(shape, dims, preds, True)
if _is_cudf_df(data):
from .data import _cudf_array_interfaces, _transform_cudf_df
data, cat_codes, fns, _ = _transform_cudf_df(
data, None, None, enable_categorical
)
interfaces_str = _cudf_array_interfaces(data, cat_codes)
if validate_features:
self._validate_features(fns)
_check_call(
_LIB.XGBoosterPredictFromCudaColumnar(
self.handle,
interfaces_str,
args,
p_handle,
ctypes.byref(shape),
ctypes.byref(dims),
ctypes.byref(preds),
)
)
return _prediction_output(shape, dims, preds, True)
raise TypeError(
"Data type:" + str(type(data)) + " not supported by inplace prediction."
)
def save_model(self, fname: Union[str, os.PathLike]) -> None:
"""Save the model to a file.
The model is saved in an XGBoost internal format which is universal among the
various XGBoost interfaces. Auxiliary attributes of the Python Booster object
(such as feature_names) will not be saved when using binary format. To save
those attributes, use JSON/UBJ instead. See :doc:`Model IO
</tutorials/saving_model>` for more info.
.. code-block:: python
model.save_model("model.json")
# or
model.save_model("model.ubj")
Parameters
----------
fname :
Output file name
"""
if isinstance(fname, (str, os.PathLike)): # assume file name
fname = os.fspath(os.path.expanduser(fname))
_check_call(_LIB.XGBoosterSaveModel(self.handle, c_str(fname)))
else:
raise TypeError("fname must be a string or os PathLike")
def save_raw(self, raw_format: str = "deprecated") -> bytearray:
"""Save the model to a in memory buffer representation instead of file.
Parameters
----------
raw_format :
Format of output buffer. Can be `json`, `ubj` or `deprecated`. Right now
the default is `deprecated` but it will be changed to `ubj` (universal
binary JSON) in the future.
Returns
-------
An in memory buffer representation of the model
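A minimal round-trip sketch (``bst`` is an illustrative trained model):
.. code-block:: python
    raw = bst.save_raw(raw_format="ubj")
    # A bytearray can be passed back as `model_file` to reconstruct the model.
    bst2 = Booster(model_file=raw)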
"""
length = c_bst_ulong()
cptr = ctypes.POINTER(ctypes.c_char)()
config = make_jcargs(format=raw_format)
_check_call(
_LIB.XGBoosterSaveModelToBuffer(
self.handle, config, ctypes.byref(length), ctypes.byref(cptr)
)
)
return ctypes2buffer(cptr, length.value)
def load_model(self, fname: ModelIn) -> None:
"""Load the model from a file or bytearray. Path to file can be local
or as an URI.
The model is loaded from XGBoost format which is universal among the various
XGBoost interfaces. Auxiliary attributes of the Python Booster object (such as
feature_names) will not be loaded when using binary format. To save those
attributes, use JSON/UBJ instead. See :doc:`Model IO </tutorials/saving_model>`
for more info.
.. code-block:: python
model.load_model("model.json")
# or
model.load_model("model.ubj")
Parameters
----------
fname :
Input file name or memory buffer (see also :py:meth:`save_raw`)
"""
if isinstance(fname, (str, os.PathLike)):
# assume file name, cannot use os.path.exist to check, file can be
# from URL.
fname = os.fspath(os.path.expanduser(fname))
_check_call(_LIB.XGBoosterLoadModel(self.handle, c_str(fname)))
elif isinstance(fname, bytearray):
buf = fname
length = c_bst_ulong(len(buf))
ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
_check_call(_LIB.XGBoosterLoadModelFromBuffer(self.handle, ptr, length))
else:
raise TypeError("Unknown file type: ", fname)
if self.attr("best_iteration") is not None:
self.best_iteration = int(cast(int, self.attr("best_iteration")))
if self.attr("best_score") is not None:
self.best_score = float(cast(float, self.attr("best_score")))
def num_boosted_rounds(self) -> int:
"""Get number of boosted rounds. For gblinear this is reset to 0 after
serializing the model.
"""
rounds = ctypes.c_int()
assert self.handle is not None
_check_call(_LIB.XGBoosterBoostedRounds(self.handle, ctypes.byref(rounds)))
return rounds.value
def num_features(self) -> int:
"""Number of features in booster."""
features = c_bst_ulong()
assert self.handle is not None
_check_call(_LIB.XGBoosterGetNumFeature(self.handle, ctypes.byref(features)))
return features.value
def dump_model(
self,
fout: Union[str, os.PathLike],
fmap: Union[str, os.PathLike] = "",
with_stats: bool = False,
dump_format: str = "text",
) -> None:
"""Dump model into a text or JSON file. Unlike :py:meth:`save_model`, the
output format is primarily used for visualization or interpretation,
hence it's more human readable but cannot be loaded back to XGBoost.
Parameters
----------
fout :
Output file name.
fmap :
Name of the file containing feature map names.
with_stats :
Controls whether the split statistics are output.
dump_format :
Format of model dump file. Can be 'text' or 'json'.
"""
if isinstance(fout, (str, os.PathLike)):
fout = os.fspath(os.path.expanduser(fout))
# pylint: disable=consider-using-with
fout_obj = open(fout, "w", encoding="utf-8")
need_close = True
else:
fout_obj = fout
need_close = False
ret = self.get_dump(fmap, with_stats, dump_format)
if dump_format == "json":
fout_obj.write("[\n")
for i, val in enumerate(ret):
fout_obj.write(val)
if i < len(ret) - 1:
fout_obj.write(",\n")
fout_obj.write("\n]")
else:
for i, val in enumerate(ret):
fout_obj.write(f"booster[{i}]:\n")
fout_obj.write(val)
if need_close:
fout_obj.close()
def get_dump(
self,
fmap: Union[str, os.PathLike] = "",
with_stats: bool = False,
dump_format: str = "text",
) -> List[str]:
"""Returns the model dump as a list of strings. Unlike :py:meth:`save_model`, the output
format is primarily used for visualization or interpretation, hence it's more
human readable but cannot be loaded back to XGBoost.
Parameters
----------
fmap :
Name of the file containing feature map names.
with_stats :
Controls whether the split statistics are output.
dump_format :
Format of model dump. Can be 'text', 'json' or 'dot'.
"""
fmap = os.fspath(os.path.expanduser(fmap))
length = c_bst_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
_check_call(
_LIB.XGBoosterDumpModelEx(
self.handle,
c_str(fmap),
ctypes.c_int(with_stats),
c_str(dump_format),
ctypes.byref(length),
ctypes.byref(sarr),
)
)
res = from_cstr_to_pystr(sarr, length)
return res
def get_fscore(
self, fmap: Union[str, os.PathLike] = ""
) -> Dict[str, Union[float, List[float]]]:
"""Get feature importance of each feature.
.. note:: Zero-importance features will not be included
Keep in mind that this function does not include zero-importance features, i.e.
those features that have not been used in any split condition.
Parameters
----------
fmap :
The name of feature map file
"""
return self.get_score(fmap, importance_type="weight")
def get_score(
self, fmap: Union[str, os.PathLike] = "", importance_type: str = "weight"
) -> Dict[str, Union[float, List[float]]]:
"""Get feature importance of each feature.
For tree model Importance type can be defined as:
* 'weight': the number of times a feature is used to split the data across all trees.
* 'gain': the average gain across all splits the feature is used in.
* 'cover': the average coverage across all splits the feature is used in.
* 'total_gain': the total gain across all splits the feature is used in.
* 'total_cover': the total coverage across all splits the feature is used in.
.. note::
For linear model, only "weight" is defined and it's the normalized coefficients
without bias.
.. note:: Zero-importance features will not be included
Keep in mind that this function does not include zero-importance features, i.e.
those features that have not been used in any split condition.
Parameters
----------
fmap :
The name of feature map file.
importance_type :
One of the importance types defined above.
Returns
-------
A map between feature names and their scores. When `gblinear` is used for
multi-class classification the score for each feature is a list with length
`n_classes`; otherwise, the scores are scalars.
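A minimal usage sketch (``bst`` is an illustrative trained tree model):
.. code-block:: python
    scores = bst.get_score(importance_type="gain")
    for name, gain in scores.items():
        print(name, gain)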
"""
fmap = os.fspath(os.path.expanduser(fmap))
features = ctypes.POINTER(ctypes.c_char_p)()
scores = ctypes.POINTER(ctypes.c_float)()
n_out_features = c_bst_ulong()
out_dim = c_bst_ulong()
shape = ctypes.POINTER(c_bst_ulong)()
_check_call(
_LIB.XGBoosterFeatureScore(
self.handle,
make_jcargs(importance_type=importance_type, feature_map=fmap),
ctypes.byref(n_out_features),
ctypes.byref(features),
ctypes.byref(out_dim),
ctypes.byref(shape),
ctypes.byref(scores),
)
)
features_arr = from_cstr_to_pystr(features, n_out_features)
scores_arr = _prediction_output(shape, out_dim, scores, False)
results: Dict[str, Union[float, List[float]]] = {}
if len(scores_arr.shape) > 1 and scores_arr.shape[1] > 1:
for feat, score in zip(features_arr, scores_arr):
results[feat] = [float(s) for s in score]
else:
for feat, score in zip(features_arr, scores_arr):
results[feat] = float(score)
return results
# pylint: disable=too-many-statements
def trees_to_dataframe(self, fmap: Union[str, os.PathLike] = "") -> DataFrame:
"""Parse a boosted tree model text dump into a pandas DataFrame structure.
This feature is only defined when the decision tree model is chosen as base
learner (`booster in {gbtree, dart}`). It is not defined for other base learner
types, such as linear learners (`booster=gblinear`).
Parameters
----------
fmap :
The name of feature map file.
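A minimal usage sketch (``bst`` is an illustrative trained tree model):
.. code-block:: python
    df = bst.trees_to_dataframe()
    # One row per node; leaf rows have Feature == "Leaf".
    print(df[["Tree", "Node", "Feature", "Gain"]].head())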
"""
# pylint: disable=too-many-locals
fmap = os.fspath(os.path.expanduser(fmap))
if not PANDAS_INSTALLED:
raise ImportError(
(
"pandas must be available to use this method."
"Install pandas before calling again."
)
)
booster = json.loads(self.save_config())["learner"]["gradient_booster"]["name"]
if booster not in {"gbtree", "dart"}:
raise ValueError(f"This method is not defined for Booster type {booster}")
tree_ids = []
node_ids = []
fids = []
splits: List[Union[float, str]] = []
categories: List[Union[Optional[float], List[str]]] = []
y_directs: List[Union[float, str]] = []
n_directs: List[Union[float, str]] = []
missings: List[Union[float, str]] = []
gains = []
covers = []
trees = self.get_dump(fmap, with_stats=True)
for i, tree in enumerate(trees):
for line in tree.split("\n"):
arr = line.split("[")
# Leaf node
if len(arr) == 1:
# Last element of line.split is an empty string
if arr == [""]:
continue
# parse string
parse = arr[0].split(":")
stats = re.split("=|,", parse[1])
# append to lists
tree_ids.append(i)
node_ids.append(int(re.findall(r"\b\d+\b", parse[0])[0]))
fids.append("Leaf")
splits.append(float("NAN"))
categories.append(float("NAN"))
y_directs.append(float("NAN"))
n_directs.append(float("NAN"))
missings.append(float("NAN"))
gains.append(float(stats[1]))
covers.append(float(stats[3]))
# Not a Leaf Node
else:
# parse string
fid = arr[1].split("]")
if fid[0].find("<") != -1:
# numerical
parse = fid[0].split("<")
splits.append(float(parse[1]))
categories.append(None)
elif fid[0].find(":{") != -1:
# categorical
parse = fid[0].split(":")
cats = parse[1][1:-1] # strip the {}
cats_split = cats.split(",")
splits.append(float("NAN"))
categories.append(cats_split if cats_split else None)
else:
raise ValueError("Failed to parse model text dump.")
stats = re.split("=|,", fid[1])
# append to lists
tree_ids.append(i)
node_ids.append(int(re.findall(r"\b\d+\b", arr[0])[0]))
fids.append(parse[0])
str_i = str(i)
y_directs.append(str_i + "-" + stats[1])
n_directs.append(str_i + "-" + stats[3])
missings.append(str_i + "-" + stats[5])
gains.append(float(stats[7]))
covers.append(float(stats[9]))
ids = [str(t_id) + "-" + str(n_id) for t_id, n_id in zip(tree_ids, node_ids)]
df = DataFrame(
{
"Tree": tree_ids,
"Node": node_ids,
"ID": ids,
"Feature": fids,
"Split": splits,
"Yes": y_directs,
"No": n_directs,
"Missing": missings,
"Gain": gains,
"Cover": covers,
"Category": categories,
}
)
if callable(getattr(df, "sort_values", None)):
# pylint: disable=no-member
return df.sort_values(["Tree", "Node"]).reset_index(drop=True)
# pylint: disable=no-member
return df.sort(["Tree", "Node"]).reset_index(drop=True)
def _assign_dmatrix_features(self, data: DMatrix) -> None:
if data.num_row() == 0:
return
fn = data.feature_names
ft = data.feature_types
if self.feature_names is None:
self.feature_names = fn
if self.feature_types is None:
self.feature_types = ft
self._validate_features(fn)
def _validate_features(self, feature_names: Optional[FeatureNames]) -> None:
if self.feature_names is None:
return
if feature_names is None and self.feature_names is not None:
raise ValueError(
"training data did not have the following fields: "
+ ", ".join(self.feature_names)
)
if self.feature_names != feature_names:
dat_missing = set(cast(FeatureNames, self.feature_names)) - set(
cast(FeatureNames, feature_names)
)
my_missing = set(cast(FeatureNames, feature_names)) - set(
cast(FeatureNames, self.feature_names)
)
msg = "feature_names mismatch: {0} {1}"
if dat_missing:
msg += (
"\nexpected "
+ ", ".join(str(s) for s in dat_missing)
+ " in input data"
)
if my_missing:
msg += (
"\ntraining data did not have the following fields: "
+ ", ".join(str(s) for s in my_missing)
)
raise ValueError(msg.format(self.feature_names, feature_names))
def get_split_value_histogram(
self,
feature: str,
fmap: Union[os.PathLike, str] = "",
bins: Optional[int] = None,
as_pandas: bool = True,
) -> Union[np.ndarray, DataFrame]:
"""Get split value histogram of a feature
Parameters
----------
feature :
The name of the feature.
fmap :
The name of feature map file.
bins :
The maximum number of bins.
Number of bins equals the number of unique split values n_unique,
if bins is None or bins > n_unique.
as_pandas :
Return pd.DataFrame when pandas is installed.
If False or pandas is not installed, return numpy ndarray.
Returns
-------
a histogram of used splitting values for the specified feature
either as numpy array or pandas DataFrame.
"""
xgdump = self.get_dump(fmap=fmap)
values = []
# pylint: disable=consider-using-f-string
regexp = re.compile(r"\[{0}<([\d.Ee+-]+)\]".format(feature))
for val in xgdump:
m = re.findall(regexp, val)
values.extend([float(x) for x in m])
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
nph = np.histogram(values, bins=bins)
nph_stacked = np.column_stack((nph[1][1:], nph[0]))
nph_stacked = nph_stacked[nph_stacked[:, 1] > 0]
if nph_stacked.size == 0:
ft = self.feature_types
fn = self.feature_names
if fn is None:
# Let xgboost generate the feature names.
fn = [f"f{i}" for i in range(self.num_features())]
try:
index = fn.index(feature)
feature_t: Optional[str] = cast(List[str], ft)[index]
except (ValueError, AttributeError, TypeError):
# None.index: attr err, None[0]: type err, fn.index(-1): value err
feature_t = None
if feature_t == "c": # categorical
raise ValueError(
"Split value historgam doesn't support categorical split."
)
if as_pandas and PANDAS_INSTALLED:
return DataFrame(nph_stacked, columns=["SplitValue", "Count"])
if as_pandas and not PANDAS_INSTALLED:
warnings.warn(
"Returning histogram as ndarray"
" (as_pandas == True, but pandas is not installed).",
UserWarning,
)
return nph_stacked
| 104045
| 33.728304
| 97
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/plotting.py
|
# pylint: disable=too-many-locals, too-many-arguments, invalid-name,
# pylint: disable=too-many-branches
"""Plotting Library."""
import json
from io import BytesIO
from typing import Any, Optional, Union
import numpy as np
from ._typing import PathLike
from .core import Booster
from .sklearn import XGBModel
Axes = Any # real type is matplotlib.axes.Axes
GraphvizSource = Any # real type is graphviz.Source
def plot_importance(
booster: Union[XGBModel, Booster, dict],
ax: Optional[Axes] = None,
height: float = 0.2,
xlim: Optional[tuple] = None,
ylim: Optional[tuple] = None,
title: str = "Feature importance",
xlabel: str = "F score",
ylabel: str = "Features",
fmap: PathLike = "",
importance_type: str = "weight",
max_num_features: Optional[int] = None,
grid: bool = True,
show_values: bool = True,
values_format: str = "{v}",
**kwargs: Any,
) -> Axes:
"""Plot importance based on fitted trees.
Parameters
----------
booster :
Booster or XGBModel instance, or dict taken by Booster.get_fscore()
ax : matplotlib Axes
Target axes instance. If None, new figure and axes will be created.
grid :
Turn the axes grids on or off. Default is True (On).
importance_type :
How the importance is calculated: either "weight", "gain", or "cover"
* "weight" is the number of times a feature appears in a tree
* "gain" is the average gain of splits which use the feature
* "cover" is the average coverage of splits which use the feature
where coverage is defined as the number of samples affected by the split
max_num_features :
Maximum number of top features displayed on plot. If None, all features will be
displayed.
height :
Bar height, passed to ax.barh()
xlim :
Tuple passed to axes.xlim()
ylim :
Tuple passed to axes.ylim()
title :
Axes title. To disable, pass None.
xlabel :
X axis title label. To disable, pass None.
ylabel :
Y axis title label. To disable, pass None.
fmap :
The name of feature map file.
show_values :
Show values on plot. To disable, pass False.
values_format :
Format string for values. "v" will be replaced by the value of the feature
importance. e.g. Pass "{v:.2f}" in order to limit the number of digits after
the decimal point to two, for each value printed on the graph.
kwargs :
Other keywords passed to ax.barh()
Returns
-------
ax : matplotlib Axes
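A minimal usage sketch (``bst`` is an illustrative trained model):
.. code-block:: python
    import matplotlib.pyplot as plt
    ax = plot_importance(bst, importance_type="gain", max_num_features=10)
    plt.show()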
"""
try:
import matplotlib.pyplot as plt
except ImportError as e:
raise ImportError("You must install matplotlib to plot importance") from e
if isinstance(booster, XGBModel):
importance = booster.get_booster().get_score(
importance_type=importance_type, fmap=fmap
)
elif isinstance(booster, Booster):
importance = booster.get_score(importance_type=importance_type, fmap=fmap)
elif isinstance(booster, dict):
importance = booster
else:
raise ValueError("tree must be Booster, XGBModel or dict instance")
if not importance:
raise ValueError(
"Booster.get_score() results in empty. "
+ "This maybe caused by having all trees as decision dumps."
)
tuples = [(k, importance[k]) for k in importance]
if max_num_features is not None:
# pylint: disable=invalid-unary-operand-type
tuples = sorted(tuples, key=lambda _x: _x[1])[-max_num_features:]
else:
tuples = sorted(tuples, key=lambda _x: _x[1])
labels, values = zip(*tuples)
if ax is None:
_, ax = plt.subplots(1, 1)
ylocs = np.arange(len(values))
ax.barh(ylocs, values, align="center", height=height, **kwargs)
if show_values is True:
for x, y in zip(values, ylocs):
ax.text(x + 1, y, values_format.format(v=x), va="center")
ax.set_yticks(ylocs)
ax.set_yticklabels(labels)
if xlim is not None:
if not isinstance(xlim, tuple) or len(xlim) != 2:
raise ValueError("xlim must be a tuple of 2 elements")
else:
xlim = (0, max(values) * 1.1)
ax.set_xlim(xlim)
if ylim is not None:
if not isinstance(ylim, tuple) or len(ylim) != 2:
raise ValueError("ylim must be a tuple of 2 elements")
else:
ylim = (-1, len(values))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
def to_graphviz(
booster: Union[Booster, XGBModel],
fmap: PathLike = "",
num_trees: int = 0,
rankdir: Optional[str] = None,
yes_color: Optional[str] = None,
no_color: Optional[str] = None,
condition_node_params: Optional[dict] = None,
leaf_node_params: Optional[dict] = None,
**kwargs: Any,
) -> GraphvizSource:
"""Convert specified tree to graphviz instance. IPython can automatically plot
the returned graphviz instance. Otherwise, you should call the .render() method
of the returned graphviz instance.
Parameters
----------
booster :
Booster or XGBModel instance
fmap :
The name of feature map file
num_trees :
Specify the ordinal number of target tree
rankdir :
Passed to graphviz via graph_attr
yes_color :
Edge color when the node condition is met.
no_color :
Edge color when the node condition is not met.
condition_node_params :
Condition node configuration for graphviz. Example:
.. code-block:: python
{'shape': 'box',
'style': 'filled,rounded',
'fillcolor': '#78bceb'}
leaf_node_params :
Leaf node configuration for graphviz. Example:
.. code-block:: python
{'shape': 'box',
'style': 'filled',
'fillcolor': '#e48038'}
kwargs :
Other keywords passed to graphviz graph_attr, e.g. ``graph [ {key} = {value} ]``
Returns
-------
graph: graphviz.Source
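A minimal usage sketch (``bst`` is an illustrative trained model; the output
file name is arbitrary):
.. code-block:: python
    g = to_graphviz(bst, num_trees=0, rankdir="LR")
    g.render("tree")  # writes "tree.pdf" with graphviz's default format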
"""
try:
from graphviz import Source
except ImportError as e:
raise ImportError("You must install graphviz to plot tree") from e
if isinstance(booster, XGBModel):
booster = booster.get_booster()
# squash everything back into kwargs again for compatibility
parameters = "dot"
extra = {}
for key, value in kwargs.items():
extra[key] = value
if rankdir is not None:
kwargs["graph_attrs"] = {}
kwargs["graph_attrs"]["rankdir"] = rankdir
for key, value in extra.items():
    if kwargs.get("graph_attrs", None) is not None:
        kwargs["graph_attrs"][key] = value
    else:
        # Create graph_attrs with the current key so the first extra
        # attribute is not silently dropped.
        kwargs["graph_attrs"] = {key: value}
    del kwargs[key]
if yes_color is not None or no_color is not None:
kwargs["edge"] = {}
if yes_color is not None:
kwargs["edge"]["yes_color"] = yes_color
if no_color is not None:
kwargs["edge"]["no_color"] = no_color
if condition_node_params is not None:
kwargs["condition_node_params"] = condition_node_params
if leaf_node_params is not None:
kwargs["leaf_node_params"] = leaf_node_params
if kwargs:
parameters += ":"
parameters += json.dumps(kwargs)
tree = booster.get_dump(fmap=fmap, dump_format=parameters)[num_trees]
g = Source(tree)
return g
def plot_tree(
booster: Booster,
fmap: PathLike = "",
num_trees: int = 0,
rankdir: Optional[str] = None,
ax: Optional[Axes] = None,
**kwargs: Any,
) -> Axes:
"""Plot specified tree.
Parameters
----------
booster : Booster, XGBModel
Booster or XGBModel instance
fmap: str (optional)
The name of feature map file
num_trees : int, default 0
Specify the ordinal number of target tree
rankdir : str, default "TB"
Passed to graphviz via graph_attr
ax : matplotlib Axes, default None
Target axes instance. If None, new figure and axes will be created.
kwargs :
Other keywords passed to to_graphviz
Returns
-------
ax : matplotlib Axes
"""
try:
from matplotlib import image
from matplotlib import pyplot as plt
except ImportError as e:
raise ImportError("You must install matplotlib to plot tree") from e
if ax is None:
_, ax = plt.subplots(1, 1)
g = to_graphviz(booster, fmap=fmap, num_trees=num_trees, rankdir=rankdir, **kwargs)
s = BytesIO()
s.write(g.pipe(format="png"))
s.seek(0)
img = image.imread(s)
ax.imshow(img)
ax.axis("off")
return ax
| 8852
| 28.908784
| 88
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/collective.py
|
"""XGBoost collective communication related API."""
import ctypes
import json
import logging
import pickle
from enum import IntEnum, unique
from typing import Any, Dict, List
import numpy as np
from ._typing import _T
from .core import _LIB, _check_call, c_str, from_pystr_to_cstr, py_str
LOGGER = logging.getLogger("[xgboost.collective]")
def init(**args: Any) -> None:
"""Initialize the collective library with arguments.
Parameters
----------
args: Dict[str, Any]
Keyword arguments representing the parameters and their values.
Accepted parameters:
- xgboost_communicator: The type of the communicator. Can be set as an environment
variable.
* rabit: Use Rabit. This is the default if the type is unspecified.
* federated: Use the gRPC interface for Federated Learning.
Only applicable to the Rabit communicator (these are case sensitive):
-- rabit_tracker_uri: Hostname of the tracker.
-- rabit_tracker_port: Port number of the tracker.
-- rabit_task_id: ID of the current task, can be used to obtain deterministic rank
assignment.
-- rabit_world_size: Total number of workers.
-- rabit_hadoop_mode: Enable Hadoop support.
-- rabit_tree_reduce_minsize: Minimal size for tree reduce.
-- rabit_reduce_ring_mincount: Minimal count to perform ring reduce.
-- rabit_reduce_buffer: Size of the reduce buffer.
-- rabit_bootstrap_cache: Size of the bootstrap cache.
-- rabit_debug: Enable debugging.
-- rabit_timeout: Enable timeout.
-- rabit_timeout_sec: Timeout in seconds.
-- rabit_enable_tcp_no_delay: Enable TCP no delay on Unix platforms.
Only applicable to the Rabit communicator (these are case-sensitive, and can be set as
environment variables):
-- DMLC_TRACKER_URI: Hostname of the tracker.
-- DMLC_TRACKER_PORT: Port number of the tracker.
-- DMLC_TASK_ID: ID of the current task, can be used to obtain deterministic rank
assignment.
-- DMLC_ROLE: Role of the current task, "worker" or "server".
-- DMLC_NUM_ATTEMPT: Number of attempts after task failure.
-- DMLC_WORKER_CONNECT_RETRY: Number of retries to connect to the tracker.
Only applicable to the Federated communicator (use upper case for environment variables, use
lower case for runtime configuration):
-- federated_server_address: Address of the federated server.
-- federated_world_size: Number of federated workers.
-- federated_rank: Rank of the current worker.
-- federated_server_cert: Server certificate file path. Only needed for the SSL mode.
-- federated_client_key: Client key file path. Only needed for the SSL mode.
-- federated_client_cert: Client certificate file path. Only needed for the SSL mode.
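A minimal sketch using the Rabit communicator (the tracker host and port are
illustrative values):
.. code-block:: python
    init(xgboost_communicator="rabit",
         rabit_tracker_uri="127.0.0.1",
         rabit_tracker_port=9091)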
"""
config = from_pystr_to_cstr(json.dumps(args))
_check_call(_LIB.XGCommunicatorInit(config))
def finalize() -> None:
"""Finalize the communicator."""
_check_call(_LIB.XGCommunicatorFinalize())
def get_rank() -> int:
"""Get rank of current process.
Returns
-------
rank : int
Rank of current process.
"""
ret = _LIB.XGCommunicatorGetRank()
return ret
def get_world_size() -> int:
"""Get total number workers.
Returns
-------
n : int
Total number of processes.
"""
ret = _LIB.XGCommunicatorGetWorldSize()
return ret
def is_distributed() -> int:
"""If the collective communicator is distributed."""
is_dist = _LIB.XGCommunicatorIsDistributed()
return is_dist
def communicator_print(msg: Any) -> None:
"""Print message to the communicator.
This function can be used to communicate progress information to the
communicator.
Parameters
----------
msg : str
The message to be printed to the communicator.
"""
if not isinstance(msg, str):
msg = str(msg)
is_dist = _LIB.XGCommunicatorIsDistributed()
if is_dist != 0:
_check_call(_LIB.XGCommunicatorPrint(c_str(msg.strip())))
else:
print(msg.strip(), flush=True)
def get_processor_name() -> str:
"""Get the processor name.
Returns
-------
name : str
The name of the processor (host).
"""
name_str = ctypes.c_char_p()
_check_call(_LIB.XGCommunicatorGetProcessorName(ctypes.byref(name_str)))
value = name_str.value
assert value
return py_str(value)
def broadcast(data: _T, root: int) -> _T:
"""Broadcast object from one node to all other nodes.
Parameters
----------
data : any type that can be pickled
Input data; if the current rank does not equal root, this can be None.
root : int
Rank of the node to broadcast data from.
Returns
-------
object :
The result of the broadcast (the same object on every rank).
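A minimal sketch (rank 0 supplies the data, every rank receives a copy):
.. code-block:: python
    data = {"weights": [1, 2, 3]} if get_rank() == 0 else None
    data = broadcast(data, root=0)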
"""
rank = get_rank()
length = ctypes.c_ulong()
if root == rank:
assert data is not None, "need to pass in data when broadcasting"
s = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
length.value = len(s)
# run first broadcast
_check_call(
_LIB.XGCommunicatorBroadcast(
ctypes.byref(length), ctypes.sizeof(ctypes.c_ulong), root
)
)
if root != rank:
dptr = (ctypes.c_char * length.value)()
# run second
_check_call(
_LIB.XGCommunicatorBroadcast(
ctypes.cast(dptr, ctypes.c_void_p), length.value, root
)
)
data = pickle.loads(dptr.raw)
del dptr
else:
_check_call(
_LIB.XGCommunicatorBroadcast(
ctypes.cast(ctypes.c_char_p(s), ctypes.c_void_p), length.value, root
)
)
del s
return data
# enumeration of dtypes
DTYPE_ENUM__ = {
np.dtype("int8"): 0,
np.dtype("uint8"): 1,
np.dtype("int32"): 2,
np.dtype("uint32"): 3,
np.dtype("int64"): 4,
np.dtype("uint64"): 5,
np.dtype("float32"): 6,
np.dtype("float64"): 7,
}
@unique
class Op(IntEnum):
"""Supported operations for allreduce."""
MAX = 0
MIN = 1
SUM = 2
BITWISE_AND = 3
BITWISE_OR = 4
BITWISE_XOR = 5
def allreduce(data: np.ndarray, op: Op) -> np.ndarray: # pylint:disable=invalid-name
"""Perform allreduce, return the result.
Parameters
----------
data :
Input data.
op :
Reduction operator.
Returns
-------
result :
The result of allreduce, with the same shape as data.
Notes
-----
This function is not thread-safe.
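A minimal sketch (each worker contributes a one-element array):
.. code-block:: python
    import numpy as np
    local = np.asarray([get_rank() + 1.0], dtype=np.float32)
    total = allreduce(local, Op.SUM)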
"""
if not isinstance(data, np.ndarray):
raise TypeError("allreduce only takes in numpy.ndarray")
buf = data.ravel()
if buf.base is data.base:
buf = buf.copy()
if buf.dtype not in DTYPE_ENUM__:
raise TypeError(f"data type {buf.dtype} not supported")
_check_call(
_LIB.XGCommunicatorAllreduce(
buf.ctypes.data_as(ctypes.c_void_p),
buf.size,
DTYPE_ENUM__[buf.dtype],
int(op),
None,
None,
)
)
return buf
class CommunicatorContext:
"""A context controlling collective communicator initialization and finalization."""
def __init__(self, **args: Any) -> None:
self.args = args
def __enter__(self) -> Dict[str, Any]:
init(**self.args)
assert is_distributed()
LOGGER.debug("-------------- communicator say hello ------------------")
return self.args
def __exit__(self, *args: List) -> None:
finalize()
LOGGER.debug("--------------- communicator say bye ------------------")
| 7841
| 28.81749
| 100
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/training.py
|
# pylint: disable=too-many-locals, too-many-arguments, invalid-name
# pylint: disable=too-many-branches, too-many-statements
"""Training Library containing training routines."""
import copy
import os
import warnings
from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, cast
import numpy as np
from ._typing import BoosterParam, Callable, FPreProcCallable
from .callback import (
CallbackContainer,
EarlyStopping,
EvaluationMonitor,
TrainingCallback,
)
from .compat import SKLEARN_INSTALLED, DataFrame, XGBStratifiedKFold
from .core import (
Booster,
DMatrix,
Metric,
Objective,
XGBoostError,
_deprecate_positional_args,
)
_CVFolds = Sequence["CVPack"]
def _assert_new_callback(callbacks: Optional[Sequence[TrainingCallback]]) -> None:
is_new_callback: bool = not callbacks or all(
isinstance(c, TrainingCallback) for c in callbacks
)
if not is_new_callback:
link = "https://xgboost.readthedocs.io/en/latest/python/callbacks.html"
raise ValueError(
f"Old style callback was removed in version 1.6. See: {link}."
)
def _configure_custom_metric(
feval: Optional[Metric], custom_metric: Optional[Metric]
) -> Optional[Metric]:
if feval is not None:
link = (
"https://xgboost.readthedocs.io/en/latest/tutorials/custom_metric_obj.html"
)
warnings.warn(
"`feval` is deprecated, use `custom_metric` instead. They have "
"different behavior when custom objective is also used."
f"See {link} for details on the `custom_metric`."
)
if feval is not None and custom_metric is not None:
raise ValueError(
"Both `feval` and `custom_metric` are supplied. Use `custom_metric` instead."
)
eval_metric = custom_metric if custom_metric is not None else feval
return eval_metric
@_deprecate_positional_args
def train(
params: Dict[str, Any],
dtrain: DMatrix,
num_boost_round: int = 10,
*,
evals: Optional[Sequence[Tuple[DMatrix, str]]] = None,
obj: Optional[Objective] = None,
feval: Optional[Metric] = None,
maximize: Optional[bool] = None,
early_stopping_rounds: Optional[int] = None,
evals_result: Optional[TrainingCallback.EvalsLog] = None,
verbose_eval: Optional[Union[bool, int]] = True,
xgb_model: Optional[Union[str, os.PathLike, Booster, bytearray]] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
custom_metric: Optional[Metric] = None,
) -> Booster:
"""Train a booster with given parameters.
Parameters
----------
params :
Booster params.
dtrain :
Data to be trained.
num_boost_round :
Number of boosting iterations.
evals :
List of validation sets for which metrics will be evaluated during training.
Validation metrics will help us track the performance of the model.
obj :
Custom objective function. See :doc:`Custom Objective
</tutorials/custom_metric_obj>` for details.
feval :
.. deprecated:: 1.6.0
Use `custom_metric` instead.
maximize :
Whether to maximize feval.
early_stopping_rounds :
Activates early stopping. Validation metric needs to improve at least once in
every **early_stopping_rounds** round(s) to continue training.
Requires at least one item in **evals**.
The method returns the model from the last iteration (not the best one). Use
custom callback or model slicing if the best model is desired.
If there's more than one item in **evals**, the last entry will be used for early
stopping.
If there's more than one metric in the **eval_metric** parameter given in
**params**, the last metric will be used for early stopping.
If early stopping occurs, the model will have two additional fields:
``bst.best_score``, ``bst.best_iteration``.
evals_result :
This dictionary stores the evaluation results of all the items in watchlist.
Example: with a watchlist containing
``[(dtest,'eval'), (dtrain,'train')]`` and
a parameter containing ``('eval_metric': 'logloss')``,
the **evals_result** returns
.. code-block:: python
{'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}
verbose_eval :
Requires at least one item in **evals**.
If **verbose_eval** is True then the evaluation metric on the validation set is
printed at each boosting stage.
If **verbose_eval** is an integer then the evaluation metric on the validation set
is printed at every given **verbose_eval** boosting stage. The last boosting stage
/ the boosting stage found by using **early_stopping_rounds** is also printed.
Example: with ``verbose_eval=4`` and at least one item in **evals**, an evaluation metric
is printed every 4 boosting stages, instead of every boosting stage.
xgb_model :
Xgb model to be loaded before training (allows training continuation).
callbacks :
List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using
:ref:`Callback API <callback_api>`.
.. note::
States in callback are not preserved during training, which means callback
objects can not be reused for multiple training sessions without
reinitialization or deepcopy.
.. code-block:: python
for params in parameters_grid:
# be sure to (re)initialize the callbacks before each run
callbacks = [xgb.callback.LearningRateScheduler(custom_rates)]
xgboost.train(params, Xy, callbacks=callbacks)
custom_metric:
.. versionadded:: 1.6.0
Custom metric function. See :doc:`Custom Metric </tutorials/custom_metric_obj>`
for details.
Returns
-------
Booster : a trained booster model
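Example
-------
A minimal sketch with synthetic data:
.. code-block:: python
import numpy as np
import xgboost as xgb
X = np.random.rand(100, 10)
y = np.random.randint(2, size=100)
dtrain = xgb.DMatrix(X, label=y)
booster = xgb.train(
{"max_depth": 2, "eta": 0.3, "objective": "binary:logistic"},
dtrain,
num_boost_round=10,
)
preds = booster.predict(dtrain)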
"""
callbacks = [] if callbacks is None else copy.copy(list(callbacks))
metric_fn = _configure_custom_metric(feval, custom_metric)
evals = list(evals) if evals else []
bst = Booster(params, [dtrain] + [d[0] for d in evals], model_file=xgb_model)
start_iteration = 0
_assert_new_callback(callbacks)
if verbose_eval:
verbose_eval = 1 if verbose_eval is True else verbose_eval
callbacks.append(EvaluationMonitor(period=verbose_eval))
if early_stopping_rounds:
callbacks.append(EarlyStopping(rounds=early_stopping_rounds, maximize=maximize))
cb_container = CallbackContainer(
callbacks,
metric=metric_fn,
# For old `feval` parameter, the behavior is unchanged. For the new
# `custom_metric`, it will receive proper prediction result when custom objective
# is not used.
output_margin=callable(obj) or metric_fn is feval,
)
bst = cb_container.before_training(bst)
for i in range(start_iteration, num_boost_round):
if cb_container.before_iteration(bst, i, dtrain, evals):
break
bst.update(dtrain, i, obj)
if cb_container.after_iteration(bst, i, dtrain, evals):
break
bst = cb_container.after_training(bst)
if evals_result is not None:
evals_result.update(cb_container.history)
# Copy to serialise and deserialise the booster, which resets its state and frees
# training memory
return bst.copy()
class CVPack:
"""Auxiliary data structure to hold one fold of CV."""
def __init__(
self, dtrain: DMatrix, dtest: DMatrix, param: Optional[Union[Dict, List]]
) -> None:
"""Initialize the CVPack."""
self.dtrain = dtrain
self.dtest = dtest
self.watchlist = [(dtrain, "train"), (dtest, "test")]
self.bst = Booster(param, [dtrain, dtest])
def __getattr__(self, name: str) -> Callable:
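# Delegate any attribute not defined on CVPack to the underlying Booster.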
def _inner(*args: Any, **kwargs: Any) -> Any:
return getattr(self.bst, name)(*args, **kwargs)
return _inner
def update(self, iteration: int, fobj: Optional[Objective]) -> None:
"""Update the boosters for one iteration."""
self.bst.update(self.dtrain, iteration, fobj)
def eval(self, iteration: int, feval: Optional[Metric], output_margin: bool) -> str:
"""Evaluate the CVPack for one iteration."""
return self.bst.eval_set(self.watchlist, iteration, feval, output_margin)
class _PackedBooster:
def __init__(self, cvfolds: _CVFolds) -> None:
self.cvfolds = cvfolds
def update(self, iteration: int, obj: Optional[Objective]) -> None:
"""Iterate through folds for update"""
for fold in self.cvfolds:
fold.update(iteration, obj)
def eval(
self, iteration: int, feval: Optional[Metric], output_margin: bool
) -> List[str]:
"""Iterate through folds for eval"""
result = [f.eval(iteration, feval, output_margin) for f in self.cvfolds]
return result
def set_attr(self, **kwargs: Optional[str]) -> Any:
"""Iterate through folds for setting attributes"""
for f in self.cvfolds:
f.bst.set_attr(**kwargs)
def attr(self, key: str) -> Optional[str]:
"""Redirect to booster attr."""
return self.cvfolds[0].bst.attr(key)
def set_param(
self,
params: Union[Dict, Iterable[Tuple[str, Any]], str],
value: Optional[str] = None,
) -> None:
"""Iterate through folds for set_param"""
for f in self.cvfolds:
f.bst.set_param(params, value)
def num_boosted_rounds(self) -> int:
"""Number of boosted rounds."""
return self.cvfolds[0].num_boosted_rounds()
@property
def best_iteration(self) -> int:
"""Get best_iteration"""
return int(cast(int, self.cvfolds[0].bst.attr("best_iteration")))
@property
def best_score(self) -> float:
"""Get best_score."""
return float(cast(float, self.cvfolds[0].bst.attr("best_score")))
def groups_to_rows(groups: List[np.ndarray], boundaries: np.ndarray) -> np.ndarray:
"""
Given group row boundaries, convert group indexes to row indexes
:param groups: list of groups for testing
:param boundaries: row index limits of each group
:return: row indexes for the given groups
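Example (a sketch): with ``boundaries = np.array([0, 2, 5])`` and ``groups = [1]``,
the result is ``array([2, 3, 4])``.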
"""
return np.concatenate([np.arange(boundaries[g], boundaries[g + 1]) for g in groups])
def mkgroupfold(
dall: DMatrix,
nfold: int,
param: BoosterParam,
evals: Sequence[str] = (),
fpreproc: Optional[FPreProcCallable] = None,
shuffle: bool = True,
) -> List[CVPack]:
"""
Make n folds for cross-validation maintaining groups
:return: cross-validation folds
"""
# we have groups for pairwise ranking... get a list of the group indexes
group_boundaries = dall.get_uint_info("group_ptr")
group_sizes = np.diff(group_boundaries)
if shuffle is True:
idx = np.random.permutation(len(group_sizes))
else:
idx = np.arange(len(group_sizes))
# list by fold of test group indexes
out_group_idset = np.array_split(idx, nfold)
# list by fold of train group indexes
in_group_idset = [
np.concatenate([out_group_idset[i] for i in range(nfold) if k != i])
for k in range(nfold)
]
# from the group indexes, convert them to row indexes
in_idset = [
groups_to_rows(in_groups, group_boundaries) for in_groups in in_group_idset
]
out_idset = [
groups_to_rows(out_groups, group_boundaries) for out_groups in out_group_idset
]
# build the folds by taking the appropriate slices
ret = []
for k in range(nfold):
# perform the slicing using the indexes determined by the above methods
dtrain = dall.slice(in_idset[k], allow_groups=True)
dtrain.set_group(group_sizes[in_group_idset[k]])
dtest = dall.slice(out_idset[k], allow_groups=True)
dtest.set_group(group_sizes[out_group_idset[k]])
# run preprocessing on the data set if needed
if fpreproc is not None:
dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())
else:
tparam = param
plst = list(tparam.items()) + [("eval_metric", itm) for itm in evals]
ret.append(CVPack(dtrain, dtest, plst))
return ret
def mknfold(
dall: DMatrix,
nfold: int,
param: BoosterParam,
seed: int,
evals: Sequence[str] = (),
fpreproc: Optional[FPreProcCallable] = None,
stratified: Optional[bool] = False,
folds: Optional[XGBStratifiedKFold] = None,
shuffle: bool = True,
) -> List[CVPack]:
"""
Make an n-fold list of CVPack from random indices.
"""
evals = list(evals)
np.random.seed(seed)
if stratified is False and folds is None:
# Do standard k-fold cross validation. Automatically determine the folds.
if len(dall.get_uint_info("group_ptr")) > 1:
return mkgroupfold(
dall, nfold, param, evals=evals, fpreproc=fpreproc, shuffle=shuffle
)
if shuffle is True:
idx = np.random.permutation(dall.num_row())
else:
idx = np.arange(dall.num_row())
out_idset = np.array_split(idx, nfold)
in_idset = [
np.concatenate([out_idset[i] for i in range(nfold) if k != i])
for k in range(nfold)
]
elif folds is not None:
# Use user specified custom split using indices
try:
in_idset = [x[0] for x in folds]
out_idset = [x[1] for x in folds]
except TypeError:
# Custom stratification using Sklearn KFoldSplit object
splits = list(folds.split(X=dall.get_label(), y=dall.get_label()))
in_idset = [x[0] for x in splits]
out_idset = [x[1] for x in splits]
nfold = len(out_idset)
else:
# Do standard stratified shuffle k-fold split
sfk = XGBStratifiedKFold(n_splits=nfold, shuffle=True, random_state=seed)
splits = list(sfk.split(X=dall.get_label(), y=dall.get_label()))
in_idset = [x[0] for x in splits]
out_idset = [x[1] for x in splits]
nfold = len(out_idset)
ret = []
for k in range(nfold):
# perform the slicing using the indexes determined by the above methods
dtrain = dall.slice(in_idset[k])
dtest = dall.slice(out_idset[k])
# run preprocessing on the data set if needed
if fpreproc is not None:
dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())
else:
tparam = param
plst = list(tparam.items()) + [("eval_metric", itm) for itm in evals]
ret.append(CVPack(dtrain, dtest, plst))
return ret
def cv(
params: BoosterParam,
dtrain: DMatrix,
num_boost_round: int = 10,
nfold: int = 3,
stratified: bool = False,
folds: XGBStratifiedKFold = None,
metrics: Sequence[str] = (),
obj: Optional[Objective] = None,
feval: Optional[Metric] = None,
maximize: Optional[bool] = None,
early_stopping_rounds: Optional[int] = None,
fpreproc: Optional[FPreProcCallable] = None,
as_pandas: bool = True,
verbose_eval: Optional[Union[int, bool]] = None,
show_stdv: bool = True,
seed: int = 0,
callbacks: Optional[Sequence[TrainingCallback]] = None,
shuffle: bool = True,
custom_metric: Optional[Metric] = None,
) -> Union[Dict[str, float], DataFrame]:
# pylint: disable = invalid-name
"""Cross-validation with given parameters.
Parameters
----------
params : dict
Booster params.
dtrain : DMatrix
Data to be trained.
num_boost_round : int
Number of boosting iterations.
nfold : int
Number of folds in CV.
stratified : bool
Perform stratified sampling.
folds : a KFold or StratifiedKFold instance or list of fold indices
Sklearn KFolds or StratifiedKFolds object.
Alternatively may explicitly pass sample indices for each fold.
For ``n`` folds, **folds** should be a length ``n`` list of tuples.
Each tuple is ``(in,out)`` where ``in`` is a list of indices to be used
as the training samples for the ``n`` th fold and ``out`` is a list of
indices to be used as the testing samples for the ``n`` th fold.
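For example (a sketch), a 2-fold split of six rows could be passed as
``folds=[([0, 1, 2], [3, 4, 5]), ([3, 4, 5], [0, 1, 2])]``.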
metrics : string or list of strings
Evaluation metrics to be watched in CV.
obj :
Custom objective function. See :doc:`Custom Objective
</tutorials/custom_metric_obj>` for details.
feval : function
.. deprecated:: 1.6.0
Use `custom_metric` instead.
maximize : bool
Whether to maximize feval.
early_stopping_rounds: int
Activates early stopping. Cross-Validation metric (average of validation
metric computed over CV folds) needs to improve at least once in
every **early_stopping_rounds** round(s) to continue training.
The last entry in the evaluation history will represent the best iteration.
If there's more than one metric in the **eval_metric** parameter given in
**params**, the last metric will be used for early stopping.
fpreproc : function
Preprocessing function that takes (dtrain, dtest, param) and returns
transformed versions of those.
as_pandas : bool, default True
Return pd.DataFrame when pandas is installed.
If False or pandas is not installed, return np.ndarray
verbose_eval : bool, int, or None, default None
Whether to display the progress. If None, progress will be displayed
when np.ndarray is returned. If True, progress will be displayed at
boosting stage. If an integer is given, progress will be displayed
at every given `verbose_eval` boosting stage.
show_stdv : bool, default True
Whether to display the standard deviation in progress.
Results are not affected; the returned results always contain std.
seed : int
Seed used to generate the folds (passed to numpy.random.seed).
callbacks :
List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using
:ref:`Callback API <callback_api>`.
.. note::
States in callback are not preserved during training, which means callback
objects can not be reused for multiple training sessions without
reinitialization or deepcopy.
.. code-block:: python
for params in parameters_grid:
# be sure to (re)initialize the callbacks before each run
callbacks = [xgb.callback.LearningRateScheduler(custom_rates)]
xgboost.train(params, Xy, callbacks=callbacks)
shuffle : bool
Shuffle data before creating folds.
custom_metric :
.. versionadded:: 1.6.0
Custom metric function. See :doc:`Custom Metric </tutorials/custom_metric_obj>`
for details.
Returns
-------
evaluation history : dict or pandas DataFrame
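Example
-------
A minimal sketch with synthetic data:
.. code-block:: python
import numpy as np
import xgboost as xgb
X = np.random.rand(100, 10)
y = np.random.randint(2, size=100)
dtrain = xgb.DMatrix(X, label=y)
history = xgb.cv(
{"max_depth": 2, "objective": "binary:logistic"},
dtrain,
num_boost_round=10,
nfold=5,
metrics=("logloss",),
seed=0,
)
# with ``as_pandas=True`` the result is a DataFrame with columns such as
# "train-logloss-mean" and "test-logloss-std"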
"""
if stratified is True and not SKLEARN_INSTALLED:
raise XGBoostError(
"sklearn needs to be installed in order to use stratified cv"
)
if isinstance(metrics, str):
metrics = [metrics]
params = params.copy()
if isinstance(params, list):
_metrics = [x[1] for x in params if x[0] == "eval_metric"]
params = dict(params)
if "eval_metric" in params:
params["eval_metric"] = _metrics
if (not metrics) and "eval_metric" in params:
if isinstance(params["eval_metric"], list):
metrics = params["eval_metric"]
else:
metrics = [params["eval_metric"]]
params.pop("eval_metric", None)
results: Dict[str, List[float]] = {}
cvfolds = mknfold(
dtrain, nfold, params, seed, metrics, fpreproc, stratified, folds, shuffle
)
metric_fn = _configure_custom_metric(feval, custom_metric)
# setup callbacks
callbacks = [] if callbacks is None else copy.copy(list(callbacks))
_assert_new_callback(callbacks)
if verbose_eval:
verbose_eval = 1 if verbose_eval is True else verbose_eval
callbacks.append(EvaluationMonitor(period=verbose_eval, show_stdv=show_stdv))
if early_stopping_rounds:
callbacks.append(EarlyStopping(rounds=early_stopping_rounds, maximize=maximize))
callbacks_container = CallbackContainer(
callbacks,
metric=metric_fn,
is_cv=True,
output_margin=callable(obj) or metric_fn is feval,
)
booster = _PackedBooster(cvfolds)
callbacks_container.before_training(booster)
for i in range(num_boost_round):
if callbacks_container.before_iteration(booster, i, dtrain, None):
break
booster.update(i, obj)
should_break = callbacks_container.after_iteration(booster, i, dtrain, None)
res = callbacks_container.aggregated_cv
for key, mean, std in cast(List[Tuple[str, float, float]], res):
if key + "-mean" not in results:
results[key + "-mean"] = []
if key + "-std" not in results:
results[key + "-std"] = []
results[key + "-mean"].append(mean)
results[key + "-std"].append(std)
if should_break:
for k in results.keys(): # pylint: disable=consider-iterating-dictionary
results[k] = results[k][: (booster.best_iteration + 1)]
break
if as_pandas:
try:
import pandas as pd
results = pd.DataFrame.from_dict(results)
except ImportError:
pass
callbacks_container.after_training(booster)
return results
| 21,948
| 35.520799
| 97
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/data.py
|
# pylint: disable=too-many-arguments, too-many-branches, too-many-lines
# pylint: disable=too-many-return-statements, import-error
"""Data dispatching for DMatrix."""
import ctypes
import json
import os
import warnings
from typing import Any, Callable, Iterator, List, Optional, Sequence, Tuple, Union, cast
import numpy as np
from ._typing import (
CupyT,
DataType,
FeatureNames,
FeatureTypes,
FloatCompatible,
NumpyDType,
PandasDType,
c_bst_ulong,
)
from .compat import DataFrame, lazy_isinstance
from .core import (
_LIB,
DataIter,
DataSplitMode,
DMatrix,
_check_call,
_cuda_array_interface,
_ProxyDMatrix,
c_str,
from_pystr_to_cstr,
make_jcargs,
)
DispatchedDataBackendReturnType = Tuple[
ctypes.c_void_p, Optional[FeatureNames], Optional[FeatureTypes]
]
CAT_T = "c"
# meta info that can be a matrix instead of vector.
_matrix_meta = {"base_margin", "label"}
def _warn_unused_missing(data: DataType, missing: Optional[FloatCompatible]) -> None:
if (missing is not None) and (not np.isnan(missing)):
warnings.warn(
"`missing` is not used for the current input data type: " + str(type(data)),
UserWarning,
)
def _check_data_shape(data: DataType) -> None:
if hasattr(data, "shape") and len(data.shape) != 2:
raise ValueError("Please reshape the input data into 2-dimensional matrix.")
def _is_scipy_csr(data: DataType) -> bool:
try:
import scipy.sparse
except ImportError:
return False
return isinstance(data, scipy.sparse.csr_matrix)
def _array_interface(data: np.ndarray) -> bytes:
assert (
data.dtype.hasobject is False
), "Input data contains `object` dtype. Expecting numeric data."
interface = data.__array_interface__
if "mask" in interface:
interface["mask"] = interface["mask"].__array_interface__
interface_str = bytes(json.dumps(interface), "utf-8")
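# For example (a sketch), a 2x2 float32 array serializes to JSON like:
# {"data": [<pointer>, false], "strides": null, "descr": [["", "<f4"]],
#  "typestr": "<f4", "shape": [2, 2], "version": 3}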
return interface_str
def transform_scipy_sparse(data: DataType, is_csr: bool) -> DataType:
"""Ensure correct data alignment and data type for scipy sparse inputs. Input should
be either csr or csc matrix.
"""
from scipy.sparse import csc_matrix, csr_matrix
if len(data.indices) != len(data.data):
raise ValueError(f"length mismatch: {len(data.indices)} vs {len(data.data)}")
indptr, _ = _ensure_np_dtype(data.indptr, data.indptr.dtype)
indices, _ = _ensure_np_dtype(data.indices, data.indices.dtype)
values, _ = _ensure_np_dtype(data.data, data.data.dtype)
if (
indptr is not data.indptr
or indices is not data.indices
or values is not data.data
):
if is_csr:
data = csr_matrix((values, indices, indptr), shape=data.shape)
else:
data = csc_matrix((values, indices, indptr), shape=data.shape)
return data
def _from_scipy_csr(
data: DataType,
missing: FloatCompatible,
nthread: int,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
) -> DispatchedDataBackendReturnType:
"""Initialize data from a CSR matrix."""
handle = ctypes.c_void_p()
data = transform_scipy_sparse(data, True)
_check_call(
_LIB.XGDMatrixCreateFromCSR(
_array_interface(data.indptr),
_array_interface(data.indices),
_array_interface(data.data),
c_bst_ulong(data.shape[1]),
make_jcargs(missing=float(missing), nthread=int(nthread)),
ctypes.byref(handle),
)
)
return handle, feature_names, feature_types
def _is_scipy_csc(data: DataType) -> bool:
try:
import scipy.sparse
except ImportError:
return False
return isinstance(data, scipy.sparse.csc_matrix)
def _from_scipy_csc(
data: DataType,
missing: FloatCompatible,
nthread: int,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
) -> DispatchedDataBackendReturnType:
"""Initialize data from a CSC matrix."""
handle = ctypes.c_void_p()
transform_scipy_sparse(data, False)
_check_call(
_LIB.XGDMatrixCreateFromCSC(
_array_interface(data.indptr),
_array_interface(data.indices),
_array_interface(data.data),
c_bst_ulong(data.shape[0]),
make_jcargs(missing=float(missing), nthread=int(nthread)),
ctypes.byref(handle),
)
)
return handle, feature_names, feature_types
def _is_scipy_coo(data: DataType) -> bool:
try:
import scipy.sparse
except ImportError:
return False
return isinstance(data, scipy.sparse.coo_matrix)
def _is_numpy_array(data: DataType) -> bool:
return isinstance(data, (np.ndarray, np.matrix))
def _ensure_np_dtype(
data: DataType, dtype: Optional[NumpyDType]
) -> Tuple[np.ndarray, Optional[NumpyDType]]:
if data.dtype.hasobject or data.dtype in [np.float16, np.bool_]:
dtype = np.float32
data = data.astype(dtype, copy=False)
if not data.flags.aligned:
data = np.require(data, requirements="A")
return data, dtype
def _maybe_np_slice(data: DataType, dtype: Optional[NumpyDType]) -> np.ndarray:
"""Handle numpy slice. This can be removed if we use __array_interface__."""
try:
if not data.flags.c_contiguous:
data = np.array(data, copy=True, dtype=dtype)
else:
data = np.array(data, copy=False, dtype=dtype)
except AttributeError:
data = np.array(data, copy=False, dtype=dtype)
data, dtype = _ensure_np_dtype(data, dtype)
return data
def _from_numpy_array(
data: DataType,
missing: FloatCompatible,
nthread: int,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
data_split_mode: DataSplitMode = DataSplitMode.ROW,
) -> DispatchedDataBackendReturnType:
"""Initialize data from a 2-D numpy matrix."""
_check_data_shape(data)
data, _ = _ensure_np_dtype(data, data.dtype)
handle = ctypes.c_void_p()
_check_call(
_LIB.XGDMatrixCreateFromDense(
_array_interface(data),
make_jcargs(
missing=float(missing),
nthread=int(nthread),
data_split_mode=int(data_split_mode),
),
ctypes.byref(handle),
)
)
return handle, feature_names, feature_types
def _is_pandas_df(data: DataType) -> bool:
try:
import pandas as pd
except ImportError:
return False
return isinstance(data, pd.DataFrame)
def _is_modin_df(data: DataType) -> bool:
try:
import modin.pandas as pd
except ImportError:
return False
return isinstance(data, pd.DataFrame)
_pandas_dtype_mapper = {
"int8": "int",
"int16": "int",
"int32": "int",
"int64": "int",
"uint8": "int",
"uint16": "int",
"uint32": "int",
"uint64": "int",
"float16": "float",
"float32": "float",
"float64": "float",
"bool": "i",
}
# nullable types
pandas_nullable_mapper = {
"Int8": "int",
"Int16": "int",
"Int32": "int",
"Int64": "int",
"UInt8": "i",
"UInt16": "i",
"UInt32": "i",
"UInt64": "i",
"Float32": "float",
"Float64": "float",
"boolean": "i",
}
pandas_pyarrow_mapper = {
"int8[pyarrow]": "i",
"int16[pyarrow]": "i",
"int32[pyarrow]": "i",
"int64[pyarrow]": "i",
"uint8[pyarrow]": "i",
"uint16[pyarrow]": "i",
"uint32[pyarrow]": "i",
"uint64[pyarrow]": "i",
"float[pyarrow]": "float",
"float32[pyarrow]": "float",
"double[pyarrow]": "float",
"float64[pyarrow]": "float",
"bool[pyarrow]": "i",
}
_pandas_dtype_mapper.update(pandas_nullable_mapper)
_pandas_dtype_mapper.update(pandas_pyarrow_mapper)
_ENABLE_CAT_ERR = (
"When categorical type is supplied, the experimental DMatrix parameter "
"`enable_categorical` must be set to `True`."
)
def _invalid_dataframe_dtype(data: DataType) -> None:
# pandas series has `dtypes` but it's just a single object
# cudf series doesn't have `dtypes`.
if hasattr(data, "dtypes") and hasattr(data.dtypes, "__iter__"):
bad_fields = [
f"{data.columns[i]}: {dtype}"
for i, dtype in enumerate(data.dtypes)
if dtype.name not in _pandas_dtype_mapper
]
err = " Invalid columns:" + ", ".join(bad_fields)
else:
err = ""
type_err = "DataFrame.dtypes for data must be int, float, bool or category."
msg = f"""{type_err} {_ENABLE_CAT_ERR} {err}"""
raise ValueError(msg)
def pandas_feature_info(
data: DataFrame,
meta: Optional[str],
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
enable_categorical: bool,
) -> Tuple[Optional[FeatureNames], Optional[FeatureTypes]]:
"""Handle feature info for pandas dataframe."""
import pandas as pd
from pandas.api.types import is_categorical_dtype, is_sparse
# handle feature names
if feature_names is None and meta is None:
if isinstance(data.columns, pd.MultiIndex):
feature_names = [" ".join([str(x) for x in i]) for i in data.columns]
elif isinstance(data.columns, (pd.Index, pd.RangeIndex)):
feature_names = list(map(str, data.columns))
else:
feature_names = data.columns.format()
# handle feature types
if feature_types is None and meta is None:
feature_types = []
for dtype in data.dtypes:
if is_sparse(dtype):
feature_types.append(_pandas_dtype_mapper[dtype.subtype.name])
elif (
is_categorical_dtype(dtype) or is_pa_ext_categorical_dtype(dtype)
) and enable_categorical:
feature_types.append(CAT_T)
else:
feature_types.append(_pandas_dtype_mapper[dtype.name])
return feature_names, feature_types
def is_nullable_dtype(dtype: PandasDType) -> bool:
"""Whether dtype is a pandas nullable type."""
from pandas.api.types import (
is_bool_dtype,
is_categorical_dtype,
is_float_dtype,
is_integer_dtype,
)
is_int = is_integer_dtype(dtype) and dtype.name in pandas_nullable_mapper
# np.bool has alias `bool`, while pd.BooleanDtype has `boolean`.
is_bool = is_bool_dtype(dtype) and dtype.name == "boolean"
is_float = is_float_dtype(dtype) and dtype.name in pandas_nullable_mapper
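# e.g. the pandas extension dtypes "Int64", "Float32" and "boolean" are nullable,
# while plain numpy dtypes such as int64 or bool are not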
return is_int or is_bool or is_float or is_categorical_dtype(dtype)
def is_pa_ext_dtype(dtype: Any) -> bool:
"""Return whether dtype is a pyarrow extension type for pandas"""
return hasattr(dtype, "pyarrow_dtype")
def is_pa_ext_categorical_dtype(dtype: Any) -> bool:
"""Check whether dtype is a dictionary type."""
return lazy_isinstance(
getattr(dtype, "pyarrow_dtype", None), "pyarrow.lib", "DictionaryType"
)
def pandas_cat_null(data: DataFrame) -> DataFrame:
"""Handle categorical dtype and nullable extension types from pandas."""
import pandas as pd
from pandas.api.types import is_categorical_dtype
# handle category codes and nullable.
cat_columns = []
nul_columns = []
# avoid an unnecessary conversion if possible
for col, dtype in zip(data.columns, data.dtypes):
if is_categorical_dtype(dtype):
cat_columns.append(col)
elif is_pa_ext_categorical_dtype(dtype):
raise ValueError(
"pyarrow dictionary type is not supported. Use pandas category instead."
)
elif is_nullable_dtype(dtype):
nul_columns.append(col)
if cat_columns or nul_columns:
# Avoid transformation due to: PerformanceWarning: DataFrame is highly
# fragmented
transformed = data.copy(deep=False)
else:
transformed = data
def cat_codes(ser: pd.Series) -> pd.Series:
if is_categorical_dtype(ser.dtype):
return ser.cat.codes
assert is_pa_ext_categorical_dtype(ser.dtype)
# Not yet supported, the index is not ordered for some reason. Alternately:
# `combine_chunks().to_pandas().cat.codes`. The result is the same.
return ser.array.__arrow_array__().combine_chunks().dictionary_encode().indices
if cat_columns:
# DF doesn't have the cat attribute, as a result, we use apply here
transformed[cat_columns] = (
transformed[cat_columns]
.apply(cat_codes)
.astype(np.float32)
.replace(-1.0, np.NaN)
)
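# e.g. (a sketch) a category column ["a", "b", <NA>] ends up as float32 codes
# [0.0, 1.0, NaN], since pandas encodes a missing category as code -1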
if nul_columns:
transformed[nul_columns] = transformed[nul_columns].astype(np.float32)
# TODO(jiamingy): Investigate the possibility of using dataframe protocol or arrow
# IPC format for pandas so that we can apply the data transformation inside XGBoost
# for better memory efficiency.
return transformed
def pandas_ext_num_types(data: DataFrame) -> DataFrame:
"""Experimental support for handling pyarrow extension numeric types."""
import pandas as pd
import pyarrow as pa
for col, dtype in zip(data.columns, data.dtypes):
if not is_pa_ext_dtype(dtype):
continue
# No copy, callstack:
# pandas.core.internals.managers.SingleBlockManager.array_values()
# pandas.core.internals.blocks.EABackedBlock.values
d_array: pd.arrays.ArrowExtensionArray = data[col].array
# no copy in __arrow_array__
# ArrowExtensionArray._data is a chunked array
aa: pa.ChunkedArray = d_array.__arrow_array__()
chunk: pa.Array = aa.combine_chunks()
# Alternately, we can use chunk.buffers(), which returns a list of buffers and
# we need to concatenate them ourselves.
arr = chunk.__array__()
data[col] = arr
return data
def _transform_pandas_df(
data: DataFrame,
enable_categorical: bool,
feature_names: Optional[FeatureNames] = None,
feature_types: Optional[FeatureTypes] = None,
meta: Optional[str] = None,
meta_type: Optional[NumpyDType] = None,
) -> Tuple[np.ndarray, Optional[FeatureNames], Optional[FeatureTypes]]:
from pandas.api.types import is_categorical_dtype, is_sparse
pyarrow_extension = False
for dtype in data.dtypes:
if not (
(dtype.name in _pandas_dtype_mapper)
or is_sparse(dtype)
or (is_categorical_dtype(dtype) and enable_categorical)
or is_pa_ext_dtype(dtype)
):
_invalid_dataframe_dtype(data)
if is_pa_ext_dtype(dtype):
pyarrow_extension = True
feature_names, feature_types = pandas_feature_info(
data, meta, feature_names, feature_types, enable_categorical
)
transformed = pandas_cat_null(data)
if pyarrow_extension:
if transformed is data:
transformed = data.copy(deep=False)
transformed = pandas_ext_num_types(transformed)
if meta and len(data.columns) > 1 and meta not in _matrix_meta:
raise ValueError(f"DataFrame for {meta} cannot have multiple columns")
dtype = meta_type if meta_type else np.float32
arr: np.ndarray = transformed.values
if meta_type:
arr = arr.astype(dtype)
return arr, feature_names, feature_types
def _from_pandas_df(
data: DataFrame,
enable_categorical: bool,
missing: FloatCompatible,
nthread: int,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
) -> DispatchedDataBackendReturnType:
data, feature_names, feature_types = _transform_pandas_df(
data, enable_categorical, feature_names, feature_types
)
return _from_numpy_array(data, missing, nthread, feature_names, feature_types)
def _is_pandas_series(data: DataType) -> bool:
try:
import pandas as pd
except ImportError:
return False
return isinstance(data, pd.Series)
def _meta_from_pandas_series(
data: DataType, name: str, dtype: Optional[NumpyDType], handle: ctypes.c_void_p
) -> None:
"""Helper to transform a pandas series for meta data such as labels."""
data = data.values.astype("float")
from pandas.api.types import is_sparse
if is_sparse(data):
data = data.to_dense() # type: ignore
assert len(data.shape) == 1 or data.shape[1] == 0 or data.shape[1] == 1
_meta_from_numpy(data, name, dtype, handle)
def _is_modin_series(data: DataType) -> bool:
try:
import modin.pandas as pd
except ImportError:
return False
return isinstance(data, pd.Series)
def _from_pandas_series(
data: DataType,
missing: FloatCompatible,
nthread: int,
enable_categorical: bool,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
) -> DispatchedDataBackendReturnType:
from pandas.api.types import is_categorical_dtype
if (data.dtype.name not in _pandas_dtype_mapper) and not (
is_categorical_dtype(data.dtype) and enable_categorical
):
_invalid_dataframe_dtype(data)
if enable_categorical and is_categorical_dtype(data.dtype):
data = data.cat.codes
return _from_numpy_array(
data.values.reshape(data.shape[0], 1).astype("float"),
missing,
nthread,
feature_names,
feature_types,
)
def _is_dt_df(data: DataType) -> bool:
return lazy_isinstance(data, "datatable", "Frame") or lazy_isinstance(
data, "datatable", "DataTable"
)
def _transform_dt_df(
data: DataType,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
meta: Optional[str] = None,
meta_type: Optional[NumpyDType] = None,
) -> Tuple[np.ndarray, Optional[FeatureNames], Optional[FeatureTypes]]:
"""Validate feature names and types for a datatable frame."""
_dt_type_mapper = {"bool": "bool", "int": "int", "real": "float"}
_dt_type_mapper2 = {"bool": "i", "int": "int", "real": "float"}
if meta and data.shape[1] > 1:
raise ValueError("DataTable for meta info cannot have multiple columns")
if meta:
meta_type = "float" if meta_type is None else meta_type
# below requires new dt version
# extract first column
data = data.to_numpy()[:, 0].astype(meta_type)
return data, None, None
data_types_names = tuple(lt.name for lt in data.ltypes)
bad_fields = [
data.names[i]
for i, type_name in enumerate(data_types_names)
if type_name not in _dt_type_mapper
]
if bad_fields:
msg = """DataFrame.types for data must be int, float or bool.
Did not expect the data types in fields """
raise ValueError(msg + ", ".join(bad_fields))
if feature_names is None and meta is None:
feature_names = data.names
# always return stypes for dt ingestion
if feature_types is not None:
raise ValueError("DataTable has its own feature types, cannot pass them in.")
feature_types = np.vectorize(_dt_type_mapper2.get)(data_types_names).tolist()
return data, feature_names, feature_types
def _from_dt_df(
data: DataType,
missing: Optional[FloatCompatible],
nthread: int,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
enable_categorical: bool,
) -> DispatchedDataBackendReturnType:
if enable_categorical:
raise ValueError("categorical data in datatable is not supported yet.")
data, feature_names, feature_types = _transform_dt_df(
data, feature_names, feature_types, None, None
)
ptrs = (ctypes.c_void_p * data.ncols)()
if hasattr(data, "internal") and hasattr(data.internal, "column"):
# datatable>0.8.0
for icol in range(data.ncols):
col = data.internal.column(icol)
ptr = col.data_pointer
ptrs[icol] = ctypes.c_void_p(ptr)
else:
# datatable<=0.8.0
from datatable.internal import (
frame_column_data_r, # pylint: disable=no-name-in-module
)
for icol in range(data.ncols):
ptrs[icol] = frame_column_data_r(data, icol)
# always return stypes for dt ingestion
feature_type_strings = (ctypes.c_char_p * data.ncols)()
for icol in range(data.ncols):
feature_type_strings[icol] = ctypes.c_char_p(
data.stypes[icol].name.encode("utf-8")
)
_warn_unused_missing(data, missing)
handle = ctypes.c_void_p()
_check_call(
_LIB.XGDMatrixCreateFromDT(
ptrs,
feature_type_strings,
c_bst_ulong(data.shape[0]),
c_bst_ulong(data.shape[1]),
ctypes.byref(handle),
ctypes.c_int(nthread),
)
)
return handle, feature_names, feature_types
def _is_arrow(data: DataType) -> bool:
try:
import pyarrow as pa
from pyarrow import dataset as arrow_dataset
return isinstance(data, (pa.Table, arrow_dataset.Dataset))
except ImportError:
return False
def record_batch_data_iter(data_iter: Iterator) -> Callable:
"""Data iterator used to ingest Arrow columnar record batches. We are not using
class DataIter because it is only intended for building Device DMatrix and external
memory DMatrix.
"""
from pyarrow.cffi import ffi
c_schemas: List[ffi.CData] = []
c_arrays: List[ffi.CData] = []
def _next(data_handle: int) -> int:
from pyarrow.cffi import ffi
try:
batch = next(data_iter)
c_schemas.append(ffi.new("struct ArrowSchema*"))
c_arrays.append(ffi.new("struct ArrowArray*"))
ptr_schema = int(ffi.cast("uintptr_t", c_schemas[-1]))
ptr_array = int(ffi.cast("uintptr_t", c_arrays[-1]))
# pylint: disable=protected-access
batch._export_to_c(ptr_array, ptr_schema)
_check_call(
_LIB.XGImportArrowRecordBatch(
ctypes.c_void_p(data_handle),
ctypes.c_void_p(ptr_array),
ctypes.c_void_p(ptr_schema),
)
)
return 1
except StopIteration:
return 0
return _next
def _from_arrow(
data: DataType,
missing: FloatCompatible,
nthread: int,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
enable_categorical: bool,
) -> DispatchedDataBackendReturnType:
import pyarrow as pa
if not all(
pa.types.is_integer(t) or pa.types.is_floating(t) for t in data.schema.types
):
raise ValueError(
"Features in the dataset can only be integers or floating point numbers"
)
if enable_categorical:
raise ValueError("categorical data in arrow is not supported yet.")
batches = data.to_batches()
rb_iter = iter(batches)
it = record_batch_data_iter(rb_iter)
next_callback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p)(it)
handle = ctypes.c_void_p()
config = from_pystr_to_cstr(
json.dumps({"missing": missing, "nthread": nthread, "nbatch": len(batches)})
)
_check_call(
_LIB.XGDMatrixCreateFromArrowCallback(
next_callback,
config,
ctypes.byref(handle),
)
)
return handle, feature_names, feature_types
def _is_cudf_df(data: DataType) -> bool:
return lazy_isinstance(data, "cudf.core.dataframe", "DataFrame")
def _cudf_array_interfaces(data: DataType, cat_codes: list) -> bytes:
"""Extract CuDF ``__cuda_array_interface__`` as a JSON-encoded list of column
interfaces. For categorical columns, the interface of the corresponding entry in
``cat_codes`` is used instead. The caller can otherwise ignore ``cat_codes``, but
must keep it alive until usage of the array interfaces is finished.
"""
try:
from cudf.api.types import is_categorical_dtype
except ImportError:
from cudf.utils.dtypes import is_categorical_dtype
interfaces = []
def append(interface: dict) -> None:
if "mask" in interface:
interface["mask"] = interface["mask"].__cuda_array_interface__
interfaces.append(interface)
if _is_cudf_ser(data):
if is_categorical_dtype(data.dtype):
interface = cat_codes[0].__cuda_array_interface__
else:
interface = data.__cuda_array_interface__
append(interface)
else:
for i, col in enumerate(data):
if is_categorical_dtype(data[col].dtype):
codes = cat_codes[i]
interface = codes.__cuda_array_interface__
else:
interface = data[col].__cuda_array_interface__
append(interface)
interfaces_str = from_pystr_to_cstr(json.dumps(interfaces))
return interfaces_str
def _transform_cudf_df(
data: DataType,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
enable_categorical: bool,
) -> Tuple[ctypes.c_void_p, list, Optional[FeatureNames], Optional[FeatureTypes]]:
try:
from cudf.api.types import is_categorical_dtype
except ImportError:
from cudf.utils.dtypes import is_categorical_dtype
if _is_cudf_ser(data):
dtypes = [data.dtype]
else:
dtypes = data.dtypes
if not all(
dtype.name in _pandas_dtype_mapper
or (is_categorical_dtype(dtype) and enable_categorical)
for dtype in dtypes
):
_invalid_dataframe_dtype(data)
# handle feature names
if feature_names is None:
if _is_cudf_ser(data):
feature_names = [data.name]
elif lazy_isinstance(data.columns, "cudf.core.multiindex", "MultiIndex"):
feature_names = [" ".join([str(x) for x in i]) for i in data.columns]
elif (
lazy_isinstance(data.columns, "cudf.core.index", "RangeIndex")
or lazy_isinstance(data.columns, "cudf.core.index", "Int64Index")
# Unique to cuDF, no equivalent in pandas 1.3.3
or lazy_isinstance(data.columns, "cudf.core.index", "Int32Index")
):
feature_names = list(map(str, data.columns))
else:
feature_names = data.columns.format()
# handle feature types
if feature_types is None:
feature_types = []
for dtype in dtypes:
if is_categorical_dtype(dtype) and enable_categorical:
feature_types.append(CAT_T)
else:
feature_types.append(_pandas_dtype_mapper[dtype.name])
# handle categorical data
cat_codes = []
if _is_cudf_ser(data):
# unlike pandas, cuDF uses NA for missing data.
if is_categorical_dtype(data.dtype) and enable_categorical:
codes = data.cat.codes
cat_codes.append(codes)
else:
for col in data:
dtype = data[col].dtype
if is_categorical_dtype(dtype) and enable_categorical:
codes = data[col].cat.codes
cat_codes.append(codes)
elif is_categorical_dtype(dtype):
raise ValueError(_ENABLE_CAT_ERR)
else:
cat_codes.append([])
return data, cat_codes, feature_names, feature_types
def _from_cudf_df(
data: DataType,
missing: FloatCompatible,
nthread: int,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
enable_categorical: bool,
) -> DispatchedDataBackendReturnType:
data, cat_codes, feature_names, feature_types = _transform_cudf_df(
data, feature_names, feature_types, enable_categorical
)
interfaces_str = _cudf_array_interfaces(data, cat_codes)
handle = ctypes.c_void_p()
config = bytes(json.dumps({"missing": missing, "nthread": nthread}), "utf-8")
_check_call(
_LIB.XGDMatrixCreateFromCudaColumnar(
interfaces_str,
config,
ctypes.byref(handle),
)
)
return handle, feature_names, feature_types
def _is_cudf_ser(data: DataType) -> bool:
return lazy_isinstance(data, "cudf.core.series", "Series")
def _is_cupy_array(data: DataType) -> bool:
return any(
lazy_isinstance(data, n, "ndarray")
for n in ("cupy.core.core", "cupy", "cupy._core.core")
)
def _transform_cupy_array(data: DataType) -> CupyT:
import cupy # pylint: disable=import-error
if not hasattr(data, "__cuda_array_interface__") and hasattr(data, "__array__"):
data = cupy.array(data, copy=False)
if data.dtype.hasobject or data.dtype in [cupy.bool_]:
data = data.astype(cupy.float32, copy=False)
return data
def _from_cupy_array(
data: DataType,
missing: FloatCompatible,
nthread: int,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
) -> DispatchedDataBackendReturnType:
"""Initialize DMatrix from cupy ndarray."""
data = _transform_cupy_array(data)
interface_str = _cuda_array_interface(data)
handle = ctypes.c_void_p()
config = bytes(json.dumps({"missing": missing, "nthread": nthread}), "utf-8")
_check_call(
_LIB.XGDMatrixCreateFromCudaArrayInterface(
interface_str, config, ctypes.byref(handle)
)
)
return handle, feature_names, feature_types
def _is_cupy_csr(data: DataType) -> bool:
try:
import cupyx
except ImportError:
return False
return isinstance(data, cupyx.scipy.sparse.csr_matrix)
def _is_cupy_csc(data: DataType) -> bool:
try:
import cupyx
except ImportError:
return False
return isinstance(data, cupyx.scipy.sparse.csc_matrix)
def _is_dlpack(data: DataType) -> bool:
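# A DLPack capsule prints like `<capsule object "dltensor" at 0x...>` and its type
# name contains "PyCapsule", hence the string-based checks below.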
return "PyCapsule" in str(type(data)) and "dltensor" in str(data)
def _transform_dlpack(data: DataType) -> bool:
from cupy import fromDlpack # pylint: disable=E0401
assert "used_dltensor" not in str(data)
data = fromDlpack(data)
return data
def _from_dlpack(
data: DataType,
missing: FloatCompatible,
nthread: int,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
) -> DispatchedDataBackendReturnType:
data = _transform_dlpack(data)
return _from_cupy_array(data, missing, nthread, feature_names, feature_types)
def _is_uri(data: DataType) -> bool:
return isinstance(data, (str, os.PathLike))
def _from_uri(
data: DataType,
missing: Optional[FloatCompatible],
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
data_split_mode: DataSplitMode = DataSplitMode.ROW,
) -> DispatchedDataBackendReturnType:
_warn_unused_missing(data, missing)
handle = ctypes.c_void_p()
data = os.fspath(os.path.expanduser(data))
args = {
"uri": str(data),
"data_split_mode": int(data_split_mode),
}
config = bytes(json.dumps(args), "utf-8")
_check_call(_LIB.XGDMatrixCreateFromURI(config, ctypes.byref(handle)))
return handle, feature_names, feature_types
def _is_list(data: DataType) -> bool:
return isinstance(data, list)
def _from_list(
data: Sequence,
missing: FloatCompatible,
n_threads: int,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
) -> DispatchedDataBackendReturnType:
array = np.array(data)
_check_data_shape(array)  # check the converted array; a plain list has no shape
return _from_numpy_array(array, missing, n_threads, feature_names, feature_types)
def _is_tuple(data: DataType) -> bool:
return isinstance(data, tuple)
def _from_tuple(
data: Sequence,
missing: FloatCompatible,
n_threads: int,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
) -> DispatchedDataBackendReturnType:
return _from_list(data, missing, n_threads, feature_names, feature_types)
def _is_iter(data: DataType) -> bool:
return isinstance(data, DataIter)
def _has_array_protocol(data: DataType) -> bool:
return hasattr(data, "__array__")
def _convert_unknown_data(data: DataType) -> DataType:
warnings.warn(
f"Unknown data type: {type(data)}, trying to convert it to csr_matrix",
UserWarning,
)
try:
import scipy.sparse
except ImportError:
return None
try:
data = scipy.sparse.csr_matrix(data)
except Exception: # pylint: disable=broad-except
return None
return data
def dispatch_data_backend(
data: DataType,
missing: FloatCompatible, # Or Optional[Float]
threads: int,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
enable_categorical: bool = False,
data_split_mode: DataSplitMode = DataSplitMode.ROW,
) -> DispatchedDataBackendReturnType:
"""Dispatch data for DMatrix."""
if not _is_cudf_ser(data) and not _is_pandas_series(data):
_check_data_shape(data)
if _is_scipy_csr(data):
return _from_scipy_csr(data, missing, threads, feature_names, feature_types)
if _is_scipy_csc(data):
return _from_scipy_csc(data, missing, threads, feature_names, feature_types)
if _is_scipy_coo(data):
return _from_scipy_csr(
data.tocsr(), missing, threads, feature_names, feature_types
)
if _is_numpy_array(data):
return _from_numpy_array(
data, missing, threads, feature_names, feature_types, data_split_mode
)
if _is_uri(data):
return _from_uri(data, missing, feature_names, feature_types, data_split_mode)
if _is_list(data):
return _from_list(data, missing, threads, feature_names, feature_types)
if _is_tuple(data):
return _from_tuple(data, missing, threads, feature_names, feature_types)
if _is_pandas_series(data):
import pandas as pd
data = pd.DataFrame(data)
if _is_pandas_df(data):
return _from_pandas_df(
data, enable_categorical, missing, threads, feature_names, feature_types
)
if _is_cudf_df(data) or _is_cudf_ser(data):
return _from_cudf_df(
data, missing, threads, feature_names, feature_types, enable_categorical
)
if _is_cupy_array(data):
return _from_cupy_array(data, missing, threads, feature_names, feature_types)
if _is_cupy_csr(data):
raise TypeError("cupyx CSR is not supported yet.")
if _is_cupy_csc(data):
raise TypeError("cupyx CSC is not supported yet.")
if _is_dlpack(data):
return _from_dlpack(data, missing, threads, feature_names, feature_types)
if _is_dt_df(data):
_warn_unused_missing(data, missing)
return _from_dt_df(
data, missing, threads, feature_names, feature_types, enable_categorical
)
if _is_modin_df(data):
return _from_pandas_df(
data, enable_categorical, missing, threads, feature_names, feature_types
)
if _is_modin_series(data):
return _from_pandas_series(
data, missing, threads, enable_categorical, feature_names, feature_types
)
if _is_arrow(data):
return _from_arrow(
data, missing, threads, feature_names, feature_types, enable_categorical
)
if _has_array_protocol(data):
array = np.asarray(data)
return _from_numpy_array(array, missing, threads, feature_names, feature_types)
converted = _convert_unknown_data(data)
if converted is not None:
return _from_scipy_csr(
converted, missing, threads, feature_names, feature_types
)
raise TypeError("Not supported type for data: " + str(type(data)))
def _validate_meta_shape(data: DataType, name: str) -> None:
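# Most meta fields must be 1-D (or a degenerate 2-D column); fields in
# _matrix_meta (label, base_margin) are allowed to be genuinely 2-D.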
if hasattr(data, "shape"):
msg = f"Invalid shape: {data.shape} for {name}"
if name in _matrix_meta:
if len(data.shape) > 2:
raise ValueError(msg)
return
if len(data.shape) > 2 or (
len(data.shape) == 2 and (data.shape[1] != 0 and data.shape[1] != 1)
):
raise ValueError(f"Invalid shape: {data.shape} for {name}")
def _meta_from_numpy(
data: np.ndarray,
field: str,
dtype: Optional[NumpyDType],
handle: ctypes.c_void_p,
) -> None:
data, dtype = _ensure_np_dtype(data, dtype)
interface = data.__array_interface__
if interface.get("mask", None) is not None:
raise ValueError("Masked array is not supported.")
interface_str = _array_interface(data)
_check_call(_LIB.XGDMatrixSetInfoFromInterface(handle, c_str(field), interface_str))
def _meta_from_list(
data: Sequence, field: str, dtype: Optional[NumpyDType], handle: ctypes.c_void_p
) -> None:
data_np = np.array(data)
_meta_from_numpy(data_np, field, dtype, handle)
def _meta_from_tuple(
data: Sequence, field: str, dtype: Optional[NumpyDType], handle: ctypes.c_void_p
) -> None:
return _meta_from_list(data, field, dtype, handle)
def _meta_from_cudf_df(data: DataType, field: str, handle: ctypes.c_void_p) -> None:
if field not in _matrix_meta:
_meta_from_cudf_series(data.iloc[:, 0], field, handle)
else:
data = data.values
interface = _cuda_array_interface(data)
_check_call(_LIB.XGDMatrixSetInfoFromInterface(handle, c_str(field), interface))
def _meta_from_cudf_series(data: DataType, field: str, handle: ctypes.c_void_p) -> None:
interface = bytes(json.dumps([data.__cuda_array_interface__], indent=2), "utf-8")
_check_call(_LIB.XGDMatrixSetInfoFromInterface(handle, c_str(field), interface))
def _meta_from_cupy_array(data: DataType, field: str, handle: ctypes.c_void_p) -> None:
data = _transform_cupy_array(data)
interface = bytes(json.dumps([data.__cuda_array_interface__], indent=2), "utf-8")
_check_call(_LIB.XGDMatrixSetInfoFromInterface(handle, c_str(field), interface))
def _meta_from_dt(
data: DataType, field: str, dtype: Optional[NumpyDType], handle: ctypes.c_void_p
) -> None:
data, _, _ = _transform_dt_df(data, None, None, field, dtype)
_meta_from_numpy(data, field, dtype, handle)
def dispatch_meta_backend(
matrix: DMatrix, data: DataType, name: str, dtype: Optional[NumpyDType] = None
) -> None:
"""Dispatch for meta info."""
handle = matrix.handle
assert handle is not None
_validate_meta_shape(data, name)
if data is None:
return
if _is_list(data):
_meta_from_list(data, name, dtype, handle)
return
if _is_tuple(data):
_meta_from_tuple(data, name, dtype, handle)
return
if _is_numpy_array(data):
_meta_from_numpy(data, name, dtype, handle)
return
if _is_pandas_df(data):
data, _, _ = _transform_pandas_df(data, False, meta=name, meta_type=dtype)
_meta_from_numpy(data, name, dtype, handle)
return
if _is_pandas_series(data):
_meta_from_pandas_series(data, name, dtype, handle)
return
if _is_dlpack(data):
data = _transform_dlpack(data)
_meta_from_cupy_array(data, name, handle)
return
if _is_cupy_array(data):
_meta_from_cupy_array(data, name, handle)
return
if _is_cudf_ser(data):
_meta_from_cudf_series(data, name, handle)
return
if _is_cudf_df(data):
_meta_from_cudf_df(data, name, handle)
return
if _is_dt_df(data):
_meta_from_dt(data, name, dtype, handle)
return
if _is_modin_df(data):
data, _, _ = _transform_pandas_df(data, False, meta=name, meta_type=dtype)
_meta_from_numpy(data, name, dtype, handle)
return
if _is_modin_series(data):
data = data.values.astype("float")
assert len(data.shape) == 1 or data.shape[1] == 0 or data.shape[1] == 1
_meta_from_numpy(data, name, dtype, handle)
return
if _has_array_protocol(data):
# pyarrow goes here.
array = np.asarray(data)
_meta_from_numpy(array, name, dtype, handle)
return
raise TypeError("Unsupported type for " + name, str(type(data)))
class SingleBatchInternalIter(DataIter):  # pylint: disable=R0902
"""An iterator for single batch data to help create a device DMatrix.
Transforming the input directly into a histogram with the normal single-batch
data API cannot access the weights needed for sketching, so this iterator acts
as a staging area for meta info.
"""
def __init__(self, **kwargs: Any) -> None:
self.kwargs = kwargs
self.it = 0 # pylint: disable=invalid-name
# This does not necessarily increase memory usage as the data transformation
# might use memory.
super().__init__(release_data=False)
def next(self, input_data: Callable) -> int:
if self.it == 1:
return 0
self.it += 1
input_data(**self.kwargs)
return 1
def reset(self) -> None:
self.it = 0
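# A sketch of how SingleBatchInternalIter is used internally (names illustrative):
#     it = SingleBatchInternalIter(data=X, label=y, weight=w)
# the single batch is then re-fed through ``input_data(**kwargs)`` when building a
# QuantileDMatrix, so meta info such as weights is visible to the sketching step.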
def _proxy_transform(
data: DataType,
feature_names: Optional[FeatureNames],
feature_types: Optional[FeatureTypes],
enable_categorical: bool,
) -> Tuple[
Union[bool, ctypes.c_void_p, np.ndarray],
Optional[list],
Optional[FeatureNames],
Optional[FeatureTypes],
]:
if _is_cudf_df(data) or _is_cudf_ser(data):
return _transform_cudf_df(
data, feature_names, feature_types, enable_categorical
)
if _is_cupy_array(data):
data = _transform_cupy_array(data)
return data, None, feature_names, feature_types
if _is_dlpack(data):
return _transform_dlpack(data), None, feature_names, feature_types
if _is_list(data) or _is_tuple(data):
data = np.array(data)
if _is_numpy_array(data):
data, _ = _ensure_np_dtype(data, data.dtype)
return data, None, feature_names, feature_types
if _is_scipy_csr(data):
data = transform_scipy_sparse(data, True)
return data, None, feature_names, feature_types
if _is_pandas_series(data):
import pandas as pd
data = pd.DataFrame(data)
if _is_pandas_df(data):
arr, feature_names, feature_types = _transform_pandas_df(
data, enable_categorical, feature_names, feature_types
)
arr, _ = _ensure_np_dtype(arr, arr.dtype)
return arr, None, feature_names, feature_types
raise TypeError("Value type is not supported for data iterator: " + str(type(data)))
def dispatch_proxy_set_data(
proxy: _ProxyDMatrix,
data: DataType,
cat_codes: Optional[list],
allow_host: bool,
) -> None:
"""Dispatch for QuantileDMatrix."""
if not _is_cudf_ser(data) and not _is_pandas_series(data):
_check_data_shape(data)
if _is_cudf_df(data):
# pylint: disable=W0212
proxy._set_data_from_cuda_columnar(data, cast(List, cat_codes))
return
if _is_cudf_ser(data):
# pylint: disable=W0212
proxy._set_data_from_cuda_columnar(data, cast(List, cat_codes))
return
if _is_cupy_array(data):
proxy._set_data_from_cuda_interface(data) # pylint: disable=W0212
return
if _is_dlpack(data):
data = _transform_dlpack(data)
proxy._set_data_from_cuda_interface(data) # pylint: disable=W0212
return
err = TypeError("Value type is not supported for data iterator: " + str(type(data)))
if not allow_host:
raise err
if _is_numpy_array(data):
_check_data_shape(data)
proxy._set_data_from_array(data) # pylint: disable=W0212
return
if _is_scipy_csr(data):
proxy._set_data_from_csr(data) # pylint: disable=W0212
return
raise err
| 43,680
| 31.452452
| 88
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/config.py
|
# pylint: disable=missing-function-docstring
"""Global configuration for XGBoost"""
import ctypes
import json
from contextlib import contextmanager
from functools import wraps
from typing import Any, Callable, Dict, Iterator, Optional, cast
from ._typing import _F
from .core import _LIB, _check_call, c_str, py_str
def config_doc(
*,
header: Optional[str] = None,
extra_note: Optional[str] = None,
parameters: Optional[str] = None,
returns: Optional[str] = None,
see_also: Optional[str] = None,
) -> Callable[[_F], _F]:
"""Decorator to format docstring for config functions.
Parameters
----------
header: str
An introduction to the function
extra_note: str
Additional notes
parameters: str
Parameters of the function
returns: str
Return value
see_also: str
Related functions
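Example
-------
A minimal sketch decorating a hypothetical helper function:
.. code-block:: python
@config_doc(header="Reset the global configuration.")
def reset_config() -> None:
set_config(verbosity=1)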
"""
doc_template = """
{header}
Global configuration consists of a collection of parameters that can be applied in the
global scope. See :ref:`global_config` for the full list of parameters supported in
the global configuration.
{extra_note}
.. versionadded:: 1.4.0
"""
common_example = """
Example
-------
.. code-block:: python
import xgboost as xgb
# Show all messages, including ones pertaining to debugging
xgb.set_config(verbosity=2)
# Get current value of global configuration
# This is a dict containing all parameters in the global configuration,
# including 'verbosity'
config = xgb.get_config()
assert config['verbosity'] == 2
# Example of using the context manager xgb.config_context().
# The context manager will restore the previous value of the global
# configuration upon exiting.
with xgb.config_context(verbosity=0):
# Suppress warning caused by model generated with XGBoost version < 1.0.0
bst = xgb.Booster(model_file='./old_model.bin')
assert xgb.get_config()['verbosity'] == 2 # old value restored
Nested configuration context is also supported:
Example
-------
.. code-block:: python
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=2):
assert xgb.get_config()["verbosity"] == 2
xgb.set_config(verbosity=2)
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
"""
def none_to_str(value: Optional[str]) -> str:
return "" if value is None else value
def config_doc_decorator(func: _F) -> _F:
func.__doc__ = (
doc_template.format(
header=none_to_str(header), extra_note=none_to_str(extra_note)
)
+ none_to_str(parameters)
+ none_to_str(returns)
+ none_to_str(common_example)
+ none_to_str(see_also)
)
@wraps(func)
def wrap(*args: Any, **kwargs: Any) -> Any:
return func(*args, **kwargs)
return cast(_F, wrap)
return config_doc_decorator
@config_doc(
header="""
Set global configuration.
""",
parameters="""
Parameters
----------
new_config: Dict[str, Any]
Keyword arguments representing the parameters and their values
""",
)
def set_config(**new_config: Any) -> None:
not_none = {}
for k, v in new_config.items():
if v is not None:
not_none[k] = v
config = json.dumps(not_none)
_check_call(_LIB.XGBSetGlobalConfig(c_str(config)))
@config_doc(
header="""
Get current values of the global configuration.
""",
returns="""
Returns
-------
args: Dict[str, Any]
The list of global parameters and their values
""",
)
def get_config() -> Dict[str, Any]:
config_str = ctypes.c_char_p()
_check_call(_LIB.XGBGetGlobalConfig(ctypes.byref(config_str)))
value = config_str.value
assert value
config = json.loads(py_str(value))
return config
@contextmanager
@config_doc(
header="""
Context manager for global XGBoost configuration.
""",
parameters="""
Parameters
----------
new_config: Dict[str, Any]
Keyword arguments representing the parameters and their values
""",
extra_note="""
.. note::
All settings, not just those presently modified, will be returned to their
previous values when the context manager is exited. This is not thread-safe.
""",
see_also="""
See Also
--------
set_config: Set global XGBoost configuration
get_config: Get current values of the global configuration
""",
)
def config_context(**new_config: Any) -> Iterator[None]:
old_config = get_config().copy()
set_config(**new_config)
try:
yield
finally:
set_config(**old_config)
| 5,045
| 25.983957
| 90
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/federated.py
|
"""XGBoost Federated Learning related API."""
from .core import _LIB, XGBoostError, _check_call, build_info, c_str
def run_federated_server(
port: int,
world_size: int,
server_key_path: str = "",
server_cert_path: str = "",
client_cert_path: str = "",
) -> None:
"""Run the Federated Learning server.
Parameters
----------
port : int
The port to listen on.
world_size: int
The number of federated workers.
server_key_path: str
Path to the server private key file. SSL is turned off if empty.
server_cert_path: str
Path to the server certificate file. SSL is turned off if empty.
client_cert_path: str
Path to the client certificate file. SSL is turned off if empty.
"""
"""
if build_info()["USE_FEDERATED"]:
if not server_key_path or not server_cert_path or not client_cert_path:
_check_call(_LIB.XGBRunInsecureFederatedServer(port, world_size))
else:
_check_call(
_LIB.XGBRunFederatedServer(
port,
world_size,
c_str(server_key_path),
c_str(server_cert_path),
c_str(client_cert_path),
)
)
else:
raise XGBoostError(
"XGBoost needs to be built with the federated learning plugin "
"enabled in order to use this module"
)
| 1,447
| 30.478261
| 79
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/__init__.py
|
"""XGBoost: eXtreme Gradient Boosting library.
Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
"""
from . import tracker # noqa
from . import collective, dask, rabit
from .core import (
Booster,
DataIter,
DeviceQuantileDMatrix,
DMatrix,
QuantileDMatrix,
_py_version,
build_info,
)
from .tracker import RabitTracker # noqa
from .training import cv, train
try:
from .config import config_context, get_config, set_config
from .plotting import plot_importance, plot_tree, to_graphviz
from .sklearn import (
XGBClassifier,
XGBModel,
XGBRanker,
XGBRegressor,
XGBRFClassifier,
XGBRFRegressor,
)
except ImportError:
pass
__version__ = _py_version()
__all__ = [
# core
"DMatrix",
"DeviceQuantileDMatrix",
"QuantileDMatrix",
"Booster",
"DataIter",
"train",
"cv",
# utilities
"RabitTracker",
"build_info",
"plot_importance",
"plot_tree",
"to_graphviz",
"set_config",
"get_config",
"config_context",
# sklearn
"XGBModel",
"XGBClassifier",
"XGBRegressor",
"XGBRanker",
"XGBRFClassifier",
"XGBRFRegressor",
# dask
"dask",
# collective
"collective",
]
| 1,280
| 17.838235
| 73
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/_typing.py
|
# pylint: disable=protected-access
"""Shared typing definition."""
import ctypes
import os
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Sequence,
Type,
TypeVar,
Union,
)
# os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame/dt.Frame/
# cudf.DataFrame/cupy.array/dlpack
import numpy as np
DataType = Any
# xgboost accepts some other possible types in practice for historical reasons,
# but these are less well tested. For now we encourage users to pass a simple
# list of strings.
FeatureInfo = Sequence[str]
FeatureNames = FeatureInfo
FeatureTypes = FeatureInfo
BoosterParam = Union[List, Dict[str, Any]]  # should preferably be a sequence
ArrayLike = Any
PathLike = Union[str, os.PathLike]
CupyT = ArrayLike # maybe need a stub for cupy arrays
NumpyOrCupy = Any
NumpyDType = Union[str, Type[np.number]] # pylint: disable=invalid-name
PandasDType = Any # real type is pandas.core.dtypes.base.ExtensionDtype
FloatCompatible = Union[float, np.float32, np.float64]
# callables
FPreProcCallable = Callable
# ctypes
# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h
c_bst_ulong = ctypes.c_uint64 # pylint: disable=C0103
ModelIn = Union[str, bytearray, os.PathLike]
CTypeT = TypeVar(
"CTypeT",
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_float,
ctypes.c_uint,
ctypes.c_size_t,
)
# supported numeric types
CNumeric = Union[
ctypes.c_float,
ctypes.c_double,
ctypes.c_uint,
ctypes.c_uint64,
ctypes.c_int32,
ctypes.c_int64,
]
# c pointer types
if TYPE_CHECKING:
CStrPtr = ctypes._Pointer[ctypes.c_char]
CStrPptr = ctypes._Pointer[ctypes.c_char_p]
CFloatPtr = ctypes._Pointer[ctypes.c_float]
CNumericPtr = Union[
ctypes._Pointer[ctypes.c_float],
ctypes._Pointer[ctypes.c_double],
ctypes._Pointer[ctypes.c_uint],
ctypes._Pointer[ctypes.c_uint64],
ctypes._Pointer[ctypes.c_int32],
ctypes._Pointer[ctypes.c_int64],
]
else:
CStrPtr = ctypes._Pointer
CStrPptr = ctypes._Pointer
CFloatPtr = ctypes._Pointer
CNumericPtr = Union[
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
]
# template parameter
_T = TypeVar("_T")
_F = TypeVar("_F", bound=Callable[..., Any])
| 2,377
| 22.087379
| 90
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/compat.py
|
# pylint: disable= invalid-name, unused-import
"""For compatibility and optional dependencies."""
import importlib.util
import logging
import sys
import types
from typing import Any, Dict, List, Optional, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_str(x: bytes) -> str:
"""convert c string back to python string"""
return x.decode("utf-8") # type: ignore
def lazy_isinstance(instance: Any, module: str, name: str) -> bool:
"""Use string representation to identify a type."""
# Notice, we use .__class__ as opposed to type() in order
# to support object proxies such as weakref.proxy
cls = instance.__class__
is_same_module = cls.__module__ == module
has_same_name = cls.__name__ == name
return is_same_module and has_same_name
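# Illustrative sketch (not part of the original module): lazy_isinstance tests
# for a type by its qualified name without importing its module up front.
def _example_lazy_isinstance() -> bool:
    """Check whether an object is a pandas DataFrame by module and class name."""
    import pandas as pd
    df = pd.DataFrame({"a": [1, 2]})
    return lazy_isinstance(df, "pandas.core.frame", "DataFrame")  # True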
# pandas
try:
from pandas import DataFrame, MultiIndex, Series
from pandas import concat as pandas_concat
PANDAS_INSTALLED = True
except ImportError:
MultiIndex = object
DataFrame = object
Series = object
pandas_concat = None
PANDAS_INSTALLED = False
# sklearn
try:
from sklearn.base import BaseEstimator as XGBModelBase
from sklearn.base import ClassifierMixin as XGBClassifierBase
from sklearn.base import RegressorMixin as XGBRegressorBase
from sklearn.preprocessing import LabelEncoder
try:
from sklearn.model_selection import KFold as XGBKFold
from sklearn.model_selection import StratifiedKFold as XGBStratifiedKFold
except ImportError:
from sklearn.cross_validation import KFold as XGBKFold
from sklearn.cross_validation import StratifiedKFold as XGBStratifiedKFold
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
# used for compatibility without sklearn
XGBModelBase = object
XGBClassifierBase = object
XGBRegressorBase = object
LabelEncoder = object
XGBKFold = None
XGBStratifiedKFold = None
_logger = logging.getLogger(__name__)
def is_cudf_available() -> bool:
"""Check cuDF package available or not"""
if importlib.util.find_spec("cudf") is None:
return False
try:
import cudf
return True
except ImportError:
_logger.exception("Importing cuDF failed, use DMatrix instead of QDM")
return False
try:
import scipy.sparse as scipy_sparse
from scipy.sparse import csr_matrix as scipy_csr
except ImportError:
scipy_sparse = False
scipy_csr = object
def concat(value: Sequence[_T]) -> _T: # pylint: disable=too-many-return-statements
"""Concatenate row-wise."""
if isinstance(value[0], np.ndarray):
value_arr = cast(Sequence[np.ndarray], value)
return np.concatenate(value_arr, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.csr_matrix):
return scipy_sparse.vstack(value, format="csr")
if scipy_sparse and isinstance(value[0], scipy_sparse.csc_matrix):
return scipy_sparse.vstack(value, format="csc")
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
# other sparse format will be converted to CSR.
return scipy_sparse.vstack(value, format="csr")
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
return pandas_concat(value, axis=0)
if lazy_isinstance(value[0], "cudf.core.dataframe", "DataFrame") or lazy_isinstance(
value[0], "cudf.core.series", "Series"
):
from cudf import concat as CUDF_concat # pylint: disable=import-error
return CUDF_concat(value, axis=0)
from .data import _is_cupy_array
if _is_cupy_array(value[0]):
import cupy # pylint: disable=import-error
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
arr = cast(cupy.ndarray, v)
d_v = arr.device.id
assert d_v == d, "Concatenating arrays on different devices."
return cupy.concatenate(value, axis=0)
raise TypeError("Unknown type.")
# Modified from tensorflow with added caching. There's a `LazyLoader` in
# `importlib.util`, but it's unclear from its documentation how to use it. This one
# is easy to understand and works out of the box.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language governing
# permissions and limitations under the License.
class LazyLoader(types.ModuleType):
"""Lazily import a module, mainly to avoid pulling in large dependencies."""
def __init__(
self,
local_name: str,
parent_module_globals: Dict,
name: str,
warning: Optional[str] = None,
) -> None:
self._local_name = local_name
self._parent_module_globals = parent_module_globals
self._warning = warning
self.module: Optional[types.ModuleType] = None
super().__init__(name)
def _load(self) -> types.ModuleType:
"""Load the module and insert it into the parent's globals."""
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Emit a warning if one was specified
if self._warning:
logging.warning(self._warning)
# Make sure to only warn once.
self._warning = None
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item: str) -> Any:
if not self.module:
self.module = self._load()
return getattr(self.module, item)
def __dir__(self) -> List[str]:
if not self.module:
self.module = self._load()
return dir(self.module)
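# Illustrative sketch (not part of the original module): deferring a heavy
# import until first attribute access. The real module is only imported when
# an attribute is first looked up on the proxy.
def _example_lazy_loader() -> None:
    """Import numpy lazily through LazyLoader."""
    np_lazy = LazyLoader("np_lazy", globals(), "numpy")
    arr = np_lazy.zeros(3)  # the actual `import numpy` happens here
    assert arr.shape == (3,)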
| 6,485
| 32.261538
| 89
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/callback.py
|
"""Callback library containing training routines. See :doc:`Callback Functions
</python/callbacks>` for a quick introduction.
"""
import collections
import os
import pickle
from abc import ABC
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
import numpy
from . import collective
from .core import Booster, DMatrix, XGBoostError, _parse_eval_str
__all__ = [
"TrainingCallback",
"LearningRateScheduler",
"EarlyStopping",
"EvaluationMonitor",
"TrainingCheckPoint",
"CallbackContainer",
]
_Score = Union[float, Tuple[float, float]]
_ScoreList = Union[List[float], List[Tuple[float, float]]]
_Model = Any # real type is Union[Booster, CVPack]; need more work
# pylint: disable=unused-argument
class TrainingCallback(ABC):
"""Interface for training callback.
.. versionadded:: 1.3.0
"""
EvalsLog = Dict[str, Dict[str, _ScoreList]] # pylint: disable=invalid-name
def __init__(self) -> None:
pass
def before_training(self, model: _Model) -> _Model:
"""Run before training starts."""
return model
def after_training(self, model: _Model) -> _Model:
"""Run after training is finished."""
return model
def before_iteration(self, model: _Model, epoch: int, evals_log: EvalsLog) -> bool:
"""Run before each iteration. Return True when training should stop."""
return False
def after_iteration(self, model: _Model, epoch: int, evals_log: EvalsLog) -> bool:
"""Run after each iteration. Return True when training should stop."""
return False
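# Illustrative sketch (not part of the original module): a minimal custom
# callback built on the interface above. Returning True from after_iteration
# requests early termination; this hypothetical example stops after ten rounds.
class _ExampleStopAfterTen(TrainingCallback):
    """Stop training once ten boosting rounds have completed."""
    def after_iteration(
        self, model: _Model, epoch: int, evals_log: "TrainingCallback.EvalsLog"
    ) -> bool:
        return epoch >= 9  # epochs are 0-based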
def _aggcv(rlist: List[str]) -> List[Tuple[str, float, float]]:
# pylint: disable=invalid-name, too-many-locals
"""Aggregate cross-validation results."""
cvmap: Dict[Tuple[int, str], List[float]] = {}
idx = rlist[0].split()[0]
for line in rlist:
arr: List[str] = line.split()
assert idx == arr[0]
for metric_idx, it in enumerate(arr[1:]):
if not isinstance(it, str):
it = it.decode()
k, v = it.split(":")
if (metric_idx, k) not in cvmap:
cvmap[(metric_idx, k)] = []
cvmap[(metric_idx, k)].append(float(v))
msg = idx
results = []
for (_, name), s in sorted(cvmap.items(), key=lambda x: x[0][0]):
as_arr = numpy.array(s)
if not isinstance(msg, str):
msg = msg.decode()
mean, std = numpy.mean(as_arr), numpy.std(as_arr)
results.extend([(name, mean, std)])
return results
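# Illustrative sketch (not part of the original module) of _aggcv's expected
# input and output: each string is one fold's evaluation line, and the result
# carries a (name, mean, std) tuple per metric.
def _example_aggcv() -> None:
    """Aggregate two folds' results into per-metric mean and std."""
    folds = [
        "[0]\ttrain-rmse:0.50\ttest-rmse:0.60",
        "[0]\ttrain-rmse:0.40\ttest-rmse:0.80",
    ]
    agg = _aggcv(folds)
    # approximately [("train-rmse", 0.45, 0.05), ("test-rmse", 0.70, 0.10)]
    assert agg[0][0] == "train-rmse"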
# allreduce type
_ART = TypeVar("_ART")
def _allreduce_metric(score: _ART) -> _ART:
"""Helper function for computing customized metric in distributed
environment. Not strictly correct as many functions don't use mean value
as final result.
"""
world = collective.get_world_size()
assert world != 0
if world == 1:
return score
if isinstance(score, tuple): # has mean and stdv
raise ValueError(
"xgboost.cv function should not be used in distributed environment."
)
arr = numpy.array([score])
arr = collective.allreduce(arr, collective.Op.SUM) / world
return arr[0]
class CallbackContainer:
"""A special internal callback for invoking a list of other callbacks.
.. versionadded:: 1.3.0
"""
def __init__(
self,
callbacks: Sequence[TrainingCallback],
metric: Optional[Callable] = None,
output_margin: bool = True,
is_cv: bool = False,
) -> None:
self.callbacks = set(callbacks)
if metric is not None:
msg = (
"metric must be callable object for monitoring. For "
+ "builtin metrics, passing them in training parameter"
+ " will invoke monitor automatically."
)
assert callable(metric), msg
self.metric = metric
self.history: TrainingCallback.EvalsLog = collections.OrderedDict()
self._output_margin = output_margin
self.is_cv = is_cv
if self.is_cv:
self.aggregated_cv = None
def before_training(self, model: _Model) -> _Model:
"""Function called before training."""
for c in self.callbacks:
model = c.before_training(model=model)
msg = "before_training should return the model"
if self.is_cv:
assert isinstance(model.cvfolds, list), msg
else:
assert isinstance(model, Booster), msg
return model
def after_training(self, model: _Model) -> _Model:
"""Function called after training."""
for c in self.callbacks:
model = c.after_training(model=model)
msg = "after_training should return the model"
if self.is_cv:
assert isinstance(model.cvfolds, list), msg
else:
assert isinstance(model, Booster), msg
if not self.is_cv:
if model.attr("best_score") is not None:
model.best_score = float(cast(str, model.attr("best_score")))
model.best_iteration = int(cast(str, model.attr("best_iteration")))
else:
                # For compatibility with versions older than 1.4, these attributes are
                # added to the Python object even if early stopping is not used.
model.best_iteration = model.num_boosted_rounds() - 1
model.set_attr(best_iteration=str(model.best_iteration))
return model
def before_iteration(
self,
model: _Model,
epoch: int,
dtrain: DMatrix,
evals: Optional[List[Tuple[DMatrix, str]]],
) -> bool:
"""Function called before training iteration."""
return any(
c.before_iteration(model, epoch, self.history) for c in self.callbacks
)
def _update_history(
self,
score: Union[List[Tuple[str, float]], List[Tuple[str, float, float]]],
epoch: int,
) -> None:
for d in score:
name: str = d[0]
s: float = d[1]
if self.is_cv:
std = float(cast(Tuple[str, float, float], d)[2])
x: _Score = (s, std)
else:
x = s
            split_names = name.split("-")
            data_name = split_names[0]
            metric_name = "-".join(split_names[1:])
x = _allreduce_metric(x)
if data_name not in self.history:
self.history[data_name] = collections.OrderedDict()
data_history = self.history[data_name]
if metric_name not in data_history:
data_history[metric_name] = cast(_ScoreList, [])
metric_history = data_history[metric_name]
if self.is_cv:
cast(List[Tuple[float, float]], metric_history).append(
cast(Tuple[float, float], x)
)
else:
cast(List[float], metric_history).append(cast(float, x))
def after_iteration(
self,
model: _Model,
epoch: int,
dtrain: DMatrix,
evals: Optional[List[Tuple[DMatrix, str]]],
) -> bool:
"""Function called after training iteration."""
if self.is_cv:
scores = model.eval(epoch, self.metric, self._output_margin)
scores = _aggcv(scores)
self.aggregated_cv = scores
self._update_history(scores, epoch)
else:
evals = [] if evals is None else evals
for _, name in evals:
assert name.find("-") == -1, "Dataset name should not contain `-`"
score: str = model.eval_set(evals, epoch, self.metric, self._output_margin)
metric_score = _parse_eval_str(score)
self._update_history(metric_score, epoch)
ret = any(c.after_iteration(model, epoch, self.history) for c in self.callbacks)
return ret
class LearningRateScheduler(TrainingCallback):
"""Callback function for scheduling learning rate.
.. versionadded:: 1.3.0
Parameters
----------
learning_rates :
If it's a callable object, then it should accept an integer parameter
`epoch` and returns the corresponding learning rate. Otherwise it
should be a sequence like list or tuple with the same size of boosting
rounds.
"""
def __init__(
self, learning_rates: Union[Callable[[int], float], Sequence[float]]
) -> None:
assert callable(learning_rates) or isinstance(
learning_rates, collections.abc.Sequence
)
if callable(learning_rates):
self.learning_rates = learning_rates
else:
self.learning_rates = lambda epoch: cast(Sequence, learning_rates)[epoch]
super().__init__()
def after_iteration(
self, model: _Model, epoch: int, evals_log: TrainingCallback.EvalsLog
) -> bool:
model.set_param("learning_rate", self.learning_rates(epoch))
return False
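# Illustrative sketch (not part of the original module): an exponentially
# decaying learning rate, wired into training via the `callbacks` argument.
# The `params`, `dtrain` and xgboost.train call are assumptions for illustration.
def _example_lr_schedule() -> LearningRateScheduler:
    """Create a scheduler decaying the learning rate by 1% each round."""
    return LearningRateScheduler(lambda epoch: 0.3 * (0.99**epoch))
    # xgboost.train(params, dtrain, num_boost_round=100,
    #               callbacks=[_example_lr_schedule()])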
# pylint: disable=too-many-instance-attributes
class EarlyStopping(TrainingCallback):
"""Callback function for early stopping
.. versionadded:: 1.3.0
Parameters
----------
rounds :
Early stopping rounds.
metric_name :
Name of metric that is used for early stopping.
data_name :
Name of dataset that is used for early stopping.
maximize :
Whether to maximize evaluation metric. None means auto (discouraged).
save_best :
Whether training should return the best model or the last model.
min_delta :
Minimum absolute change in score to be qualified as an improvement.
.. versionadded:: 1.5.0
.. code-block:: python
es = xgboost.callback.EarlyStopping(
rounds=2,
min_delta=1e-3,
save_best=True,
maximize=False,
data_name="validation_0",
metric_name="mlogloss",
)
clf = xgboost.XGBClassifier(tree_method="gpu_hist", callbacks=[es])
X, y = load_digits(return_X_y=True)
clf.fit(X, y, eval_set=[(X, y)])
"""
# pylint: disable=too-many-arguments
def __init__(
self,
rounds: int,
metric_name: Optional[str] = None,
data_name: Optional[str] = None,
maximize: Optional[bool] = None,
save_best: Optional[bool] = False,
min_delta: float = 0.0,
) -> None:
self.data = data_name
self.metric_name = metric_name
self.rounds = rounds
self.save_best = save_best
self.maximize = maximize
self.stopping_history: TrainingCallback.EvalsLog = {}
self._min_delta = min_delta
if self._min_delta < 0:
raise ValueError("min_delta must be greater or equal to 0.")
self.current_rounds: int = 0
self.best_scores: dict = {}
self.starting_round: int = 0
super().__init__()
def before_training(self, model: _Model) -> _Model:
self.starting_round = model.num_boosted_rounds()
return model
def _update_rounds(
self, score: _Score, name: str, metric: str, model: _Model, epoch: int
) -> bool:
def get_s(value: _Score) -> float:
"""get score if it's cross validation history."""
return value[0] if isinstance(value, tuple) else value
def maximize(new: _Score, best: _Score) -> bool:
"""New score should be greater than the old one."""
return numpy.greater(get_s(new) - self._min_delta, get_s(best))
def minimize(new: _Score, best: _Score) -> bool:
"""New score should be smaller than the old one."""
return numpy.greater(get_s(best) - self._min_delta, get_s(new))
if self.maximize is None:
            # Just for compatibility with the old behavior before 1.3. We should
            # let the user decide.
maximize_metrics = (
"auc",
"aucpr",
"pre",
"pre@",
"map",
"ndcg",
"auc@",
"aucpr@",
"map@",
"ndcg@",
)
if metric != "mape" and any(metric.startswith(x) for x in maximize_metrics):
self.maximize = True
else:
self.maximize = False
if self.maximize:
improve_op = maximize
else:
improve_op = minimize
if not self.stopping_history: # First round
self.current_rounds = 0
self.stopping_history[name] = {}
self.stopping_history[name][metric] = cast(_ScoreList, [score])
self.best_scores[name] = {}
self.best_scores[name][metric] = [score]
model.set_attr(best_score=str(score), best_iteration=str(epoch))
elif not improve_op(score, self.best_scores[name][metric][-1]):
# Not improved
self.stopping_history[name][metric].append(score) # type: ignore
self.current_rounds += 1
else: # Improved
self.stopping_history[name][metric].append(score) # type: ignore
self.best_scores[name][metric].append(score)
record = self.stopping_history[name][metric][-1]
model.set_attr(best_score=str(record), best_iteration=str(epoch))
self.current_rounds = 0 # reset
if self.current_rounds >= self.rounds:
# Should stop
return True
return False
def after_iteration(
self, model: _Model, epoch: int, evals_log: TrainingCallback.EvalsLog
) -> bool:
epoch += self.starting_round # training continuation
msg = "Must have at least 1 validation dataset for early stopping."
assert len(evals_log.keys()) >= 1, msg
data_name = ""
if self.data:
for d, _ in evals_log.items():
if d == self.data:
data_name = d
if not data_name:
raise ValueError("No dataset named:", self.data)
else:
# Use the last one as default.
data_name = list(evals_log.keys())[-1]
assert isinstance(data_name, str) and data_name
data_log = evals_log[data_name]
        # Filter out scores that cannot be used for early stopping.
if self.metric_name:
metric_name = self.metric_name
else:
# Use last metric by default.
assert isinstance(data_log, collections.OrderedDict)
metric_name = list(data_log.keys())[-1]
score = data_log[metric_name][-1]
return self._update_rounds(score, data_name, metric_name, model, epoch)
def after_training(self, model: _Model) -> _Model:
try:
if self.save_best:
model = model[: int(model.attr("best_iteration")) + 1]
except XGBoostError as e:
raise XGBoostError(
"`save_best` is not applicable to current booster"
) from e
return model
class EvaluationMonitor(TrainingCallback):
"""Print the evaluation result at each iteration.
.. versionadded:: 1.3.0
Parameters
----------
metric :
Extra user defined metric.
rank :
Which worker should be used for printing the result.
period :
        How many epochs between printing.
show_stdv :
Used in cv to show standard deviation. Users should not specify it.
"""
def __init__(self, rank: int = 0, period: int = 1, show_stdv: bool = False) -> None:
self.printer_rank = rank
self.show_stdv = show_stdv
self.period = period
assert period > 0
        # last evaluation message; useful when early stopping and period are used together.
self._latest: Optional[str] = None
super().__init__()
def _fmt_metric(
self, data: str, metric: str, score: float, std: Optional[float]
) -> str:
if std is not None and self.show_stdv:
msg = f"\t{data + '-' + metric}:{score:.5f}+{std:.5f}"
else:
msg = f"\t{data + '-' + metric}:{score:.5f}"
return msg
def after_iteration(
self, model: _Model, epoch: int, evals_log: TrainingCallback.EvalsLog
) -> bool:
if not evals_log:
return False
msg: str = f"[{epoch}]"
if collective.get_rank() == self.printer_rank:
for data, metric in evals_log.items():
for metric_name, log in metric.items():
stdv: Optional[float] = None
if isinstance(log[-1], tuple):
score = log[-1][0]
stdv = log[-1][1]
else:
score = log[-1]
msg += self._fmt_metric(data, metric_name, score, stdv)
msg += "\n"
if (epoch % self.period) == 0 or self.period == 1:
collective.communicator_print(msg)
self._latest = None
else:
# There is skipped message
self._latest = msg
return False
def after_training(self, model: _Model) -> _Model:
if collective.get_rank() == self.printer_rank and self._latest is not None:
collective.communicator_print(self._latest)
return model
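# Illustrative sketch (not part of the original module): printing evaluation
# results every tenth round only. The `evals` names and the xgboost.train call
# are assumptions for illustration.
def _example_evaluation_monitor() -> EvaluationMonitor:
    """Create a monitor that prints once per ten epochs."""
    return EvaluationMonitor(period=10)
    # xgboost.train(params, dtrain, evals=[(dvalid, "valid")],
    #               callbacks=[_example_evaluation_monitor()])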
class TrainingCheckPoint(TrainingCallback):
"""Checkpointing operation.
.. versionadded:: 1.3.0
Parameters
----------
directory :
Output model directory.
name :
        Pattern of the output model file. Models will be saved as name_0.json,
        name_1.json, name_2.json, ...
as_pickle :
When set to True, all training parameters will be saved in pickle format, instead
of saving only the model.
iterations :
        Interval of checkpointing. Checkpointing is slow, so setting a larger number
        can reduce the performance hit.
"""
def __init__(
self,
directory: Union[str, os.PathLike],
name: str = "model",
as_pickle: bool = False,
iterations: int = 100,
) -> None:
self._path = os.fspath(directory)
self._name = name
self._as_pickle = as_pickle
self._iterations = iterations
self._epoch = 0
super().__init__()
def after_iteration(
self, model: _Model, epoch: int, evals_log: TrainingCallback.EvalsLog
) -> bool:
if self._epoch == self._iterations:
path = os.path.join(
self._path,
self._name
+ "_"
+ str(epoch)
+ (".pkl" if self._as_pickle else ".json"),
)
self._epoch = 0
if collective.get_rank() == 0:
if self._as_pickle:
with open(path, "wb") as fd:
pickle.dump(model, fd)
else:
model.save_model(path)
self._epoch += 1
return False
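# Illustrative sketch (not part of the original module): writing a JSON model
# checkpoint every 25 rounds to a hypothetical "./checkpoints" directory,
# which must already exist.
def _example_checkpointing() -> TrainingCheckPoint:
    """Create a checkpoint callback saving model_<epoch>.json periodically."""
    return TrainingCheckPoint(directory="./checkpoints", name="model", iterations=25)
    # xgboost.train(params, dtrain, num_boost_round=100,
    #               callbacks=[_example_checkpointing()])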
| 19,301
| 32.164948
| 89
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/dask.py
|
# pylint: disable=too-many-arguments, too-many-locals
# pylint: disable=missing-class-docstring, invalid-name
# pylint: disable=too-many-lines
# pylint: disable=too-few-public-methods
# pylint: disable=import-error
"""
Dask extensions for distributed training
----------------------------------------
See :doc:`Distributed XGBoost with Dask </tutorials/dask>` for simple tutorial. Also
:doc:`/python/dask-examples/index` for some examples.
There are two sets of APIs in this module: one is the functional API, including
the ``train`` and ``predict`` methods; the other is the stateful Scikit-Learn
wrapper inherited from the single-node Scikit-Learn interface.
The implementation is heavily influenced by dask_xgboost:
https://github.com/dask/dask-xgboost
Optional dask configuration
===========================
- **xgboost.scheduler_address**: Specify the scheduler address, see :ref:`tracker-ip`.
.. versionadded:: 1.6.0
.. code-block:: python
dask.config.set({"xgboost.scheduler_address": "192.0.0.100"})
# We can also specify the port.
dask.config.set({"xgboost.scheduler_address": "192.0.0.100:12345"})
"""
import collections
import logging
import platform
import socket
import warnings
from collections import defaultdict
from contextlib import contextmanager
from functools import partial, update_wrapper
from threading import Thread
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
Generator,
List,
Optional,
Sequence,
Set,
Tuple,
TypedDict,
TypeVar,
Union,
)
import numpy
from . import collective, config
from ._typing import _T, FeatureNames, FeatureTypes, ModelIn
from .callback import TrainingCallback
from .compat import DataFrame, LazyLoader, concat, lazy_isinstance
from .core import (
Booster,
DataIter,
DMatrix,
Metric,
Objective,
QuantileDMatrix,
_check_distributed_params,
_deprecate_positional_args,
_expect,
)
from .data import _is_cudf_ser, _is_cupy_array
from .sklearn import (
XGBClassifier,
XGBClassifierBase,
XGBClassifierMixIn,
XGBModel,
XGBRanker,
XGBRankerMixIn,
XGBRegressorBase,
_can_use_qdm,
_check_rf_callback,
_cls_predict_proba,
_objective_decorator,
_wrap_evaluation_matrices,
xgboost_model_doc,
)
from .tracker import RabitTracker, get_host_ip
from .training import train as worker_train
if TYPE_CHECKING:
import dask
import distributed
from dask import array as da
from dask import dataframe as dd
else:
dd = LazyLoader("dd", globals(), "dask.dataframe")
da = LazyLoader("da", globals(), "dask.array")
dask = LazyLoader("dask", globals(), "dask")
distributed = LazyLoader("distributed", globals(), "dask.distributed")
_DaskCollection = Union["da.Array", "dd.DataFrame", "dd.Series"]
_DataT = Union["da.Array", "dd.DataFrame"] # do not use series as predictor
TrainReturnT = TypedDict(
"TrainReturnT",
{
"booster": Booster,
"history": Dict,
},
)
__all__ = [
"CommunicatorContext",
"DaskDMatrix",
"DaskQuantileDMatrix",
"DaskXGBRegressor",
"DaskXGBClassifier",
"DaskXGBRanker",
"DaskXGBRFRegressor",
"DaskXGBRFClassifier",
"train",
"predict",
"inplace_predict",
]
# TODOs:
# - CV
#
# Note for developers:
#
# As of writing, asyncio is still a new feature of Python and in-depth documentation
# is rare. Luckily, the best examples of various asyncio tricks are in dask. Classes
# like Client and Worker are awaitable. Some general rules for the implementation here:
#
# - Synchronous world is different from asynchronous one, and they don't mix well.
# - Write everything with async, then use distributed Client sync function to do the
# switch.
# - Use Any as the type hint when the return value can be a union of Awaitable and a
#   plain value. This is because Client.sync can return both types depending on
#   context. Right now there's no good way to silence:
#
# await train(...)
#
# if train returns an Union type.
LOGGER = logging.getLogger("[xgboost.dask]")
def _try_start_tracker(
n_workers: int,
addrs: List[Union[Optional[str], Optional[Tuple[str, int]]]],
) -> Dict[str, Union[int, str]]:
env: Dict[str, Union[int, str]] = {"DMLC_NUM_WORKER": n_workers}
try:
if isinstance(addrs[0], tuple):
host_ip = addrs[0][0]
port = addrs[0][1]
rabit_tracker = RabitTracker(
host_ip=get_host_ip(host_ip),
n_workers=n_workers,
port=port,
use_logger=False,
)
else:
addr = addrs[0]
assert isinstance(addr, str) or addr is None
host_ip = get_host_ip(addr)
rabit_tracker = RabitTracker(
host_ip=host_ip, n_workers=n_workers, use_logger=False, sortby="task"
)
env.update(rabit_tracker.worker_envs())
rabit_tracker.start(n_workers)
thread = Thread(target=rabit_tracker.join)
thread.daemon = True
thread.start()
except socket.error as e:
if len(addrs) < 2 or e.errno != 99:
raise
LOGGER.warning(
"Failed to bind address '%s', trying to use '%s' instead.",
str(addrs[0]),
str(addrs[1]),
)
env = _try_start_tracker(n_workers, addrs[1:])
return env
def _start_tracker(
n_workers: int,
addr_from_dask: Optional[str],
addr_from_user: Optional[Tuple[str, int]],
) -> Dict[str, Union[int, str]]:
"""Start Rabit tracker, recurse to try different addresses."""
env = _try_start_tracker(n_workers, [addr_from_user, addr_from_dask])
return env
def _assert_dask_support() -> None:
try:
import dask # pylint: disable=W0621,W0611
except ImportError as e:
raise ImportError(
"Dask needs to be installed in order to use this module"
) from e
if platform.system() == "Windows":
msg = "Windows is not officially supported for dask/xgboost,"
msg += " contribution are welcomed."
LOGGER.warning(msg)
class CommunicatorContext(collective.CommunicatorContext):
"""A context controlling collective communicator initialization and finalization."""
def __init__(self, **args: Any) -> None:
super().__init__(**args)
worker = distributed.get_worker()
with distributed.worker_client() as client:
info = client.scheduler_info()
w = info["workers"][worker.address]
wid = w["id"]
        # We use the task ID for rank assignment, which makes the RABIT rank consistent
        # with the dask worker ID (but not identical, since the task ID is a string and
        # "10" sorts before "2"). This outsources the rank assignment to dask and
        # prevents non-deterministic issues.
self.args["DMLC_TASK_ID"] = f"[xgboost.dask-{wid}]:" + str(worker.address)
def dconcat(value: Sequence[_T]) -> _T:
"""Concatenate sequence of partitions."""
try:
return concat(value)
except TypeError:
return dd.multi.concat(list(value), axis=0)
def _xgb_get_client(client: Optional["distributed.Client"]) -> "distributed.Client":
"""Simple wrapper around testing None."""
if not isinstance(client, (type(distributed.get_client()), type(None))):
raise TypeError(
_expect([type(distributed.get_client()), type(None)], type(client))
)
ret = distributed.get_client() if client is None else client
return ret
# From the implementation point of view, DaskDMatrix complicates a lot of
# things. A large portion of the code base is about syncing and extracting
# data from DaskDMatrix. But having an independent data structure gives us a
# chance to perform some specialized optimizations, like building the histogram
# index directly.
class DaskDMatrix:
# pylint: disable=too-many-instance-attributes
"""DMatrix holding on references to Dask DataFrame or Dask Array. Constructing a
`DaskDMatrix` forces all lazy computation to be carried out. Wait for the input
data explicitly if you want to see actual computation of constructing `DaskDMatrix`.
See doc for :py:obj:`xgboost.DMatrix` constructor for other parameters. DaskDMatrix
accepts only dask collection.
.. note::
DaskDMatrix does not repartition or move data between workers. It's
the caller's responsibility to balance the data.
.. versionadded:: 1.0.0
Parameters
----------
client :
        Specify the dask client used for training. Use the default client returned
        from dask if it's set to None.
"""
@_deprecate_positional_args
def __init__(
self,
client: "distributed.Client",
data: _DataT,
label: Optional[_DaskCollection] = None,
*,
weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
missing: Optional[float] = None,
silent: bool = False, # pylint: disable=unused-argument
feature_names: Optional[FeatureNames] = None,
feature_types: Optional[FeatureTypes] = None,
group: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
enable_categorical: bool = False,
) -> None:
_assert_dask_support()
client = _xgb_get_client(client)
self.feature_names = feature_names
self.feature_types = feature_types
self.missing = missing if missing is not None else numpy.nan
self.enable_categorical = enable_categorical
if qid is not None and weight is not None:
raise NotImplementedError("per-group weight is not implemented.")
if group is not None:
raise NotImplementedError(
"group structure is not implemented, use qid instead."
)
if len(data.shape) != 2:
raise ValueError(f"Expecting 2 dimensional input, got: {data.shape}")
if not isinstance(data, (dd.DataFrame, da.Array)):
raise TypeError(_expect((dd.DataFrame, da.Array), type(data)))
if not isinstance(label, (dd.DataFrame, da.Array, dd.Series, type(None))):
raise TypeError(_expect((dd.DataFrame, da.Array, dd.Series), type(label)))
self._n_cols = data.shape[1]
assert isinstance(self._n_cols, int)
self.worker_map: Dict[str, List[distributed.Future]] = defaultdict(list)
self.is_quantile: bool = False
self._init = client.sync(
self._map_local_data,
client,
data,
label=label,
weights=weight,
base_margin=base_margin,
qid=qid,
feature_weights=feature_weights,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound,
)
def __await__(self) -> Generator:
return self._init.__await__()
async def _map_local_data(
self,
client: "distributed.Client",
data: _DataT,
label: Optional[_DaskCollection] = None,
weights: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None,
) -> "DaskDMatrix":
"""Obtain references to local data."""
from dask.delayed import Delayed
def inconsistent(
left: List[Any], left_name: str, right: List[Any], right_name: str
) -> str:
msg = (
f"Partitions between {left_name} and {right_name} are not "
f"consistent: {len(left)} != {len(right)}. "
f"Please try to repartition/rechunk your data."
)
return msg
def check_columns(parts: numpy.ndarray) -> None:
# x is required to be 2 dim in __init__
assert parts.ndim == 1 or parts.shape[1], (
"Data should be"
" partitioned by row. To avoid this specify the number"
" of columns for your dask Array explicitly. e.g."
" chunks=(partition_size, X.shape[1])"
)
def to_delayed(d: _DaskCollection) -> List[Delayed]:
"""Breaking data into partitions, a trick borrowed from dask_xgboost. `to_delayed`
downgrades high-level objects into numpy or pandas equivalents .
"""
d = client.persist(d)
delayed_obj = d.to_delayed()
if isinstance(delayed_obj, numpy.ndarray):
# da.Array returns an array to delayed objects
check_columns(delayed_obj)
delayed_list: List[Delayed] = delayed_obj.flatten().tolist()
else:
# dd.DataFrame
delayed_list = delayed_obj
return delayed_list
def flatten_meta(meta: Optional[_DaskCollection]) -> Optional[List[Delayed]]:
if meta is not None:
meta_parts: List[Delayed] = to_delayed(meta)
return meta_parts
return None
X_parts = to_delayed(data)
y_parts = flatten_meta(label)
w_parts = flatten_meta(weights)
margin_parts = flatten_meta(base_margin)
qid_parts = flatten_meta(qid)
ll_parts = flatten_meta(label_lower_bound)
lu_parts = flatten_meta(label_upper_bound)
parts: Dict[str, List[Delayed]] = {"data": X_parts}
def append_meta(m_parts: Optional[List[Delayed]], name: str) -> None:
if m_parts is not None:
assert len(X_parts) == len(m_parts), inconsistent(
X_parts, "X", m_parts, name
)
parts[name] = m_parts
append_meta(y_parts, "label")
append_meta(w_parts, "weight")
append_meta(margin_parts, "base_margin")
append_meta(qid_parts, "qid")
append_meta(ll_parts, "label_lower_bound")
append_meta(lu_parts, "label_upper_bound")
# At this point, `parts` looks like:
# [(x0, x1, ..), (y0, y1, ..), ..] in delayed form
# turn into list of dictionaries.
packed_parts: List[Dict[str, Delayed]] = []
for i in range(len(X_parts)):
part_dict: Dict[str, Delayed] = {}
for key, value in parts.items():
part_dict[key] = value[i]
packed_parts.append(part_dict)
# delay the zipped result
# pylint: disable=no-member
delayed_parts: List[Delayed] = list(map(dask.delayed, packed_parts))
# At this point, the mental model should look like:
# [(x0, y0, ..), (x1, y1, ..), ..] in delayed form
# convert delayed objects into futures and make sure they are realized
fut_parts: List[distributed.Future] = client.compute(delayed_parts)
await distributed.wait(fut_parts) # async wait for parts to be computed
# maybe we can call dask.align_partitions here to ease the partition alignment?
for part in fut_parts:
# Each part is [x0, y0, w0, ...] in future form.
assert part.status == "finished", part.status
# Preserving the partition order for prediction.
self.partition_order = {}
for i, part in enumerate(fut_parts):
self.partition_order[part.key] = i
key_to_partition = {part.key: part for part in fut_parts}
who_has: Dict[str, Tuple[str, ...]] = await client.scheduler.who_has(
keys=[part.key for part in fut_parts]
)
worker_map: Dict[str, List[distributed.Future]] = defaultdict(list)
for key, workers in who_has.items():
worker_map[next(iter(workers))].append(key_to_partition[key])
self.worker_map = worker_map
if feature_weights is None:
self.feature_weights = None
else:
self.feature_weights = await client.compute(feature_weights).result()
return self
def _create_fn_args(self, worker_addr: str) -> Dict[str, Any]:
"""Create a dictionary of objects that can be pickled for function
arguments.
"""
return {
"feature_names": self.feature_names,
"feature_types": self.feature_types,
"feature_weights": self.feature_weights,
"missing": self.missing,
"enable_categorical": self.enable_categorical,
"parts": self.worker_map.get(worker_addr, None),
"is_quantile": self.is_quantile,
}
def num_col(self) -> int:
"""Get the number of columns (features) in the DMatrix.
Returns
-------
number of columns
"""
return self._n_cols
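# Illustrative sketch (not part of the original module): building a DaskDMatrix
# from dask arrays. `client` is an assumed distributed.Client connected to a
# running cluster; construction eagerly computes the lazy collections.
def _example_build_dask_dmatrix(client: "distributed.Client") -> DaskDMatrix:
    """Construct a DaskDMatrix from random, chunked dask arrays."""
    X = da.random.random((1000, 10), chunks=(100, 10))
    y = da.random.random(1000, chunks=100)
    return DaskDMatrix(client, X, y)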
_MapRetT = TypeVar("_MapRetT")
async def map_worker_partitions(
client: Optional["distributed.Client"],
func: Callable[..., _MapRetT],
*refs: Any,
workers: Sequence[str],
) -> List[_MapRetT]:
"""Map a function onto partitions of each worker."""
# Note for function purity:
    # XGBoost is deterministic in most cases, which means the train function is
    # supposed to be idempotent. One known exception is gblinear with the shotgun
    # updater. We haven't been able to do a full verification, so we keep
    # `pure=False` here.
client = _xgb_get_client(client)
futures = []
for addr in workers:
args = []
for ref in refs:
if isinstance(ref, DaskDMatrix):
# pylint: disable=protected-access
args.append(ref._create_fn_args(addr))
else:
args.append(ref)
fut = client.submit(
func, *args, pure=False, workers=[addr], allow_other_workers=False
)
futures.append(fut)
results = await client.gather(futures)
return results
_DataParts = List[Dict[str, Any]]
def _get_worker_parts(list_of_parts: _DataParts) -> Dict[str, List[Any]]:
assert isinstance(list_of_parts, list)
result: Dict[str, List[Any]] = {}
def append(i: int, name: str) -> None:
if name in list_of_parts[i]:
part = list_of_parts[i][name]
else:
part = None
if part is not None:
if name not in result:
result[name] = []
result[name].append(part)
for i, _ in enumerate(list_of_parts):
append(i, "data")
append(i, "label")
append(i, "weight")
append(i, "base_margin")
append(i, "qid")
append(i, "label_lower_bound")
append(i, "label_upper_bound")
return result
class DaskPartitionIter(DataIter): # pylint: disable=R0902
"""A data iterator for `DaskQuantileDMatrix`."""
def __init__(
self,
data: List[Any],
label: Optional[List[Any]] = None,
weight: Optional[List[Any]] = None,
base_margin: Optional[List[Any]] = None,
qid: Optional[List[Any]] = None,
label_lower_bound: Optional[List[Any]] = None,
label_upper_bound: Optional[List[Any]] = None,
feature_names: Optional[FeatureNames] = None,
feature_types: Optional[Union[Any, List[Any]]] = None,
feature_weights: Optional[Any] = None,
) -> None:
self._data = data
self._label = label
self._weight = weight
self._base_margin = base_margin
self._qid = qid
self._label_lower_bound = label_lower_bound
self._label_upper_bound = label_upper_bound
self._feature_names = feature_names
self._feature_types = feature_types
self._feature_weights = feature_weights
assert isinstance(self._data, collections.abc.Sequence)
types = (collections.abc.Sequence, type(None))
assert isinstance(self._label, types)
assert isinstance(self._weight, types)
assert isinstance(self._base_margin, types)
assert isinstance(self._label_lower_bound, types)
assert isinstance(self._label_upper_bound, types)
self._iter = 0 # set iterator to 0
super().__init__()
def _get(self, attr: str) -> Optional[Any]:
if getattr(self, attr) is not None:
return getattr(self, attr)[self._iter]
return None
def data(self) -> Any:
"""Utility function for obtaining current batch of data."""
return self._data[self._iter]
def reset(self) -> None:
"""Reset the iterator"""
self._iter = 0
def next(self, input_data: Callable) -> int:
"""Yield next batch of data"""
if self._iter == len(self._data):
# Return 0 when there's no more batch.
return 0
input_data(
data=self.data(),
label=self._get("_label"),
weight=self._get("_weight"),
group=None,
qid=self._get("_qid"),
base_margin=self._get("_base_margin"),
label_lower_bound=self._get("_label_lower_bound"),
label_upper_bound=self._get("_label_upper_bound"),
feature_names=self._feature_names,
feature_types=self._feature_types,
feature_weights=self._feature_weights,
)
self._iter += 1
return 1
class DaskQuantileDMatrix(DaskDMatrix):
"""A dask version of :py:class:`QuantileDMatrix`."""
@_deprecate_positional_args
def __init__(
self,
client: "distributed.Client",
data: _DataT,
label: Optional[_DaskCollection] = None,
*,
weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
missing: Optional[float] = None,
        silent: bool = False,  # pylint: disable=unused-argument
feature_names: Optional[FeatureNames] = None,
feature_types: Optional[Union[Any, List[Any]]] = None,
max_bin: Optional[int] = None,
ref: Optional[DMatrix] = None,
group: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
label_lower_bound: Optional[_DaskCollection] = None,
label_upper_bound: Optional[_DaskCollection] = None,
feature_weights: Optional[_DaskCollection] = None,
enable_categorical: bool = False,
) -> None:
super().__init__(
client=client,
data=data,
label=label,
weight=weight,
base_margin=base_margin,
group=group,
qid=qid,
label_lower_bound=label_lower_bound,
label_upper_bound=label_upper_bound,
missing=missing,
silent=silent,
feature_weights=feature_weights,
feature_names=feature_names,
feature_types=feature_types,
enable_categorical=enable_categorical,
)
self.max_bin = max_bin
self.is_quantile = True
self._ref: Optional[int] = id(ref) if ref is not None else None
def _create_fn_args(self, worker_addr: str) -> Dict[str, Any]:
args = super()._create_fn_args(worker_addr)
args["max_bin"] = self.max_bin
if self._ref is not None:
args["ref"] = self._ref
return args
class DaskDeviceQuantileDMatrix(DaskQuantileDMatrix):
"""Use `DaskQuantileDMatrix` instead.
.. deprecated:: 1.7.0
.. versionadded:: 1.2.0
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
warnings.warn("Please use `DaskQuantileDMatrix` instead.", FutureWarning)
super().__init__(*args, **kwargs)
def _create_quantile_dmatrix(
feature_names: Optional[FeatureNames],
feature_types: Optional[Union[Any, List[Any]]],
feature_weights: Optional[Any],
missing: float,
nthread: int,
parts: Optional[_DataParts],
max_bin: int,
enable_categorical: bool,
ref: Optional[DMatrix] = None,
) -> QuantileDMatrix:
worker = distributed.get_worker()
if parts is None:
msg = f"worker {worker.address} has an empty DMatrix."
LOGGER.warning(msg)
d = QuantileDMatrix(
numpy.empty((0, 0)),
feature_names=feature_names,
feature_types=feature_types,
max_bin=max_bin,
ref=ref,
enable_categorical=enable_categorical,
)
return d
unzipped_dict = _get_worker_parts(parts)
it = DaskPartitionIter(
**unzipped_dict,
feature_types=feature_types,
feature_names=feature_names,
feature_weights=feature_weights,
)
dmatrix = QuantileDMatrix(
it,
missing=missing,
nthread=nthread,
max_bin=max_bin,
ref=ref,
enable_categorical=enable_categorical,
)
return dmatrix
def _create_dmatrix(
feature_names: Optional[FeatureNames],
feature_types: Optional[Union[Any, List[Any]]],
feature_weights: Optional[Any],
missing: float,
nthread: int,
enable_categorical: bool,
parts: Optional[_DataParts],
) -> DMatrix:
"""Get data that local to worker from DaskDMatrix.
Returns
-------
A DMatrix object.
"""
worker = distributed.get_worker()
list_of_parts = parts
if list_of_parts is None:
msg = f"worker {worker.address} has an empty DMatrix."
LOGGER.warning(msg)
d = DMatrix(
numpy.empty((0, 0)),
feature_names=feature_names,
feature_types=feature_types,
enable_categorical=enable_categorical,
)
return d
T = TypeVar("T")
def concat_or_none(data: Sequence[Optional[T]]) -> Optional[T]:
if any(part is None for part in data):
return None
return dconcat(data)
unzipped_dict = _get_worker_parts(list_of_parts)
concated_dict: Dict[str, Any] = {}
for key, value in unzipped_dict.items():
v = concat_or_none(value)
concated_dict[key] = v
dmatrix = DMatrix(
**concated_dict,
missing=missing,
feature_names=feature_names,
feature_types=feature_types,
nthread=nthread,
enable_categorical=enable_categorical,
feature_weights=feature_weights,
)
return dmatrix
def _dmatrix_from_list_of_parts(is_quantile: bool, **kwargs: Any) -> DMatrix:
if is_quantile:
return _create_quantile_dmatrix(**kwargs)
return _create_dmatrix(**kwargs)
async def _get_rabit_args(
n_workers: int, dconfig: Optional[Dict[str, Any]], client: "distributed.Client"
) -> Dict[str, Union[str, int]]:
"""Get rabit context arguments from data distribution in DaskDMatrix."""
# There are 3 possible different addresses:
# 1. Provided by user via dask.config
# 2. Guessed by xgboost `get_host_ip` function
# 3. From dask scheduler
# We try 1 and 3 if 1 is available, otherwise 2 and 3.
valid_config = ["scheduler_address"]
# See if user config is available
host_ip: Optional[str] = None
port: int = 0
if dconfig is not None:
for k in dconfig:
if k not in valid_config:
raise ValueError(f"Unknown configuration: {k}")
host_ip = dconfig.get("scheduler_address", None)
if host_ip is not None and host_ip.startswith("[") and host_ip.endswith("]"):
# convert dask bracket format to proper IPv6 address.
host_ip = host_ip[1:-1]
if host_ip is not None:
try:
host_ip, port = distributed.comm.get_address_host_port(host_ip)
except ValueError:
pass
if host_ip is not None:
user_addr = (host_ip, port)
else:
user_addr = None
# Try address from dask scheduler, this might not work, see
# https://github.com/dask/dask-xgboost/pull/40
try:
sched_addr = distributed.comm.get_address_host(client.scheduler.address)
sched_addr = sched_addr.strip("/:")
except Exception: # pylint: disable=broad-except
sched_addr = None
# make sure all workers are online so that we can obtain reliable scheduler_info
await client.wait_for_workers(n_workers) # type: ignore
env = await client.run_on_scheduler(
_start_tracker, n_workers, sched_addr, user_addr
)
return env
def _get_dask_config() -> Optional[Dict[str, Any]]:
return dask.config.get("xgboost", default=None)
# train and predict methods are supposed to be "functional", which fits the
# dask paradigm. But as a side effect, `evals_result` from the single-node API
# is no longer supported, since it mutates the input parameter and it's not
# intuitive to sync the mutated result. Therefore, a dictionary containing the
# evaluation history is returned instead.
def _get_workers_from_data(
dtrain: DaskDMatrix, evals: Optional[Sequence[Tuple[DaskDMatrix, str]]]
) -> List[str]:
X_worker_map: Set[str] = set(dtrain.worker_map.keys())
if evals:
for e in evals:
assert len(e) == 2
assert isinstance(e[0], DaskDMatrix) and isinstance(e[1], str)
if e[0] is dtrain:
continue
worker_map = set(e[0].worker_map.keys())
X_worker_map = X_worker_map.union(worker_map)
return list(X_worker_map)
def _filter_empty(
booster: Booster, local_history: TrainingCallback.EvalsLog, is_valid: bool
) -> Optional[TrainReturnT]:
n_workers = collective.get_world_size()
non_empty = numpy.zeros(shape=(n_workers,), dtype=numpy.int32)
rank = collective.get_rank()
non_empty[rank] = int(is_valid)
non_empty = collective.allreduce(non_empty, collective.Op.SUM)
non_empty = non_empty.astype(bool)
ret: Optional[TrainReturnT] = {
"booster": booster,
"history": local_history,
}
for i in range(non_empty.size):
# This is the first valid worker
if non_empty[i] and i == rank:
return ret
if non_empty[i]:
return None
raise ValueError("None of the workers can provide a valid result.")
async def _train_async(
client: "distributed.Client",
global_config: Dict[str, Any],
dconfig: Optional[Dict[str, Any]],
params: Dict[str, Any],
dtrain: DaskDMatrix,
num_boost_round: int,
evals: Optional[Sequence[Tuple[DaskDMatrix, str]]],
obj: Optional[Objective],
feval: Optional[Metric],
early_stopping_rounds: Optional[int],
verbose_eval: Union[int, bool],
xgb_model: Optional[Booster],
callbacks: Optional[Sequence[TrainingCallback]],
custom_metric: Optional[Metric],
) -> Optional[TrainReturnT]:
workers = _get_workers_from_data(dtrain, evals)
_rabit_args = await _get_rabit_args(len(workers), dconfig, client)
_check_distributed_params(params)
def dispatched_train(
parameters: Dict,
rabit_args: Dict[str, Union[str, int]],
train_id: int,
evals_name: List[str],
evals_id: List[int],
train_ref: dict,
*refs: dict,
) -> Optional[TrainReturnT]:
worker = distributed.get_worker()
local_param = parameters.copy()
n_threads = 0
# dask worker nthreads, "state" is available in 2022.6.1
dwnt = worker.state.nthreads if hasattr(worker, "state") else worker.nthreads
for p in ["nthread", "n_jobs"]:
if (
local_param.get(p, None) is not None
and local_param.get(p, dwnt) != dwnt
):
LOGGER.info("Overriding `nthreads` defined in dask worker.")
n_threads = local_param[p]
break
if n_threads == 0 or n_threads is None:
n_threads = dwnt
local_param.update({"nthread": n_threads, "n_jobs": n_threads})
local_history: TrainingCallback.EvalsLog = {}
with CommunicatorContext(**rabit_args), config.config_context(**global_config):
Xy = _dmatrix_from_list_of_parts(**train_ref, nthread=n_threads)
evals: List[Tuple[DMatrix, str]] = []
for i, ref in enumerate(refs):
if evals_id[i] == train_id:
evals.append((Xy, evals_name[i]))
continue
if ref.get("ref", None) is not None:
if ref["ref"] != train_id:
raise ValueError(
"The training DMatrix should be used as a reference"
" to evaluation `QuantileDMatrix`."
)
del ref["ref"]
eval_Xy = _dmatrix_from_list_of_parts(
**ref, nthread=n_threads, ref=Xy
)
else:
eval_Xy = _dmatrix_from_list_of_parts(**ref, nthread=n_threads)
evals.append((eval_Xy, evals_name[i]))
booster = worker_train(
params=local_param,
dtrain=Xy,
num_boost_round=num_boost_round,
evals_result=local_history,
evals=evals if len(evals) != 0 else None,
obj=obj,
feval=feval,
custom_metric=custom_metric,
early_stopping_rounds=early_stopping_rounds,
verbose_eval=verbose_eval,
xgb_model=xgb_model,
callbacks=callbacks,
)
# Don't return the boosters from empty workers. It's quite difficult to
        # guarantee everything is in sync in the presence of empty workers,
# especially with complex objectives like quantile.
return _filter_empty(booster, local_history, Xy.num_row() != 0)
async with distributed.MultiLock(workers, client):
if evals is not None:
evals_data = [d for d, n in evals]
evals_name = [n for d, n in evals]
evals_id = [id(d) for d in evals_data]
else:
evals_data = []
evals_name = []
evals_id = []
results = await map_worker_partitions(
client,
dispatched_train,
# extra function parameters
params,
_rabit_args,
id(dtrain),
evals_name,
evals_id,
*([dtrain] + evals_data),
# workers to be used for training
workers=workers,
)
return list(filter(lambda ret: ret is not None, results))[0]
@_deprecate_positional_args
def train( # pylint: disable=unused-argument
client: "distributed.Client",
params: Dict[str, Any],
dtrain: DaskDMatrix,
num_boost_round: int = 10,
*,
evals: Optional[Sequence[Tuple[DaskDMatrix, str]]] = None,
obj: Optional[Objective] = None,
feval: Optional[Metric] = None,
early_stopping_rounds: Optional[int] = None,
xgb_model: Optional[Booster] = None,
verbose_eval: Union[int, bool] = True,
callbacks: Optional[Sequence[TrainingCallback]] = None,
custom_metric: Optional[Metric] = None,
) -> Any:
"""Train XGBoost model.
.. versionadded:: 1.0.0
.. note::
Other parameters are the same as :py:func:`xgboost.train` except for
`evals_result`, which is returned as part of function return value instead of
argument.
Parameters
----------
client :
        Specify the dask client used for training. Use the default client returned
        from dask if it's set to None.
Returns
-------
results: dict
        A dictionary containing the trained booster and the evaluation history. The
        `history` field is the same as `evals_result` from `xgboost.train`.
.. code-block:: python
{'booster': xgboost.Booster,
'history': {'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}}
"""
_assert_dask_support()
client = _xgb_get_client(client)
args = locals()
return client.sync(
_train_async,
global_config=config.get_config(),
dconfig=_get_dask_config(),
**args,
)
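# Illustrative sketch (not part of the original module): a minimal end-to-end
# training call. The cluster, client and data are assumptions for illustration;
# see the return-value docstring above for the output layout.
def _example_dask_train(
    client: "distributed.Client", X: "da.Array", y: "da.Array"
) -> Booster:
    """Train a small regression model and return the fitted booster."""
    dtrain = DaskDMatrix(client, X, y)
    output = train(
        client,
        {"objective": "reg:squarederror"},
        dtrain,
        num_boost_round=10,
        evals=[(dtrain, "train")],
    )
    return output["booster"]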
def _can_output_df(is_df: bool, output_shape: Tuple) -> bool:
return is_df and len(output_shape) <= 2
def _maybe_dataframe(
data: Any, prediction: Any, columns: List[int], is_df: bool
) -> Any:
"""Return dataframe for prediction when applicable."""
if _can_output_df(is_df, prediction.shape):
# Need to preserve the index for dataframe.
# See issue: https://github.com/dmlc/xgboost/issues/6939
# In older versions of dask, the partition is actually a numpy array when input
# is dataframe.
index = getattr(data, "index", None)
if lazy_isinstance(data, "cudf.core.dataframe", "DataFrame"):
import cudf
if prediction.size == 0:
return cudf.DataFrame({}, columns=columns, dtype=numpy.float32)
prediction = cudf.DataFrame(
prediction, columns=columns, dtype=numpy.float32, index=index
)
else:
if prediction.size == 0:
return DataFrame({}, columns=columns, dtype=numpy.float32, index=index)
prediction = DataFrame(
prediction, columns=columns, dtype=numpy.float32, index=index
)
return prediction
async def _direct_predict_impl( # pylint: disable=too-many-branches
mapped_predict: Callable,
booster: "distributed.Future",
data: _DataT,
base_margin: Optional[_DaskCollection],
output_shape: Tuple[int, ...],
meta: Dict[int, str],
) -> _DaskCollection:
columns = tuple(meta.keys())
if len(output_shape) >= 3 and isinstance(data, dd.DataFrame):
# Without this check, dask will finish the prediction silently even if output
# dimension is greater than 3. But during map_partitions, dask passes a
# `dd.DataFrame` as local input to xgboost, which is converted to csr_matrix by
# `_convert_unknown_data` since dd.DataFrame is not known to xgboost native
# binding.
raise ValueError(
"Use `da.Array` or `DaskDMatrix` when output has more than 2 dimensions."
)
if _can_output_df(isinstance(data, dd.DataFrame), output_shape):
if base_margin is not None and isinstance(base_margin, da.Array):
# Easier for map_partitions
base_margin_df: Optional[
Union[dd.DataFrame, dd.Series]
] = base_margin.to_dask_dataframe()
else:
base_margin_df = base_margin
predictions = dd.map_partitions(
mapped_predict,
booster,
data,
True,
columns,
base_margin_df,
meta=dd.utils.make_meta(meta),
)
# classification can return a dataframe, drop 1 dim when it's reg/binary
if len(output_shape) == 1:
predictions = predictions.iloc[:, 0]
else:
if base_margin is not None and isinstance(
base_margin, (dd.Series, dd.DataFrame)
):
# Easier for map_blocks
base_margin_array: Optional[da.Array] = base_margin.to_dask_array()
else:
base_margin_array = base_margin
        # Input data is a 2-dim array; output can have 1 (reg, binary), 2 (multi-class,
        # contrib), 3 (contrib, interaction), or 4 (interaction) dims.
if len(output_shape) == 1:
drop_axis: Union[int, List[int]] = [1] # drop from 2 to 1 dim.
new_axis: Union[int, List[int]] = []
else:
drop_axis = []
if isinstance(data, dd.DataFrame):
new_axis = list(range(len(output_shape) - 2))
else:
new_axis = [i + 2 for i in range(len(output_shape) - 2)]
if len(output_shape) == 2:
            # Somehow dask fails to infer the output shape change for 2-dim prediction,
            # and `chunks = (None, output_shape[1])` doesn't work because None is not
            # supported in map_blocks.
            # data must be an array here, as dataframe + 2-dim output predict would
            # return a dataframe instead.
chunks: Optional[List[Tuple]] = list(data.chunks)
assert isinstance(chunks, list)
chunks[1] = (output_shape[1],)
else:
chunks = None
predictions = da.map_blocks(
mapped_predict,
booster,
data,
False,
columns,
base_margin_array,
chunks=chunks,
drop_axis=drop_axis,
new_axis=new_axis,
dtype=numpy.float32,
)
return predictions
def _infer_predict_output(
booster: Booster, features: int, is_df: bool, inplace: bool, **kwargs: Any
) -> Tuple[Tuple[int, ...], Dict[int, str]]:
"""Create a dummy test sample to infer output shape for prediction."""
assert isinstance(features, int)
rng = numpy.random.RandomState(1994)
test_sample = rng.randn(1, features)
if inplace:
kwargs = kwargs.copy()
if kwargs.pop("predict_type") == "margin":
kwargs["output_margin"] = True
m = DMatrix(test_sample, enable_categorical=True)
# The generated DMatrix doesn't have feature names, so skip validation.
test_predt = booster.predict(m, validate_features=False, **kwargs)
n_columns = test_predt.shape[1] if len(test_predt.shape) > 1 else 1
meta: Dict[int, str] = {}
if _can_output_df(is_df, test_predt.shape):
for i in range(n_columns):
meta[i] = "f4"
return test_predt.shape, meta
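# Editor's note: a minimal sketch of the shape inference above for a 3-class
# classifier. The name `_demo_infer_predict_output` is a hypothetical addition
# for illustration, not part of the library.
def _demo_infer_predict_output() -> None:
    import numpy as np
    from xgboost import DMatrix, train as _train

    rng = np.random.RandomState(0)
    X, y = rng.randn(64, 4), rng.randint(0, 3, size=64)
    booster = _train(
        {"objective": "multi:softprob", "num_class": 3}, DMatrix(X, y), 2
    )
    shape, meta = _infer_predict_output(
        booster, features=4, is_df=True, inplace=False
    )
    # The 1-row dummy sample yields a (1, 3) probability matrix, so each of
    # the 3 output columns is mapped to float32 ("f4") in the dataframe meta.
    assert shape == (1, 3) and meta == {0: "f4", 1: "f4", 2: "f4"}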
async def _get_model_future(
client: "distributed.Client", model: Union[Booster, Dict, "distributed.Future"]
) -> "distributed.Future":
if isinstance(model, Booster):
booster = await client.scatter(model, broadcast=True)
elif isinstance(model, dict):
booster = await client.scatter(model["booster"], broadcast=True)
elif isinstance(model, distributed.Future):
booster = model
t = booster.type
if t is not Booster:
raise TypeError(
f"Underlying type of model future should be `Booster`, got {t}"
)
else:
raise TypeError(_expect([Booster, dict, distributed.Future], type(model)))
return booster
# pylint: disable=too-many-statements
async def _predict_async(
client: "distributed.Client",
global_config: Dict[str, Any],
model: Union[Booster, Dict, "distributed.Future"],
data: _DataT,
output_margin: bool,
missing: float,
pred_leaf: bool,
pred_contribs: bool,
approx_contribs: bool,
pred_interactions: bool,
validate_features: bool,
iteration_range: Tuple[int, int],
strict_shape: bool,
) -> _DaskCollection:
_booster = await _get_model_future(client, model)
if not isinstance(data, (DaskDMatrix, da.Array, dd.DataFrame)):
raise TypeError(_expect([DaskDMatrix, da.Array, dd.DataFrame], type(data)))
def mapped_predict(
booster: Booster, partition: Any, is_df: bool, columns: List[int], _: Any
) -> Any:
with config.config_context(**global_config):
m = DMatrix(
data=partition,
missing=missing,
enable_categorical=True,
)
predt = booster.predict(
data=m,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
validate_features=validate_features,
iteration_range=iteration_range,
strict_shape=strict_shape,
)
predt = _maybe_dataframe(partition, predt, columns, is_df)
return predt
# Predict on dask collection directly.
if isinstance(data, (da.Array, dd.DataFrame)):
_output_shape, meta = await client.compute(
client.submit(
_infer_predict_output,
_booster,
features=data.shape[1],
is_df=isinstance(data, dd.DataFrame),
inplace=False,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
strict_shape=strict_shape,
)
)
return await _direct_predict_impl(
mapped_predict, _booster, data, None, _output_shape, meta
)
output_shape, _ = await client.compute(
client.submit(
_infer_predict_output,
booster=_booster,
features=data.num_col(),
is_df=False,
inplace=False,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
strict_shape=strict_shape,
)
)
# Prediction on dask DMatrix.
partition_order = data.partition_order
feature_names = data.feature_names
feature_types = data.feature_types
missing = data.missing
def dispatched_predict(booster: Booster, part: Dict[str, Any]) -> numpy.ndarray:
data = part["data"]
base_margin = part.get("base_margin", None)
with config.config_context(**global_config):
m = DMatrix(
data,
missing=missing,
base_margin=base_margin,
feature_names=feature_names,
feature_types=feature_types,
enable_categorical=True,
)
predt = booster.predict(
m,
output_margin=output_margin,
pred_leaf=pred_leaf,
pred_contribs=pred_contribs,
approx_contribs=approx_contribs,
pred_interactions=pred_interactions,
validate_features=validate_features,
iteration_range=iteration_range,
strict_shape=strict_shape,
)
return predt
all_parts = []
all_orders = []
all_shapes = []
all_workers: List[str] = []
workers_address = list(data.worker_map.keys())
for worker_addr in workers_address:
list_of_parts = data.worker_map[worker_addr]
all_parts.extend(list_of_parts)
all_workers.extend(len(list_of_parts) * [worker_addr])
all_orders.extend([partition_order[part.key] for part in list_of_parts])
for w, part in zip(all_workers, all_parts):
s = client.submit(lambda part: part["data"].shape[0], part, workers=[w])
all_shapes.append(s)
parts_with_order = list(zip(all_parts, all_shapes, all_orders, all_workers))
parts_with_order = sorted(parts_with_order, key=lambda p: p[2])
all_parts = [part for part, shape, order, w in parts_with_order]
all_shapes = [shape for part, shape, order, w in parts_with_order]
all_workers = [w for part, shape, order, w in parts_with_order]
futures = []
for w, part in zip(all_workers, all_parts):
f = client.submit(dispatched_predict, _booster, part, workers=[w])
futures.append(f)
# Constructing a dask array from list of numpy arrays
# See https://docs.dask.org/en/latest/array-creation.html
arrays = []
all_shapes = await client.gather(all_shapes)
for i, rows in enumerate(all_shapes):
arrays.append(
da.from_delayed(
futures[i], shape=(rows,) + output_shape[1:], dtype=numpy.float32
)
)
predictions = da.concatenate(arrays, axis=0)
return predictions
def predict( # pylint: disable=unused-argument
client: Optional["distributed.Client"],
model: Union[TrainReturnT, Booster, "distributed.Future"],
data: Union[DaskDMatrix, _DataT],
output_margin: bool = False,
missing: float = numpy.nan,
pred_leaf: bool = False,
pred_contribs: bool = False,
approx_contribs: bool = False,
pred_interactions: bool = False,
validate_features: bool = True,
iteration_range: Tuple[int, int] = (0, 0),
strict_shape: bool = False,
) -> Any:
"""Run prediction with a trained booster.
.. note::
Using ``inplace_predict`` might be faster when some features are not needed.
See :py:meth:`xgboost.Booster.predict` for details on various parameters. When
output has more than 2 dimensions (shap value, leaf with strict_shape), input
should be ``da.Array`` or ``DaskDMatrix``.
.. versionadded:: 1.0.0
Parameters
----------
client:
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model:
The trained model. It can be a distributed.Future so user can
pre-scatter it onto all workers.
data:
Input data used for prediction. When input is a dataframe object,
prediction output is a series.
missing:
Used when input data is not DaskDMatrix. Specify the value
considered as missing.
Returns
-------
prediction: dask.array.Array/dask.dataframe.Series
When input data is ``dask.array.Array`` or ``DaskDMatrix``, the return value is
an array, when input data is ``dask.dataframe.DataFrame``, return value can be
``dask.dataframe.Series``, ``dask.dataframe.DataFrame``, depending on the output
shape.
"""
_assert_dask_support()
client = _xgb_get_client(client)
return client.sync(_predict_async, global_config=config.get_config(), **locals())
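# Editor's note: a usage sketch for `predict`, assuming a local dask cluster;
# `_demo_dask_predict` is a hypothetical name added for illustration only.
def _demo_dask_predict() -> None:
    from distributed import Client, LocalCluster

    with LocalCluster(n_workers=2) as cluster, Client(cluster) as client:
        X = da.random.random((1000, 10), chunks=(100, 10))
        y = da.random.random(1000, chunks=100)
        dtrain = DaskDMatrix(client, X, y)
        output = train(client, {"tree_method": "hist"}, dtrain)
        # `data` may be the DaskDMatrix, a dask array, or a dask dataframe.
        predt = predict(client, output["booster"], X)
        assert isinstance(predt, da.Array)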
async def _inplace_predict_async( # pylint: disable=too-many-branches
client: "distributed.Client",
global_config: Dict[str, Any],
model: Union[Booster, Dict, "distributed.Future"],
data: _DataT,
iteration_range: Tuple[int, int],
predict_type: str,
missing: float,
validate_features: bool,
base_margin: Optional[_DaskCollection],
strict_shape: bool,
) -> _DaskCollection:
client = _xgb_get_client(client)
booster = await _get_model_future(client, model)
if not isinstance(data, (da.Array, dd.DataFrame)):
raise TypeError(_expect([da.Array, dd.DataFrame], type(data)))
if base_margin is not None and not isinstance(
data, (da.Array, dd.DataFrame, dd.Series)
):
raise TypeError(_expect([da.Array, dd.DataFrame, dd.Series], type(base_margin)))
def mapped_predict(
booster: Booster,
partition: Any,
is_df: bool,
columns: List[int],
base_margin: Any,
) -> Any:
with config.config_context(**global_config):
prediction = booster.inplace_predict(
partition,
iteration_range=iteration_range,
predict_type=predict_type,
missing=missing,
base_margin=base_margin,
validate_features=validate_features,
strict_shape=strict_shape,
)
prediction = _maybe_dataframe(partition, prediction, columns, is_df)
return prediction
# await turns future into value.
shape, meta = await client.compute(
client.submit(
_infer_predict_output,
booster,
features=data.shape[1],
is_df=isinstance(data, dd.DataFrame),
inplace=True,
predict_type=predict_type,
iteration_range=iteration_range,
strict_shape=strict_shape,
)
)
return await _direct_predict_impl(
mapped_predict, booster, data, base_margin, shape, meta
)
def inplace_predict( # pylint: disable=unused-argument
client: Optional["distributed.Client"],
model: Union[TrainReturnT, Booster, "distributed.Future"],
data: _DataT,
iteration_range: Tuple[int, int] = (0, 0),
predict_type: str = "value",
missing: float = numpy.nan,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None,
strict_shape: bool = False,
) -> Any:
"""Inplace prediction. See doc in :py:meth:`xgboost.Booster.inplace_predict` for
details.
.. versionadded:: 1.1.0
Parameters
----------
client:
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model:
See :py:func:`xgboost.dask.predict` for details.
data :
dask collection.
iteration_range:
See :py:meth:`xgboost.Booster.predict` for details.
predict_type:
See :py:meth:`xgboost.Booster.inplace_predict` for details.
missing:
Value in the input data which needs to be present as a missing
value. If None, defaults to np.nan.
base_margin:
See :py:obj:`xgboost.DMatrix` for details.
.. versionadded:: 1.4.0
strict_shape:
See :py:meth:`xgboost.Booster.predict` for details.
.. versionadded:: 1.4.0
Returns
-------
prediction :
When input data is ``dask.array.Array``, the return value is an array, when
input data is ``dask.dataframe.DataFrame``, return value can be
``dask.dataframe.Series``, ``dask.dataframe.DataFrame``, depending on the output
shape.
"""
_assert_dask_support()
client = _xgb_get_client(client)
# When used in asynchronous environment, the `client` object should have
# `asynchronous` attribute as True. When invoked by the skl interface, it's
# responsible for setting up the client.
return client.sync(
_inplace_predict_async, global_config=config.get_config(), **locals()
)
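# Editor's note: a usage sketch for `inplace_predict`, assuming the same
# local-cluster setup as the `predict` sketch above (hypothetical demo name).
def _demo_inplace_predict() -> None:
    from distributed import Client, LocalCluster

    with LocalCluster(n_workers=2) as cluster, Client(cluster) as client:
        X = da.random.random((1000, 10), chunks=(100, 10))
        y = da.random.random(1000, chunks=100)
        output = train(client, {"tree_method": "hist"}, DaskDMatrix(client, X, y))
        # No DMatrix is constructed for prediction; the booster runs directly
        # on each partition of the dask collection.
        margin = inplace_predict(
            client, output["booster"], X, predict_type="margin"
        )
        assert isinstance(margin, da.Array)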
async def _async_wrap_evaluation_matrices(
client: Optional["distributed.Client"],
tree_method: Optional[str],
max_bin: Optional[int],
**kwargs: Any,
) -> Tuple[DaskDMatrix, Optional[List[Tuple[DaskDMatrix, str]]]]:
"""A switch function for async environment."""
def _dispatch(ref: Optional[DaskDMatrix], **kwargs: Any) -> DaskDMatrix:
if _can_use_qdm(tree_method):
return DaskQuantileDMatrix(
client=client, ref=ref, max_bin=max_bin, **kwargs
)
return DaskDMatrix(client=client, **kwargs)
train_dmatrix, evals = _wrap_evaluation_matrices(create_dmatrix=_dispatch, **kwargs)
train_dmatrix = await train_dmatrix
if evals is None:
return train_dmatrix, evals
awaited = []
for e in evals:
if e[0] is train_dmatrix: # already awaited
awaited.append(e)
continue
awaited.append((await e[0], e[1]))
return train_dmatrix, awaited
@contextmanager
def _set_worker_client(
model: "DaskScikitLearnBase", client: "distributed.Client"
) -> Generator:
"""Temporarily set the client for sklearn model."""
try:
model.client = client
yield model
finally:
model.client = None # type:ignore
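# Editor's note: a sketch of how `_set_worker_client` is used by the wrappers
# below when a method runs inside a dask task (hypothetical `model`/`client`):
#
#     with distributed.worker_client() as client:
#         with _set_worker_client(model, client) as m:
#             m.predict(X)      # uses the worker's client for the duration
#     # afterwards `model.client` falls back to the default client again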
class DaskScikitLearnBase(XGBModel):
"""Base class for implementing scikit-learn interface with Dask"""
_client = None
async def _predict_async(
self,
data: _DataT,
output_margin: bool,
validate_features: bool,
base_margin: Optional[_DaskCollection],
iteration_range: Optional[Tuple[int, int]],
) -> Any:
iteration_range = self._get_iteration_range(iteration_range)
if self._can_use_inplace_predict():
predts = await inplace_predict(
client=self.client,
model=self.get_booster(),
data=data,
iteration_range=iteration_range,
predict_type="margin" if output_margin else "value",
missing=self.missing,
base_margin=base_margin,
validate_features=validate_features,
)
if isinstance(predts, dd.DataFrame):
predts = predts.to_dask_array()
else:
test_dmatrix = await DaskDMatrix(
self.client,
data=data,
base_margin=base_margin,
missing=self.missing,
feature_types=self.feature_types,
)
predts = await predict(
self.client,
model=self.get_booster(),
data=test_dmatrix,
output_margin=output_margin,
validate_features=validate_features,
iteration_range=iteration_range,
)
return predts
def predict(
self,
X: _DataT,
output_margin: bool = False,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> Any:
_assert_dask_support()
return self.client.sync(
self._predict_async,
X,
output_margin=output_margin,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range,
)
async def _apply_async(
self,
X: _DataT,
iteration_range: Optional[Tuple[int, int]] = None,
) -> Any:
iteration_range = self._get_iteration_range(iteration_range)
test_dmatrix = await DaskDMatrix(
self.client,
data=X,
missing=self.missing,
feature_types=self.feature_types,
)
predts = await predict(
self.client,
model=self.get_booster(),
data=test_dmatrix,
pred_leaf=True,
iteration_range=iteration_range,
)
return predts
def apply(
self,
X: _DataT,
iteration_range: Optional[Tuple[int, int]] = None,
) -> Any:
_assert_dask_support()
return self.client.sync(self._apply_async, X, iteration_range=iteration_range)
def __await__(self) -> Awaitable[Any]:
# Generate a coroutine wrapper to make this class awaitable.
async def _() -> Awaitable[Any]:
return self
return self._client_sync(_).__await__()
def __getstate__(self) -> Dict:
this = self.__dict__.copy()
if "_client" in this:
del this["_client"]
return this
@property
def client(self) -> "distributed.Client":
"""The dask client used in this model. The `Client` object can not be serialized for
transmission, so if task is launched from a worker instead of directly from the
client process, this attribute needs to be set at that worker.
"""
client = _xgb_get_client(self._client)
return client
@client.setter
def client(self, clt: "distributed.Client") -> None:
# Calling `worker_client` doesn't return the correct `asynchronous` attribute,
# so we have to pass it ourselves.
self._asynchronous = clt.asynchronous if clt is not None else False
self._client = clt
def _client_sync(self, func: Callable, **kwargs: Any) -> Any:
"""Get the correct client, when method is invoked inside a worker we
should use `worker_client' instead of default client.
"""
if self._client is None:
asynchronous = getattr(self, "_asynchronous", False)
try:
distributed.get_worker()
in_worker = True
except ValueError:
in_worker = False
if in_worker:
with distributed.worker_client() as client:
with _set_worker_client(self, client) as this:
ret = this.client.sync(
func, **kwargs, asynchronous=asynchronous
)
return ret
return ret
return self.client.sync(func, **kwargs, asynchronous=self.client.asynchronous)
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost.""", ["estimators", "model"]
)
class DaskXGBRegressor(DaskScikitLearnBase, XGBRegressorBase):
"""dummy doc string to workaround pylint, replaced by the decorator."""
async def _fit_async(
self,
X: _DataT,
y: _DaskCollection,
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]],
eval_metric: Optional[Union[str, Sequence[str], Metric]],
sample_weight_eval_set: Optional[Sequence[_DaskCollection]],
base_margin_eval_set: Optional[Sequence[_DaskCollection]],
early_stopping_rounds: Optional[int],
verbose: Union[int, bool],
xgb_model: Optional[Union[Booster, XGBModel]],
feature_weights: Optional[_DaskCollection],
callbacks: Optional[Sequence[TrainingCallback]],
) -> _DaskCollection:
params = self.get_xgb_params()
dtrain, evals = await _async_wrap_evaluation_matrices(
client=self.client,
tree_method=self.tree_method,
max_bin=self.max_bin,
X=X,
y=y,
group=None,
qid=None,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=None,
missing=self.missing,
enable_categorical=self.enable_categorical,
feature_types=self.feature_types,
)
if callable(self.objective):
obj: Optional[Callable] = _objective_decorator(self.objective)
else:
obj = None
model, metric, params, early_stopping_rounds, callbacks = self._configure_fit(
xgb_model, eval_metric, params, early_stopping_rounds, callbacks
)
results = await self.client.sync(
_train_async,
asynchronous=True,
client=self.client,
global_config=config.get_config(),
dconfig=_get_dask_config(),
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
obj=obj,
feval=None,
custom_metric=metric,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks,
xgb_model=model,
)
self._Booster = results["booster"]
self._set_evaluation_result(results["history"])
return self
# pylint: disable=missing-docstring, disable=unused-argument
@_deprecate_positional_args
def fit(
self,
X: _DataT,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Callable]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Union[int, bool] = True,
xgb_model: Optional[Union[Booster, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[_DaskCollection]] = None,
base_margin_eval_set: Optional[Sequence[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "DaskXGBRegressor":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
return self._client_sync(self._fit_async, **args)
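# Editor's note: a minimal usage sketch (hypothetical names; assumes dask
# collections `X`, `y` and an active client, e.g. from LocalCluster):
#
#     reg = DaskXGBRegressor(n_estimators=10, tree_method="hist")
#     reg.client = client            # optional; defaults to the active client
#     reg.fit(X, y, eval_set=[(X, y)])
#     predt = reg.predict(X)         # a dask array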
@xgboost_model_doc(
"Implementation of the scikit-learn API for XGBoost classification.",
["estimators", "model"],
)
class DaskXGBClassifier(DaskScikitLearnBase, XGBClassifierMixIn, XGBClassifierBase):
# pylint: disable=missing-class-docstring
async def _fit_async(
self,
X: _DataT,
y: _DaskCollection,
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]],
eval_metric: Optional[Union[str, Sequence[str], Metric]],
sample_weight_eval_set: Optional[Sequence[_DaskCollection]],
base_margin_eval_set: Optional[Sequence[_DaskCollection]],
early_stopping_rounds: Optional[int],
verbose: Union[int, bool],
xgb_model: Optional[Union[Booster, XGBModel]],
feature_weights: Optional[_DaskCollection],
callbacks: Optional[Sequence[TrainingCallback]],
) -> "DaskXGBClassifier":
params = self.get_xgb_params()
dtrain, evals = await _async_wrap_evaluation_matrices(
self.client,
tree_method=self.tree_method,
max_bin=self.max_bin,
X=X,
y=y,
group=None,
qid=None,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=None,
missing=self.missing,
enable_categorical=self.enable_categorical,
feature_types=self.feature_types,
)
# pylint: disable=attribute-defined-outside-init
if isinstance(y, da.Array):
self.classes_ = await self.client.compute(da.unique(y))
else:
self.classes_ = await self.client.compute(y.drop_duplicates())
if _is_cudf_ser(self.classes_):
self.classes_ = self.classes_.to_cupy()
if _is_cupy_array(self.classes_):
self.classes_ = self.classes_.get()
self.classes_ = numpy.array(self.classes_)
self.n_classes_ = len(self.classes_)
if self.n_classes_ > 2:
params["objective"] = "multi:softprob"
params["num_class"] = self.n_classes_
else:
params["objective"] = "binary:logistic"
if callable(self.objective):
obj: Optional[Callable] = _objective_decorator(self.objective)
else:
obj = None
model, metric, params, early_stopping_rounds, callbacks = self._configure_fit(
xgb_model, eval_metric, params, early_stopping_rounds, callbacks
)
results = await self.client.sync(
_train_async,
asynchronous=True,
client=self.client,
global_config=config.get_config(),
dconfig=_get_dask_config(),
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
obj=obj,
feval=None,
custom_metric=metric,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks,
xgb_model=model,
)
self._Booster = results["booster"]
if not callable(self.objective):
self.objective = params["objective"]
self._set_evaluation_result(results["history"])
return self
# pylint: disable=unused-argument
def fit(
self,
X: _DataT,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Callable]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Union[int, bool] = True,
xgb_model: Optional[Union[Booster, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[_DaskCollection]] = None,
base_margin_eval_set: Optional[Sequence[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "DaskXGBClassifier":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
return self._client_sync(self._fit_async, **args)
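# Editor's note: a minimal usage sketch (hypothetical names). During fit the
# classifier infers `classes_`/`n_classes_` from `y` and switches the
# objective to multi:softprob automatically when more than two classes exist:
#
#     clf = DaskXGBClassifier(n_estimators=10, tree_method="hist")
#     clf.fit(X, y)                  # y: dask collection of class labels
#     proba = clf.predict_proba(X)   # shape (n_samples, n_classes_)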
async def _predict_proba_async(
self,
X: _DataT,
validate_features: bool,
base_margin: Optional[_DaskCollection],
iteration_range: Optional[Tuple[int, int]],
) -> _DaskCollection:
if self.objective == "multi:softmax":
raise ValueError(
"multi:softmax doesn't support `predict_proba`. "
"Switch to `multi:softproba` instead"
)
predts = await super()._predict_async(
data=X,
output_margin=False,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range,
)
vstack = update_wrapper(
partial(da.vstack, allow_unknown_chunksizes=True), da.vstack
)
return _cls_predict_proba(getattr(self, "n_classes_", 0), predts, vstack)
# pylint: disable=missing-function-docstring
def predict_proba(
self,
X: _DaskCollection,
validate_features: bool = True,
base_margin: Optional[_DaskCollection] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> Any:
_assert_dask_support()
return self._client_sync(
self._predict_proba_async,
X=X,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range,
)
predict_proba.__doc__ = XGBClassifier.predict_proba.__doc__
async def _predict_async(
self,
data: _DataT,
output_margin: bool,
validate_features: bool,
base_margin: Optional[_DaskCollection],
iteration_range: Optional[Tuple[int, int]],
) -> _DaskCollection:
pred_probs = await super()._predict_async(
data, output_margin, validate_features, base_margin, iteration_range
)
if output_margin:
return pred_probs
if len(pred_probs.shape) == 1:
preds = (pred_probs > 0.5).astype(int)
else:
assert len(pred_probs.shape) == 2
assert isinstance(pred_probs, da.Array)
# when using da.argmax directly, dask will construct a numpy based return
# array, which runs into error when computing GPU based prediction.
def _argmax(x: Any) -> Any:
return x.argmax(axis=1)
preds = da.map_blocks(_argmax, pred_probs, drop_axis=1)
return preds
def load_model(self, fname: ModelIn) -> None:
super().load_model(fname)
self._load_model_attributes(self.get_booster())
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost Ranking.
.. versionadded:: 1.4.0
""",
["estimators", "model"],
end_note="""
.. note::
For the dask implementation, ``group`` is not supported; use ``qid`` instead.
""",
)
class DaskXGBRanker(DaskScikitLearnBase, XGBRankerMixIn):
@_deprecate_positional_args
def __init__(self, *, objective: str = "rank:pairwise", **kwargs: Any):
if callable(objective):
raise ValueError("Custom objective function not supported by XGBRanker.")
super().__init__(objective=objective, **kwargs)
async def _fit_async(
self,
X: _DataT,
y: _DaskCollection,
group: Optional[_DaskCollection],
qid: Optional[_DaskCollection],
sample_weight: Optional[_DaskCollection],
base_margin: Optional[_DaskCollection],
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]],
sample_weight_eval_set: Optional[Sequence[_DaskCollection]],
base_margin_eval_set: Optional[Sequence[_DaskCollection]],
eval_group: Optional[Sequence[_DaskCollection]],
eval_qid: Optional[Sequence[_DaskCollection]],
eval_metric: Optional[Union[str, Sequence[str], Metric]],
early_stopping_rounds: Optional[int],
verbose: Union[int, bool],
xgb_model: Optional[Union[XGBModel, Booster]],
feature_weights: Optional[_DaskCollection],
callbacks: Optional[Sequence[TrainingCallback]],
) -> "DaskXGBRanker":
msg = "Use `qid` instead of `group` on dask interface."
if not (group is None and eval_group is None):
raise ValueError(msg)
if qid is None:
raise ValueError("`qid` is required for ranking.")
params = self.get_xgb_params()
dtrain, evals = await _async_wrap_evaluation_matrices(
self.client,
tree_method=self.tree_method,
max_bin=self.max_bin,
X=X,
y=y,
group=None,
qid=qid,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=eval_qid,
missing=self.missing,
enable_categorical=self.enable_categorical,
feature_types=self.feature_types,
)
if eval_metric is not None:
if callable(eval_metric):
raise ValueError(
"Custom evaluation metric is not yet supported for XGBRanker."
)
model, metric, params, early_stopping_rounds, callbacks = self._configure_fit(
xgb_model, eval_metric, params, early_stopping_rounds, callbacks
)
results = await self.client.sync(
_train_async,
asynchronous=True,
client=self.client,
global_config=config.get_config(),
dconfig=_get_dask_config(),
params=params,
dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals,
obj=None,
feval=None,
custom_metric=metric,
verbose_eval=verbose,
early_stopping_rounds=early_stopping_rounds,
callbacks=callbacks,
xgb_model=model,
)
self._Booster = results["booster"]
self.evals_result_ = results["history"]
return self
# pylint: disable=unused-argument, arguments-differ
@_deprecate_positional_args
def fit(
self,
X: _DataT,
y: _DaskCollection,
*,
group: Optional[_DaskCollection] = None,
qid: Optional[_DaskCollection] = None,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_group: Optional[Sequence[_DaskCollection]] = None,
eval_qid: Optional[Sequence[_DaskCollection]] = None,
eval_metric: Optional[Union[str, Sequence[str], Callable]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Union[int, bool] = False,
xgb_model: Optional[Union[XGBModel, Booster]] = None,
sample_weight_eval_set: Optional[Sequence[_DaskCollection]] = None,
base_margin_eval_set: Optional[Sequence[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "DaskXGBRanker":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
return self._client_sync(self._fit_async, **args)
# FIXME(trivialfis): arguments differ due to additional parameters like group and
# qid.
fit.__doc__ = XGBRanker.fit.__doc__
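# Editor's note: a minimal usage sketch (hypothetical names). The dask ranker
# takes `qid` -- query ids aligned with the rows of `X` -- instead of `group`:
#
#     qid = da.from_array(numpy.repeat(numpy.arange(100), 10), chunks=100)
#     ranker = DaskXGBRanker(n_estimators=10, tree_method="hist")
#     ranker.fit(X, y, qid=qid)      # X, y: dask collections with 1000 rows
#     scores = ranker.predict(X)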
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost Random Forest Regressor.
.. versionadded:: 1.4.0
""",
["model", "objective"],
extra_parameters="""
n_estimators : int
Number of trees in random forest to fit.
""",
)
class DaskXGBRFRegressor(DaskXGBRegressor):
@_deprecate_positional_args
def __init__(
self,
*,
learning_rate: Optional[float] = 1,
subsample: Optional[float] = 0.8,
colsample_bynode: Optional[float] = 0.8,
reg_lambda: Optional[float] = 1e-5,
**kwargs: Any,
) -> None:
super().__init__(
learning_rate=learning_rate,
subsample=subsample,
colsample_bynode=colsample_bynode,
reg_lambda=reg_lambda,
**kwargs,
)
def get_xgb_params(self) -> Dict[str, Any]:
params = super().get_xgb_params()
params["num_parallel_tree"] = self.n_estimators
return params
def get_num_boosting_rounds(self) -> int:
return 1
# pylint: disable=unused-argument
def fit(
self,
X: _DataT,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Callable]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Union[int, bool] = True,
xgb_model: Optional[Union[Booster, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[_DaskCollection]] = None,
base_margin_eval_set: Optional[Sequence[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "DaskXGBRFRegressor":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
_check_rf_callback(early_stopping_rounds, callbacks)
super().fit(**args)
return self
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost Random Forest Classifier.
.. versionadded:: 1.4.0
""",
["model", "objective"],
extra_parameters="""
n_estimators : int
Number of trees in random forest to fit.
""",
)
class DaskXGBRFClassifier(DaskXGBClassifier):
@_deprecate_positional_args
def __init__(
self,
*,
learning_rate: Optional[float] = 1,
subsample: Optional[float] = 0.8,
colsample_bynode: Optional[float] = 0.8,
reg_lambda: Optional[float] = 1e-5,
**kwargs: Any,
) -> None:
super().__init__(
learning_rate=learning_rate,
subsample=subsample,
colsample_bynode=colsample_bynode,
reg_lambda=reg_lambda,
**kwargs,
)
def get_xgb_params(self) -> Dict[str, Any]:
params = super().get_xgb_params()
params["num_parallel_tree"] = self.n_estimators
return params
def get_num_boosting_rounds(self) -> int:
return 1
# pylint: disable=unused-argument
def fit(
self,
X: _DataT,
y: _DaskCollection,
*,
sample_weight: Optional[_DaskCollection] = None,
base_margin: Optional[_DaskCollection] = None,
eval_set: Optional[Sequence[Tuple[_DaskCollection, _DaskCollection]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Callable]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Union[int, bool] = True,
xgb_model: Optional[Union[Booster, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[_DaskCollection]] = None,
base_margin_eval_set: Optional[Sequence[_DaskCollection]] = None,
feature_weights: Optional[_DaskCollection] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "DaskXGBRFClassifier":
_assert_dask_support()
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
_check_rf_callback(early_stopping_rounds, callbacks)
super().fit(**args)
return self
| 80,968
| 34.373089
| 94
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/sklearn.py
|
# pylint: disable=too-many-arguments, too-many-locals, invalid-name, fixme, too-many-lines
"""Scikit-Learn Wrapper interface for XGBoost."""
import copy
import json
import os
import warnings
from concurrent.futures import ThreadPoolExecutor
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import numpy as np
from scipy.special import softmax
from ._typing import ArrayLike, FeatureNames, FeatureTypes, ModelIn
from .callback import TrainingCallback
# Do not use scikit-learn class names directly. Re-define the classes in
# .compat to guarantee the behavior without scikit-learn.
from .compat import SKLEARN_INSTALLED, XGBClassifierBase, XGBModelBase, XGBRegressorBase
from .config import config_context
from .core import (
Booster,
DMatrix,
Metric,
Objective,
QuantileDMatrix,
XGBoostError,
_deprecate_positional_args,
_parse_eval_str,
)
from .data import _is_cudf_df, _is_cudf_ser, _is_cupy_array, _is_pandas_df
from .training import train
class XGBClassifierMixIn: # pylint: disable=too-few-public-methods
"""MixIn for classification."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
def _load_model_attributes(self, booster: Booster) -> None:
config = json.loads(booster.save_config())
self.n_classes_ = int(config["learner"]["learner_model_param"]["num_class"])
# binary classification is treated as regression in XGBoost.
self.n_classes_ = 2 if self.n_classes_ < 2 else self.n_classes_
class XGBRankerMixIn: # pylint: disable=too-few-public-methods
"""MixIn for ranking, defines the _estimator_type usually defined in scikit-learn
base classes.
"""
_estimator_type = "ranker"
def _check_rf_callback(
early_stopping_rounds: Optional[int],
callbacks: Optional[Sequence[TrainingCallback]],
) -> None:
if early_stopping_rounds is not None or callbacks is not None:
raise NotImplementedError(
"`early_stopping_rounds` and `callbacks` are not implemented for"
" random forest."
)
def _can_use_qdm(tree_method: Optional[str]) -> bool:
return tree_method in ("hist", "gpu_hist", None, "auto")
SklObjective = Optional[
Union[str, Callable[[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]]
]
def _objective_decorator(
func: Callable[[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
) -> Objective:
"""Decorate an objective function
Converts an objective function using the typical sklearn metrics
signature so that it is usable with ``xgboost.training.train``
Parameters
----------
func:
Expects a callable with signature ``func(y_true, y_pred)``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
Returns
-------
new_func:
The new objective function as expected by ``xgboost.training.train``.
The signature is ``new_func(preds, dmatrix)``:
preds: array_like, shape [n_samples]
The predicted values
dmatrix: ``DMatrix``
The training set from which the labels will be extracted using
``dmatrix.get_label()``
"""
def inner(preds: np.ndarray, dmatrix: DMatrix) -> Tuple[np.ndarray, np.ndarray]:
"""internal function"""
labels = dmatrix.get_label()
return func(labels, preds)
return inner
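# Editor's note: a sketch of the decorator in action with a hand-written
# squared-error objective (hypothetical names, illustration only).
def _demo_objective_decorator() -> None:
    def squared_error(
        y_true: np.ndarray, y_pred: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        grad = y_pred - y_true       # d/dpred of 0.5 * (pred - y)^2
        hess = np.ones_like(y_pred)  # the second derivative is constant
        return grad, hess

    rng = np.random.RandomState(0)
    dtrain = DMatrix(rng.randn(32, 4), rng.randn(32))
    # The decorator flips the signature to (preds, dmatrix) as `train` expects.
    train(
        {"tree_method": "hist"},
        dtrain,
        num_boost_round=2,
        obj=_objective_decorator(squared_error),
    )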
def _metric_decorator(func: Callable) -> Metric:
"""Decorate a metric function from sklearn.
Converts a metric function that uses the typical sklearn metric signature so
that it is compatible with :py:func:`train`.
"""
def inner(y_score: np.ndarray, dmatrix: DMatrix) -> Tuple[str, float]:
y_true = dmatrix.get_label()
weight = dmatrix.get_weight()
if weight.size == 0:
return func.__name__, func(y_true, y_score)
return func.__name__, func(y_true, y_score, sample_weight=weight)
return inner
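# Editor's note: a sketch of wrapping a sklearn metric for `train`; the
# estimators below do this automatically when `eval_metric` is a callable
# (hypothetical demo name).
def _demo_metric_decorator() -> None:
    from sklearn.metrics import mean_absolute_error

    rng = np.random.RandomState(0)
    dtrain = DMatrix(rng.randn(32, 4), rng.randn(32))
    train(
        {"tree_method": "hist"},
        dtrain,
        num_boost_round=2,
        evals=[(dtrain, "train")],
        custom_metric=_metric_decorator(mean_absolute_error),
    )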
def ltr_metric_decorator(func: Callable, n_jobs: Optional[int]) -> Metric:
"""Decorate a learning to rank metric."""
def inner(y_score: np.ndarray, dmatrix: DMatrix) -> Tuple[str, float]:
y_true = dmatrix.get_label()
group_ptr = dmatrix.get_uint_info("group_ptr")
if group_ptr.size < 2:
raise ValueError(
"Invalid `group_ptr`. Likely caused by invalid qid or group."
)
scores = np.empty(group_ptr.size - 1)
futures = []
weight = dmatrix.get_group()
no_weight = weight.size == 0
def task(i: int) -> float:
begin = group_ptr[i - 1]
end = group_ptr[i]
gy = y_true[begin:end]
gp = y_score[begin:end]
if gy.size == 1:
# Maybe there's a better default? 1.0 because many ranking score
# functions have output in range [0, 1].
return 1.0
return func(gy, gp)
workers = n_jobs if n_jobs is not None else os.cpu_count()
with ThreadPoolExecutor(max_workers=workers) as executor:
for i in range(1, group_ptr.size):
f = executor.submit(task, i)
futures.append(f)
for i, f in enumerate(futures):
scores[i] = f.result()
if no_weight:
return func.__name__, scores.mean()
return func.__name__, np.average(scores, weights=weight)
return inner
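# Editor's note: the decorator above calls `func` once per query group. A
# sketch with a hypothetical per-group metric (precision of the top hit):
def _demo_ltr_metric_decorator() -> Metric:
    def top1_relevant(y_true: np.ndarray, y_score: np.ndarray) -> float:
        # 1.0 when the highest-scored document is among the most relevant.
        return float(y_true[np.argmax(y_score)] == y_true.max())

    # The result has the (y_score, DMatrix) signature expected by `train`,
    # averaging the per-group scores (weighted when group weights are set).
    return ltr_metric_decorator(top1_relevant, n_jobs=2)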
__estimator_doc = """
n_estimators : Optional[int]
Number of gradient boosted trees. Equivalent to number of boosting
rounds.
"""
__model_doc = f"""
max_depth : Optional[int]
Maximum tree depth for base learners.
max_leaves :
Maximum number of leaves; 0 indicates no limit.
max_bin :
If using histogram-based algorithm, maximum number of bins per feature
grow_policy :
Tree growing policy. 0: favor splitting at nodes closest to the root, i.e. grow
depth-wise. 1: favor splitting at nodes with the highest loss change.
learning_rate : Optional[float]
Boosting learning rate (xgb's "eta")
verbosity : Optional[int]
The degree of verbosity. Valid values are 0 (silent) - 3 (debug).
objective : {SklObjective}
Specify the learning task and the corresponding learning objective or
a custom objective function to be used (see note below).
booster: Optional[str]
Specify which booster to use: gbtree, gblinear or dart.
tree_method: Optional[str]
Specify which tree method to use. Defaults to auto. When left at the default,
XGBoost will choose the most conservative option available. It's recommended
to study this option in the parameters document :doc:`tree method
</treemethod>`
n_jobs : Optional[int]
Number of parallel threads used to run xgboost. When used with other
Scikit-Learn algorithms like grid search, you may choose which algorithm to
parallelize and balance the threads. Creating thread contention will
significantly slow down both algorithms.
gamma : Optional[float]
(min_split_loss) Minimum loss reduction required to make a further partition on a
leaf node of the tree.
min_child_weight : Optional[float]
Minimum sum of instance weight(hessian) needed in a child.
max_delta_step : Optional[float]
Maximum delta step we allow each tree's weight estimation to be.
subsample : Optional[float]
Subsample ratio of the training instance.
sampling_method :
Sampling method. Used only by `gpu_hist` tree method.
- `uniform`: select random training instances uniformly.
- `gradient_based`: select random training instances with higher probability
when the gradient and hessian are larger (cf. CatBoost).
colsample_bytree : Optional[float]
Subsample ratio of columns when constructing each tree.
colsample_bylevel : Optional[float]
Subsample ratio of columns for each level.
colsample_bynode : Optional[float]
Subsample ratio of columns for each split.
reg_alpha : Optional[float]
L1 regularization term on weights (xgb's alpha).
reg_lambda : Optional[float]
L2 regularization term on weights (xgb's lambda).
scale_pos_weight : Optional[float]
Balancing of positive and negative weights.
base_score : Optional[float]
The initial prediction score of all instances, global bias.
random_state : Optional[Union[numpy.random.RandomState, int]]
Random number seed.
.. note::
Using gblinear booster with shotgun updater is nondeterministic as
it uses Hogwild algorithm.
missing : float, default np.nan
Value in the data which needs to be present as a missing value.
num_parallel_tree: Optional[int]
Used for boosting random forest.
monotone_constraints : Optional[Union[Dict[str, int], str]]
Constraint of variable monotonicity. See :doc:`tutorial </tutorials/monotonic>`
for more information.
interaction_constraints : Optional[Union[str, List[Tuple[str]]]]
Constraints for interaction representing permitted interactions. The
constraints must be specified in the form of a nested list, e.g. ``[[0, 1], [2,
3, 4]]``, where each inner list is a group of indices of features that are
allowed to interact with each other. See :doc:`tutorial
</tutorials/feature_interaction_constraint>` for more information
importance_type: Optional[str]
The feature importance type for the feature_importances\\_ property:
* For tree model, it's either "gain", "weight", "cover", "total_gain" or
"total_cover".
* For linear model, only "weight" is defined and it's the normalized coefficients
without bias.
device : Optional[str]
Device ordinal.
validate_parameters : Optional[bool]
Give warnings for unknown parameter.
enable_categorical : bool
.. versionadded:: 1.5.0
.. note:: This parameter is experimental
Experimental support for categorical data. When enabled, cudf/pandas.DataFrame
should be used to specify categorical data type. Also, JSON/UBJSON
serialization format is required.
feature_types : Optional[FeatureTypes]
.. versionadded:: 1.7.0
Used for specifying feature types without constructing a dataframe. See
:py:class:`DMatrix` for details.
max_cat_to_onehot : Optional[int]
.. versionadded:: 1.6.0
.. note:: This parameter is experimental
A threshold for deciding whether XGBoost should use one-hot encoding based split
for categorical data. When the number of categories is less than the threshold,
one-hot encoding is chosen; otherwise the categories will be partitioned into
children nodes. Also, `enable_categorical` needs to be set to have
categorical feature support. See :doc:`Categorical Data
</tutorials/categorical>` and :ref:`cat-param` for details.
max_cat_threshold : Optional[int]
.. versionadded:: 1.7.0
.. note:: This parameter is experimental
Maximum number of categories considered for each split. Used only by
partition-based splits for preventing over-fitting. Also, `enable_categorical`
needs to be set to have categorical feature support. See :doc:`Categorical Data
</tutorials/categorical>` and :ref:`cat-param` for details.
multi_strategy : Optional[str]
.. versionadded:: 2.0.0
.. note:: This parameter is working-in-progress.
The strategy used for training multi-target models, including multi-target
regression and multi-class classification. See :doc:`/tutorials/multioutput` for
more information.
- ``one_output_per_tree``: One model for each target.
- ``multi_output_tree``: Use multi-target trees.
eval_metric : Optional[Union[str, List[str], Callable]]
.. versionadded:: 1.6.0
Metric used for monitoring the training result and early stopping. It can be a
string or list of strings as names of predefined metric in XGBoost (See
doc/parameter.rst), one of the metrics in :py:mod:`sklearn.metrics`, or any other
user defined metric that looks like `sklearn.metrics`.
If custom objective is also provided, then custom metric should implement the
corresponding reverse link function.
Unlike the `scoring` parameter commonly used in scikit-learn, when a callable
object is provided, it's assumed to be a cost function and by default XGBoost will
minimize the result during early stopping.
For advanced usage on Early stopping like directly choosing to maximize instead of
minimize, see :py:obj:`xgboost.callback.EarlyStopping`.
See :doc:`Custom Objective and Evaluation Metric </tutorials/custom_metric_obj>`
for more.
.. note::
This parameter replaces `eval_metric` in :py:meth:`fit` method. The old
one receives un-transformed prediction regardless of whether custom
objective is being used.
.. code-block:: python
from sklearn.datasets import load_diabetes
from sklearn.metrics import mean_absolute_error
import xgboost as xgb
X, y = load_diabetes(return_X_y=True)
reg = xgb.XGBRegressor(
tree_method="hist",
eval_metric=mean_absolute_error,
)
reg.fit(X, y, eval_set=[(X, y)])
early_stopping_rounds : Optional[int]
.. versionadded:: 1.6.0
- Activates early stopping. Validation metric needs to improve at least once in
every **early_stopping_rounds** round(s) to continue training. Requires at
least one item in **eval_set** in :py:meth:`fit`.
- If early stopping occurs, the model will have two additional attributes:
:py:attr:`best_score` and :py:attr:`best_iteration`. These are used by the
:py:meth:`predict` and :py:meth:`apply` methods to determine the optimal
number of trees during inference. If users want to access the full model
(including trees built after early stopping), they can specify the
`iteration_range` in these inference methods. In addition, other utilities
like model plotting can also use the entire model.
- If you prefer to discard the trees after `best_iteration`, consider using the
callback function :py:class:`xgboost.callback.EarlyStopping`.
- If there's more than one item in **eval_set**, the last entry will be used for
early stopping. If there's more than one metric in **eval_metric**, the last
metric will be used for early stopping.
.. note::
This parameter replaces `early_stopping_rounds` in :py:meth:`fit` method.
callbacks : Optional[List[TrainingCallback]]
List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using
:ref:`Callback API <callback_api>`.
.. note::
States in callback are not preserved during training, which means callback
objects can not be reused for multiple training sessions without
reinitialization or deepcopy.
.. code-block:: python
for params in parameters_grid:
# be sure to (re)initialize the callbacks before each run
callbacks = [xgb.callback.LearningRateScheduler(custom_rates)]
reg = xgb.XGBRegressor(**params, callbacks=callbacks)
reg.fit(X, y)
kwargs : dict, optional
Keyword arguments for XGBoost Booster object. Full documentation of parameters
can be found :doc:`here </parameter>`.
Attempting to set a parameter via the constructor args and \\*\\*kwargs
dict simultaneously will result in a TypeError.
.. note:: \\*\\*kwargs unsupported by scikit-learn
\\*\\*kwargs is unsupported by scikit-learn. We do not guarantee
that parameters passed via this argument will interact properly
with scikit-learn.
"""
__custom_obj_note = """
.. note:: Custom objective function
A custom objective function can be provided for the ``objective``
parameter. In this case, it should have the signature
``objective(y_true, y_pred) -> grad, hess``:
y_true: array_like of shape [n_samples]
The target values
y_pred: array_like of shape [n_samples]
The predicted values
grad: array_like of shape [n_samples]
The value of the gradient for each sample point.
hess: array_like of shape [n_samples]
The value of the second derivative for each sample point
"""
def xgboost_model_doc(
header: str,
items: List[str],
extra_parameters: Optional[str] = None,
end_note: Optional[str] = None,
) -> Callable[[Type], Type]:
"""Obtain documentation for Scikit-Learn wrappers
Parameters
----------
header: str
An introduction to the class.
items : list
A list of common doc items. Available items are:
- estimators: the meaning of n_estimators
- model: All the other parameters
- objective: note for customized objective
extra_parameters: str
Documentation for class-specific parameters, placed at the head.
end_note: str
Extra notes appended at the end."""
def get_doc(item: str) -> str:
"""Return selected item"""
__doc = {
"estimators": __estimator_doc,
"model": __model_doc,
"objective": __custom_obj_note,
}
return __doc[item]
def adddoc(cls: Type) -> Type:
doc = [
"""
Parameters
----------
"""
]
if extra_parameters:
doc.append(extra_parameters)
doc.extend([get_doc(i) for i in items])
if end_note:
doc.append(end_note)
full_doc = [
header + "\nSee :doc:`/python/sklearn_estimator` for more information.\n"
]
full_doc.extend(doc)
cls.__doc__ = "".join(full_doc)
return cls
return adddoc
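# Editor's note: a sketch of the decorator's effect (hypothetical class). The
# final docstring is concatenated as header -> extra_parameters -> items ->
# end_note:
#
#     @xgboost_model_doc("My estimator.", ["estimators", "model"])
#     class MyEstimator(XGBModel):
#         ...
#
#     # MyEstimator.__doc__ now starts with "My estimator." followed by the
#     # shared n_estimators and model parameter documentation defined above.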
def _wrap_evaluation_matrices(
missing: float,
X: Any,
y: Any,
group: Optional[Any],
qid: Optional[Any],
sample_weight: Optional[Any],
base_margin: Optional[Any],
feature_weights: Optional[Any],
eval_set: Optional[Sequence[Tuple[Any, Any]]],
sample_weight_eval_set: Optional[Sequence[Any]],
base_margin_eval_set: Optional[Sequence[Any]],
eval_group: Optional[Sequence[Any]],
eval_qid: Optional[Sequence[Any]],
create_dmatrix: Callable,
enable_categorical: bool,
feature_types: Optional[FeatureTypes],
) -> Tuple[Any, List[Tuple[Any, str]]]:
"""Convert array_like evaluation matrices into DMatrix. Perform validation on the
way."""
train_dmatrix = create_dmatrix(
data=X,
label=y,
group=group,
qid=qid,
weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
missing=missing,
enable_categorical=enable_categorical,
feature_types=feature_types,
ref=None,
)
n_validation = 0 if eval_set is None else len(eval_set)
def validate_or_none(meta: Optional[Sequence], name: str) -> Sequence:
if meta is None:
return [None] * n_validation
if len(meta) != n_validation:
raise ValueError(
f"{name}'s length does not equal `eval_set`'s length, "
+ f"expecting {n_validation}, got {len(meta)}"
)
return meta
if eval_set is not None:
sample_weight_eval_set = validate_or_none(
sample_weight_eval_set, "sample_weight_eval_set"
)
base_margin_eval_set = validate_or_none(
base_margin_eval_set, "base_margin_eval_set"
)
eval_group = validate_or_none(eval_group, "eval_group")
eval_qid = validate_or_none(eval_qid, "eval_qid")
evals = []
for i, (valid_X, valid_y) in enumerate(eval_set):
# Skip the duplicated entry.
if all(
(
valid_X is X,
valid_y is y,
sample_weight_eval_set[i] is sample_weight,
base_margin_eval_set[i] is base_margin,
eval_group[i] is group,
eval_qid[i] is qid,
)
):
evals.append(train_dmatrix)
else:
m = create_dmatrix(
data=valid_X,
label=valid_y,
weight=sample_weight_eval_set[i],
group=eval_group[i],
qid=eval_qid[i],
base_margin=base_margin_eval_set[i],
missing=missing,
enable_categorical=enable_categorical,
feature_types=feature_types,
ref=train_dmatrix,
)
evals.append(m)
nevals = len(evals)
eval_names = [f"validation_{i}" for i in range(nevals)]
evals = list(zip(evals, eval_names))
else:
if any(
meta is not None
for meta in [
sample_weight_eval_set,
base_margin_eval_set,
eval_group,
eval_qid,
]
):
raise ValueError(
"`eval_set` is not set but one of the other evaluation meta info is "
"not None."
)
evals = []
return train_dmatrix, evals
DEFAULT_N_ESTIMATORS = 100
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost.""",
["estimators", "model", "objective"],
)
class XGBModel(XGBModelBase):
# pylint: disable=too-many-arguments, too-many-instance-attributes, missing-docstring
def __init__(
self,
max_depth: Optional[int] = None,
max_leaves: Optional[int] = None,
max_bin: Optional[int] = None,
grow_policy: Optional[str] = None,
learning_rate: Optional[float] = None,
n_estimators: Optional[int] = None,
verbosity: Optional[int] = None,
objective: SklObjective = None,
booster: Optional[str] = None,
tree_method: Optional[str] = None,
n_jobs: Optional[int] = None,
gamma: Optional[float] = None,
min_child_weight: Optional[float] = None,
max_delta_step: Optional[float] = None,
subsample: Optional[float] = None,
sampling_method: Optional[str] = None,
colsample_bytree: Optional[float] = None,
colsample_bylevel: Optional[float] = None,
colsample_bynode: Optional[float] = None,
reg_alpha: Optional[float] = None,
reg_lambda: Optional[float] = None,
scale_pos_weight: Optional[float] = None,
base_score: Optional[float] = None,
random_state: Optional[Union[np.random.RandomState, int]] = None,
missing: float = np.nan,
num_parallel_tree: Optional[int] = None,
monotone_constraints: Optional[Union[Dict[str, int], str]] = None,
interaction_constraints: Optional[Union[str, Sequence[Sequence[str]]]] = None,
importance_type: Optional[str] = None,
device: Optional[str] = None,
validate_parameters: Optional[bool] = None,
enable_categorical: bool = False,
feature_types: Optional[FeatureTypes] = None,
max_cat_to_onehot: Optional[int] = None,
max_cat_threshold: Optional[int] = None,
multi_strategy: Optional[str] = None,
eval_metric: Optional[Union[str, List[str], Callable]] = None,
early_stopping_rounds: Optional[int] = None,
callbacks: Optional[List[TrainingCallback]] = None,
**kwargs: Any,
) -> None:
if not SKLEARN_INSTALLED:
raise ImportError(
"sklearn needs to be installed in order to use this module"
)
self.n_estimators = n_estimators
self.objective = objective
self.max_depth = max_depth
self.max_leaves = max_leaves
self.max_bin = max_bin
self.grow_policy = grow_policy
self.learning_rate = learning_rate
self.verbosity = verbosity
self.booster = booster
self.tree_method = tree_method
self.gamma = gamma
self.min_child_weight = min_child_weight
self.max_delta_step = max_delta_step
self.subsample = subsample
self.sampling_method = sampling_method
self.colsample_bytree = colsample_bytree
self.colsample_bylevel = colsample_bylevel
self.colsample_bynode = colsample_bynode
self.reg_alpha = reg_alpha
self.reg_lambda = reg_lambda
self.scale_pos_weight = scale_pos_weight
self.base_score = base_score
self.missing = missing
self.num_parallel_tree = num_parallel_tree
self.random_state = random_state
self.n_jobs = n_jobs
self.monotone_constraints = monotone_constraints
self.interaction_constraints = interaction_constraints
self.importance_type = importance_type
self.device = device
self.validate_parameters = validate_parameters
self.enable_categorical = enable_categorical
self.feature_types = feature_types
self.max_cat_to_onehot = max_cat_to_onehot
self.max_cat_threshold = max_cat_threshold
self.multi_strategy = multi_strategy
self.eval_metric = eval_metric
self.early_stopping_rounds = early_stopping_rounds
self.callbacks = callbacks
if kwargs:
self.kwargs = kwargs
def _more_tags(self) -> Dict[str, bool]:
"""Tags used for scikit-learn data validation."""
return {"allow_nan": True, "no_validation": True}
def __sklearn_is_fitted__(self) -> bool:
return hasattr(self, "_Booster")
def get_booster(self) -> Booster:
"""Get the underlying xgboost Booster of this model.
This will raise an exception when fit was not called
Returns
-------
booster : a xgboost booster of underlying model
"""
if not self.__sklearn_is_fitted__():
from sklearn.exceptions import NotFittedError
raise NotFittedError("need to call fit or load_model beforehand")
return self._Booster
def set_params(self, **params: Any) -> "XGBModel":
"""Set the parameters of this estimator. Modification of the sklearn method to
allow unknown kwargs. This allows using the full range of xgboost
parameters that are not defined as member variables in sklearn grid
search.
Returns
-------
self
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
# this concatenates kwargs into parameters, enabling `get_params` for
# obtaining parameters from keyword parameters.
for key, value in params.items():
if hasattr(self, key):
setattr(self, key, value)
else:
if not hasattr(self, "kwargs"):
self.kwargs = {}
self.kwargs[key] = value
if self.__sklearn_is_fitted__():
parameters = self.get_xgb_params()
self.get_booster().set_param(parameters)
return self
def get_params(self, deep: bool = True) -> Dict[str, Any]:
# pylint: disable=attribute-defined-outside-init
"""Get parameters."""
# Based on: https://stackoverflow.com/questions/59248211
# The basic flow in `get_params` is:
# 0. Return parameters in subclass first, by using inspect.
# 1. Return parameters in `XGBModel` (the base class).
# 2. Return whatever in `**kwargs`.
# 3. Merge them.
params = super().get_params(deep)
cp = copy.copy(self)
cp.__class__ = cp.__class__.__bases__[0]
params.update(cp.__class__.get_params(cp, deep))
# if kwargs is a dict, update params accordingly
if hasattr(self, "kwargs") and isinstance(self.kwargs, dict):
params.update(self.kwargs)
if isinstance(params["random_state"], np.random.RandomState):
params["random_state"] = params["random_state"].randint(
np.iinfo(np.int32).max
)
return params
def get_xgb_params(self) -> Dict[str, Any]:
"""Get xgboost specific parameters."""
params: Dict[str, Any] = self.get_params()
# Parameters that should not go into native learner.
wrapper_specific = {
"importance_type",
"kwargs",
"missing",
"n_estimators",
"use_label_encoder",
"enable_categorical",
"early_stopping_rounds",
"callbacks",
"feature_types",
}
filtered = {}
for k, v in params.items():
if k not in wrapper_specific and not callable(v):
filtered[k] = v
return filtered
def get_num_boosting_rounds(self) -> int:
"""Gets the number of xgboost boosting rounds."""
return DEFAULT_N_ESTIMATORS if self.n_estimators is None else self.n_estimators
def _get_type(self) -> str:
if not hasattr(self, "_estimator_type"):
raise TypeError(
"`_estimator_type` undefined. "
"Please use appropriate mixin to define estimator type."
)
return self._estimator_type # pylint: disable=no-member
def save_model(self, fname: Union[str, os.PathLike]) -> None:
meta: Dict[str, Any] = {}
# For validation.
meta["_estimator_type"] = self._get_type()
meta_str = json.dumps(meta)
self.get_booster().set_attr(scikit_learn=meta_str)
self.get_booster().save_model(fname)
self.get_booster().set_attr(scikit_learn=None)
save_model.__doc__ = f"""{Booster.save_model.__doc__}"""
def load_model(self, fname: ModelIn) -> None:
# pylint: disable=attribute-defined-outside-init
if not self.__sklearn_is_fitted__():
self._Booster = Booster({"n_jobs": self.n_jobs})
self.get_booster().load_model(fname)
meta_str = self.get_booster().attr("scikit_learn")
if meta_str is None:
return
meta = json.loads(meta_str)
t = meta.get("_estimator_type", None)
if t is not None and t != self._get_type():
raise TypeError(
"Loading an estimator with different type. Expecting: "
f"{self._get_type()}, got: {t}"
)
self.feature_types = self.get_booster().feature_types
self.get_booster().set_attr(scikit_learn=None)
load_model.__doc__ = f"""{Booster.load_model.__doc__}"""
# pylint: disable=too-many-branches
def _configure_fit(
self,
booster: Optional[Union[Booster, "XGBModel", str]],
eval_metric: Optional[Union[Callable, str, Sequence[str]]],
params: Dict[str, Any],
early_stopping_rounds: Optional[int],
callbacks: Optional[Sequence[TrainingCallback]],
) -> Tuple[
Optional[Union[Booster, str, "XGBModel"]],
Optional[Metric],
Dict[str, Any],
Optional[int],
Optional[Sequence[TrainingCallback]],
]:
"""Configure parameters for :py:meth:`fit`."""
if isinstance(booster, XGBModel):
model: Optional[Union[Booster, str]] = booster.get_booster()
else:
model = booster
def _deprecated(parameter: str) -> None:
warnings.warn(
f"`{parameter}` in `fit` method is deprecated for better compatibility "
f"with scikit-learn, use `{parameter}` in constructor or`set_params` "
"instead.",
UserWarning,
)
def _duplicated(parameter: str) -> None:
raise ValueError(
f"2 different `{parameter}` are provided. Use the one in constructor "
"or `set_params` instead."
)
# Configure evaluation metric.
if eval_metric is not None:
_deprecated("eval_metric")
if self.eval_metric is not None and eval_metric is not None:
_duplicated("eval_metric")
# - track where the evaluation metric comes from
if self.eval_metric is not None:
from_fit = False
eval_metric = self.eval_metric
else:
from_fit = True
# - configure callable evaluation metric
metric: Optional[Metric] = None
if eval_metric is not None:
if callable(eval_metric) and from_fit:
# No need to wrap the evaluation function for old parameter.
metric = eval_metric
elif callable(eval_metric):
# Parameter from constructor or set_params
if self._get_type() == "ranker":
metric = ltr_metric_decorator(eval_metric, self.n_jobs)
else:
metric = _metric_decorator(eval_metric)
else:
params.update({"eval_metric": eval_metric})
# Configure early_stopping_rounds
if early_stopping_rounds is not None:
_deprecated("early_stopping_rounds")
if early_stopping_rounds is not None and self.early_stopping_rounds is not None:
_duplicated("early_stopping_rounds")
early_stopping_rounds = (
self.early_stopping_rounds
if self.early_stopping_rounds is not None
else early_stopping_rounds
)
# Configure callbacks
if callbacks is not None:
_deprecated("callbacks")
if callbacks is not None and self.callbacks is not None:
_duplicated("callbacks")
callbacks = self.callbacks if self.callbacks is not None else callbacks
tree_method = params.get("tree_method", None)
if self.enable_categorical and tree_method == "exact":
raise ValueError(
"Experimental support for categorical data is not implemented for"
" current tree method yet."
)
return model, metric, params, early_stopping_rounds, callbacks
def _create_dmatrix(self, ref: Optional[DMatrix], **kwargs: Any) -> DMatrix:
# Use `QuantileDMatrix` to save memory.
if _can_use_qdm(self.tree_method) and self.booster != "gblinear":
try:
return QuantileDMatrix(
**kwargs, ref=ref, nthread=self.n_jobs, max_bin=self.max_bin
)
except TypeError:  # `QuantileDMatrix` supports fewer input types than `DMatrix`
pass
return DMatrix(**kwargs, nthread=self.n_jobs)
def _set_evaluation_result(self, evals_result: TrainingCallback.EvalsLog) -> None:
if evals_result:
self.evals_result_ = cast(Dict[str, Dict[str, List[float]]], evals_result)
@_deprecate_positional_args
def fit(
self,
X: ArrayLike,
y: ArrayLike,
*,
sample_weight: Optional[ArrayLike] = None,
base_margin: Optional[ArrayLike] = None,
eval_set: Optional[Sequence[Tuple[ArrayLike, ArrayLike]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Optional[Union[bool, int]] = True,
xgb_model: Optional[Union[Booster, str, "XGBModel"]] = None,
sample_weight_eval_set: Optional[Sequence[ArrayLike]] = None,
base_margin_eval_set: Optional[Sequence[ArrayLike]] = None,
feature_weights: Optional[ArrayLike] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "XGBModel":
# pylint: disable=invalid-name,attribute-defined-outside-init
"""Fit gradient boosting model.
Note that calling ``fit()`` multiple times will cause the model object to be
re-fit from scratch. To resume training from a previous checkpoint, explicitly
pass ``xgb_model`` argument.
Parameters
----------
X :
Feature matrix. See :ref:`py-data` for a list of supported types.
When the ``tree_method`` is set to ``hist`` or ``gpu_hist``, internally, the
:py:class:`QuantileDMatrix` will be used instead of the :py:class:`DMatrix`
for conserving memory. However, this has performance implications when the
device of input data is not matched with algorithm. For instance, if the
input is a numpy array on CPU but ``gpu_hist`` is used for training, then
the data is first processed on CPU then transferred to GPU.
y :
Labels
sample_weight :
instance weights
base_margin :
global bias for each instance.
eval_set :
A list of (X, y) tuple pairs to use as validation sets, for which
metrics will be computed.
Validation metrics will help us track the performance of the model.
eval_metric : str, list of str, or callable, optional
.. deprecated:: 1.6.0
Use `eval_metric` in :py:meth:`__init__` or :py:meth:`set_params` instead.
early_stopping_rounds : int
.. deprecated:: 1.6.0
Use `early_stopping_rounds` in :py:meth:`__init__` or :py:meth:`set_params`
instead.
verbose :
If `verbose` is True and an evaluation set is used, the evaluation metric
measured on the validation set is printed to stdout at each boosting stage.
If `verbose` is an integer, the evaluation metric is printed at each
`verbose` boosting stage. The last boosting stage / the boosting stage found
by using `early_stopping_rounds` is also printed.
xgb_model :
file name of stored XGBoost model or 'Booster' instance XGBoost model to be
loaded before training (allows training continuation).
sample_weight_eval_set :
A list of the form [L_1, L_2, ..., L_n], where each L_i is an array like
object storing instance weights for the i-th validation set.
base_margin_eval_set :
A list of the form [M_1, M_2, ..., M_n], where each M_i is an array like
object storing base margin for the i-th validation set.
feature_weights :
Weight for each feature, defines the probability of each feature being
selected when colsample is being used. All values must be greater than 0,
otherwise a `ValueError` is thrown.
callbacks :
.. deprecated:: 1.6.0
Use `callbacks` in :py:meth:`__init__` or :py:meth:`set_params` instead.
"""
with config_context(verbosity=self.verbosity):
evals_result: TrainingCallback.EvalsLog = {}
train_dmatrix, evals = _wrap_evaluation_matrices(
missing=self.missing,
X=X,
y=y,
group=None,
qid=None,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=None,
create_dmatrix=self._create_dmatrix,
enable_categorical=self.enable_categorical,
feature_types=self.feature_types,
)
params = self.get_xgb_params()
if callable(self.objective):
obj: Optional[Objective] = _objective_decorator(self.objective)
params["objective"] = "reg:squarederror"
else:
obj = None
(
model,
metric,
params,
early_stopping_rounds,
callbacks,
) = self._configure_fit(
xgb_model, eval_metric, params, early_stopping_rounds, callbacks
)
self._Booster = train(
params,
train_dmatrix,
self.get_num_boosting_rounds(),
evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result,
obj=obj,
custom_metric=metric,
verbose_eval=verbose,
xgb_model=model,
callbacks=callbacks,
)
self._set_evaluation_result(evals_result)
return self
def _can_use_inplace_predict(self) -> bool:
if self.booster != "gblinear":
return True
return False
def _get_iteration_range(
self, iteration_range: Optional[Tuple[int, int]]
) -> Tuple[int, int]:
if iteration_range is None or iteration_range[1] == 0:
# Use best_iteration if defined.
try:
iteration_range = (0, self.best_iteration + 1)
except AttributeError:
iteration_range = (0, 0)
if self.booster == "gblinear":
iteration_range = (0, 0)
return iteration_range
def predict(
self,
X: ArrayLike,
output_margin: bool = False,
validate_features: bool = True,
base_margin: Optional[ArrayLike] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> ArrayLike:
"""Predict with `X`. If the model is trained with early stopping, then
:py:attr:`best_iteration` is used automatically. The estimator uses
`inplace_predict` by default and falls back to using :py:class:`DMatrix` if
devices between the data and the estimator don't match.
.. note:: This function is only thread safe for `gbtree` and `dart`.
Parameters
----------
X :
Data to predict with.
output_margin :
Whether to output the raw untransformed margin value.
validate_features :
When this is True, validate that the Booster's and data's feature_names are
identical. Otherwise, it is assumed that the feature_names are the same.
base_margin :
Margin added to prediction.
iteration_range :
Specifies which layer of trees are used in prediction. For example, if a
random forest is trained with 100 rounds. Specifying ``iteration_range=(10,
20)``, then only the forests built during [10, 20) (half open set) rounds
are used in this prediction.
.. versionadded:: 1.4.0
Returns
-------
prediction
"""
with config_context(verbosity=self.verbosity):
iteration_range = self._get_iteration_range(iteration_range)
if self._can_use_inplace_predict():
try:
predts = self.get_booster().inplace_predict(
data=X,
iteration_range=iteration_range,
predict_type="margin" if output_margin else "value",
missing=self.missing,
base_margin=base_margin,
validate_features=validate_features,
)
if _is_cupy_array(predts):
import cupy # pylint: disable=import-error
predts = cupy.asnumpy(predts) # ensure numpy array is used.
return predts
except TypeError:
# coo, csc, dt
pass
test = DMatrix(
X,
base_margin=base_margin,
missing=self.missing,
nthread=self.n_jobs,
feature_types=self.feature_types,
enable_categorical=self.enable_categorical,
)
return self.get_booster().predict(
data=test,
iteration_range=iteration_range,
output_margin=output_margin,
validate_features=validate_features,
)
def apply(
self,
X: ArrayLike,
iteration_range: Optional[Tuple[int, int]] = None,
) -> np.ndarray:
"""Return the predicted leaf every tree for each sample. If the model is trained
with early stopping, then :py:attr:`best_iteration` is used automatically.
Parameters
----------
X : array_like, shape=[n_samples, n_features]
Input features matrix.
iteration_range :
See :py:meth:`predict`.
Returns
-------
X_leaves : array_like, shape=[n_samples, n_trees]
For each datapoint x in X and for each tree, return the index of the
leaf x ends up in. Leaves are numbered within
``[0; 2**(self.max_depth+1))``, possibly with gaps in the numbering.
"""
with config_context(verbosity=self.verbosity):
iteration_range = self._get_iteration_range(iteration_range)
test_dmatrix = DMatrix(
X,
missing=self.missing,
feature_types=self.feature_types,
nthread=self.n_jobs,
)
return self.get_booster().predict(
test_dmatrix, pred_leaf=True, iteration_range=iteration_range
)
def evals_result(self) -> Dict[str, Dict[str, List[float]]]:
"""Return the evaluation results.
If **eval_set** is passed to the :py:meth:`fit` function, you can call
``evals_result()`` to get evaluation results for all passed **eval_sets**. When
**eval_metric** is also passed to the :py:meth:`fit` function, the
**evals_result** will contain the **eval_metrics** passed to the :py:meth:`fit`
function.
The returned evaluation result is a dictionary:
.. code-block:: python
{'validation_0': {'logloss': ['0.604835', '0.531479']},
'validation_1': {'logloss': ['0.41965', '0.17686']}}
Returns
-------
evals_result
"""
if getattr(self, "evals_result_", None) is not None:
evals_result = self.evals_result_
else:
raise XGBoostError(
"No evaluation result, `eval_set` is not used during training."
)
return evals_result
@property
def n_features_in_(self) -> int:
"""Number of features seen during :py:meth:`fit`."""
booster = self.get_booster()
return booster.num_features()
@property
def feature_names_in_(self) -> np.ndarray:
"""Names of features seen during :py:meth:`fit`. Defined only when `X` has
feature names that are all strings.
"""
feature_names = self.get_booster().feature_names
if feature_names is None:
raise AttributeError(
"`feature_names_in_` is defined only when `X` has feature names that "
"are all strings."
)
return np.array(feature_names)
def _early_stopping_attr(self, attr: str) -> Union[float, int]:
booster = self.get_booster()
try:
return getattr(booster, attr)
except AttributeError as e:
raise AttributeError(
f"`{attr}` in only defined when early stopping is used."
) from e
@property
def best_score(self) -> float:
"""The best score obtained by early stopping."""
return float(self._early_stopping_attr("best_score"))
@property
def best_iteration(self) -> int:
"""The best iteration obtained by early stopping. This attribute is 0-based,
for instance if the best iteration is the first round, then best_iteration is 0.
"""
return int(self._early_stopping_attr("best_iteration"))
@property
def feature_importances_(self) -> np.ndarray:
"""Feature importances property, return depends on `importance_type`
parameter. When model trained with multi-class/multi-label/multi-target dataset,
the feature importance is "averaged" over all targets. The "average" is defined
based on the importance type. For instance, if the importance type is
"total_gain", then the score is sum of loss change for each split from all
trees.
Returns
-------
feature_importances_ : array of shape ``[n_features]`` except for multi-class
linear model, which returns an array with shape `(n_features, n_classes)`
"""
b: Booster = self.get_booster()
def dft() -> str:
return "weight" if self.booster == "gblinear" else "gain"
score = b.get_score(
importance_type=self.importance_type if self.importance_type else dft()
)
if b.feature_names is None:
feature_names: FeatureNames = [f"f{i}" for i in range(self.n_features_in_)]
else:
feature_names = b.feature_names
# gblinear returns all features, so the `get` in the next line is only needed for gbtree.
all_features = [score.get(f, 0.0) for f in feature_names]
all_features_arr = np.array(all_features, dtype=np.float32)
total = all_features_arr.sum()
if total == 0:
return all_features_arr
return all_features_arr / total
@property
def coef_(self) -> np.ndarray:
"""
Coefficients property
.. note:: Coefficients are defined only for linear learners
Coefficients are only defined when the linear model is chosen as
base learner (`booster=gblinear`). It is not defined for other base
learner types, such as tree learners (`booster=gbtree`).
Returns
-------
coef_ : array of shape ``[n_features]`` or ``[n_classes, n_features]``
"""
if self.get_xgb_params()["booster"] != "gblinear":
raise AttributeError(
f"Coefficients are not defined for Booster type {self.booster}"
)
b = self.get_booster()
coef = np.array(json.loads(b.get_dump(dump_format="json")[0])["weight"])
# Logic for multiclass classification
n_classes = getattr(self, "n_classes_", None)
if n_classes is not None:
if n_classes > 2:
assert len(coef.shape) == 1
assert coef.shape[0] % n_classes == 0
coef = coef.reshape((n_classes, -1))
return coef
@property
def intercept_(self) -> np.ndarray:
"""
Intercept (bias) property
.. note:: Intercept is defined only for linear learners
Intercept (bias) is only defined when the linear model is chosen as base
learner (`booster=gblinear`). It is not defined for other base learner types,
such as tree learners (`booster=gbtree`).
Returns
-------
intercept_ : array of shape ``(1,)`` or ``[n_classes]``
"""
if self.get_xgb_params()["booster"] != "gblinear":
raise AttributeError(
f"Intercept (bias) is not defined for Booster type {self.booster}"
)
b = self.get_booster()
return np.array(json.loads(b.get_dump(dump_format="json")[0])["bias"])
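# Illustrative sketch (not part of the library API; the helper name and the
# synthetic data are assumptions for this example) tying together the
# `XGBModel` pieces above: early stopping configured in the constructor,
# `fit` with an evaluation set, and `predict` honouring `best_iteration`.
def _demo_early_stopping_workflow() -> None:
    rng = np.random.default_rng(0)
    X, y = rng.random((512, 8)), rng.random(512)
    reg = XGBRegressor(n_estimators=100, eval_metric="rmse", early_stopping_rounds=5)
    reg.fit(X[:400], y[:400], eval_set=[(X[400:], y[400:])], verbose=False)
    # `predict` applies `best_iteration` automatically, so an explicit
    # `iteration_range` covering the best rounds yields the same output.
    explicit = reg.predict(X, iteration_range=(0, reg.best_iteration + 1))
    np.testing.assert_allclose(reg.predict(X), explicit)
    # One metric entry per boosting round that actually ran.
    assert len(reg.evals_result()["validation_0"]["rmse"]) >= reg.best_iteration + 1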
PredtT = TypeVar("PredtT", bound=np.ndarray)
def _cls_predict_proba(n_classes: int, prediction: PredtT, vstack: Callable) -> PredtT:
assert len(prediction.shape) <= 2
if len(prediction.shape) == 2 and prediction.shape[1] == n_classes:
# multi-class
return prediction
if (
len(prediction.shape) == 2
and n_classes == 2
and prediction.shape[1] >= n_classes
):
# multi-label
return prediction
# binary logistic function
classone_probs = prediction
classzero_probs = 1.0 - classone_probs
return vstack((classzero_probs, classone_probs)).transpose()
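# A tiny worked example (illustration only) of the binary branch above: a
# vector of P(class 1) is expanded into a two-column probability matrix.
def _demo_cls_predict_proba() -> None:
    p1 = np.array([0.2, 0.9], dtype=np.float32)
    proba = _cls_predict_proba(2, p1, np.vstack)
    np.testing.assert_allclose(proba, [[0.8, 0.2], [0.1, 0.9]], rtol=1e-6)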
@xgboost_model_doc(
"Implementation of the scikit-learn API for XGBoost classification.",
["model", "objective"],
extra_parameters="""
n_estimators : Optional[int]
Number of boosting rounds.
""",
)
class XGBClassifier(XGBModel, XGBClassifierMixIn, XGBClassifierBase):
# pylint: disable=missing-docstring,invalid-name,too-many-instance-attributes
@_deprecate_positional_args
def __init__(
self,
*,
objective: SklObjective = "binary:logistic",
**kwargs: Any,
) -> None:
super().__init__(objective=objective, **kwargs)
@_deprecate_positional_args
def fit(
self,
X: ArrayLike,
y: ArrayLike,
*,
sample_weight: Optional[ArrayLike] = None,
base_margin: Optional[ArrayLike] = None,
eval_set: Optional[Sequence[Tuple[ArrayLike, ArrayLike]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Optional[Union[bool, int]] = True,
xgb_model: Optional[Union[Booster, str, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[ArrayLike]] = None,
base_margin_eval_set: Optional[Sequence[ArrayLike]] = None,
feature_weights: Optional[ArrayLike] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "XGBClassifier":
# pylint: disable = attribute-defined-outside-init,too-many-statements
with config_context(verbosity=self.verbosity):
evals_result: TrainingCallback.EvalsLog = {}
# We keep the n_classes_ as a simple member instead of loading it from
# booster in a Python property. This way we can have efficient and
# thread-safe prediction.
if _is_cudf_df(y) or _is_cudf_ser(y):
import cupy as cp # pylint: disable=E0401
classes = cp.unique(y.values)
self.n_classes_ = len(classes)
expected_classes = cp.array(self.classes_)
elif _is_cupy_array(y):
import cupy as cp # pylint: disable=E0401
classes = cp.unique(y)
self.n_classes_ = len(classes)
expected_classes = cp.array(self.classes_)
else:
classes = np.unique(np.asarray(y))
self.n_classes_ = len(classes)
expected_classes = self.classes_
if (
classes.shape != expected_classes.shape
or not (classes == expected_classes).all()
):
raise ValueError(
f"Invalid classes inferred from unique values of `y`. "
f"Expected: {expected_classes}, got {classes}"
)
params = self.get_xgb_params()
if callable(self.objective):
obj: Optional[Objective] = _objective_decorator(self.objective)
# Use the default value as a stand-in; the gradients come from the custom callable.
params["objective"] = "binary:logistic"
else:
obj = None
if self.n_classes_ > 2:
# Switch to using a multiclass objective in the underlying XGB instance
if params.get("objective", None) != "multi:softmax":
params["objective"] = "multi:softprob"
params["num_class"] = self.n_classes_
(
model,
metric,
params,
early_stopping_rounds,
callbacks,
) = self._configure_fit(
xgb_model, eval_metric, params, early_stopping_rounds, callbacks
)
train_dmatrix, evals = _wrap_evaluation_matrices(
missing=self.missing,
X=X,
y=y,
group=None,
qid=None,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=None,
eval_qid=None,
create_dmatrix=self._create_dmatrix,
enable_categorical=self.enable_categorical,
feature_types=self.feature_types,
)
self._Booster = train(
params,
train_dmatrix,
self.get_num_boosting_rounds(),
evals=evals,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result,
obj=obj,
custom_metric=metric,
verbose_eval=verbose,
xgb_model=model,
callbacks=callbacks,
)
if not callable(self.objective):
self.objective = params["objective"]
self._set_evaluation_result(evals_result)
return self
assert XGBModel.fit.__doc__ is not None
fit.__doc__ = XGBModel.fit.__doc__.replace(
"Fit gradient boosting model", "Fit gradient boosting classifier", 1
)
def predict(
self,
X: ArrayLike,
output_margin: bool = False,
validate_features: bool = True,
base_margin: Optional[ArrayLike] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> ArrayLike:
with config_context(verbosity=self.verbosity):
class_probs = super().predict(
X=X,
output_margin=output_margin,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range,
)
if output_margin:
# If output_margin is active, simply return the scores
return class_probs
if len(class_probs.shape) > 1 and self.n_classes_ != 2:
# multi-class: turn the softprob output into softmax-style class labels
column_indexes: np.ndarray = np.argmax(class_probs, axis=1)
elif len(class_probs.shape) > 1 and class_probs.shape[1] != 1:
# multi-label
column_indexes = np.zeros(class_probs.shape)
column_indexes[class_probs > 0.5] = 1
elif self.objective == "multi:softmax":
return class_probs.astype(np.int32)
else:
# turns soft logit into class label
column_indexes = np.repeat(0, class_probs.shape[0])
column_indexes[class_probs > 0.5] = 1
return column_indexes
def predict_proba(
self,
X: ArrayLike,
validate_features: bool = True,
base_margin: Optional[ArrayLike] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> np.ndarray:
"""Predict the probability of each `X` example being of a given class. If the
model is trained with early stopping, then :py:attr:`best_iteration` is used
automatically. The estimator uses `inplace_predict` by default and falls back to
using :py:class:`DMatrix` if devices between the data and the estimator don't
match.
.. note:: This function is only thread safe for `gbtree` and `dart`.
Parameters
----------
X :
Feature matrix. See :ref:`py-data` for a list of supported types.
validate_features :
When this is True, validate that the Booster's and data's feature_names are
identical. Otherwise, it is assumed that the feature_names are the same.
base_margin :
Margin added to prediction.
iteration_range :
Specifies which layer of trees are used in prediction. For example, if a
random forest is trained with 100 rounds. Specifying `iteration_range=(10,
20)`, then only the forests built during [10, 20) (half open set) rounds are
used in this prediction.
Returns
-------
prediction :
a numpy array of shape (n_samples, n_classes) with the probability of
each data example being of a given class.
"""
# custom obj: Do nothing as we don't know what to do.
# softprob: Do nothing, output is proba.
# softmax: Use softmax from scipy
# binary:logistic: Expand the prob vector into 2-class matrix after predict.
# binary:logitraw: Unsupported by predict_proba()
if self.objective == "multi:softmax":
raw_predt = super().predict(
X=X,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range,
output_margin=True,
)
class_prob = softmax(raw_predt, axis=1)
return class_prob
class_probs = super().predict(
X=X,
validate_features=validate_features,
base_margin=base_margin,
iteration_range=iteration_range,
)
return _cls_predict_proba(self.n_classes_, class_probs, np.vstack)
@property
def classes_(self) -> np.ndarray:
return np.arange(self.n_classes_)
def load_model(self, fname: ModelIn) -> None:
super().load_model(fname)
self._load_model_attributes(self.get_booster())
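# Hedged usage sketch for `XGBClassifier` (demo-only helper, synthetic data):
# with three classes the wrapper switches the native objective to
# `multi:softprob`, `predict_proba` returns one column per class, and
# `predict` reduces the probabilities to class labels.
def _demo_classifier() -> None:
    rng = np.random.default_rng(0)
    X, y = rng.random((300, 4)), rng.integers(0, 3, 300)
    clf = XGBClassifier(n_estimators=4).fit(X, y)
    assert clf.n_classes_ == 3
    assert clf.predict_proba(X).shape == (300, 3)
    assert set(np.unique(clf.predict(X))) <= {0, 1, 2}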
@xgboost_model_doc(
"scikit-learn API for XGBoost random forest classification.",
["model", "objective"],
extra_parameters="""
n_estimators : Optional[int]
Number of trees in random forest to fit.
""",
)
class XGBRFClassifier(XGBClassifier):
# pylint: disable=missing-docstring
@_deprecate_positional_args
def __init__(
self,
*,
learning_rate: float = 1.0,
subsample: float = 0.8,
colsample_bynode: float = 0.8,
reg_lambda: float = 1e-5,
**kwargs: Any,
):
super().__init__(
learning_rate=learning_rate,
subsample=subsample,
colsample_bynode=colsample_bynode,
reg_lambda=reg_lambda,
**kwargs,
)
_check_rf_callback(self.early_stopping_rounds, self.callbacks)
def get_xgb_params(self) -> Dict[str, Any]:
params = super().get_xgb_params()
params["num_parallel_tree"] = super().get_num_boosting_rounds()
return params
def get_num_boosting_rounds(self) -> int:
return 1
# pylint: disable=unused-argument
@_deprecate_positional_args
def fit(
self,
X: ArrayLike,
y: ArrayLike,
*,
sample_weight: Optional[ArrayLike] = None,
base_margin: Optional[ArrayLike] = None,
eval_set: Optional[Sequence[Tuple[ArrayLike, ArrayLike]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Optional[Union[bool, int]] = True,
xgb_model: Optional[Union[Booster, str, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[ArrayLike]] = None,
base_margin_eval_set: Optional[Sequence[ArrayLike]] = None,
feature_weights: Optional[ArrayLike] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "XGBRFClassifier":
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
_check_rf_callback(early_stopping_rounds, callbacks)
super().fit(**args)
return self
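# Demo-only sketch of the random-forest mapping above: `n_estimators` is
# forwarded as `num_parallel_tree` while exactly one boosting round runs.
def _demo_rf_classifier() -> None:
    rng = np.random.default_rng(0)
    X, y = rng.random((300, 4)), rng.integers(0, 2, 300)
    rf = XGBRFClassifier(n_estimators=10).fit(X, y)
    assert rf.get_xgb_params()["num_parallel_tree"] == 10
    assert rf.get_num_boosting_rounds() == 1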
@xgboost_model_doc(
"Implementation of the scikit-learn API for XGBoost regression.",
["estimators", "model", "objective"],
)
class XGBRegressor(XGBModel, XGBRegressorBase):
# pylint: disable=missing-docstring
@_deprecate_positional_args
def __init__(
self, *, objective: SklObjective = "reg:squarederror", **kwargs: Any
) -> None:
super().__init__(objective=objective, **kwargs)
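# Demo-only sketch of `XGBModel.set_params` with a native parameter: `eta`
# is not an sklearn member variable, so it is kept in `kwargs`, yet it still
# round-trips through `get_params`.
def _demo_set_params() -> None:
    reg = XGBRegressor(n_estimators=10)
    reg.set_params(eta=0.3)
    assert reg.get_params()["eta"] == 0.3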
@xgboost_model_doc(
"scikit-learn API for XGBoost random forest regression.",
["model", "objective"],
extra_parameters="""
n_estimators : Optional[int]
Number of trees in random forest to fit.
""",
)
class XGBRFRegressor(XGBRegressor):
# pylint: disable=missing-docstring
@_deprecate_positional_args
def __init__(
self,
*,
learning_rate: float = 1.0,
subsample: float = 0.8,
colsample_bynode: float = 0.8,
reg_lambda: float = 1e-5,
**kwargs: Any,
) -> None:
super().__init__(
learning_rate=learning_rate,
subsample=subsample,
colsample_bynode=colsample_bynode,
reg_lambda=reg_lambda,
**kwargs,
)
_check_rf_callback(self.early_stopping_rounds, self.callbacks)
def get_xgb_params(self) -> Dict[str, Any]:
params = super().get_xgb_params()
params["num_parallel_tree"] = super().get_num_boosting_rounds()
return params
def get_num_boosting_rounds(self) -> int:
return 1
# pylint: disable=unused-argument
@_deprecate_positional_args
def fit(
self,
X: ArrayLike,
y: ArrayLike,
*,
sample_weight: Optional[ArrayLike] = None,
base_margin: Optional[ArrayLike] = None,
eval_set: Optional[Sequence[Tuple[ArrayLike, ArrayLike]]] = None,
eval_metric: Optional[Union[str, Sequence[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Optional[Union[bool, int]] = True,
xgb_model: Optional[Union[Booster, str, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[ArrayLike]] = None,
base_margin_eval_set: Optional[Sequence[ArrayLike]] = None,
feature_weights: Optional[ArrayLike] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "XGBRFRegressor":
args = {k: v for k, v in locals().items() if k not in ("self", "__class__")}
_check_rf_callback(early_stopping_rounds, callbacks)
super().fit(**args)
return self
def _get_qid(
X: ArrayLike, qid: Optional[ArrayLike]
) -> Tuple[ArrayLike, Optional[ArrayLike]]:
"""Get the special qid column from X if exists."""
if (_is_pandas_df(X) or _is_cudf_df(X)) and hasattr(X, "qid"):
if qid is not None:
raise ValueError(
"Found both the special column `qid` in `X` and the `qid` from the"
"`fit` method. Please remove one of them."
)
q_x = X.qid
X = X.drop("qid", axis=1)
return X, q_x
return X, qid
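# Small demo-only sketch of the special `qid` column convention implemented
# above (assumes pandas is available at call time).
def _demo_qid_column() -> None:
    import pandas as pd

    df = pd.DataFrame({"qid": [1, 1, 2, 2], "f0": [0.1, 0.2, 0.3, 0.4]})
    X, qid = _get_qid(df, None)
    assert "qid" not in X.columns
    assert (qid == df["qid"]).all()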
@xgboost_model_doc(
"""Implementation of the Scikit-Learn API for XGBoost Ranking.
See :doc:`Learning to Rank </tutorials/learning_to_rank>` for an introduction.
""",
["estimators", "model"],
end_note="""
.. note::
A custom objective function is currently not supported by XGBRanker.
.. note::
Query group information is only required for ranking training but not
prediction. Multiple groups can be predicted on a single call to
:py:meth:`predict`.
When fitting the model with the `group` parameter, your data need to be sorted
by the query group first. `group` is an array that contains the size of each
query group.
Similarly, when fitting the model with the `qid` parameter, the data should be
sorted according to query index and `qid` is an array that contains the query
index for each training sample.
For example, if your original data look like:
+-------+-----------+---------------+
| qid | label | features |
+-------+-----------+---------------+
| 1 | 0 | x_1 |
+-------+-----------+---------------+
| 1 | 1 | x_2 |
+-------+-----------+---------------+
| 1 | 0 | x_3 |
+-------+-----------+---------------+
| 2 | 0 | x_4 |
+-------+-----------+---------------+
| 2 | 1 | x_5 |
+-------+-----------+---------------+
| 2 | 1 | x_6 |
+-------+-----------+---------------+
| 2 | 1 | x_7 |
+-------+-----------+---------------+
then :py:meth:`fit` method can be called with either `group` array as ``[3, 4]``
or with `qid` as ``[1, 1, 1, 2, 2, 2, 2]``, that is the qid column. Also, the
`qid` can be a special column of input `X` instead of a separated parameter, see
:py:meth:`fit` for more info.""",
)
class XGBRanker(XGBModel, XGBRankerMixIn):
# pylint: disable=missing-docstring,too-many-arguments,invalid-name
@_deprecate_positional_args
def __init__(self, *, objective: str = "rank:ndcg", **kwargs: Any):
super().__init__(objective=objective, **kwargs)
if callable(self.objective):
raise ValueError("custom objective function not supported by XGBRanker")
if "rank:" not in objective:
raise ValueError("please use XGBRanker for ranking task")
def _create_ltr_dmatrix(
self, ref: Optional[DMatrix], data: ArrayLike, qid: ArrayLike, **kwargs: Any
) -> DMatrix:
data, qid = _get_qid(data, qid)
if kwargs.get("group", None) is None and qid is None:
raise ValueError("Either `group` or `qid` is required for ranking task")
return super()._create_dmatrix(ref=ref, data=data, qid=qid, **kwargs)
@_deprecate_positional_args
def fit(
self,
X: ArrayLike,
y: ArrayLike,
*,
group: Optional[ArrayLike] = None,
qid: Optional[ArrayLike] = None,
sample_weight: Optional[ArrayLike] = None,
base_margin: Optional[ArrayLike] = None,
eval_set: Optional[Sequence[Tuple[ArrayLike, ArrayLike]]] = None,
eval_group: Optional[Sequence[ArrayLike]] = None,
eval_qid: Optional[Sequence[ArrayLike]] = None,
eval_metric: Optional[Union[str, Sequence[str], Metric]] = None,
early_stopping_rounds: Optional[int] = None,
verbose: Optional[Union[bool, int]] = False,
xgb_model: Optional[Union[Booster, str, XGBModel]] = None,
sample_weight_eval_set: Optional[Sequence[ArrayLike]] = None,
base_margin_eval_set: Optional[Sequence[ArrayLike]] = None,
feature_weights: Optional[ArrayLike] = None,
callbacks: Optional[Sequence[TrainingCallback]] = None,
) -> "XGBRanker":
# pylint: disable = attribute-defined-outside-init,arguments-differ
"""Fit gradient boosting ranker
Note that calling ``fit()`` multiple times will cause the model object to be
re-fit from scratch. To resume training from a previous checkpoint, explicitly
pass ``xgb_model`` argument.
Parameters
----------
X :
Feature matrix. See :ref:`py-data` for a list of supported types.
When this is a :py:class:`pandas.DataFrame` or a :py:class:`cudf.DataFrame`,
it may contain a special column called ``qid`` for specifying the query
index. Using a special column is the same as using the `qid` parameter,
except for being compatible with sklearn utility functions like
:py:func:`sklearn.model_selection.cross_validate`. The same convention
applies to the :py:meth:`XGBRanker.score` and :py:meth:`XGBRanker.predict`.
+-----+----------------+----------------+
| qid | feat_0 | feat_1 |
+-----+----------------+----------------+
| 0 | :math:`x_{00}` | :math:`x_{01}` |
+-----+----------------+----------------+
| 1 | :math:`x_{10}` | :math:`x_{11}` |
+-----+----------------+----------------+
| 1 | :math:`x_{20}` | :math:`x_{21}` |
+-----+----------------+----------------+
When the ``tree_method`` is set to ``hist`` or ``gpu_hist``, internally, the
:py:class:`QuantileDMatrix` will be used instead of the :py:class:`DMatrix`
for conserving memory. However, this has performance implications when the
device of input data is not matched with algorithm. For instance, if the
input is a numpy array on CPU but ``gpu_hist`` is used for training, then
the data is first processed on CPU then transferred to GPU.
y :
Labels
group :
Size of each query group of training data. Should have as many elements as
the query groups in the training data. If this is set to None, then the
user must provide `qid`.
qid :
Query ID for each training sample. Should have the size of n_samples. If
this is set to None, then the user must provide `group` or a special
column in `X`.
sample_weight :
Query group weights
.. note:: Weights are per-group for ranking tasks
In ranking task, one weight is assigned to each query group/id (not each
data point). This is because we only care about the relative ordering of
data points within each group, so it doesn't make sense to assign
weights to individual data points.
base_margin :
Global bias for each instance.
eval_set :
A list of (X, y) tuple pairs to use as validation sets, for which
metrics will be computed.
Validation metrics will help us track the performance of the model.
eval_group :
A list in which ``eval_group[i]`` is the list containing the sizes of all
query groups in the ``i``-th pair in **eval_set**.
eval_qid :
A list in which ``eval_qid[i]`` is the array containing query ID of ``i``-th
pair in **eval_set**. The special column convention in `X` applies to
validation datasets as well.
eval_metric : str, list of str, optional
.. deprecated:: 1.6.0
use `eval_metric` in :py:meth:`__init__` or :py:meth:`set_params` instead.
early_stopping_rounds : int
.. deprecated:: 1.6.0
use `early_stopping_rounds` in :py:meth:`__init__` or
:py:meth:`set_params` instead.
verbose :
If `verbose` is True and an evaluation set is used, the evaluation metric
measured on the validation set is printed to stdout at each boosting stage.
If `verbose` is an integer, the evaluation metric is printed at each
`verbose` boosting stage. The last boosting stage / the boosting stage found
by using `early_stopping_rounds` is also printed.
xgb_model :
file name of stored XGBoost model or 'Booster' instance XGBoost model to be
loaded before training (allows training continuation).
sample_weight_eval_set :
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list of
group weights on the i-th validation set.
.. note:: Weights are per-group for ranking tasks
In ranking task, one weight is assigned to each query group (not each
data point). This is because we only care about the relative ordering of
data points within each group, so it doesn't make sense to assign
weights to individual data points.
base_margin_eval_set :
A list of the form [M_1, M_2, ..., M_n], where each M_i is an array like
object storing base margin for the i-th validation set.
feature_weights :
Weight for each feature, defines the probability of each feature being
selected when colsample is being used. All values must be greater than 0,
otherwise a `ValueError` is thrown.
callbacks :
.. deprecated:: 1.6.0
Use `callbacks` in :py:meth:`__init__` or :py:meth:`set_params` instead.
"""
with config_context(verbosity=self.verbosity):
train_dmatrix, evals = _wrap_evaluation_matrices(
missing=self.missing,
X=X,
y=y,
group=group,
qid=qid,
sample_weight=sample_weight,
base_margin=base_margin,
feature_weights=feature_weights,
eval_set=eval_set,
sample_weight_eval_set=sample_weight_eval_set,
base_margin_eval_set=base_margin_eval_set,
eval_group=eval_group,
eval_qid=eval_qid,
create_dmatrix=self._create_ltr_dmatrix,
enable_categorical=self.enable_categorical,
feature_types=self.feature_types,
)
evals_result: TrainingCallback.EvalsLog = {}
params = self.get_xgb_params()
(
model,
metric,
params,
early_stopping_rounds,
callbacks,
) = self._configure_fit(
xgb_model, eval_metric, params, early_stopping_rounds, callbacks
)
self._Booster = train(
params,
train_dmatrix,
num_boost_round=self.get_num_boosting_rounds(),
early_stopping_rounds=early_stopping_rounds,
evals=evals,
evals_result=evals_result,
custom_metric=metric,
verbose_eval=verbose,
xgb_model=model,
callbacks=callbacks,
)
self.objective = params["objective"]
self._set_evaluation_result(evals_result)
return self
def predict(
self,
X: ArrayLike,
output_margin: bool = False,
validate_features: bool = True,
base_margin: Optional[ArrayLike] = None,
iteration_range: Optional[Tuple[int, int]] = None,
) -> ArrayLike:
X, _ = _get_qid(X, None)
return super().predict(
X,
output_margin,
validate_features,
base_margin,
iteration_range=iteration_range,
)
def apply(
self,
X: ArrayLike,
iteration_range: Optional[Tuple[int, int]] = None,
) -> ArrayLike:
X, _ = _get_qid(X, None)
return super().apply(X, iteration_range)
def score(self, X: ArrayLike, y: ArrayLike) -> float:
"""Evaluate score for data using the last evaluation metric. If the model is
trained with early stopping, then :py:attr:`best_iteration` is used
automatically.
Parameters
----------
X : Union[pd.DataFrame, cudf.DataFrame]
Feature matrix. A DataFrame with a special `qid` column.
y :
Labels
Returns
-------
score :
The result of the first evaluation metric for the ranker.
"""
X, qid = _get_qid(X, None)
Xyq = DMatrix(X, y, qid=qid)
if callable(self.eval_metric):
metric = ltr_metric_decorator(self.eval_metric, self.n_jobs)
result_str = self.get_booster().eval_set([(Xyq, "eval")], feval=metric)
else:
result_str = self.get_booster().eval(Xyq)
metric_score = _parse_eval_str(result_str)
return metric_score[-1][1]
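# Hedged end-to-end sketch for `XGBRanker` (demo-only helper, synthetic
# data): training with the `qid` keyword; the equivalent special-column form
# would place `qid` inside a DataFrame `X` instead, as documented in `fit`.
def _demo_ranker() -> None:
    rng = np.random.default_rng(0)
    X, y = rng.random((8, 3)), rng.integers(0, 2, 8)
    qid = np.array([1, 1, 1, 1, 2, 2, 2, 2])  # rows sorted by query id
    ranker = XGBRanker(n_estimators=4)
    ranker.fit(X, y, qid=qid)
    assert ranker.predict(X).shape == (8,)  # one relevance score per row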
| 80,941
| 37.397533
| 90
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/testing/data.py
|
# pylint: disable=invalid-name
"""Utilities for data generation."""
import os
import zipfile
from dataclasses import dataclass
from typing import Any, Generator, List, NamedTuple, Optional, Tuple, Union
from urllib import request
import numpy as np
import pytest
from numpy import typing as npt
from numpy.random import Generator as RNG
from scipy import sparse
import xgboost
from xgboost.data import pandas_pyarrow_mapper
joblib = pytest.importorskip("joblib")
memory = joblib.Memory("./cachedir", verbose=0)
def np_dtypes(
n_samples: int, n_features: int
) -> Generator[Tuple[np.ndarray, np.ndarray], None, None]:
"""Enumerate all supported dtypes from numpy."""
import pandas as pd
rng = np.random.RandomState(1994)
# Integer and float.
orig = rng.randint(low=0, high=127, size=n_samples * n_features).reshape(
n_samples, n_features
)
dtypes = [
np.int32,
np.int64,
np.byte,
np.short,
np.intc,
np.int_,
np.longlong,
np.uint32,
np.uint64,
np.ubyte,
np.ushort,
np.uintc,
np.uint,
np.ulonglong,
np.float16,
np.float32,
np.float64,
np.half,
np.single,
np.double,
]
for dtype in dtypes:
X = np.array(orig, dtype=dtype)
yield orig, X
yield orig.tolist(), X.tolist()
for dtype in dtypes:
X = np.array(orig, dtype=dtype)
df_orig = pd.DataFrame(orig)
df = pd.DataFrame(X)
yield df_orig, df
# Boolean
orig = rng.binomial(1, 0.5, size=n_samples * n_features).reshape(
n_samples, n_features
)
for dtype in [np.bool_, bool]:
X = np.array(orig, dtype=dtype)
yield orig, X
for dtype in [np.bool_, bool]:
X = np.array(orig, dtype=dtype)
df_orig = pd.DataFrame(orig)
df = pd.DataFrame(X)
yield df_orig, df
def pd_dtypes() -> Generator:
"""Enumerate all supported pandas extension types."""
import pandas as pd
# Integer
dtypes = [
pd.UInt8Dtype(),
pd.UInt16Dtype(),
pd.UInt32Dtype(),
pd.UInt64Dtype(),
pd.Int8Dtype(),
pd.Int16Dtype(),
pd.Int32Dtype(),
pd.Int64Dtype(),
]
Null: Union[float, None, Any] = np.nan
orig = pd.DataFrame(
{"f0": [1, 2, Null, 3], "f1": [4, 3, Null, 1]}, dtype=np.float32
)
for Null in (np.nan, None, pd.NA):
for dtype in dtypes:
df = pd.DataFrame(
{"f0": [1, 2, Null, 3], "f1": [4, 3, Null, 1]}, dtype=dtype
)
yield orig, df
# Float
Null = np.nan
dtypes = [pd.Float32Dtype(), pd.Float64Dtype()]
orig = pd.DataFrame(
{"f0": [1.0, 2.0, Null, 3.0], "f1": [3.0, 2.0, Null, 1.0]}, dtype=np.float32
)
for Null in (np.nan, None, pd.NA):
for dtype in dtypes:
df = pd.DataFrame(
{"f0": [1.0, 2.0, Null, 3.0], "f1": [3.0, 2.0, Null, 1.0]}, dtype=dtype
)
yield orig, df
ser_orig = orig["f0"]
ser = df["f0"]
assert isinstance(ser, pd.Series)
assert isinstance(ser_orig, pd.Series)
yield ser_orig, ser
# Categorical
orig = orig.astype("category")
for Null in (np.nan, None, pd.NA):
df = pd.DataFrame(
{"f0": [1.0, 2.0, Null, 3.0], "f1": [3.0, 2.0, Null, 1.0]},
dtype=pd.CategoricalDtype(),
)
yield orig, df
# Boolean
for Null in [None, pd.NA]:
data = {"f0": [True, False, Null, True], "f1": [False, True, Null, True]}
# pd.NA is not convertible to bool.
orig = pd.DataFrame(data, dtype=np.bool_ if Null is None else pd.BooleanDtype())
df = pd.DataFrame(data, dtype=pd.BooleanDtype())
yield orig, df
def pd_arrow_dtypes() -> Generator:
"""Pandas DataFrame with pyarrow backed type."""
import pandas as pd
import pyarrow as pa # pylint: disable=import-error
# Integer
dtypes = pandas_pyarrow_mapper
Null: Union[float, None, Any] = np.nan
orig = pd.DataFrame(
{"f0": [1, 2, Null, 3], "f1": [4, 3, Null, 1]}, dtype=np.float32
)
# Create a dictionary-backed dataframe, enable this when the roundtrip is
# implemented in pandas/pyarrow
#
# category = pd.ArrowDtype(pa.dictionary(pa.int32(), pa.int32(), ordered=True))
# df = pd.DataFrame({"f0": [0, 2, Null, 3], "f1": [4, 3, Null, 1]}, dtype=category)
# Error:
# >>> df.astype("category")
# Function 'dictionary_encode' has no kernel matching input types
# (array[dictionary<values=int32, indices=int32, ordered=0>])
# Error:
# pd_cat_df = pd.DataFrame(
# {"f0": [0, 2, Null, 3], "f1": [4, 3, Null, 1]},
# dtype="category"
# )
# pa_catcodes = (
# df["f1"].array.__arrow_array__().combine_chunks().to_pandas().cat.codes
# )
# pd_catcodes = pd_cat_df["f1"].cat.codes
# assert pd_catcodes.equals(pa_catcodes)
for Null in (None, pd.NA):
for dtype in dtypes:
if dtype.startswith("float16") or dtype.startswith("bool"):
continue
df = pd.DataFrame(
{"f0": [1, 2, Null, 3], "f1": [4, 3, Null, 1]}, dtype=dtype
)
yield orig, df
orig = pd.DataFrame(
{"f0": [True, False, pd.NA, True], "f1": [False, True, pd.NA, True]},
dtype=pd.BooleanDtype(),
)
df = pd.DataFrame(
{"f0": [True, False, pd.NA, True], "f1": [False, True, pd.NA, True]},
dtype=pd.ArrowDtype(pa.bool_()),
)
yield orig, df
def check_inf(rng: RNG) -> None:
"""Validate there's no inf in X."""
X = rng.random(size=32).reshape(8, 4)
y = rng.random(size=8)
X[5, 2] = np.inf
with pytest.raises(ValueError, match="Input data contains `inf`"):
xgboost.QuantileDMatrix(X, y)
with pytest.raises(ValueError, match="Input data contains `inf`"):
xgboost.DMatrix(X, y)
@memory.cache
def get_california_housing() -> Tuple[np.ndarray, np.ndarray]:
"""Fetch the California housing dataset from sklearn."""
datasets = pytest.importorskip("sklearn.datasets")
data = datasets.fetch_california_housing()
return data.data, data.target
@memory.cache
def get_digits() -> Tuple[np.ndarray, np.ndarray]:
"""Fetch the digits dataset from sklearn."""
datasets = pytest.importorskip("sklearn.datasets")
data = datasets.load_digits()
return data.data, data.target
@memory.cache
def get_cancer() -> Tuple[np.ndarray, np.ndarray]:
"""Fetch the breast cancer dataset from sklearn."""
datasets = pytest.importorskip("sklearn.datasets")
return datasets.load_breast_cancer(return_X_y=True)
@memory.cache
def get_sparse() -> Tuple[np.ndarray, np.ndarray]:
"""Generate a sparse dataset."""
datasets = pytest.importorskip("sklearn.datasets")
rng = np.random.RandomState(199)
n = 2000
sparsity = 0.75
X, y = datasets.make_regression(n, random_state=rng)
flag = rng.binomial(1, sparsity, X.shape)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
if flag[i, j]:
X[i, j] = np.nan
return X, y
@memory.cache
def get_ames_housing() -> Tuple[np.ndarray, np.ndarray]:
"""
Number of samples: 1460
Number of features: 20
Number of categorical features: 10
Number of numerical features: 10
"""
datasets = pytest.importorskip("sklearn.datasets")
X, y = datasets.fetch_openml(data_id=42165, as_frame=True, return_X_y=True)
categorical_columns_subset: List[str] = [
"BldgType", # 5 cats, no nan
"GarageFinish", # 3 cats, nan
"LotConfig", # 5 cats, no nan
"Functional", # 7 cats, no nan
"MasVnrType", # 4 cats, nan
"HouseStyle", # 8 cats, no nan
"FireplaceQu", # 5 cats, nan
"ExterCond", # 5 cats, no nan
"ExterQual", # 4 cats, no nan
"PoolQC", # 3 cats, nan
]
numerical_columns_subset: List[str] = [
"3SsnPorch",
"Fireplaces",
"BsmtHalfBath",
"HalfBath",
"GarageCars",
"TotRmsAbvGrd",
"BsmtFinSF1",
"BsmtFinSF2",
"GrLivArea",
"ScreenPorch",
]
X = X[categorical_columns_subset + numerical_columns_subset]
X[categorical_columns_subset] = X[categorical_columns_subset].astype("category")
return X, y
@memory.cache
def get_mq2008(
dpath: str,
) -> Tuple[
sparse.csr_matrix,
np.ndarray,
np.ndarray,
sparse.csr_matrix,
np.ndarray,
np.ndarray,
sparse.csr_matrix,
np.ndarray,
np.ndarray,
]:
"""Fetch the mq2008 dataset."""
datasets = pytest.importorskip("sklearn.datasets")
src = "https://s3-us-west-2.amazonaws.com/xgboost-examples/MQ2008.zip"
target = os.path.join(dpath, "MQ2008.zip")
if not os.path.exists(target):
request.urlretrieve(url=src, filename=target)
with zipfile.ZipFile(target, "r") as f:
f.extractall(path=dpath)
(
x_train,
y_train,
qid_train,
x_test,
y_test,
qid_test,
x_valid,
y_valid,
qid_valid,
) = datasets.load_svmlight_files(
(
os.path.join(dpath, "MQ2008/Fold1/train.txt"),
os.path.join(dpath, "MQ2008/Fold1/test.txt"),
os.path.join(dpath, "MQ2008/Fold1/vali.txt"),
),
query_id=True,
zero_based=False,
)
return (
x_train,
y_train,
qid_train,
x_test,
y_test,
qid_test,
x_valid,
y_valid,
qid_valid,
)
RelData = Tuple[sparse.csr_matrix, npt.NDArray[np.int32], npt.NDArray[np.int32]]
@dataclass
class ClickFold:
"""A structure containing information about generated user-click data."""
X: sparse.csr_matrix
y: npt.NDArray[np.int32]
qid: npt.NDArray[np.int32]
score: npt.NDArray[np.float32]
click: npt.NDArray[np.int32]
pos: npt.NDArray[np.int64]
class RelDataCV(NamedTuple):
"""Simple data struct for holding a train-test split of a learning to rank dataset."""
train: RelData
test: RelData
max_rel: int
def is_binary(self) -> bool:
"""Whether the label consists of binary relevance degree."""
return self.max_rel == 1
class PBM: # pylint: disable=too-few-public-methods
"""Simulate click data with position bias model. There are other models available in
`ULTRA <https://github.com/ULTR-Community/ULTRA.git>`_ like the cascading model.
References
----------
Unbiased LambdaMART: An Unbiased Pairwise Learning-to-Rank Algorithm
"""
def __init__(self, eta: float) -> None:
# click probability for each relevance degree. (from 0 to 4)
self.click_prob = np.array([0.1, 0.16, 0.28, 0.52, 1.0])
exam_prob = np.array(
[0.68, 0.61, 0.48, 0.34, 0.28, 0.20, 0.11, 0.10, 0.08, 0.06]
)
# Observation probability, encoding positional bias for each position
self.exam_prob = np.power(exam_prob, eta)
def sample_clicks_for_query(
self, labels: npt.NDArray[np.int32], position: npt.NDArray[np.int64]
) -> npt.NDArray[np.int32]:
"""Sample clicks for one query based on input relevance degree and position.
Parameters
----------
labels :
relevance_degree
"""
labels = np.array(labels, copy=True)
click_prob = np.zeros(labels.shape)
# minimum
labels[labels < 0] = 0
# maximum
labels[labels >= len(self.click_prob)] = -1
click_prob = self.click_prob[labels]
exam_prob = np.zeros(labels.shape)
assert position.size == labels.size
ranks = np.array(position, copy=True)
# maximum
ranks[ranks >= self.exam_prob.size] = -1
exam_prob = self.exam_prob[ranks]
rng = np.random.default_rng(1994)
prob = rng.random(size=labels.shape[0], dtype=np.float32)
clicks: npt.NDArray[np.int32] = np.zeros(labels.shape, dtype=np.int32)
clicks[prob < exam_prob * click_prob] = 1
return clicks
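# Demo-only sketch of the position bias model above: relevance degrees and
# display positions map to stochastic 0/1 clicks.
def _demo_pbm() -> None:
    pbm = PBM(eta=1.0)
    labels = np.array([4, 0, 3, 1], dtype=np.int32)
    position = np.array([0, 1, 2, 3], dtype=np.int64)
    clicks = pbm.sample_clicks_for_query(labels, position)
    assert clicks.shape == labels.shape
    assert set(np.unique(clicks)) <= {0, 1}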
def rlencode(x: npt.NDArray[np.int32]) -> Tuple[npt.NDArray, npt.NDArray, npt.NDArray]:
"""Run length encoding using numpy, modified from:
https://gist.github.com/nvictus/66627b580c13068589957d6ab0919e66
"""
x = np.asarray(x)
n = x.size
starts = np.r_[0, np.flatnonzero(~np.isclose(x[1:], x[:-1], equal_nan=True)) + 1]
lengths = np.diff(np.r_[starts, n])
values = x[starts]
indptr = np.append(starts, np.array([x.size]))
return indptr, lengths, values
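# A worked example (illustration only) of the run length encoding above.
def _demo_rlencode() -> None:
    indptr, lengths, values = rlencode(np.array([1, 1, 2, 2, 2, 5], dtype=np.int32))
    np.testing.assert_array_equal(indptr, [0, 2, 5, 6])
    np.testing.assert_array_equal(lengths, [2, 3, 1])
    np.testing.assert_array_equal(values, [1, 2, 5])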
def init_rank_score(
X: sparse.csr_matrix,
y: npt.NDArray[np.int32],
qid: npt.NDArray[np.int32],
sample_rate: float = 0.1,
) -> npt.NDArray[np.float32]:
"""We use XGBoost to generate the initial score instead of SVMRank for
simplicity. Sample rate is set to 0.1 by default so that we can test with small
datasets.
"""
# random sample
rng = np.random.default_rng(1994)
n_samples = int(X.shape[0] * sample_rate)
index = np.arange(0, X.shape[0], dtype=np.uint64)
rng.shuffle(index)
index = index[:n_samples]
X_train = X[index]
y_train = y[index]
qid_train = qid[index]
# Sort training data based on query id, required by XGBoost.
sorted_idx = np.argsort(qid_train)
X_train = X_train[sorted_idx]
y_train = y_train[sorted_idx]
qid_train = qid_train[sorted_idx]
ltr = xgboost.XGBRanker(objective="rank:ndcg", tree_method="hist")
ltr.fit(X_train, y_train, qid=qid_train)
# Use the original order of the data.
scores = ltr.predict(X)
return scores
def simulate_one_fold(
fold: Tuple[sparse.csr_matrix, npt.NDArray[np.int32], npt.NDArray[np.int32]],
scores_fold: npt.NDArray[np.float32],
) -> ClickFold:
"""Simulate clicks for one fold."""
X_fold, y_fold, qid_fold = fold
assert qid_fold.dtype == np.int32
qids = np.unique(qid_fold)
position = np.empty((y_fold.size,), dtype=np.int64)
clicks = np.empty((y_fold.size,), dtype=np.int32)
pbm = PBM(eta=1.0)
# Avoid grouping by qid as we want to preserve the original data partition by
# the dataset authors.
for q in qids:
qid_mask = q == qid_fold
qid_mask = qid_mask.reshape(qid_mask.shape[0])
query_scores = scores_fold[qid_mask]
# Initial rank list: scores sorted in decreasing order
query_position = np.argsort(query_scores)[::-1]
position[qid_mask] = query_position
# get labels
relevance_degrees = y_fold[qid_mask]
query_clicks = pbm.sample_clicks_for_query(relevance_degrees, query_position)
clicks[qid_mask] = query_clicks
assert X_fold.shape[0] == qid_fold.shape[0], (X_fold.shape, qid_fold.shape)
assert X_fold.shape[0] == clicks.shape[0], (X_fold.shape, clicks.shape)
return ClickFold(X_fold, y_fold, qid_fold, scores_fold, clicks, position)
# pylint: disable=too-many-locals
def simulate_clicks(cv_data: RelDataCV) -> Tuple[ClickFold, Optional[ClickFold]]:
"""Simulate click data using position biased model (PBM)."""
X, y, qid = list(zip(cv_data.train, cv_data.test))
# ptr to train-test split
indptr = np.array([0] + [v.shape[0] for v in X])
indptr = np.cumsum(indptr)
assert len(indptr) == 2 + 1 # train, test
X_full = sparse.vstack(X)
y_full = np.concatenate(y)
qid_full = np.concatenate(qid)
# Obtain initial relevance score for click simulation
scores_full = init_rank_score(X_full, y_full, qid_full)
# partition it back to (train, test) tuple
scores = [scores_full[indptr[i - 1] : indptr[i]] for i in range(1, indptr.size)]
X_lst, y_lst, q_lst, s_lst, c_lst, p_lst = [], [], [], [], [], []
for i in range(indptr.size - 1):
fold = simulate_one_fold((X[i], y[i], qid[i]), scores[i])
X_lst.append(fold.X)
y_lst.append(fold.y)
q_lst.append(fold.qid)
s_lst.append(fold.score)
c_lst.append(fold.click)
p_lst.append(fold.pos)
scores_check_1 = [s_lst[i] for i in range(indptr.size - 1)]
for i in range(2):
assert (scores_check_1[i] == scores[i]).all()
if len(X_lst) == 1:
train = ClickFold(X_lst[0], y_lst[0], q_lst[0], s_lst[0], c_lst[0], p_lst[0])
test = None
else:
train, test = (
ClickFold(X_lst[i], y_lst[i], q_lst[i], s_lst[i], c_lst[i], p_lst[i])
for i in range(len(X_lst))
)
return train, test
def sort_ltr_samples(
X: sparse.csr_matrix,
y: npt.NDArray[np.int32],
qid: npt.NDArray[np.int32],
clicks: npt.NDArray[np.int32],
pos: npt.NDArray[np.int64],
) -> Tuple[
sparse.csr_matrix,
npt.NDArray[np.int32],
npt.NDArray[np.int32],
npt.NDArray[np.int32],
]:
"""Sort data based on query index and position."""
sorted_idx = np.argsort(qid)
X = X[sorted_idx]
y = y[sorted_idx]  # keep the labels aligned with the qid-sorted rows
clicks = clicks[sorted_idx]
qid = qid[sorted_idx]
pos = pos[sorted_idx]
indptr, _, _ = rlencode(qid)
for i in range(1, indptr.size):
beg = indptr[i - 1]
end = indptr[i]
assert beg < end, (beg, end)
assert np.unique(qid[beg:end]).size == 1, (beg, end)
query_pos = pos[beg:end]
assert query_pos.min() == 0, query_pos.min()
assert query_pos.max() >= query_pos.size - 1, (
query_pos.max(),
query_pos.size,
i,
np.unique(qid[beg:end]),
)
sorted_idx = np.argsort(query_pos)
X[beg:end] = X[beg:end][sorted_idx]
clicks[beg:end] = clicks[beg:end][sorted_idx]
y[beg:end] = y[beg:end][sorted_idx]
# not necessary
qid[beg:end] = qid[beg:end][sorted_idx]
data = X, clicks, y, qid
return data
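# Tiny demo-only sketch for `sort_ltr_samples`: within each query, rows are
# reordered by the simulated display position (`qid` is already sorted here,
# matching how `simulate_clicks` produces the folds).
def _demo_sort_ltr_samples() -> None:
    X = sparse.csr_matrix(np.arange(8, dtype=np.float32).reshape(4, 2))
    y = np.array([0, 1, 1, 0], dtype=np.int32)
    qid = np.array([3, 3, 7, 7], dtype=np.int32)
    clicks = np.array([0, 1, 1, 0], dtype=np.int32)
    pos = np.array([1, 0, 1, 0], dtype=np.int64)
    _, sorted_clicks, _, _ = sort_ltr_samples(X, y, qid, clicks, pos)
    np.testing.assert_array_equal(sorted_clicks, [1, 0, 0, 1])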
| 17,993
| 28.693069
| 90
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/testing/updater.py
|
"""Tests for updaters."""
import json
from functools import partial, update_wrapper
from typing import Any, Dict
import numpy as np
import xgboost as xgb
import xgboost.testing as tm
def get_basescore(model: xgb.XGBModel) -> float:
"""Get base score from an XGBoost sklearn estimator."""
base_score = float(
json.loads(model.get_booster().save_config())["learner"]["learner_model_param"][
"base_score"
]
)
return base_score
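# Demo-only sketch of reading the intercept back: with default settings the
# base score is estimated from the targets, so a constant target should give
# an intercept near that constant (assumed behaviour of recent versions).
def _demo_get_basescore() -> None:
    rng = np.random.RandomState(1994)
    reg = xgb.XGBRegressor(n_estimators=1)
    reg.fit(rng.random_sample((128, 2)), np.full(128, 3.0))
    print(get_basescore(reg))  # expected to be close to 3.0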
def check_init_estimation(tree_method: str) -> None:
"""Test for init estimation."""
from sklearn.datasets import (
make_classification,
make_multilabel_classification,
make_regression,
)
def run_reg(X: np.ndarray, y: np.ndarray) -> None: # pylint: disable=invalid-name
reg = xgb.XGBRegressor(tree_method=tree_method, max_depth=1, n_estimators=1)
reg.fit(X, y, eval_set=[(X, y)])
base_score_0 = get_basescore(reg)
score_0 = reg.evals_result()["validation_0"]["rmse"][0]
reg = xgb.XGBRegressor(
tree_method=tree_method, max_depth=1, n_estimators=1, boost_from_average=0
)
reg.fit(X, y, eval_set=[(X, y)])
base_score_1 = get_basescore(reg)
score_1 = reg.evals_result()["validation_0"]["rmse"][0]
assert not np.isclose(base_score_0, base_score_1)
assert score_0 < score_1 # should be better
# pylint: disable=unbalanced-tuple-unpacking
X, y = make_regression(n_samples=4096, random_state=17)
run_reg(X, y)
# pylint: disable=unbalanced-tuple-unpacking
X, y = make_regression(n_samples=4096, n_targets=3, random_state=17)
run_reg(X, y)
def run_clf(X: np.ndarray, y: np.ndarray) -> None: # pylint: disable=invalid-name
clf = xgb.XGBClassifier(tree_method=tree_method, max_depth=1, n_estimators=1)
clf.fit(X, y, eval_set=[(X, y)])
base_score_0 = get_basescore(clf)
score_0 = clf.evals_result()["validation_0"]["logloss"][0]
clf = xgb.XGBClassifier(
tree_method=tree_method, max_depth=1, n_estimators=1, boost_from_average=0
)
clf.fit(X, y, eval_set=[(X, y)])
base_score_1 = get_basescore(clf)
score_1 = clf.evals_result()["validation_0"]["logloss"][0]
assert not np.isclose(base_score_0, base_score_1)
assert score_0 < score_1 # should be better
# pylint: disable=unbalanced-tuple-unpacking
X, y = make_classification(n_samples=4096, random_state=17)
run_clf(X, y)
X, y = make_multilabel_classification(
n_samples=4096, n_labels=3, n_classes=5, random_state=17
)
run_clf(X, y)
# pylint: disable=too-many-locals
def check_quantile_loss(tree_method: str, weighted: bool) -> None:
"""Test for quantile loss."""
from sklearn.datasets import make_regression
from sklearn.metrics import mean_pinball_loss
from xgboost.sklearn import _metric_decorator
n_samples = 4096
n_features = 8
n_estimators = 8
# A non-zero base score can cause floating point differences with the GPU
# predictor, and multi-target prediction differs slightly from single-target
# prediction in the prediction kernel.
base_score = 0.0
rng = np.random.RandomState(1994)
# pylint: disable=unbalanced-tuple-unpacking
X, y = make_regression(
n_samples=n_samples,
n_features=n_features,
random_state=rng,
)
if weighted:
weight = rng.random(size=n_samples)
else:
weight = None
Xy = xgb.QuantileDMatrix(X, y, weight=weight)
alpha = np.array([0.1, 0.5])
evals_result: Dict[str, Dict] = {}
booster_multi = xgb.train(
{
"objective": "reg:quantileerror",
"tree_method": tree_method,
"quantile_alpha": alpha,
"base_score": base_score,
},
Xy,
num_boost_round=n_estimators,
evals=[(Xy, "Train")],
evals_result=evals_result,
)
predt_multi = booster_multi.predict(Xy, strict_shape=True)
assert tm.non_increasing(evals_result["Train"]["quantile"])
assert evals_result["Train"]["quantile"][-1] < 20.0
# check that there's a way to use custom metric and compare the results.
metrics = [
_metric_decorator(
update_wrapper(
partial(mean_pinball_loss, sample_weight=weight, alpha=alpha[i]),
mean_pinball_loss,
)
)
for i in range(alpha.size)
]
predts = np.empty(predt_multi.shape)
for i in range(alpha.shape[0]):
a = alpha[i]
booster_i = xgb.train(
{
"objective": "reg:quantileerror",
"tree_method": tree_method,
"quantile_alpha": a,
"base_score": base_score,
},
Xy,
num_boost_round=n_estimators,
evals=[(Xy, "Train")],
custom_metric=metrics[i],
evals_result=evals_result,
)
assert tm.non_increasing(evals_result["Train"]["quantile"])
assert evals_result["Train"]["quantile"][-1] < 30.0
np.testing.assert_allclose(
np.array(evals_result["Train"]["quantile"]),
np.array(evals_result["Train"]["mean_pinball_loss"]),
atol=1e-6,
rtol=1e-6,
)
predts[:, i] = booster_i.predict(Xy)
for i in range(alpha.shape[0]):
np.testing.assert_allclose(predts[:, i], predt_multi[:, i])
def check_cut(
n_entries: int, indptr: np.ndarray, data: np.ndarray, dtypes: Any
) -> None:
"""Check the cut values."""
from pandas.api.types import is_categorical_dtype
assert data.shape[0] == indptr[-1]
assert data.shape[0] == n_entries
assert indptr.dtype == np.uint64
for i in range(1, indptr.size):
beg = int(indptr[i - 1])
end = int(indptr[i])
for j in range(beg + 1, end):
assert data[j] > data[j - 1]
if is_categorical_dtype(dtypes[i - 1]):
assert data[j] == data[j - 1] + 1
def check_get_quantile_cut_device(tree_method: str, use_cupy: bool) -> None:
"""Check with optional cupy."""
from pandas.api.types import is_categorical_dtype
n_samples = 1024
n_features = 14
max_bin = 16
dtypes = [np.float32] * n_features
# numerical
X, y, w = tm.make_regression(n_samples, n_features, use_cupy=use_cupy)
# - qdm
Xyw: xgb.DMatrix = xgb.QuantileDMatrix(X, y, weight=w, max_bin=max_bin)
indptr, data = Xyw.get_quantile_cut()
check_cut((max_bin + 1) * n_features, indptr, data, dtypes)
# - dm
Xyw = xgb.DMatrix(X, y, weight=w)
xgb.train({"tree_method": tree_method, "max_bin": max_bin}, Xyw)
indptr, data = Xyw.get_quantile_cut()
check_cut((max_bin + 1) * n_features, indptr, data, dtypes)
# - ext mem
n_batches = 3
n_samples_per_batch = 256
it = tm.IteratorForTest(
*tm.make_batches(n_samples_per_batch, n_features, n_batches, use_cupy),
cache="cache",
)
Xy: xgb.DMatrix = xgb.DMatrix(it)
xgb.train({"tree_method": tree_method, "max_bin": max_bin}, Xyw)
indptr, data = Xyw.get_quantile_cut()
check_cut((max_bin + 1) * n_features, indptr, data, dtypes)
# categorical
n_categories = 32
X, y = tm.make_categorical(n_samples, n_features, n_categories, False, sparsity=0.8)
if use_cupy:
import cudf # pylint: disable=import-error
import cupy as cp # pylint: disable=import-error
X = cudf.from_pandas(X)
y = cp.array(y)
# - qdm
Xy = xgb.QuantileDMatrix(X, y, max_bin=max_bin, enable_categorical=True)
indptr, data = Xy.get_quantile_cut()
check_cut(n_categories * n_features, indptr, data, X.dtypes)
# - dm
Xy = xgb.DMatrix(X, y, enable_categorical=True)
xgb.train({"tree_method": tree_method, "max_bin": max_bin}, Xy)
indptr, data = Xy.get_quantile_cut()
check_cut(n_categories * n_features, indptr, data, X.dtypes)
# mixed
X, y = tm.make_categorical(
n_samples, n_features, n_categories, False, sparsity=0.8, cat_ratio=0.5
)
n_cat_features = len([0 for dtype in X.dtypes if is_categorical_dtype(dtype)])
n_num_features = n_features - n_cat_features
n_entries = n_categories * n_cat_features + (max_bin + 1) * n_num_features
# - qdm
Xy = xgb.QuantileDMatrix(X, y, max_bin=max_bin, enable_categorical=True)
indptr, data = Xy.get_quantile_cut()
check_cut(n_entries, indptr, data, X.dtypes)
# - dm
Xy = xgb.DMatrix(X, y, enable_categorical=True)
xgb.train({"tree_method": tree_method, "max_bin": max_bin}, Xy)
indptr, data = Xy.get_quantile_cut()
check_cut(n_entries, indptr, data, X.dtypes)
def check_get_quantile_cut(tree_method: str) -> None:
"""Check the quantile cut getter."""
use_cupy = tree_method == "gpu_hist"
check_get_quantile_cut_device(tree_method, False)
if use_cupy:
check_get_quantile_cut_device(tree_method, True)
| 8,994
| 33.72973
| 88
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/testing/shared.py
|
"""Testing code shared by other tests."""
# pylint: disable=invalid-name
import collections
import importlib.util
import json
import os
import tempfile
from typing import Any, Callable, Dict, Type
import numpy as np
import xgboost as xgb
from xgboost._typing import ArrayLike
def validate_leaf_output(leaf: np.ndarray, num_parallel_tree: int) -> None:
"""Validate output for predict leaf tests."""
for i in range(leaf.shape[0]): # n_samples
for j in range(leaf.shape[1]): # n_rounds
for k in range(leaf.shape[2]): # n_classes
tree_group = leaf[i, j, k, :]
assert tree_group.shape[0] == num_parallel_tree
# No sampling, all trees within forest are the same
assert np.all(tree_group == tree_group[0])
def validate_data_initialization(
dmatrix: Type, model: Type[xgb.XGBModel], X: ArrayLike, y: ArrayLike
) -> None:
"""Assert that we don't create duplicated DMatrix."""
old_init = dmatrix.__init__
count = [0]
def new_init(self: Any, **kwargs: Any) -> Callable:
count[0] += 1
return old_init(self, **kwargs)
dmatrix.__init__ = new_init
model(n_estimators=1).fit(X, y, eval_set=[(X, y)])
assert count[0] == 1
count[0] = 0 # only 1 DMatrix is created.
y_copy = y.copy()
model(n_estimators=1).fit(X, y, eval_set=[(X, y_copy)])
assert count[0] == 2 # a different Python object is considered different
dmatrix.__init__ = old_init
# pylint: disable=too-many-arguments,too-many-locals
def get_feature_weights(
X: ArrayLike,
y: ArrayLike,
fw: np.ndarray,
parser_path: str,
tree_method: str,
model: Type[xgb.XGBModel] = xgb.XGBRegressor,
) -> np.ndarray:
"""Get feature weights using the demo parser."""
with tempfile.TemporaryDirectory() as tmpdir:
colsample_bynode = 0.5
reg = model(tree_method=tree_method, colsample_bynode=colsample_bynode)
reg.fit(X, y, feature_weights=fw)
model_path = os.path.join(tmpdir, "model.json")
reg.save_model(model_path)
with open(model_path, "r", encoding="utf-8") as fd:
model = json.load(fd)
spec = importlib.util.spec_from_file_location("JsonParser", parser_path)
assert spec is not None
jsonm = importlib.util.module_from_spec(spec)
assert spec.loader is not None
spec.loader.exec_module(jsonm)
model = jsonm.Model(model)
splits: Dict[int, int] = {}
total_nodes = 0
for tree in model.trees:
n_nodes = len(tree.nodes)
total_nodes += n_nodes
for n in range(n_nodes):
if tree.is_leaf(n):
continue
if splits.get(tree.split_index(n), None) is None:
splits[tree.split_index(n)] = 1
else:
splits[tree.split_index(n)] += 1
od = collections.OrderedDict(sorted(splits.items()))
tuples = list(od.items())
k, v = list(zip(*tuples))
w = np.polyfit(k, v, deg=1)
return w
| 3,113
| 31.4375
| 80
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/testing/metrics.py
|
"""Tests for evaluation metrics."""
from typing import Dict, List
import numpy as np
import pytest
import xgboost as xgb
from xgboost.compat import concat
from xgboost.core import _parse_eval_str
def check_precision_score(tree_method: str) -> None:
"""Test for precision with ranking and classification."""
datasets = pytest.importorskip("sklearn.datasets")
X, y = datasets.make_classification(
n_samples=1024, n_features=4, n_classes=2, random_state=2023
)
qid = np.zeros(shape=y.shape) # same group
ltr = xgb.XGBRanker(n_estimators=2, tree_method=tree_method)
ltr.fit(X, y, qid=qid)
    # Re-generate the data so that XGBoost doesn't evaluate the result to 1.0.
X, y = datasets.make_classification(
n_samples=512, n_features=4, n_classes=2, random_state=1994
)
ltr.set_params(eval_metric="pre@32")
result = _parse_eval_str(
ltr.get_booster().eval_set(evals=[(xgb.DMatrix(X, y), "Xy")])
)
score_0 = result[1][1]
X_list = []
y_list = []
n_query_groups = 3
q_list: List[np.ndarray] = []
for i in range(n_query_groups):
# same for all groups
X, y = datasets.make_classification(
n_samples=512, n_features=4, n_classes=2, random_state=1994
)
X_list.append(X)
y_list.append(y)
q = np.full(shape=y.shape, fill_value=i, dtype=np.uint64)
q_list.append(q)
qid = concat(q_list)
X = concat(X_list)
y = concat(y_list)
result = _parse_eval_str(
ltr.get_booster().eval_set(evals=[(xgb.DMatrix(X, y, qid=qid), "Xy")])
)
assert result[1][0].endswith("pre@32")
score_1 = result[1][1]
assert score_1 == score_0
def check_quantile_error(tree_method: str) -> None:
"""Test for the `quantile` loss."""
from sklearn.datasets import make_regression
from sklearn.metrics import mean_pinball_loss
rng = np.random.RandomState(19)
# pylint: disable=unbalanced-tuple-unpacking
X, y = make_regression(128, 3, random_state=rng)
Xy = xgb.QuantileDMatrix(X, y)
evals_result: Dict[str, Dict] = {}
booster = xgb.train(
{"tree_method": tree_method, "eval_metric": "quantile", "quantile_alpha": 0.3},
Xy,
evals=[(Xy, "Train")],
evals_result=evals_result,
)
predt = booster.inplace_predict(X)
loss = mean_pinball_loss(y, predt, alpha=0.3)
np.testing.assert_allclose(evals_result["Train"]["quantile"][-1], loss)
| 2,468
| 29.8625
| 87
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/testing/__init__.py
|
"""Utilities for defining Python tests. The module is private and subject to frequent
change without notice.
"""
# pylint: disable=invalid-name,missing-function-docstring,import-error
import gc
import importlib.util
import multiprocessing
import os
import platform
import socket
import sys
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
from io import StringIO
from platform import system
from typing import (
Any,
Callable,
Dict,
Generator,
List,
Optional,
Sequence,
Set,
Tuple,
TypedDict,
TypeVar,
Union,
)
import numpy as np
import pytest
from scipy import sparse
import xgboost as xgb
from xgboost.core import ArrayLike
from xgboost.sklearn import SklObjective
from xgboost.testing.data import (
get_california_housing,
get_cancer,
get_digits,
get_sparse,
memory,
)
hypothesis = pytest.importorskip("hypothesis")
# pylint:disable=wrong-import-position,wrong-import-order
from hypothesis import strategies
from hypothesis.extra.numpy import arrays
datasets = pytest.importorskip("sklearn.datasets")
PytestSkip = TypedDict("PytestSkip", {"condition": bool, "reason": str})
def has_ipv6() -> bool:
"""Check whether IPv6 is enabled on this host."""
    # Connection errors on macOS; still needs some fixes.
if system() not in ("Linux", "Windows"):
return False
if socket.has_ipv6:
try:
with socket.socket(
socket.AF_INET6, socket.SOCK_STREAM
) as server, socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as client:
server.bind(("::1", 0))
port = server.getsockname()[1]
server.listen()
client.connect(("::1", port))
conn, _ = server.accept()
client.sendall("abc".encode())
msg = conn.recv(3).decode()
# if the code can be executed to this point, the message should be
# correct.
assert msg == "abc"
return True
except OSError:
pass
return False
def no_mod(name: str) -> PytestSkip:
spec = importlib.util.find_spec(name)
return {"condition": spec is None, "reason": f"{name} is not installed."}
def no_ipv6() -> PytestSkip:
"""PyTest skip mark for IPv6."""
return {"condition": not has_ipv6(), "reason": "IPv6 is required to be enabled."}
def no_ubjson() -> PytestSkip:
return no_mod("ubjson")
def no_sklearn() -> PytestSkip:
return no_mod("sklearn")
def no_dask() -> PytestSkip:
if sys.platform.startswith("win"):
return {"reason": "Unsupported platform.", "condition": True}
return no_mod("dask")
def no_dask_ml() -> PytestSkip:
if sys.platform.startswith("win"):
return {"reason": "Unsupported platform.", "condition": True}
return no_mod("dask_ml")
def no_spark() -> PytestSkip:
if sys.platform.startswith("win") or sys.platform.startswith("darwin"):
return {"reason": "Unsupported platform.", "condition": True}
return no_mod("pyspark")
def no_pandas() -> PytestSkip:
return no_mod("pandas")
def no_arrow() -> PytestSkip:
return no_mod("pyarrow")
def no_modin() -> PytestSkip:
return no_mod("modin")
def no_dt() -> PytestSkip:
return no_mod("datatable")
def no_matplotlib() -> PytestSkip:
reason = "Matplotlib is not installed."
try:
import matplotlib.pyplot as _ # noqa
return {"condition": False, "reason": reason}
except ImportError:
return {"condition": True, "reason": reason}
def no_dask_cuda() -> PytestSkip:
return no_mod("dask_cuda")
def no_cudf() -> PytestSkip:
return no_mod("cudf")
def no_cupy() -> PytestSkip:
return no_mod("cupy")
def no_dask_cudf() -> PytestSkip:
return no_mod("dask_cudf")
def no_json_schema() -> PytestSkip:
return no_mod("jsonschema")
def no_graphviz() -> PytestSkip:
return no_mod("graphviz")
def no_rmm() -> PytestSkip:
return no_mod("rmm")
def no_multiple(*args: Any) -> PytestSkip:
condition = False
reason = ""
for arg in args:
condition = condition or arg["condition"]
if arg["condition"]:
reason = arg["reason"]
break
return {"condition": condition, "reason": reason}
def skip_s390x() -> PytestSkip:
condition = platform.machine() == "s390x"
reason = "Known to fail on s390x"
return {"condition": condition, "reason": reason}
class IteratorForTest(xgb.core.DataIter):
"""Iterator for testing streaming DMatrix. (external memory, quantile)"""
def __init__(
self,
X: Sequence,
y: Sequence,
w: Optional[Sequence],
cache: Optional[str],
) -> None:
assert len(X) == len(y)
self.X = X
self.y = y
self.w = w
self.it = 0
super().__init__(cache_prefix=cache)
def next(self, input_data: Callable) -> int:
if self.it == len(self.X):
return 0
with pytest.raises(TypeError, match="keyword args"):
input_data(self.X[self.it], self.y[self.it], None)
# Use copy to make sure the iterator doesn't hold a reference to the data.
input_data(
data=self.X[self.it].copy(),
label=self.y[self.it].copy(),
weight=self.w[self.it].copy() if self.w else None,
)
        gc.collect()  # Clear up the copy; see if XGBoost accesses freed memory.
self.it += 1
return 1
def reset(self) -> None:
self.it = 0
def as_arrays(
self,
) -> Tuple[Union[np.ndarray, sparse.csr_matrix], ArrayLike, ArrayLike]:
if isinstance(self.X[0], sparse.csr_matrix):
X = sparse.vstack(self.X, format="csr")
else:
X = np.concatenate(self.X, axis=0)
y = np.concatenate(self.y, axis=0)
if self.w:
w = np.concatenate(self.w, axis=0)
else:
w = None
return X, y, w
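def _example_iterator_for_test() -> None:
    # Illustrative sketch only: feed the iterator into a QuantileDMatrix
    # without an on-disk cache; ``make_batches`` is defined below in this
    # module.
    X, y, w = make_batches(n_samples_per_batch=32, n_features=4, n_batches=3)
    it = IteratorForTest(X, y, w, cache=None)
    Xy = xgb.QuantileDMatrix(it)
    assert Xy.num_row() == 32 * 3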
def make_batches(
n_samples_per_batch: int, n_features: int, n_batches: int, use_cupy: bool = False
) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
X = []
y = []
w = []
if use_cupy:
import cupy
rng = cupy.random.RandomState(1994)
else:
rng = np.random.RandomState(1994)
for _ in range(n_batches):
_X = rng.randn(n_samples_per_batch, n_features)
_y = rng.randn(n_samples_per_batch)
_w = rng.uniform(low=0, high=1, size=n_samples_per_batch)
X.append(_X)
y.append(_y)
w.append(_w)
return X, y, w
def make_regression(
n_samples: int, n_features: int, use_cupy: bool
) -> Tuple[ArrayLike, ArrayLike, ArrayLike]:
"""Make a simple regression dataset."""
X, y, w = make_batches(n_samples, n_features, 1, use_cupy)
return X[0], y[0], w[0]
def make_batches_sparse(
n_samples_per_batch: int, n_features: int, n_batches: int, sparsity: float
) -> Tuple[List[sparse.csr_matrix], List[np.ndarray], List[np.ndarray]]:
X = []
y = []
w = []
rng = np.random.RandomState(1994)
for _ in range(n_batches):
_X = sparse.random(
n_samples_per_batch,
n_features,
1.0 - sparsity,
format="csr",
dtype=np.float32,
random_state=rng,
)
_y = rng.randn(n_samples_per_batch)
_w = rng.uniform(low=0, high=1, size=n_samples_per_batch)
X.append(_X)
y.append(_y)
w.append(_w)
return X, y, w
class TestDataset:
"""Contains a dataset in numpy format as well as the relevant objective and metric."""
def __init__(
self, name: str, get_dataset: Callable, objective: str, metric: str
) -> None:
self.name = name
self.objective = objective
self.metric = metric
self.X, self.y = get_dataset()
self.w: Optional[np.ndarray] = None
self.margin: Optional[np.ndarray] = None
def set_params(self, params_in: Dict[str, Any]) -> Dict[str, Any]:
params_in["objective"] = self.objective
params_in["eval_metric"] = self.metric
if self.objective == "multi:softmax":
params_in["num_class"] = int(np.max(self.y) + 1)
return params_in
def get_dmat(self) -> xgb.DMatrix:
return xgb.DMatrix(
self.X,
self.y,
weight=self.w,
base_margin=self.margin,
enable_categorical=True,
)
def get_device_dmat(self, max_bin: Optional[int]) -> xgb.QuantileDMatrix:
import cupy as cp
w = None if self.w is None else cp.array(self.w)
X = cp.array(self.X, dtype=np.float32)
y = cp.array(self.y, dtype=np.float32)
return xgb.QuantileDMatrix(
X, y, weight=w, base_margin=self.margin, max_bin=max_bin
)
def get_external_dmat(self) -> xgb.DMatrix:
n_samples = self.X.shape[0]
n_batches = 10
per_batch = n_samples // n_batches + 1
predictor = []
response = []
weight = []
for i in range(n_batches):
beg = i * per_batch
end = min((i + 1) * per_batch, n_samples)
assert end != beg
X = self.X[beg:end, ...]
y = self.y[beg:end]
w = self.w[beg:end] if self.w is not None else None
predictor.append(X)
response.append(y)
if w is not None:
weight.append(w)
it = IteratorForTest(
predictor, response, weight if weight else None, cache="cache"
)
return xgb.DMatrix(it)
def __repr__(self) -> str:
return self.name
# pylint: disable=too-many-arguments,too-many-locals
@memory.cache
def make_categorical(
n_samples: int,
n_features: int,
n_categories: int,
onehot: bool,
sparsity: float = 0.0,
cat_ratio: float = 1.0,
shuffle: bool = False,
) -> Tuple[ArrayLike, np.ndarray]:
"""Generate categorical features for test.
Parameters
----------
n_categories:
Number of categories for categorical features.
onehot:
Should we apply one-hot encoding to the data?
sparsity:
The ratio of the amount of missing values over the number of all entries.
cat_ratio:
The ratio of features that are categorical.
shuffle:
Whether we should shuffle the columns.
Returns
-------
X, y
"""
import pandas as pd
from pandas.api.types import is_categorical_dtype
rng = np.random.RandomState(1994)
pd_dict = {}
for i in range(n_features + 1):
c = rng.randint(low=0, high=n_categories, size=n_samples)
pd_dict[str(i)] = pd.Series(c, dtype=np.int64)
df = pd.DataFrame(pd_dict)
label = df.iloc[:, 0]
df = df.iloc[:, 1:]
for i in range(0, n_features):
label += df.iloc[:, i]
label += 1
categories = np.arange(0, n_categories)
for col in df.columns:
if rng.binomial(1, cat_ratio, size=1)[0] == 1:
df[col] = df[col].astype("category")
df[col] = df[col].cat.set_categories(categories)
if sparsity > 0.0:
for i in range(n_features):
index = rng.randint(
low=0, high=n_samples - 1, size=int(n_samples * sparsity)
)
df.iloc[index, i] = np.NaN
if is_categorical_dtype(df.dtypes[i]):
assert n_categories == np.unique(df.dtypes[i].categories).size
if onehot:
df = pd.get_dummies(df)
if shuffle:
columns = list(df.columns)
rng.shuffle(columns)
df = df[columns]
return df, label
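def _example_make_categorical() -> None:
    # Illustrative sketch only: generate categorical features and train one
    # round with native categorical support enabled.
    X, y = make_categorical(n_samples=128, n_features=4, n_categories=8, onehot=False)
    Xy = xgb.DMatrix(X, y, enable_categorical=True)
    xgb.train({"tree_method": "hist"}, Xy, num_boost_round=1)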
def make_ltr(
n_samples: int, n_features: int, n_query_groups: int, max_rel: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Make a dataset for testing LTR."""
rng = np.random.default_rng(1994)
X = rng.normal(0, 1.0, size=n_samples * n_features).reshape(n_samples, n_features)
y = np.sum(X, axis=1)
y -= y.min()
y = np.round(y / y.max() * max_rel).astype(np.int32)
qid = rng.integers(0, n_query_groups, size=n_samples, dtype=np.int32)
w = rng.normal(0, 1.0, size=n_query_groups)
w -= np.min(w)
w /= np.max(w)
qid = np.sort(qid)
return X, y, qid, w
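def _example_make_ltr() -> None:
    # Illustrative sketch only: fit a ranker on the generated LTR data. The
    # query ids returned by ``make_ltr`` are already sorted, as the ranker
    # requires.
    X, y, qid, _ = make_ltr(n_samples=128, n_features=4, n_query_groups=3, max_rel=3)
    ranker = xgb.XGBRanker(n_estimators=2, tree_method="hist")
    ranker.fit(X, y, qid=qid)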
def _cat_sampled_from() -> strategies.SearchStrategy:
@strategies.composite
def _make_cat(draw: Callable) -> Tuple[int, int, int, float]:
n_samples = draw(strategies.integers(2, 512))
n_features = draw(strategies.integers(1, 4))
n_cats = draw(strategies.integers(1, 128))
sparsity = draw(
strategies.floats(
min_value=0,
max_value=1,
allow_nan=False,
allow_infinity=False,
allow_subnormal=False,
)
)
return n_samples, n_features, n_cats, sparsity
def _build(args: Tuple[int, int, int, float]) -> TestDataset:
n_samples = args[0]
n_features = args[1]
n_cats = args[2]
sparsity = args[3]
return TestDataset(
f"{n_samples}x{n_features}-{n_cats}-{sparsity}",
lambda: make_categorical(n_samples, n_features, n_cats, False, sparsity),
"reg:squarederror",
"rmse",
)
return _make_cat().map(_build) # pylint: disable=no-member
categorical_dataset_strategy: strategies.SearchStrategy = _cat_sampled_from()
# pylint: disable=too-many-locals
@memory.cache
def make_sparse_regression(
n_samples: int, n_features: int, sparsity: float, as_dense: bool
) -> Tuple[Union[sparse.csr_matrix], np.ndarray]:
"""Make sparse matrix.
Parameters
----------
as_dense:
Return the matrix as np.ndarray with missing values filled by NaN
"""
if not hasattr(np.random, "default_rng"):
# old version of numpy on s390x
rng = np.random.RandomState(1994)
X = sparse.random(
m=n_samples,
n=n_features,
density=1.0 - sparsity,
random_state=rng,
format="csr",
)
y = rng.normal(loc=0.0, scale=1.0, size=n_samples)
return X, y
    # Use multiple threads to speed up the generation; convenient if you use
    # this function for benchmarking.
n_threads = min(multiprocessing.cpu_count(), n_features)
    def random_csc(t_id: int) -> Tuple[sparse.csc_matrix, np.ndarray]:
rng = np.random.default_rng(1994 * t_id)
thread_size = n_features // n_threads
if t_id == n_threads - 1:
n_features_tloc = n_features - t_id * thread_size
else:
n_features_tloc = thread_size
X = sparse.random(
m=n_samples,
n=n_features_tloc,
density=1.0 - sparsity,
random_state=rng,
).tocsc()
y = np.zeros((n_samples, 1))
for i in range(X.shape[1]):
size = X.indptr[i + 1] - X.indptr[i]
if size != 0:
y += X[:, i].toarray() * rng.random((n_samples, 1)) * 0.2
return X, y
futures = []
with ThreadPoolExecutor(max_workers=n_threads) as executor:
for i in range(n_threads):
futures.append(executor.submit(random_csc, i))
X_results = []
y_results = []
for f in futures:
X, y = f.result()
X_results.append(X)
y_results.append(y)
assert len(y_results) == n_threads
csr: sparse.csr_matrix = sparse.hstack(X_results, format="csr")
y = np.asarray(y_results)
y = y.reshape((y.shape[0], y.shape[1])).T
y = np.sum(y, axis=1)
assert csr.shape[0] == n_samples
assert csr.shape[1] == n_features
assert y.shape[0] == n_samples
if as_dense:
arr = csr.toarray()
assert arr.shape[0] == n_samples
assert arr.shape[1] == n_features
arr[arr == 0] = np.nan
return arr, y
return csr, y
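def _example_make_sparse_regression() -> None:
    # Illustrative sketch only: generate a sparse dataset and run one boosting
    # round on it.
    X, y = make_sparse_regression(
        n_samples=1024, n_features=8, sparsity=0.5, as_dense=False
    )
    Xy = xgb.DMatrix(X, y)
    xgb.train({"tree_method": "hist"}, Xy, num_boost_round=1)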
sparse_datasets_strategy = strategies.sampled_from(
[
TestDataset(
"1e5x8-0.95-csr",
lambda: make_sparse_regression(int(1e5), 8, 0.95, False),
"reg:squarederror",
"rmse",
),
TestDataset(
"1e5x8-0.5-csr",
lambda: make_sparse_regression(int(1e5), 8, 0.5, False),
"reg:squarederror",
"rmse",
),
TestDataset(
"1e5x8-0.5-dense",
lambda: make_sparse_regression(int(1e5), 8, 0.5, True),
"reg:squarederror",
"rmse",
),
TestDataset(
"1e5x8-0.05-csr",
lambda: make_sparse_regression(int(1e5), 8, 0.05, False),
"reg:squarederror",
"rmse",
),
TestDataset(
"1e5x8-0.05-dense",
lambda: make_sparse_regression(int(1e5), 8, 0.05, True),
"reg:squarederror",
"rmse",
),
]
)
def make_datasets_with_margin(
unweighted_strategy: strategies.SearchStrategy,
) -> Callable:
"""Factory function for creating strategies that generates datasets with weight and
base margin.
"""
@strategies.composite
def weight_margin(draw: Callable) -> TestDataset:
data: TestDataset = draw(unweighted_strategy)
if draw(strategies.booleans()):
data.w = draw(
arrays(np.float64, (len(data.y)), elements=strategies.floats(0.1, 2.0))
)
if draw(strategies.booleans()):
num_class = 1
if data.objective == "multi:softmax":
num_class = int(np.max(data.y) + 1)
elif data.name.startswith("mtreg"):
num_class = data.y.shape[1]
data.margin = draw(
arrays(
np.float64,
(data.y.shape[0] * num_class),
elements=strategies.floats(0.5, 1.0),
)
)
assert data.margin is not None
if num_class != 1:
data.margin = data.margin.reshape(data.y.shape[0], num_class)
return data
return weight_margin
# A strategy for drawing from a set of example datasets. May add random weights to the
# dataset
@memory.cache
def make_dataset_strategy() -> Callable:
_unweighted_datasets_strategy = strategies.sampled_from(
[
TestDataset(
"calif_housing", get_california_housing, "reg:squarederror", "rmse"
),
TestDataset(
"calif_housing-l1", get_california_housing, "reg:absoluteerror", "mae"
),
TestDataset("cancer", get_cancer, "binary:logistic", "logloss"),
TestDataset("sparse", get_sparse, "reg:squarederror", "rmse"),
TestDataset("sparse-l1", get_sparse, "reg:absoluteerror", "mae"),
TestDataset(
"empty",
lambda: (np.empty((0, 100)), np.empty(0)),
"reg:squarederror",
"rmse",
),
]
)
return make_datasets_with_margin(_unweighted_datasets_strategy)()
_unweighted_multi_datasets_strategy = strategies.sampled_from(
[
TestDataset("digits", get_digits, "multi:softmax", "mlogloss"),
TestDataset(
"mtreg",
lambda: datasets.make_regression(n_samples=128, n_features=2, n_targets=3),
"reg:squarederror",
"rmse",
),
TestDataset(
"mtreg-l1",
lambda: datasets.make_regression(n_samples=128, n_features=2, n_targets=3),
"reg:absoluteerror",
"mae",
),
]
)
# A strategy for drawing from a set of multi-target/multi-class datasets.
multi_dataset_strategy = make_datasets_with_margin(
_unweighted_multi_datasets_strategy
)()
def non_increasing(L: Sequence[float], tolerance: float = 1e-4) -> bool:
return all((y - x) < tolerance for x, y in zip(L, L[1:]))
def predictor_equal(lhs: xgb.DMatrix, rhs: xgb.DMatrix) -> bool:
"""Assert whether two DMatrices contain the same predictors."""
lcsr = lhs.get_data()
rcsr = rhs.get_data()
return all(
(
np.array_equal(lcsr.data, rcsr.data),
np.array_equal(lcsr.indices, rcsr.indices),
np.array_equal(lcsr.indptr, rcsr.indptr),
)
)
M = TypeVar("M", xgb.Booster, xgb.XGBModel)
def eval_error_metric(predt: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, np.float64]:
"""Evaluation metric for xgb.train"""
label = dtrain.get_label()
r = np.zeros(predt.shape)
gt = predt > 0.5
if predt.size == 0:
return "CustomErr", np.float64(0.0)
r[gt] = 1 - label[gt]
le = predt <= 0.5
r[le] = label[le]
return "CustomErr", np.sum(r)
def eval_error_metric_skl(y_true: np.ndarray, y_score: np.ndarray) -> np.float64:
"""Evaluation metric that looks like metrics provided by sklearn."""
r = np.zeros(y_score.shape)
gt = y_score > 0.5
r[gt] = 1 - y_true[gt]
le = y_score <= 0.5
r[le] = y_true[le]
return np.sum(r)
def root_mean_square(y_true: np.ndarray, y_score: np.ndarray) -> float:
err = y_score - y_true
rmse = np.sqrt(np.dot(err, err) / y_score.size)
return rmse
def softmax(x: np.ndarray) -> np.ndarray:
e = np.exp(x)
return e / np.sum(e)
def softprob_obj(classes: int) -> SklObjective:
def objective(
labels: np.ndarray, predt: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
rows = labels.shape[0]
grad = np.zeros((rows, classes), dtype=float)
hess = np.zeros((rows, classes), dtype=float)
eps = 1e-6
for r in range(predt.shape[0]):
target = labels[r]
p = softmax(predt[r, :])
for c in range(predt.shape[1]):
                assert 0 <= target < classes
g = p[c] - 1.0 if c == target else p[c]
h = max((2.0 * p[c] * (1.0 - p[c])).item(), eps)
grad[r, c] = g
hess[r, c] = h
grad = grad.reshape((rows * classes, 1))
hess = hess.reshape((rows * classes, 1))
return grad, hess
return objective
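def _example_softprob_obj() -> None:
    # Illustrative sketch only: train with the custom softprob objective, the
    # same pattern as the upstream custom-objective demo (assumes scikit-learn
    # is available).
    from sklearn.datasets import make_classification

    X, y = make_classification(
        n_samples=128, n_features=8, n_classes=3, n_informative=4, random_state=0
    )
    Xy = xgb.DMatrix(X, y)
    xgb.train(
        {"num_class": 3, "disable_default_eval_metric": True},
        Xy,
        num_boost_round=2,
        obj=softprob_obj(3),
    )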
class DirectoryExcursion:
"""Change directory. Change back and optionally cleaning up the directory when
exit.
"""
def __init__(self, path: os.PathLike, cleanup: bool = False):
self.path = path
self.curdir = os.path.normpath(os.path.abspath(os.path.curdir))
self.cleanup = cleanup
self.files: Set[str] = set()
def __enter__(self) -> None:
os.chdir(self.path)
if self.cleanup:
self.files = {
os.path.join(root, f)
for root, subdir, files in os.walk(os.path.expanduser(self.path))
for f in files
}
def __exit__(self, *args: Any) -> None:
os.chdir(self.curdir)
if self.cleanup:
files = {
os.path.join(root, f)
for root, subdir, files in os.walk(os.path.expanduser(self.path))
for f in files
}
diff = files.difference(self.files)
for f in diff:
os.remove(f)
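def _example_directory_excursion() -> None:
    # Illustrative sketch only: run inside a temporary directory and remove
    # any file created there on exit.
    import tempfile

    with tempfile.TemporaryDirectory() as tmpdir:
        with DirectoryExcursion(tmpdir, cleanup=True):
            with open("scratch.txt", "w", encoding="utf-8") as fd:
                fd.write("temporary")
        assert not os.path.exists(os.path.join(tmpdir, "scratch.txt"))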
@contextmanager
def captured_output() -> Generator[Tuple[StringIO, StringIO], None, None]:
"""Reassign stdout temporarily in order to test printed statements
Taken from:
https://stackoverflow.com/questions/4219717/how-to-assert-output-with-nosetest-unittest-in-python
Also works for pytest.
"""
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
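def _example_captured_output() -> None:
    # Illustrative sketch only: assert on a printed message.
    with captured_output() as (out, _err):
        print("hello")
    assert out.getvalue().strip() == "hello"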
def timeout(sec: int, *args: Any, enable: bool = True, **kwargs: Any) -> Any:
"""Make a pytest mark for the `pytest-timeout` package.
Parameters
----------
sec :
Timeout seconds.
enable :
Control whether timeout should be applied, used for debugging.
Returns
-------
pytest.mark.timeout
"""
if enable:
return pytest.mark.timeout(sec, *args, **kwargs)
return pytest.mark.timeout(None, *args, **kwargs)
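def _example_timeout_mark() -> None:
    # Illustrative sketch only: the mark is normally applied as a decorator,
    # e.g. ``@timeout(30)`` on a test function, while ``enable=False`` turns
    # the limit off during debugging.
    mark = timeout(30)
    disabled = timeout(30, enable=False)
    assert mark is not None and disabled is not None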
def setup_rmm_pool(_: Any, pytestconfig: pytest.Config) -> None:
if pytestconfig.getoption("--use-rmm-pool"):
if no_rmm()["condition"]:
raise ImportError("The --use-rmm-pool option requires the RMM package")
if no_dask_cuda()["condition"]:
raise ImportError(
"The --use-rmm-pool option requires the dask_cuda package"
)
import rmm
from dask_cuda.utils import get_n_gpus
rmm.reinitialize(
pool_allocator=True,
initial_pool_size=1024 * 1024 * 1024,
devices=list(range(get_n_gpus())),
)
def get_client_workers(client: Any) -> List[str]:
"Get workers from a dask client."
workers = client.scheduler_info()["workers"]
return list(workers.keys())
def demo_dir(path: str) -> str:
"""Look for the demo directory based on the test file name."""
path = normpath(os.path.dirname(path))
while True:
subdirs = [f.path for f in os.scandir(path) if f.is_dir()]
subdirs = [os.path.basename(d) for d in subdirs]
if "demo" in subdirs:
return os.path.join(path, "demo")
new_path = normpath(os.path.join(path, os.path.pardir))
assert new_path != path
path = new_path
def normpath(path: str) -> str:
return os.path.normpath(os.path.abspath(path))
def data_dir(path: str) -> str:
return os.path.join(demo_dir(path), "data")
def load_agaricus(path: str) -> Tuple[xgb.DMatrix, xgb.DMatrix]:
dpath = data_dir(path)
dtrain = xgb.DMatrix(os.path.join(dpath, "agaricus.txt.train?format=libsvm"))
dtest = xgb.DMatrix(os.path.join(dpath, "agaricus.txt.test?format=libsvm"))
return dtrain, dtest
def project_root(path: str) -> str:
return normpath(os.path.join(demo_dir(path), os.path.pardir))
| 26,177
| 27.735456
| 101
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/testing/ranking.py
|
# pylint: disable=too-many-locals
"""Tests for learning to rank."""
from types import ModuleType
from typing import Any
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
def run_ranking_qid_df(impl: ModuleType, tree_method: str) -> None:
"""Test ranking with qid packed into X."""
import scipy.sparse
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedGroupKFold, cross_val_score
X, y, q, _ = tm.make_ltr(n_samples=128, n_features=2, n_query_groups=8, max_rel=3)
# pack qid into x using dataframe
df = impl.DataFrame(X)
df["qid"] = q
ranker = xgb.XGBRanker(n_estimators=3, eval_metric="ndcg", tree_method=tree_method)
ranker.fit(df, y)
s = ranker.score(df, y)
assert s > 0.7
# works with validation datasets as well
valid_df = df.copy()
valid_df.iloc[0, 0] = 3.0
ranker.fit(df, y, eval_set=[(valid_df, y)])
# same as passing qid directly
ranker = xgb.XGBRanker(n_estimators=3, eval_metric="ndcg", tree_method=tree_method)
ranker.fit(X, y, qid=q)
s1 = ranker.score(df, y)
assert np.isclose(s, s1)
# Works with standard sklearn cv
if tree_method != "gpu_hist":
# we need cuML for this.
kfold = StratifiedGroupKFold(shuffle=False)
results = cross_val_score(ranker, df, y, cv=kfold, groups=df.qid)
assert len(results) == 5
# Works with custom metric
def neg_mse(*args: Any, **kwargs: Any) -> float:
return -float(mean_squared_error(*args, **kwargs))
ranker = xgb.XGBRanker(
n_estimators=3,
eval_metric=neg_mse,
tree_method=tree_method,
disable_default_eval_metric=True,
)
ranker.fit(df, y, eval_set=[(valid_df, y)])
score = ranker.score(valid_df, y)
assert np.isclose(score, ranker.evals_result()["validation_0"]["neg_mse"][-1])
# Works with sparse data
if tree_method != "gpu_hist":
# no sparse with cuDF
X_csr = scipy.sparse.csr_matrix(X)
df = impl.DataFrame.sparse.from_spmatrix(
X_csr, columns=[str(i) for i in range(X.shape[1])]
)
df["qid"] = q
ranker = xgb.XGBRanker(
n_estimators=3, eval_metric="ndcg", tree_method=tree_method
)
ranker.fit(df, y)
s2 = ranker.score(df, y)
assert np.isclose(s2, s)
with pytest.raises(ValueError, match="Either `group` or `qid`."):
ranker.fit(df, y, eval_set=[(X, y)])
| 2,513
| 31.230769
| 87
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/testing/params.py
|
"""Strategies for updater tests."""
from typing import cast
import pytest
strategies = pytest.importorskip("hypothesis.strategies")
exact_parameter_strategy = strategies.fixed_dictionaries(
{
"nthread": strategies.integers(1, 4),
"max_depth": strategies.integers(1, 11),
"min_child_weight": strategies.floats(0.5, 2.0),
"alpha": strategies.floats(1e-5, 2.0),
"lambda": strategies.floats(1e-5, 2.0),
"eta": strategies.floats(0.01, 0.5),
"gamma": strategies.floats(1e-5, 2.0),
"seed": strategies.integers(0, 10),
# We cannot enable subsampling as the training loss can increase
# 'subsample': strategies.floats(0.5, 1.0),
"colsample_bytree": strategies.floats(0.5, 1.0),
"colsample_bylevel": strategies.floats(0.5, 1.0),
}
)
hist_parameter_strategy = strategies.fixed_dictionaries(
{
"max_depth": strategies.integers(1, 11),
"max_leaves": strategies.integers(0, 1024),
"max_bin": strategies.integers(2, 512),
"grow_policy": strategies.sampled_from(["lossguide", "depthwise"]),
"min_child_weight": strategies.floats(0.5, 2.0),
# We cannot enable subsampling as the training loss can increase
# 'subsample': strategies.floats(0.5, 1.0),
"colsample_bytree": strategies.floats(0.5, 1.0),
"colsample_bylevel": strategies.floats(0.5, 1.0),
}
).filter(
lambda x: (cast(int, x["max_depth"]) > 0 or cast(int, x["max_leaves"]) > 0)
and (cast(int, x["max_depth"]) > 0 or x["grow_policy"] == "lossguide")
)
hist_multi_parameter_strategy = strategies.fixed_dictionaries(
{
"max_depth": strategies.integers(1, 11),
"max_leaves": strategies.integers(0, 1024),
"max_bin": strategies.integers(2, 512),
"multi_strategy": strategies.sampled_from(
["multi_output_tree", "one_output_per_tree"]
),
"grow_policy": strategies.sampled_from(["lossguide", "depthwise"]),
"min_child_weight": strategies.floats(0.5, 2.0),
# We cannot enable subsampling as the training loss can increase
# 'subsample': strategies.floats(0.5, 1.0),
"colsample_bytree": strategies.floats(0.5, 1.0),
"colsample_bylevel": strategies.floats(0.5, 1.0),
}
).filter(
lambda x: (cast(int, x["max_depth"]) > 0 or cast(int, x["max_leaves"]) > 0)
and (cast(int, x["max_depth"]) > 0 or x["grow_policy"] == "lossguide")
)
cat_parameter_strategy = strategies.fixed_dictionaries(
{
"max_cat_to_onehot": strategies.integers(1, 128),
"max_cat_threshold": strategies.integers(1, 128),
}
)
lambdarank_parameter_strategy = strategies.fixed_dictionaries(
{
"lambdarank_unbiased": strategies.sampled_from([True, False]),
"lambdarank_pair_method": strategies.sampled_from(["topk", "mean"]),
"lambdarank_num_pair_per_sample": strategies.integers(1, 8),
"lambdarank_bias_norm": strategies.floats(0.5, 2.0),
"objective": strategies.sampled_from(
["rank:ndcg", "rank:map", "rank:pairwise"]
),
}
).filter(
lambda x: not (x["lambdarank_unbiased"] and x["lambdarank_pair_method"] == "mean")
)
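def _example_draw_hist_params() -> None:
    """Illustrative sketch only: the strategies above are normally consumed
    through ``hypothesis.given``, e.g. ``@given(hist_parameter_strategy)`` on
    a test; here we simply draw one example directly."""
    params = hist_parameter_strategy.example()
    assert params["max_depth"] >= 1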
| 3,227
| 37.428571
| 86
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/testing/dask.py
|
"""Tests for dask shared by different test modules."""
import numpy as np
import pandas as pd
from dask import array as da
from dask import dataframe as dd
from distributed import Client
import xgboost as xgb
from xgboost.testing.updater import get_basescore
def check_init_estimation_clf(tree_method: str, client: Client) -> None:
"""Test init estimation for classsifier."""
from sklearn.datasets import make_classification
X, y = make_classification(n_samples=4096 * 2, n_features=32, random_state=1994)
clf = xgb.XGBClassifier(n_estimators=1, max_depth=1, tree_method=tree_method)
clf.fit(X, y)
base_score = get_basescore(clf)
dx = da.from_array(X).rechunk(chunks=(32, None))
dy = da.from_array(y).rechunk(chunks=(32,))
dclf = xgb.dask.DaskXGBClassifier(
n_estimators=1, max_depth=1, tree_method=tree_method
)
dclf.client = client
dclf.fit(dx, dy)
dbase_score = get_basescore(dclf)
np.testing.assert_allclose(base_score, dbase_score)
def check_init_estimation_reg(tree_method: str, client: Client) -> None:
"""Test init estimation for regressor."""
from sklearn.datasets import make_regression
# pylint: disable=unbalanced-tuple-unpacking
X, y = make_regression(n_samples=4096 * 2, n_features=32, random_state=1994)
reg = xgb.XGBRegressor(n_estimators=1, max_depth=1, tree_method=tree_method)
reg.fit(X, y)
base_score = get_basescore(reg)
dx = da.from_array(X).rechunk(chunks=(32, None))
dy = da.from_array(y).rechunk(chunks=(32,))
dreg = xgb.dask.DaskXGBRegressor(
n_estimators=1, max_depth=1, tree_method=tree_method
)
dreg.client = client
dreg.fit(dx, dy)
dbase_score = get_basescore(dreg)
np.testing.assert_allclose(base_score, dbase_score)
def check_init_estimation(tree_method: str, client: Client) -> None:
"""Test init estimation."""
check_init_estimation_reg(tree_method, client)
check_init_estimation_clf(tree_method, client)
def check_uneven_nan(client: Client, tree_method: str, n_workers: int) -> None:
"""Issue #9271, not every worker has missing value."""
assert n_workers >= 2
with client.as_current():
clf = xgb.dask.DaskXGBClassifier(tree_method=tree_method)
X = pd.DataFrame({"a": range(10000), "b": range(10000, 0, -1)})
y = pd.Series([*[0] * 5000, *[1] * 5000])
X["a"][:3000:1000] = np.NaN
client.wait_for_workers(n_workers=n_workers)
clf.fit(
dd.from_pandas(X, npartitions=n_workers),
dd.from_pandas(y, npartitions=n_workers),
)
| 2,607
| 33.315789
| 84
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/spark/core.py
|
"""XGBoost pyspark integration submodule for core code."""
import base64
# pylint: disable=fixme, too-many-ancestors, protected-access, no-member, invalid-name
# pylint: disable=too-few-public-methods, too-many-lines, too-many-branches
import json
import logging
import os
from collections import namedtuple
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Tuple,
Type,
Union,
cast,
)
import numpy as np
import pandas as pd
from pyspark import SparkContext, cloudpickle
from pyspark.ml import Estimator, Model
from pyspark.ml.functions import array_to_vector, vector_to_array
from pyspark.ml.linalg import VectorUDT
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import (
HasFeaturesCol,
HasLabelCol,
HasPredictionCol,
HasProbabilityCol,
HasRawPredictionCol,
HasValidationIndicatorCol,
HasWeightCol,
)
from pyspark.ml.util import (
DefaultParamsReader,
DefaultParamsWriter,
MLReadable,
MLReader,
MLWritable,
MLWriter,
)
from pyspark.sql import Column, DataFrame
from pyspark.sql.functions import col, countDistinct, pandas_udf, rand, struct
from pyspark.sql.types import (
ArrayType,
DoubleType,
FloatType,
IntegerType,
IntegralType,
LongType,
ShortType,
)
from scipy.special import expit, softmax # pylint: disable=no-name-in-module
import xgboost
from xgboost import XGBClassifier
from xgboost.compat import is_cudf_available
from xgboost.core import Booster, _check_distributed_params
from xgboost.sklearn import DEFAULT_N_ESTIMATORS, XGBModel, _can_use_qdm
from xgboost.training import train as worker_train
from .data import (
_read_csr_matrix_from_unwrapped_spark_vec,
alias,
create_dmatrix_from_partitions,
pred_contribs,
stack_series,
)
from .params import (
HasArbitraryParamsDict,
HasBaseMarginCol,
HasContribPredictionCol,
HasEnableSparseDataOptim,
HasFeaturesCols,
HasQueryIdCol,
)
from .utils import (
CommunicatorContext,
_get_default_params_from_func,
_get_gpu_id,
_get_max_num_concurrent_tasks,
_get_rabit_args,
_get_spark_session,
_is_local,
deserialize_booster,
deserialize_xgb_model,
get_class_name,
get_logger,
serialize_booster,
use_cuda,
)
# Put pyspark specific params here, they won't be passed to XGBoost.
# like `validationIndicatorCol`, `base_margin_col`
_pyspark_specific_params = [
"featuresCol",
"labelCol",
"weightCol",
"rawPredictionCol",
"predictionCol",
"probabilityCol",
"validationIndicatorCol",
"base_margin_col",
"arbitrary_params_dict",
"force_repartition",
"num_workers",
"feature_names",
"features_cols",
"enable_sparse_data_optim",
"qid_col",
"repartition_random_shuffle",
"pred_contrib_col",
]
_non_booster_params = ["missing", "n_estimators", "feature_types", "feature_weights"]
_pyspark_param_alias_map = {
"features_col": "featuresCol",
"label_col": "labelCol",
"weight_col": "weightCol",
"raw_prediction_col": "rawPredictionCol",
"prediction_col": "predictionCol",
"probability_col": "probabilityCol",
"validation_indicator_col": "validationIndicatorCol",
}
_inverse_pyspark_param_alias_map = {v: k for k, v in _pyspark_param_alias_map.items()}
_unsupported_xgb_params = [
"gpu_id", # we have "device" pyspark param instead.
"enable_categorical", # Use feature_types param to specify categorical feature instead
"use_label_encoder",
"n_jobs", # Do not allow user to set it, will use `spark.task.cpus` value instead.
"nthread", # Ditto
]
_unsupported_fit_params = {
"sample_weight", # Supported by spark param weightCol
"eval_set", # Supported by spark param validation_indicator_col
"sample_weight_eval_set", # Supported by spark param weight_col + validation_indicator_col
"base_margin", # Supported by spark param base_margin_col
"base_margin_eval_set", # Supported by spark param base_margin_col + validation_indicator_col
"group", # Use spark param `qid_col` instead
"qid", # Use spark param `qid_col` instead
"eval_group", # Use spark param `qid_col` instead
"eval_qid", # Use spark param `qid_col` instead
}
_unsupported_train_params = {
"evals", # Supported by spark param validation_indicator_col
"evals_result", # Won't support yet+
}
_unsupported_predict_params = {
# for classification, we can use rawPrediction as margin
"output_margin",
"validate_features", # TODO
"base_margin", # Use pyspark base_margin_col param instead.
}
# TODO: supply hint message for all other unsupported params.
_unsupported_params_hint_message = {
"enable_categorical": "`xgboost.spark` estimators do not have 'enable_categorical' param, "
"but you can set `feature_types` param and mark categorical features with 'c' string."
}
# Global prediction names
Pred = namedtuple(
"Pred", ("prediction", "raw_prediction", "probability", "pred_contrib")
)
pred = Pred("prediction", "rawPrediction", "probability", "predContrib")
_INIT_BOOSTER_SAVE_PATH = "init_booster.json"
class _SparkXGBParams(
HasFeaturesCol,
HasLabelCol,
HasWeightCol,
HasPredictionCol,
HasValidationIndicatorCol,
HasArbitraryParamsDict,
HasBaseMarginCol,
HasFeaturesCols,
HasEnableSparseDataOptim,
HasQueryIdCol,
HasContribPredictionCol,
):
num_workers = Param(
Params._dummy(),
"num_workers",
"The number of XGBoost workers. Each XGBoost worker corresponds to one spark task.",
TypeConverters.toInt,
)
device = Param(
Params._dummy(),
"device",
(
"The device type for XGBoost executors. Available options are `cpu`,`cuda`"
" and `gpu`. Set `device` to `cuda` or `gpu` if the executors are running "
"on GPU instances. Currently, only one GPU per task is supported."
),
TypeConverters.toString,
)
use_gpu = Param(
Params._dummy(),
"use_gpu",
(
"Deprecated, use `device` instead. A boolean variable. Set use_gpu=true "
"if the executors are running on GPU instances. Currently, only one GPU per"
" task is supported."
),
TypeConverters.toBoolean,
)
force_repartition = Param(
Params._dummy(),
"force_repartition",
"A boolean variable. Set force_repartition=true if you "
+ "want to force the input dataset to be repartitioned before XGBoost training."
+ "Note: The auto repartitioning judgement is not fully accurate, so it is recommended"
+ "to have force_repartition be True.",
TypeConverters.toBoolean,
)
repartition_random_shuffle = Param(
Params._dummy(),
"repartition_random_shuffle",
"A boolean variable. Set repartition_random_shuffle=true if you want to random shuffle "
"dataset when repartitioning is required. By default is True.",
TypeConverters.toBoolean,
)
feature_names = Param(
Params._dummy(),
"feature_names",
"A list of str to specify feature names.",
TypeConverters.toList,
)
@classmethod
def _xgb_cls(cls) -> Type[XGBModel]:
"""
Subclasses should override this method and
        return an xgboost.XGBModel subclass
"""
raise NotImplementedError()
# Parameters for xgboost.XGBModel()
@classmethod
def _get_xgb_params_default(cls) -> Dict[str, Any]:
"""Get the xgboost.sklearn.XGBModel default parameters and filter out some"""
xgb_model_default = cls._xgb_cls()()
params_dict = xgb_model_default.get_params()
filtered_params_dict = {
k: params_dict[k] for k in params_dict if k not in _unsupported_xgb_params
}
filtered_params_dict["n_estimators"] = DEFAULT_N_ESTIMATORS
return filtered_params_dict
def _set_xgb_params_default(self) -> None:
"""Set xgboost parameters into spark parameters"""
filtered_params_dict = self._get_xgb_params_default()
self._setDefault(**filtered_params_dict)
def _gen_xgb_params_dict(
self, gen_xgb_sklearn_estimator_param: bool = False
) -> Dict[str, Any]:
"""Generate the xgboost parameters which will be passed into xgboost library"""
xgb_params = {}
non_xgb_params = (
set(_pyspark_specific_params)
| self._get_fit_params_default().keys()
| self._get_predict_params_default().keys()
)
if not gen_xgb_sklearn_estimator_param:
non_xgb_params |= set(_non_booster_params)
for param in self.extractParamMap():
if param.name not in non_xgb_params:
xgb_params[param.name] = self.getOrDefault(param)
arbitrary_params_dict = self.getOrDefault(
self.getParam("arbitrary_params_dict")
)
xgb_params.update(arbitrary_params_dict)
return xgb_params
# Parameters for xgboost.XGBModel().fit()
@classmethod
def _get_fit_params_default(cls) -> Dict[str, Any]:
"""Get the xgboost.XGBModel().fit() parameters"""
fit_params = _get_default_params_from_func(
cls._xgb_cls().fit, _unsupported_fit_params
)
return fit_params
def _set_fit_params_default(self) -> None:
"""Get the xgboost.XGBModel().fit() parameters and set them to spark parameters"""
filtered_params_dict = self._get_fit_params_default()
self._setDefault(**filtered_params_dict)
def _gen_fit_params_dict(self) -> Dict[str, Any]:
"""Generate the fit parameters which will be passed into fit function"""
fit_params_keys = self._get_fit_params_default().keys()
fit_params = {}
for param in self.extractParamMap():
if param.name in fit_params_keys:
fit_params[param.name] = self.getOrDefault(param)
return fit_params
@classmethod
def _get_predict_params_default(cls) -> Dict[str, Any]:
"""Get the parameters from xgboost.XGBModel().predict()"""
predict_params = _get_default_params_from_func(
cls._xgb_cls().predict, _unsupported_predict_params
)
return predict_params
def _set_predict_params_default(self) -> None:
"""Get the parameters from xgboost.XGBModel().predict() and
set them into spark parameters"""
filtered_params_dict = self._get_predict_params_default()
self._setDefault(**filtered_params_dict)
def _gen_predict_params_dict(self) -> Dict[str, Any]:
"""Generate predict parameters which will be passed into xgboost.XGBModel().predict()"""
predict_params_keys = self._get_predict_params_default().keys()
predict_params = {}
for param in self.extractParamMap():
if param.name in predict_params_keys:
predict_params[param.name] = self.getOrDefault(param)
return predict_params
def _validate_params(self) -> None:
# pylint: disable=too-many-branches
init_model = self.getOrDefault("xgb_model")
if init_model is not None and not isinstance(init_model, Booster):
raise ValueError(
"The xgb_model param must be set with a `xgboost.core.Booster` "
"instance."
)
if self.getOrDefault(self.num_workers) < 1:
raise ValueError(
f"Number of workers was {self.getOrDefault(self.num_workers)}."
f"It cannot be less than 1 [Default is 1]"
)
tree_method = self.getOrDefault(self.getParam("tree_method"))
if (
self.getOrDefault(self.use_gpu) or use_cuda(self.getOrDefault(self.device))
) and not _can_use_qdm(tree_method):
raise ValueError(
f"The `{tree_method}` tree method is not supported on GPU."
)
if self.getOrDefault(self.features_cols):
if not use_cuda(self.getOrDefault(self.device)) and not self.getOrDefault(
self.use_gpu
):
raise ValueError(
"features_col param with list value requires `device=cuda`."
)
if self.getOrDefault("objective") is not None:
if not isinstance(self.getOrDefault("objective"), str):
raise ValueError("Only string type 'objective' param is allowed.")
eval_metric = "eval_metric"
if self.getOrDefault(eval_metric) is not None:
if not (
isinstance(self.getOrDefault(eval_metric), str)
or (
isinstance(self.getOrDefault(eval_metric), List)
and all(
isinstance(metric, str)
for metric in self.getOrDefault(eval_metric)
)
)
):
raise ValueError(
"Only string type or list of string type 'eval_metric' param is allowed."
)
if self.getOrDefault("early_stopping_rounds") is not None:
if not (
self.isDefined(self.validationIndicatorCol)
and self.getOrDefault(self.validationIndicatorCol) != ""
):
raise ValueError(
"If 'early_stopping_rounds' param is set, you need to set "
"'validation_indicator_col' param as well."
)
if self.getOrDefault(self.enable_sparse_data_optim):
if self.getOrDefault("missing") != 0.0:
                # If a DMatrix is constructed from a csr / csc matrix, inactive
                # elements in the csr / csc matrix are regarded as missing values.
                # But in pyspark it is hard to control whether elements in a sparse
                # vector column are active or inactive: some spark transformers such
                # as VectorAssembler might compress vectors to dense or sparse format
                # automatically, and when a spark ML vector object is compressed to a
                # sparse vector, all zero-value elements become inactive. So we force
                # the missing param to be 0 when the enable_sparse_data_optim config
                # is True.
raise ValueError(
"If enable_sparse_data_optim is True, missing param != 0 is not supported."
)
if self.getOrDefault(self.features_cols):
raise ValueError(
"If enable_sparse_data_optim is True, you cannot set multiple feature columns "
"but you should set one feature column with values of "
"`pyspark.ml.linalg.Vector` type."
)
if use_cuda(self.getOrDefault(self.device)) or self.getOrDefault(self.use_gpu):
gpu_per_task = (
_get_spark_session()
.sparkContext.getConf()
.get("spark.task.resource.gpu.amount")
)
is_local = _is_local(_get_spark_session().sparkContext)
if is_local:
# checking spark local mode.
if gpu_per_task:
raise RuntimeError(
"The spark cluster does not support gpu configuration for local mode. "
"Please delete spark.executor.resource.gpu.amount and "
"spark.task.resource.gpu.amount"
)
# Support GPU training in Spark local mode is just for debugging purposes,
# so it's okay for printing the below warning instead of checking the real
# gpu numbers and raising the exception.
get_logger(self.__class__.__name__).warning(
"You enabled GPU in spark local mode. Please make sure your local "
"node has at least %d GPUs",
self.getOrDefault(self.num_workers),
)
else:
# checking spark non-local mode.
if not gpu_per_task or int(gpu_per_task) < 1:
raise RuntimeError(
"The spark cluster does not have the necessary GPU"
+ "configuration for the spark task. Therefore, we cannot"
+ "run xgboost training using GPU."
)
if int(gpu_per_task) > 1:
get_logger(self.__class__.__name__).warning(
"You configured %s GPU cores for each spark task, but in "
"XGBoost training, every Spark task will only use one GPU core.",
gpu_per_task,
)
def _validate_and_convert_feature_col_as_float_col_list(
dataset: DataFrame, features_col_names: List[str]
) -> List[Column]:
"""Values in feature columns must be integral types or float/double types"""
feature_cols = []
for c in features_col_names:
if isinstance(dataset.schema[c].dataType, DoubleType):
feature_cols.append(col(c).cast(FloatType()).alias(c))
elif isinstance(dataset.schema[c].dataType, (FloatType, IntegralType)):
feature_cols.append(col(c))
else:
raise ValueError(
"Values in feature columns must be integral types or float/double types."
)
return feature_cols
def _validate_and_convert_feature_col_as_array_col(
dataset: DataFrame, features_col_name: str
) -> Column:
"""It handles
1. Convert vector type to array type
2. Cast to Array(Float32)"""
features_col_datatype = dataset.schema[features_col_name].dataType
features_col = col(features_col_name)
if isinstance(features_col_datatype, ArrayType):
if not isinstance(
features_col_datatype.elementType,
(DoubleType, FloatType, LongType, IntegerType, ShortType),
):
raise ValueError(
"If feature column is array type, its elements must be number type."
)
features_array_col = features_col.cast(ArrayType(FloatType())).alias(alias.data)
elif isinstance(features_col_datatype, VectorUDT):
features_array_col = vector_to_array(features_col, dtype="float32").alias(
alias.data
)
else:
raise ValueError(
"feature column must be array type or `pyspark.ml.linalg.Vector` type, "
"if you want to use multiple numetric columns as features, please use "
"`pyspark.ml.transform.VectorAssembler` to assemble them into a vector "
"type column first."
)
return features_array_col
def _get_unwrap_udt_fn() -> Callable[[Union[Column, str]], Column]:
try:
from pyspark.sql.functions import unwrap_udt
return unwrap_udt
except ImportError:
pass
try:
from pyspark.databricks.sql.functions import unwrap_udt as databricks_unwrap_udt
return databricks_unwrap_udt
except ImportError as exc:
raise RuntimeError(
"Cannot import pyspark `unwrap_udt` function. Please install pyspark>=3.4 "
"or run on Databricks Runtime."
) from exc
def _get_unwrapped_vec_cols(feature_col: Column) -> List[Column]:
unwrap_udt = _get_unwrap_udt_fn()
features_unwrapped_vec_col = unwrap_udt(feature_col)
# After a `pyspark.ml.linalg.VectorUDT` type column being unwrapped, it becomes
# a pyspark struct type column, the struct fields are:
# - `type`: byte
# - `size`: int
# - `indices`: array<int>
# - `values`: array<double>
# For sparse vector, `type` field is 0, `size` field means vector length,
# `indices` field is the array of active element indices, `values` field
# is the array of active element values.
# For dense vector, `type` field is 1, `size` and `indices` fields are None,
# `values` field is the array of the vector element values.
return [
features_unwrapped_vec_col.type.alias("featureVectorType"),
features_unwrapped_vec_col.size.alias("featureVectorSize"),
features_unwrapped_vec_col.indices.alias("featureVectorIndices"),
# Note: the value field is double array type, cast it to float32 array type
# for speedup following repartitioning.
features_unwrapped_vec_col.values.cast(ArrayType(FloatType())).alias(
"featureVectorValues"
),
]
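def _example_unwrap_features_col(dataset: DataFrame) -> DataFrame:
    """Illustrative sketch only (hypothetical helper, not part of the public
    API): select the unwrapped struct fields of a ``VectorUDT`` column named
    ``features``; assumes pyspark>=3.4 or Databricks Runtime for
    ``unwrap_udt``."""
    return dataset.select(*_get_unwrapped_vec_cols(col("features")))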
FeatureProp = namedtuple(
"FeatureProp",
("enable_sparse_data_optim", "has_validation_col", "features_cols_names"),
)
class _SparkXGBEstimator(Estimator, _SparkXGBParams, MLReadable, MLWritable):
_input_kwargs: Dict[str, Any]
def __init__(self) -> None:
super().__init__()
self._set_xgb_params_default()
self._set_fit_params_default()
self._set_predict_params_default()
# Note: The default value for arbitrary_params_dict must always be empty dict.
# For additional settings added into "arbitrary_params_dict" by default,
# they are added in `setParams`.
self._setDefault(
num_workers=1,
device="cpu",
use_gpu=False,
force_repartition=False,
repartition_random_shuffle=False,
feature_names=None,
feature_types=None,
arbitrary_params_dict={},
)
def setParams(self, **kwargs: Any) -> None: # pylint: disable=invalid-name
"""
Set params for the estimator.
"""
_extra_params = {}
if "arbitrary_params_dict" in kwargs:
raise ValueError("Invalid param name: 'arbitrary_params_dict'.")
for k, v in kwargs.items():
# We're not allowing user use features_cols directly.
if k == self.features_cols.name:
raise ValueError(
f"Unsupported param '{k}' please use features_col instead."
)
if k in _inverse_pyspark_param_alias_map:
raise ValueError(
f"Please use param name {_inverse_pyspark_param_alias_map[k]} instead."
)
if k in _pyspark_param_alias_map:
if k == _inverse_pyspark_param_alias_map[
self.featuresCol.name
] and isinstance(v, list):
real_k = self.features_cols.name
k = real_k
else:
real_k = _pyspark_param_alias_map[k]
k = real_k
if self.hasParam(k):
if k == "features_col" and isinstance(v, list):
self._set(**{"features_cols": v})
else:
self._set(**{str(k): v})
else:
if (
k in _unsupported_xgb_params
or k in _unsupported_fit_params
or k in _unsupported_predict_params
or k in _unsupported_train_params
):
err_msg = _unsupported_params_hint_message.get(
k, f"Unsupported param '{k}'."
)
raise ValueError(err_msg)
_extra_params[k] = v
_check_distributed_params(kwargs)
_existing_extra_params = self.getOrDefault(self.arbitrary_params_dict)
self._set(arbitrary_params_dict={**_existing_extra_params, **_extra_params})
@classmethod
def _pyspark_model_cls(cls) -> Type["_SparkXGBModel"]:
"""
        Subclasses should override this method and
        return a _SparkXGBModel subclass.
"""
raise NotImplementedError()
def _create_pyspark_model(self, xgb_model: XGBModel) -> "_SparkXGBModel":
return self._pyspark_model_cls()(xgb_model)
def _convert_to_sklearn_model(self, booster: bytearray, config: str) -> XGBModel:
xgb_sklearn_params = self._gen_xgb_params_dict(
gen_xgb_sklearn_estimator_param=True
)
sklearn_model = self._xgb_cls()(**xgb_sklearn_params)
sklearn_model.load_model(booster)
sklearn_model._Booster.load_config(config)
return sklearn_model
def _query_plan_contains_valid_repartition(self, dataset: DataFrame) -> bool:
"""
        Returns true if the latest operation in the logical plan is a valid repartition.
        The logical plan string format is like:
        == Optimized Logical Plan ==
        Repartition 4, true
        +- LogicalRDD [features#12, label#13L], false
        i.e., the top line in the logical plan is the last operation to execute.
        So, in this method, we check whether the first line is a "Repartition"
        operation and whether the resulting dataframe has the same number of
        partitions as the num_workers param; if so, the dataframe is already
        well repartitioned and we don't need to repartition it again.
"""
num_partitions = dataset.rdd.getNumPartitions()
assert dataset._sc._jvm is not None
query_plan = dataset._sc._jvm.PythonSQLUtils.explainString(
dataset._jdf.queryExecution(), "extended"
)
start = query_plan.index("== Optimized Logical Plan ==")
start += len("== Optimized Logical Plan ==") + 1
num_workers = self.getOrDefault(self.num_workers)
if (
query_plan[start : start + len("Repartition")] == "Repartition"
and num_workers == num_partitions
):
return True
return False
def _repartition_needed(self, dataset: DataFrame) -> bool:
"""
We repartition the dataset if the number of workers is not equal to the number of
partitions. There is also a check to make sure there was "active partitioning"
where either Round Robin or Hash partitioning was actively used before this stage.
"""
if self.getOrDefault(self.force_repartition):
return True
try:
if self._query_plan_contains_valid_repartition(dataset):
return False
except Exception: # pylint: disable=broad-except
pass
return True
def _get_distributed_train_params(self, dataset: DataFrame) -> Dict[str, Any]:
"""
This just gets the configuration params for distributed xgboost
"""
params = self._gen_xgb_params_dict()
fit_params = self._gen_fit_params_dict()
verbose_eval = fit_params.pop("verbose", None)
params.update(fit_params)
params["verbose_eval"] = verbose_eval
classification = self._xgb_cls() == XGBClassifier
if classification:
num_classes = int(
dataset.select(countDistinct(alias.label)).collect()[0][0]
)
if num_classes <= 2:
params["objective"] = "binary:logistic"
else:
params["objective"] = "multi:softprob"
params["num_class"] = num_classes
else:
# use user specified objective or default objective.
# e.g., the default objective for Regressor is 'reg:squarederror'
params["objective"] = self.getOrDefault("objective")
# TODO: support "num_parallel_tree" for random forest
params["num_boost_round"] = self.getOrDefault("n_estimators")
return params
@classmethod
def _get_xgb_train_call_args(
cls, train_params: Dict[str, Any]
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
xgb_train_default_args = _get_default_params_from_func(
xgboost.train, _unsupported_train_params
)
booster_params, kwargs_params = {}, {}
for key, value in train_params.items():
if key in xgb_train_default_args:
kwargs_params[key] = value
else:
booster_params[key] = value
booster_params = {
k: v for k, v in booster_params.items() if k not in _non_booster_params
}
return booster_params, kwargs_params
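    # Hedged sketch of the split above (input values are assumed): keys that
    # ``xgboost.train`` accepts as keyword arguments go to the kwargs dict,
    # everything else is treated as a booster parameter.
    #
    # >>> booster_params, kwargs = _SparkXGBEstimator._get_xgb_train_call_args(
    # ...     {"max_depth": 5, "num_boost_round": 10, "verbose_eval": False}
    # ... )
    # >>> booster_params
    # {'max_depth': 5}
    # >>> sorted(kwargs)
    # ['num_boost_round', 'verbose_eval']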
def _prepare_input_columns_and_feature_prop(
self, dataset: DataFrame
) -> Tuple[List[Column], FeatureProp]:
label_col = col(self.getOrDefault(self.labelCol)).alias(alias.label)
select_cols = [label_col]
features_cols_names = None
enable_sparse_data_optim = self.getOrDefault(self.enable_sparse_data_optim)
if enable_sparse_data_optim:
features_col_name = self.getOrDefault(self.featuresCol)
features_col_datatype = dataset.schema[features_col_name].dataType
if not isinstance(features_col_datatype, VectorUDT):
raise ValueError(
"If enable_sparse_data_optim is True, the feature column values must be "
"`pyspark.ml.linalg.Vector` type."
)
select_cols.extend(_get_unwrapped_vec_cols(col(features_col_name)))
else:
if self.getOrDefault(self.features_cols):
features_cols_names = self.getOrDefault(self.features_cols)
features_cols = _validate_and_convert_feature_col_as_float_col_list(
dataset, features_cols_names
)
select_cols.extend(features_cols)
else:
features_array_col = _validate_and_convert_feature_col_as_array_col(
dataset, self.getOrDefault(self.featuresCol)
)
select_cols.append(features_array_col)
if self.isDefined(self.weightCol) and self.getOrDefault(self.weightCol) != "":
select_cols.append(
col(self.getOrDefault(self.weightCol)).alias(alias.weight)
)
has_validation_col = False
if (
self.isDefined(self.validationIndicatorCol)
and self.getOrDefault(self.validationIndicatorCol) != ""
):
select_cols.append(
col(self.getOrDefault(self.validationIndicatorCol)).alias(alias.valid)
)
            # In some cases (see https://issues.apache.org/jira/browse/SPARK-40407),
            # df.repartition can result in some reducer partitions without data,
            # which will cause an exception or a hang when creating the DMatrix.
has_validation_col = True
if (
self.isDefined(self.base_margin_col)
and self.getOrDefault(self.base_margin_col) != ""
):
select_cols.append(
col(self.getOrDefault(self.base_margin_col)).alias(alias.margin)
)
if self.isDefined(self.qid_col) and self.getOrDefault(self.qid_col) != "":
select_cols.append(col(self.getOrDefault(self.qid_col)).alias(alias.qid))
feature_prop = FeatureProp(
enable_sparse_data_optim, has_validation_col, features_cols_names
)
return select_cols, feature_prop
def _prepare_input(self, dataset: DataFrame) -> Tuple[DataFrame, FeatureProp]:
"""Prepare the input including column pruning, repartition and so on"""
select_cols, feature_prop = self._prepare_input_columns_and_feature_prop(
dataset
)
dataset = dataset.select(*select_cols)
num_workers = self.getOrDefault(self.num_workers)
sc = _get_spark_session().sparkContext
max_concurrent_tasks = _get_max_num_concurrent_tasks(sc)
if num_workers > max_concurrent_tasks:
get_logger(self.__class__.__name__).warning(
"The num_workers %s set for xgboost distributed "
"training is greater than current max number of concurrent "
"spark task slots, you need wait until more task slots available "
"or you need increase spark cluster workers.",
num_workers,
)
if self._repartition_needed(dataset) or (
self.isDefined(self.validationIndicatorCol)
and self.getOrDefault(self.validationIndicatorCol) != ""
):
            # If validationIndicatorCol is defined, we always repartition the
            # dataset to balance the data, because the user might have unioned
            # the train and validation datasets; without shuffling, some
            # partitions might contain only train or only validation data.
if self.getOrDefault(self.repartition_random_shuffle):
                # In some cases, spark round-robin repartitioning might cause
                # data skew; using a random shuffle can address it.
dataset = dataset.repartition(num_workers, rand(1))
else:
dataset = dataset.repartition(num_workers)
if self.isDefined(self.qid_col) and self.getOrDefault(self.qid_col) != "":
# XGBoost requires qid to be sorted for each partition
dataset = dataset.sortWithinPartitions(alias.qid, ascending=True)
return dataset, feature_prop
def _get_xgb_parameters(
self, dataset: DataFrame
) -> Tuple[Dict[str, Any], Dict[str, Any], Dict[str, Any]]:
train_params = self._get_distributed_train_params(dataset)
booster_params, train_call_kwargs_params = self._get_xgb_train_call_args(
train_params
)
cpu_per_task = int(
_get_spark_session().sparkContext.getConf().get("spark.task.cpus", "1")
)
dmatrix_kwargs = {
"nthread": cpu_per_task,
"feature_types": self.getOrDefault("feature_types"),
"feature_names": self.getOrDefault("feature_names"),
"feature_weights": self.getOrDefault("feature_weights"),
"missing": float(self.getOrDefault("missing")),
}
if dmatrix_kwargs["feature_types"] is not None:
dmatrix_kwargs["enable_categorical"] = True
booster_params["nthread"] = cpu_per_task
# Remove the parameters whose value is None
booster_params = {k: v for k, v in booster_params.items() if v is not None}
train_call_kwargs_params = {
k: v for k, v in train_call_kwargs_params.items() if v is not None
}
dmatrix_kwargs = {k: v for k, v in dmatrix_kwargs.items() if v is not None}
return booster_params, train_call_kwargs_params, dmatrix_kwargs
def _fit(self, dataset: DataFrame) -> "_SparkXGBModel":
# pylint: disable=too-many-statements, too-many-locals
self._validate_params()
dataset, feature_prop = self._prepare_input(dataset)
(
booster_params,
train_call_kwargs_params,
dmatrix_kwargs,
) = self._get_xgb_parameters(dataset)
run_on_gpu = use_cuda(self.getOrDefault(self.device)) or self.getOrDefault(
self.use_gpu
)
is_local = _is_local(_get_spark_session().sparkContext)
num_workers = self.getOrDefault(self.num_workers)
def _train_booster(
pandas_df_iter: Iterator[pd.DataFrame],
) -> Iterator[pd.DataFrame]:
"""Takes in an RDD partition and outputs a booster for that partition after
going through the Rabit Ring protocol
"""
from pyspark import BarrierTaskContext
context = BarrierTaskContext.get()
dev_ordinal = None
use_qdm = _can_use_qdm(booster_params.get("tree_method", None))
if run_on_gpu:
dev_ordinal = (
context.partitionId() if is_local else _get_gpu_id(context)
)
booster_params["device"] = "cuda:" + str(dev_ordinal)
                # If cuDF is not installed, use DMatrix instead of QDM,
                # because without cuDF, DMatrix performs better than QDM.
                # Note: we check `is_cudf_available` on the spark worker side
                # because the worker might have a different python environment
                # than the driver.
use_qdm = use_qdm and is_cudf_available()
if use_qdm and (booster_params.get("max_bin", None) is not None):
dmatrix_kwargs["max_bin"] = booster_params["max_bin"]
_rabit_args = {}
if context.partitionId() == 0:
get_logger("XGBoostPySpark").debug(
"booster params: %s\n"
"train_call_kwargs_params: %s\n"
"dmatrix_kwargs: %s",
booster_params,
train_call_kwargs_params,
dmatrix_kwargs,
)
_rabit_args = _get_rabit_args(context, num_workers)
worker_message = {
"rabit_msg": _rabit_args,
"use_qdm": use_qdm,
}
messages = context.allGather(message=json.dumps(worker_message))
if len(set(json.loads(x)["use_qdm"] for x in messages)) != 1:
                raise RuntimeError("The workers' cudf environments are inconsistent.")
_rabit_args = json.loads(messages[0])["rabit_msg"]
evals_result: Dict[str, Any] = {}
with CommunicatorContext(context, **_rabit_args):
dtrain, dvalid = create_dmatrix_from_partitions(
pandas_df_iter,
feature_prop.features_cols_names,
dev_ordinal,
use_qdm,
dmatrix_kwargs,
enable_sparse_data_optim=feature_prop.enable_sparse_data_optim,
has_validation_col=feature_prop.has_validation_col,
)
if dvalid is not None:
dval = [(dtrain, "training"), (dvalid, "validation")]
else:
dval = None
booster = worker_train(
params=booster_params,
dtrain=dtrain,
evals=dval,
evals_result=evals_result,
**train_call_kwargs_params,
)
context.barrier()
if context.partitionId() == 0:
yield pd.DataFrame(
data={
"config": [booster.save_config()],
"booster": [booster.save_raw("json").decode("utf-8")],
}
)
def _run_job() -> Tuple[str, str]:
ret = (
dataset.mapInPandas(
_train_booster, schema="config string, booster string" # type: ignore
)
.rdd.barrier()
.mapPartitions(lambda x: x)
.collect()[0]
)
return ret[0], ret[1]
(config, booster) = _run_job()
result_xgb_model = self._convert_to_sklearn_model(
bytearray(booster, "utf-8"), config
)
spark_model = self._create_pyspark_model(result_xgb_model)
        # According to the pyspark ML convention, the model uid should be the
        # same as the estimator uid.
spark_model._resetUid(self.uid)
return self._copyValues(spark_model)
def write(self) -> "SparkXGBWriter":
"""
Return the writer for saving the estimator.
"""
return SparkXGBWriter(self)
@classmethod
def read(cls) -> "SparkXGBReader":
"""
Return the reader for loading the estimator.
"""
return SparkXGBReader(cls)
class _SparkXGBModel(Model, _SparkXGBParams, MLReadable, MLWritable):
def __init__(self, xgb_sklearn_model: Optional[XGBModel] = None) -> None:
super().__init__()
self._xgb_sklearn_model = xgb_sklearn_model
@classmethod
def _xgb_cls(cls) -> Type[XGBModel]:
raise NotImplementedError()
def get_booster(self) -> Booster:
"""
Return the `xgboost.core.Booster` instance.
"""
assert self._xgb_sklearn_model is not None
return self._xgb_sklearn_model.get_booster()
def get_feature_importances(
self, importance_type: str = "weight"
) -> Dict[str, Union[float, List[float]]]:
"""Get feature importance of each feature.
Importance type can be defined as:
* 'weight': the number of times a feature is used to split the data across all trees.
* 'gain': the average gain across all splits the feature is used in.
* 'cover': the average coverage across all splits the feature is used in.
* 'total_gain': the total gain across all splits the feature is used in.
* 'total_cover': the total coverage across all splits the feature is used in.
Parameters
----------
importance_type: str, default 'weight'
One of the importance types defined above.
"""
return self.get_booster().get_score(importance_type=importance_type)
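    # Hedged usage sketch (``model`` is an assumed fitted spark model; the
    # returned feature names and scores are hypothetical):
    #
    # >>> model.get_feature_importances(importance_type="gain")
    # {'f0': 0.8, 'f1': 0.2}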
def write(self) -> "SparkXGBModelWriter":
"""
Return the writer for saving the model.
"""
return SparkXGBModelWriter(self)
@classmethod
def read(cls) -> "SparkXGBModelReader":
"""
Return the reader for loading the model.
"""
return SparkXGBModelReader(cls)
def _get_feature_col(
self, dataset: DataFrame
) -> Tuple[List[Column], Optional[List[str]]]:
"""XGBoost model trained with features_cols parameter can also predict
vector or array feature type. But first we need to check features_cols
and then featuresCol
"""
if self.getOrDefault(self.enable_sparse_data_optim):
feature_col_names = None
features_col = _get_unwrapped_vec_cols(
col(self.getOrDefault(self.featuresCol))
)
return features_col, feature_col_names
feature_col_names = self.getOrDefault(self.features_cols)
features_col = []
if feature_col_names and set(feature_col_names).issubset(set(dataset.columns)):
# The model is trained with features_cols and the predicted dataset
# also contains all the columns specified by features_cols.
features_col = _validate_and_convert_feature_col_as_float_col_list(
dataset, feature_col_names
)
else:
# 1. The model was trained by features_cols, but the dataset doesn't contain
# all the columns specified by features_cols, so we need to check if
# the dataframe has the featuresCol
# 2. The model was trained by featuresCol, and the predicted dataset must contain
# featuresCol column.
feature_col_names = None
features_col.append(
_validate_and_convert_feature_col_as_array_col(
dataset, self.getOrDefault(self.featuresCol)
)
)
return features_col, feature_col_names
def _transform(self, dataset: DataFrame) -> DataFrame:
# pylint: disable=too-many-statements, too-many-locals
        # Save xgb_sklearn_model and predict_params as local variables
        # to avoid pickling the `self` object to the remote workers.
xgb_sklearn_model = self._xgb_sklearn_model
predict_params = self._gen_predict_params_dict()
has_base_margin = False
if (
self.isDefined(self.base_margin_col)
and self.getOrDefault(self.base_margin_col) != ""
):
has_base_margin = True
base_margin_col = col(self.getOrDefault(self.base_margin_col)).alias(
alias.margin
)
features_col, feature_col_names = self._get_feature_col(dataset)
enable_sparse_data_optim = self.getOrDefault(self.enable_sparse_data_optim)
pred_contrib_col_name = None
if (
self.isDefined(self.pred_contrib_col)
and self.getOrDefault(self.pred_contrib_col) != ""
):
pred_contrib_col_name = self.getOrDefault(self.pred_contrib_col)
single_pred = True
schema = "double"
if pred_contrib_col_name:
single_pred = False
schema = f"{pred.prediction} double, {pred.pred_contrib} array<double>"
@pandas_udf(schema) # type: ignore
def predict_udf(iterator: Iterator[pd.DataFrame]) -> Iterator[pd.Series]:
assert xgb_sklearn_model is not None
model = xgb_sklearn_model
for data in iterator:
if enable_sparse_data_optim:
X = _read_csr_matrix_from_unwrapped_spark_vec(data)
else:
if feature_col_names is not None:
X = data[feature_col_names]
else:
X = stack_series(data[alias.data])
if has_base_margin:
base_margin = data[alias.margin].to_numpy()
else:
base_margin = None
data = {}
preds = model.predict(
X,
base_margin=base_margin,
validate_features=False,
**predict_params,
)
data[pred.prediction] = pd.Series(preds)
if pred_contrib_col_name:
contribs = pred_contribs(model, X, base_margin)
data[pred.pred_contrib] = pd.Series(list(contribs))
yield pd.DataFrame(data=data)
else:
yield data[pred.prediction]
if has_base_margin:
pred_col = predict_udf(struct(*features_col, base_margin_col))
else:
pred_col = predict_udf(struct(*features_col))
prediction_col_name = self.getOrDefault(self.predictionCol)
if single_pred:
dataset = dataset.withColumn(prediction_col_name, pred_col)
else:
pred_struct_col = "_prediction_struct"
dataset = dataset.withColumn(pred_struct_col, pred_col)
dataset = dataset.withColumn(
prediction_col_name, getattr(col(pred_struct_col), pred.prediction)
)
if pred_contrib_col_name:
dataset = dataset.withColumn(
pred_contrib_col_name,
array_to_vector(getattr(col(pred_struct_col), pred.pred_contrib)),
)
dataset = dataset.drop(pred_struct_col)
return dataset
class _ClassificationModel( # pylint: disable=abstract-method
_SparkXGBModel, HasProbabilityCol, HasRawPredictionCol, HasContribPredictionCol
):
"""
The model returned by :func:`xgboost.spark.SparkXGBClassifier.fit`
.. Note:: This API is experimental.
"""
def _transform(self, dataset: DataFrame) -> DataFrame:
# pylint: disable=too-many-statements, too-many-locals
        # Save xgb_sklearn_model and predict_params as local variables
        # to avoid pickling the `self` object to the remote workers.
xgb_sklearn_model = self._xgb_sklearn_model
predict_params = self._gen_predict_params_dict()
has_base_margin = False
if (
self.isDefined(self.base_margin_col)
and self.getOrDefault(self.base_margin_col) != ""
):
has_base_margin = True
base_margin_col = col(self.getOrDefault(self.base_margin_col)).alias(
alias.margin
)
def transform_margin(margins: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
if margins.ndim == 1:
# binomial case
classone_probs = expit(margins)
classzero_probs = 1.0 - classone_probs
raw_preds = np.vstack((-margins, margins)).transpose()
class_probs = np.vstack((classzero_probs, classone_probs)).transpose()
else:
# multinomial case
raw_preds = margins
class_probs = softmax(raw_preds, axis=1)
return raw_preds, class_probs
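        # Hedged numeric sketch of the binomial branch above: for a margin
        # m = 0.0, expit(0.0) == 0.5, so the raw prediction row is
        # (-m, m) == (0.0, 0.0) and the class probabilities are (0.5, 0.5).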
features_col, feature_col_names = self._get_feature_col(dataset)
enable_sparse_data_optim = self.getOrDefault(self.enable_sparse_data_optim)
pred_contrib_col_name = None
if (
self.isDefined(self.pred_contrib_col)
and self.getOrDefault(self.pred_contrib_col) != ""
):
pred_contrib_col_name = self.getOrDefault(self.pred_contrib_col)
schema = (
f"{pred.raw_prediction} array<double>, {pred.prediction} double,"
f" {pred.probability} array<double>"
)
if pred_contrib_col_name:
            # We force strict_shape to True when predicting contribs,
            # so the output also has a 3-D shape.
schema = f"{schema}, {pred.pred_contrib} array<array<double>>"
@pandas_udf(schema) # type: ignore
def predict_udf(
iterator: Iterator[Tuple[pd.Series, ...]]
) -> Iterator[pd.DataFrame]:
assert xgb_sklearn_model is not None
model = xgb_sklearn_model
for data in iterator:
if enable_sparse_data_optim:
X = _read_csr_matrix_from_unwrapped_spark_vec(data)
else:
if feature_col_names is not None:
X = data[feature_col_names] # type: ignore
else:
X = stack_series(data[alias.data])
if has_base_margin:
base_margin = stack_series(data[alias.margin])
else:
base_margin = None
margins = model.predict(
X,
base_margin=base_margin,
output_margin=True,
validate_features=False,
**predict_params,
)
raw_preds, class_probs = transform_margin(margins)
                # Note: following the scala implementation, we take the argmax
                # of the class probabilities, not of the margin, to get the
                # prediction.
preds = np.argmax(class_probs, axis=1)
result: Dict[str, pd.Series] = {
pred.raw_prediction: pd.Series(list(raw_preds)),
pred.prediction: pd.Series(preds),
pred.probability: pd.Series(list(class_probs)),
}
if pred_contrib_col_name:
contribs = pred_contribs(model, X, base_margin, strict_shape=True)
result[pred.pred_contrib] = pd.Series(list(contribs.tolist()))
yield pd.DataFrame(data=result)
if has_base_margin:
pred_struct = predict_udf(struct(*features_col, base_margin_col))
else:
pred_struct = predict_udf(struct(*features_col))
pred_struct_col = "_prediction_struct"
dataset = dataset.withColumn(pred_struct_col, pred_struct)
raw_prediction_col_name = self.getOrDefault(self.rawPredictionCol)
if raw_prediction_col_name:
dataset = dataset.withColumn(
raw_prediction_col_name,
array_to_vector(getattr(col(pred_struct_col), pred.raw_prediction)),
)
prediction_col_name = self.getOrDefault(self.predictionCol)
if prediction_col_name:
dataset = dataset.withColumn(
prediction_col_name, getattr(col(pred_struct_col), pred.prediction)
)
probability_col_name = self.getOrDefault(self.probabilityCol)
if probability_col_name:
dataset = dataset.withColumn(
probability_col_name,
array_to_vector(getattr(col(pred_struct_col), pred.probability)),
)
if pred_contrib_col_name:
dataset = dataset.withColumn(
pred_contrib_col_name,
getattr(col(pred_struct_col), pred.pred_contrib),
)
return dataset.drop(pred_struct_col)
class _SparkXGBSharedReadWrite:
@staticmethod
def saveMetadata(
instance: Union[_SparkXGBEstimator, _SparkXGBModel],
path: str,
sc: SparkContext,
logger: logging.Logger,
extraMetadata: Optional[Dict[str, Any]] = None,
) -> None:
"""
Save the metadata of an xgboost.spark._SparkXGBEstimator or
xgboost.spark._SparkXGBModel.
"""
instance._validate_params()
skipParams = ["callbacks", "xgb_model"]
jsonParams = {}
for p, v in instance._paramMap.items(): # pylint: disable=protected-access
if p.name not in skipParams:
jsonParams[p.name] = v
extraMetadata = extraMetadata or {}
callbacks = instance.getOrDefault("callbacks")
if callbacks is not None:
logger.warning(
"The callbacks parameter is saved using cloudpickle and it "
"is not a fully self-contained format. It may fail to load "
"with different versions of dependencies."
)
serialized_callbacks = base64.encodebytes(
cloudpickle.dumps(callbacks)
).decode("ascii")
extraMetadata["serialized_callbacks"] = serialized_callbacks
init_booster = instance.getOrDefault("xgb_model")
if init_booster is not None:
extraMetadata["init_booster"] = _INIT_BOOSTER_SAVE_PATH
DefaultParamsWriter.saveMetadata(
instance, path, sc, extraMetadata=extraMetadata, paramMap=jsonParams
)
if init_booster is not None:
ser_init_booster = serialize_booster(init_booster)
save_path = os.path.join(path, _INIT_BOOSTER_SAVE_PATH)
_get_spark_session().createDataFrame(
[(ser_init_booster,)], ["init_booster"]
).write.parquet(save_path)
@staticmethod
def loadMetadataAndInstance(
pyspark_xgb_cls: Union[Type[_SparkXGBEstimator], Type[_SparkXGBModel]],
path: str,
sc: SparkContext,
logger: logging.Logger,
) -> Tuple[Dict[str, Any], Union[_SparkXGBEstimator, _SparkXGBModel]]:
"""
Load the metadata and the instance of an xgboost.spark._SparkXGBEstimator or
xgboost.spark._SparkXGBModel.
:return: a tuple of (metadata, instance)
"""
metadata = DefaultParamsReader.loadMetadata(
path, sc, expectedClassName=get_class_name(pyspark_xgb_cls)
)
pyspark_xgb = pyspark_xgb_cls()
DefaultParamsReader.getAndSetParams(pyspark_xgb, metadata)
if "serialized_callbacks" in metadata:
serialized_callbacks = metadata["serialized_callbacks"]
try:
callbacks = cloudpickle.loads(
base64.decodebytes(serialized_callbacks.encode("ascii"))
)
pyspark_xgb.set(pyspark_xgb.callbacks, callbacks) # type: ignore
except Exception as e: # pylint: disable=W0703
logger.warning(
f"Fails to load the callbacks param due to {e}. Please set the "
"callbacks param manually for the loaded estimator."
)
if "init_booster" in metadata:
load_path = os.path.join(path, metadata["init_booster"])
ser_init_booster = (
_get_spark_session().read.parquet(load_path).collect()[0].init_booster
)
init_booster = deserialize_booster(ser_init_booster)
pyspark_xgb.set(pyspark_xgb.xgb_model, init_booster) # type: ignore
pyspark_xgb._resetUid(metadata["uid"]) # pylint: disable=protected-access
return metadata, pyspark_xgb
class SparkXGBWriter(MLWriter):
"""
Spark Xgboost estimator writer.
"""
def __init__(self, instance: "_SparkXGBEstimator") -> None:
super().__init__()
self.instance = instance
self.logger = get_logger(self.__class__.__name__, level="WARN")
def saveImpl(self, path: str) -> None:
"""
        Save the estimator.
"""
_SparkXGBSharedReadWrite.saveMetadata(self.instance, path, self.sc, self.logger)
class SparkXGBReader(MLReader):
"""
Spark Xgboost estimator reader.
"""
def __init__(self, cls: Type["_SparkXGBEstimator"]) -> None:
super().__init__()
self.cls = cls
self.logger = get_logger(self.__class__.__name__, level="WARN")
def load(self, path: str) -> "_SparkXGBEstimator":
"""
        Load the estimator.
"""
_, pyspark_xgb = _SparkXGBSharedReadWrite.loadMetadataAndInstance(
self.cls, path, self.sc, self.logger
)
return cast("_SparkXGBEstimator", pyspark_xgb)
class SparkXGBModelWriter(MLWriter):
"""
Spark Xgboost model writer.
"""
def __init__(self, instance: _SparkXGBModel) -> None:
super().__init__()
self.instance = instance
self.logger = get_logger(self.__class__.__name__, level="WARN")
def saveImpl(self, path: str) -> None:
"""
Save metadata and model for a :py:class:`_SparkXGBModel`
- save metadata to path/metadata
- save model to path/model.json
"""
xgb_model = self.instance._xgb_sklearn_model
assert xgb_model is not None
_SparkXGBSharedReadWrite.saveMetadata(self.instance, path, self.sc, self.logger)
model_save_path = os.path.join(path, "model")
booster = xgb_model.get_booster().save_raw("json").decode("utf-8")
_get_spark_session().sparkContext.parallelize([booster], 1).saveAsTextFile(
model_save_path
)
class SparkXGBModelReader(MLReader):
"""
Spark Xgboost model reader.
"""
def __init__(self, cls: Type["_SparkXGBModel"]) -> None:
super().__init__()
self.cls = cls
self.logger = get_logger(self.__class__.__name__, level="WARN")
def load(self, path: str) -> "_SparkXGBModel":
"""
Load metadata and model for a :py:class:`_SparkXGBModel`
:return: SparkXGBRegressorModel or SparkXGBClassifierModel instance
"""
_, py_model = _SparkXGBSharedReadWrite.loadMetadataAndInstance(
self.cls, path, self.sc, self.logger
)
py_model = cast("_SparkXGBModel", py_model)
xgb_sklearn_params = py_model._gen_xgb_params_dict(
gen_xgb_sklearn_estimator_param=True
)
model_load_path = os.path.join(path, "model")
ser_xgb_model = (
_get_spark_session().sparkContext.textFile(model_load_path).collect()[0]
)
def create_xgb_model() -> "XGBModel":
return self.cls._xgb_cls()(**xgb_sklearn_params)
xgb_model = deserialize_xgb_model(ser_xgb_model, create_xgb_model)
py_model._xgb_sklearn_model = xgb_model
return py_model
| 59223
| 37.733813
| 99
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/spark/utils.py
|
"""Xgboost pyspark integration submodule for helper functions."""
# pylint: disable=fixme
import inspect
import logging
import os
import sys
import uuid
from threading import Thread
from typing import Any, Callable, Dict, Optional, Set, Type
import pyspark
from pyspark import BarrierTaskContext, SparkContext, SparkFiles
from pyspark.sql.session import SparkSession
from xgboost import Booster, XGBModel, collective
from xgboost.tracker import RabitTracker
def get_class_name(cls: Type) -> str:
"""Return the class name."""
return f"{cls.__module__}.{cls.__name__}"
def _get_default_params_from_func(
func: Callable, unsupported_set: Set[str]
) -> Dict[str, Any]:
"""Returns a dictionary of parameters and their default value of function fn. Only
the parameters with a default value will be included.
"""
sig = inspect.signature(func)
filtered_params_dict = {}
for parameter in sig.parameters.values():
# Remove parameters without a default value and those in the unsupported_set
if (
parameter.default is not parameter.empty
and parameter.name not in unsupported_set
):
filtered_params_dict[parameter.name] = parameter.default
return filtered_params_dict
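# Hedged example of the helper above, using a throwaway function:
#
# >>> def fn(a, b=1, c=2):
# ...     pass
# >>> _get_default_params_from_func(fn, {"c"})
# {'b': 1}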
class CommunicatorContext:
"""A context controlling collective communicator initialization and finalization.
    This isn't specifically necessary (note Part 3), but it is more
    understandable coding-wise.
"""
def __init__(self, context: BarrierTaskContext, **args: Any) -> None:
self.args = args
self.args["DMLC_TASK_ID"] = str(context.partitionId())
def __enter__(self) -> None:
collective.init(**self.args)
def __exit__(self, *args: Any) -> None:
collective.finalize()
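# Typical usage sketch inside a barrier task (``context`` and ``rabit_args``
# are assumed to come from the surrounding Spark job, as in core.py):
#
# with CommunicatorContext(context, **rabit_args):
#     booster = xgboost.train(params, dtrain)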
def _start_tracker(context: BarrierTaskContext, n_workers: int) -> Dict[str, Any]:
"""Start Rabit tracker with n_workers"""
env: Dict[str, Any] = {"DMLC_NUM_WORKER": n_workers}
host = _get_host_ip(context)
rabit_context = RabitTracker(host_ip=host, n_workers=n_workers)
env.update(rabit_context.worker_envs())
rabit_context.start(n_workers)
thread = Thread(target=rabit_context.join)
thread.daemon = True
thread.start()
return env
def _get_rabit_args(context: BarrierTaskContext, n_workers: int) -> Dict[str, Any]:
"""Get rabit context arguments to send to each worker."""
env = _start_tracker(context, n_workers)
return env
def _get_host_ip(context: BarrierTaskContext) -> str:
"""Gets the hostIP for Spark. This essentially gets the IP of the first worker."""
task_ip_list = [info.address.split(":")[0] for info in context.getTaskInfos()]
return task_ip_list[0]
def _get_spark_session() -> SparkSession:
"""Get or create spark session. Note: This function can only be invoked from driver
side.
"""
if pyspark.TaskContext.get() is not None:
# This is a safety check.
raise RuntimeError(
"_get_spark_session should not be invoked from executor side."
)
return SparkSession.builder.getOrCreate()
def get_logger(name: str, level: str = "INFO") -> logging.Logger:
"""Gets a logger by name, or creates and configures it for the first time."""
logger = logging.getLogger(name)
logger.setLevel(level)
    # If the logger is already configured, skip configuring it
if not logger.handlers and not logging.getLogger().handlers:
handler = logging.StreamHandler(sys.stderr)
logger.addHandler(handler)
return logger
def _get_max_num_concurrent_tasks(spark_context: SparkContext) -> int:
"""Gets the current max number of concurrent tasks."""
# pylint: disable=protected-access
    # spark 3.1 and above have a different API for fetching max concurrent tasks
if spark_context._jsc.sc().version() >= "3.1":
return spark_context._jsc.sc().maxNumConcurrentTasks(
spark_context._jsc.sc().resourceProfileManager().resourceProfileFromId(0)
)
return spark_context._jsc.sc().maxNumConcurrentTasks()
def _is_local(spark_context: SparkContext) -> bool:
"""Whether it is Spark local mode"""
# pylint: disable=protected-access
return spark_context._jsc.sc().isLocal()
def _get_gpu_id(task_context: BarrierTaskContext) -> int:
"""Get the gpu id from the task resources"""
if task_context is None:
# This is a safety check.
raise RuntimeError("_get_gpu_id should not be invoked from driver side.")
resources = task_context.resources()
if "gpu" not in resources:
raise RuntimeError(
"Couldn't get the gpu id, Please check the GPU resource configuration"
)
# return the first gpu id.
return int(resources["gpu"].addresses[0].strip())
def _get_or_create_tmp_dir() -> str:
root_dir = SparkFiles.getRootDirectory()
xgb_tmp_dir = os.path.join(root_dir, "xgboost-tmp")
if not os.path.exists(xgb_tmp_dir):
os.makedirs(xgb_tmp_dir)
return xgb_tmp_dir
def deserialize_xgb_model(
model: str, xgb_model_creator: Callable[[], XGBModel]
) -> XGBModel:
"""
Deserialize an xgboost.XGBModel instance from the input model.
"""
xgb_model = xgb_model_creator()
xgb_model.load_model(bytearray(model.encode("utf-8")))
return xgb_model
def serialize_booster(booster: Booster) -> str:
"""
Serialize the input booster to a string.
Parameters
----------
booster:
an xgboost.core.Booster instance
"""
# TODO: change to use string io
tmp_file_name = os.path.join(_get_or_create_tmp_dir(), f"{uuid.uuid4()}.json")
booster.save_model(tmp_file_name)
with open(tmp_file_name, encoding="utf-8") as f:
ser_model_string = f.read()
return ser_model_string
def deserialize_booster(model: str) -> Booster:
"""
    Deserialize an xgboost.core.Booster from the input model string.
"""
booster = Booster()
# TODO: change to use string io
tmp_file_name = os.path.join(_get_or_create_tmp_dir(), f"{uuid.uuid4()}.json")
with open(tmp_file_name, "w", encoding="utf-8") as f:
f.write(model)
booster.load_model(tmp_file_name)
return booster
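# Round-trip sketch (``booster`` is an assumed xgboost.core.Booster); the two
# helpers above are inverses of each other:
#
# >>> restored = deserialize_booster(serialize_booster(booster))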
def use_cuda(device: Optional[str]) -> bool:
"""Whether xgboost is using CUDA workers."""
return device in ("cuda", "gpu")
| 6352
| 31.747423
| 87
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/spark/data.py
|
# pylint: disable=protected-access
"""Utilities for processing spark partitions."""
from collections import defaultdict, namedtuple
from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from xgboost import DataIter, DMatrix, QuantileDMatrix, XGBModel
from xgboost.compat import concat
from .._typing import ArrayLike
from .utils import get_logger # type: ignore
def stack_series(series: pd.Series) -> np.ndarray:
"""Stack a series of arrays."""
array = series.to_numpy(copy=False)
array = np.stack(array)
return array
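# Hedged example of the helper above:
#
# >>> stack_series(pd.Series([np.array([1, 2]), np.array([3, 4])]))
# array([[1, 2],
#        [3, 4]])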
# Global constant for defining column alias shared between estimator and data
# processing procedures.
Alias = namedtuple("Alias", ("data", "label", "weight", "margin", "valid", "qid"))
alias = Alias("values", "label", "weight", "baseMargin", "validationIndicator", "qid")
def concat_or_none(seq: Optional[Sequence[np.ndarray]]) -> Optional[np.ndarray]:
"""Concatenate the data if it's not None."""
if seq:
return concat(seq)
return None
def cache_partitions(
iterator: Iterator[pd.DataFrame], append: Callable[[pd.DataFrame, str, bool], None]
) -> None:
"""Extract partitions from pyspark iterator. `append` is a user defined function for
accepting new partition."""
def make_blob(part: pd.DataFrame, is_valid: bool) -> None:
append(part, alias.data, is_valid)
append(part, alias.label, is_valid)
append(part, alias.weight, is_valid)
append(part, alias.margin, is_valid)
append(part, alias.qid, is_valid)
has_validation: Optional[bool] = None
for part in iterator:
if has_validation is None:
has_validation = alias.valid in part.columns
if has_validation is True:
assert alias.valid in part.columns
if has_validation:
train = part.loc[~part[alias.valid], :]
valid = part.loc[part[alias.valid], :]
else:
train, valid = part, None
make_blob(train, False)
if valid is not None:
make_blob(valid, True)
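# A minimal, self-contained sketch (not part of the library) of the ``append``
# callback protocol used by ``cache_partitions``; the column values are
# hypothetical.
def _example_cache_partitions() -> Dict[str, List[np.ndarray]]:
    collected: Dict[str, List[np.ndarray]] = defaultdict(list)

    def append(part: pd.DataFrame, name: str, is_valid: bool) -> None:
        # Collect only training rows for the columns that are present.
        if name in part.columns and not is_valid:
            collected[name].append(part[name].to_numpy())

    parts = iter([pd.DataFrame({alias.label: [0, 1], alias.valid: [False, True]})])
    cache_partitions(parts, append)
    return collected  # {'label': [array([0])]}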
class PartIter(DataIter):
"""Iterator for creating Quantile DMatrix from partitions."""
def __init__(
self, data: Dict[str, List], device_id: Optional[int], **kwargs: Any
) -> None:
self._iter = 0
self._device_id = device_id
self._data = data
self._kwargs = kwargs
super().__init__()
def _fetch(self, data: Optional[Sequence[pd.DataFrame]]) -> Optional[pd.DataFrame]:
if not data:
return None
if self._device_id is not None:
import cudf # pylint: disable=import-error
import cupy as cp # pylint: disable=import-error
            # We must set the device after importing cudf, which changes the device id to 0.
# See https://github.com/rapidsai/cudf/issues/11386
cp.cuda.runtime.setDevice(self._device_id) # pylint: disable=I1101
return cudf.DataFrame(data[self._iter])
return data[self._iter]
def next(self, input_data: Callable) -> int:
if self._iter == len(self._data[alias.data]):
return 0
input_data(
data=self._fetch(self._data[alias.data]),
label=self._fetch(self._data.get(alias.label, None)),
weight=self._fetch(self._data.get(alias.weight, None)),
base_margin=self._fetch(self._data.get(alias.margin, None)),
qid=self._fetch(self._data.get(alias.qid, None)),
**self._kwargs,
)
self._iter += 1
return 1
def reset(self) -> None:
self._iter = 0
def _read_csr_matrix_from_unwrapped_spark_vec(part: pd.DataFrame) -> csr_matrix:
# variables for constructing csr_matrix
csr_indices_list, csr_indptr_list, csr_values_list = [], [0], []
n_features = 0
for vec_type, vec_size_, vec_indices, vec_values in zip(
part.featureVectorType,
part.featureVectorSize,
part.featureVectorIndices,
part.featureVectorValues,
):
if vec_type == 0:
# sparse vector
vec_size = int(vec_size_)
csr_indices = vec_indices
csr_values = vec_values
else:
# dense vector
            # Note: according to the spark ML VectorUDT format,
            # when the type field is 1, the size field is also empty,
            # so we need to check the values field to get the vector length.
vec_size = len(vec_values)
csr_indices = np.arange(vec_size, dtype=np.int32)
csr_values = vec_values
if n_features == 0:
n_features = vec_size
assert n_features == vec_size
csr_indices_list.append(csr_indices)
csr_indptr_list.append(csr_indptr_list[-1] + len(csr_indices))
csr_values_list.append(csr_values)
csr_indptr_arr = np.array(csr_indptr_list)
csr_indices_arr = np.concatenate(csr_indices_list)
csr_values_arr = np.concatenate(csr_values_list)
return csr_matrix(
(csr_values_arr, csr_indices_arr, csr_indptr_arr), shape=(len(part), n_features)
)
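# A minimal, self-contained sketch (not part of the library) of the CSR
# construction above; the unwrapped-vector values are hypothetical.
def _example_read_csr() -> csr_matrix:
    part = pd.DataFrame(
        {
            "featureVectorType": [0, 1],  # row 0 is sparse, row 1 is dense
            "featureVectorSize": [3, None],
            "featureVectorIndices": [np.array([1, 2], dtype=np.int32), None],
            "featureVectorValues": [
                np.array([1.0, 5.5], dtype=np.float32),
                np.array([4.0, 5.0, 6.0], dtype=np.float32),
            ],
        }
    )
    return _read_csr_matrix_from_unwrapped_spark_vec(part)  # shape (2, 3)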
def make_qdm(
data: Dict[str, List[np.ndarray]],
dev_ordinal: Optional[int],
meta: Dict[str, Any],
ref: Optional[DMatrix],
params: Dict[str, Any],
) -> DMatrix:
"""Handle empty partition for QuantileDMatrix."""
if not data:
return QuantileDMatrix(np.empty((0, 0)), ref=ref)
it = PartIter(data, dev_ordinal, **meta)
m = QuantileDMatrix(it, **params, ref=ref)
return m
def create_dmatrix_from_partitions( # pylint: disable=too-many-arguments
iterator: Iterator[pd.DataFrame],
feature_cols: Optional[Sequence[str]],
dev_ordinal: Optional[int],
use_qdm: bool,
kwargs: Dict[str, Any], # use dict to make sure this parameter is passed.
enable_sparse_data_optim: bool,
has_validation_col: bool,
) -> Tuple[DMatrix, Optional[DMatrix]]:
"""Create DMatrix from spark data partitions.
Parameters
----------
iterator :
Pyspark partition iterator.
feature_cols:
A sequence of feature names, used only when rapids plugin is enabled.
dev_ordinal:
Device ordinal, used when GPU is enabled.
use_qdm :
Whether QuantileDMatrix should be used instead of DMatrix.
kwargs :
Metainfo for DMatrix.
enable_sparse_data_optim :
Whether sparse data should be unwrapped
    has_validation_col:
Whether there's validation data.
Returns
-------
Training DMatrix and an optional validation DMatrix.
"""
# pylint: disable=too-many-locals, too-many-statements
train_data: Dict[str, List[np.ndarray]] = defaultdict(list)
valid_data: Dict[str, List[np.ndarray]] = defaultdict(list)
n_features: int = 0
def append_m(part: pd.DataFrame, name: str, is_valid: bool) -> None:
nonlocal n_features
if name == alias.data or name in part.columns:
if (
name == alias.data
and feature_cols is not None
and part[feature_cols].shape[0] > 0 # guard against empty partition
):
array: Optional[np.ndarray] = part[feature_cols]
elif part[name].shape[0] > 0:
array = part[name]
if name == alias.data:
# For the array/vector typed case.
array = stack_series(array)
else:
array = None
if name == alias.data and array is not None:
if n_features == 0:
n_features = array.shape[1]
assert n_features == array.shape[1]
if array is None:
return
if is_valid:
valid_data[name].append(array)
else:
train_data[name].append(array)
def append_m_sparse(part: pd.DataFrame, name: str, is_valid: bool) -> None:
nonlocal n_features
if name == alias.data or name in part.columns:
if name == alias.data:
array = _read_csr_matrix_from_unwrapped_spark_vec(part)
if n_features == 0:
n_features = array.shape[1]
assert n_features == array.shape[1]
else:
array = part[name]
if is_valid:
valid_data[name].append(array)
else:
train_data[name].append(array)
def make(values: Dict[str, List[np.ndarray]], kwargs: Dict[str, Any]) -> DMatrix:
if len(values) == 0:
get_logger("XGBoostPySpark").warning(
"Detected an empty partition in the training data. Consider to enable"
" repartition_random_shuffle"
)
# We must construct an empty DMatrix to bypass the AllReduce
return DMatrix(data=np.empty((0, 0)), **kwargs)
data = concat_or_none(values[alias.data])
label = concat_or_none(values.get(alias.label, None))
weight = concat_or_none(values.get(alias.weight, None))
margin = concat_or_none(values.get(alias.margin, None))
qid = concat_or_none(values.get(alias.qid, None))
return DMatrix(
data=data, label=label, weight=weight, base_margin=margin, qid=qid, **kwargs
)
if enable_sparse_data_optim:
append_fn = append_m_sparse
assert "missing" in kwargs and kwargs["missing"] == 0.0
else:
append_fn = append_m
def split_params() -> Tuple[Dict[str, Any], Dict[str, Union[int, float, bool]]]:
# FIXME(jiamingy): we really need a better way to bridge distributed frameworks
# to XGBoost native interface and prevent scattering parameters like this.
# parameters that are not related to data.
non_data_keys = (
"max_bin",
"missing",
"silent",
"nthread",
"enable_categorical",
)
non_data_params = {}
meta = {}
for k, v in kwargs.items():
if k in non_data_keys:
non_data_params[k] = v
else:
meta[k] = v
return meta, non_data_params
meta, params = split_params()
    # Regardless of feature_cols, the partitions are cached first; use_qdm
    # decides which DMatrix flavour is built.
    cache_partitions(iterator, append_fn)
    if use_qdm:
        dtrain: DMatrix = make_qdm(train_data, dev_ordinal, meta, None, params)
    else:
        dtrain = make(train_data, kwargs)
    # Use has_validation_col here to indicate whether there is a validation
    # column instead of getting it from the iterator, since the iterator may
    # be empty in some special cases. That is to say, we must ensure every
    # worker constructs a DMatrix even when there is no data, because every
    # worker has to take part in the AllReduce performed while constructing
    # the DMatrix, or else the job may hang forever.
    if has_validation_col:
        if use_qdm:
            dvalid: Optional[DMatrix] = make_qdm(
                valid_data, dev_ordinal, meta, dtrain, params
            )
        else:
            dvalid = make(valid_data, kwargs)
    else:
        dvalid = None
if dvalid is not None:
assert dvalid.num_col() == dtrain.num_col()
return dtrain, dvalid
def pred_contribs(
model: XGBModel,
data: ArrayLike,
base_margin: Optional[ArrayLike] = None,
strict_shape: bool = False,
) -> np.ndarray:
"""Predict contributions with data with the full model."""
iteration_range = model._get_iteration_range(None)
data_dmatrix = DMatrix(
data,
base_margin=base_margin,
missing=model.missing,
nthread=model.n_jobs,
feature_types=model.feature_types,
enable_categorical=model.enable_categorical,
)
return model.get_booster().predict(
data_dmatrix,
pred_contribs=True,
validate_features=False,
iteration_range=iteration_range,
strict_shape=strict_shape,
)
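# Hedged usage sketch (``model`` is an assumed fitted XGBModel and ``X`` an
# assumed feature matrix); with strict_shape=True the classification output is
# 3-dim, (rows, groups, columns + 1), matching the schema used in core.py:
#
# >>> contribs = pred_contribs(model, X, strict_shape=True)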
| 12431
| 33.247934
| 92
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/spark/__init__.py
|
"""PySpark XGBoost integration interface"""
try:
import pyspark
except ImportError as e:
raise ImportError("pyspark package needs to be installed to use this module") from e
from .estimator import (
SparkXGBClassifier,
SparkXGBClassifierModel,
SparkXGBRanker,
SparkXGBRankerModel,
SparkXGBRegressor,
SparkXGBRegressorModel,
)
__all__ = [
"SparkXGBClassifier",
"SparkXGBClassifierModel",
"SparkXGBRegressor",
"SparkXGBRegressorModel",
"SparkXGBRanker",
"SparkXGBRankerModel",
]
| 536
| 20.48
| 88
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/spark/params.py
|
"""Xgboost pyspark integration submodule for params."""
from typing import Dict
# pylint: disable=too-few-public-methods
from pyspark.ml.param import TypeConverters
from pyspark.ml.param.shared import Param, Params
class HasArbitraryParamsDict(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
and holds the variable to store the **kwargs parts of the XGBoost
input.
"""
arbitrary_params_dict: "Param[Dict]" = Param(
Params._dummy(),
"arbitrary_params_dict",
"arbitrary_params_dict This parameter holds all of the additional parameters which are "
"not exposed as the the XGBoost Spark estimator params but can be recognized by "
"underlying XGBoost library. It is stored as a dictionary.",
)
class HasBaseMarginCol(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
and holds the variable to store the base margin column part of XGboost.
"""
base_margin_col = Param(
Params._dummy(),
"base_margin_col",
"This stores the name for the column of the base margin",
typeConverter=TypeConverters.toString,
)
class HasFeaturesCols(Params):
"""
Mixin for param features_cols: a list of feature column names.
    This parameter takes effect only when use_gpu is enabled.
"""
features_cols = Param(
Params._dummy(),
"features_cols",
"feature column names.",
typeConverter=TypeConverters.toListString,
)
def __init__(self) -> None:
super().__init__()
self._setDefault(features_cols=[])
class HasEnableSparseDataOptim(Params):
"""
This is a Params based class that is extended by _SparkXGBParams
and holds the variable to store the boolean config of enabling sparse data optimization.
"""
enable_sparse_data_optim = Param(
Params._dummy(),
"enable_sparse_data_optim",
"This stores the boolean config of enabling sparse data optimization, if enabled, "
"Xgboost DMatrix object will be constructed from sparse matrix instead of "
"dense matrix. This config is disabled by default. If most of examples in your "
"training dataset contains sparse features, we suggest to enable this config.",
typeConverter=TypeConverters.toBoolean,
)
def __init__(self) -> None:
super().__init__()
self._setDefault(enable_sparse_data_optim=False)
class HasQueryIdCol(Params):
"""
Mixin for param qid_col: query id column name.
"""
qid_col = Param(
Params._dummy(),
"qid_col",
"query id column name",
typeConverter=TypeConverters.toString,
)
class HasContribPredictionCol(Params):
"""
Mixin for param pred_contrib_col: contribution prediction column name.
    The output is a 3-dim array of shape (rows, groups, columns + 1) for the
    classification case, and a 2-dim array for the regression case.
"""
pred_contrib_col: "Param[str]" = Param(
Params._dummy(),
"pred_contrib_col",
"feature contributions to individual predictions.",
typeConverter=TypeConverters.toString,
)
| 3221
| 29.396226
| 96
|
py
|
xgboost
|
xgboost-master/python-package/xgboost/spark/estimator.py
|
"""Xgboost pyspark integration submodule for estimator API."""
# pylint: disable=too-many-ancestors
# pylint: disable=fixme, too-many-ancestors, protected-access, no-member, invalid-name
# pylint: disable=unused-argument, too-many-locals
import warnings
from typing import Any, List, Optional, Type, Union
import numpy as np
from pyspark import keyword_only
from pyspark.ml.param import Param, Params
from pyspark.ml.param.shared import HasProbabilityCol, HasRawPredictionCol
from xgboost import XGBClassifier, XGBRanker, XGBRegressor
from .core import ( # type: ignore
_ClassificationModel,
_SparkXGBEstimator,
_SparkXGBModel,
)
from .utils import get_class_name
def _set_pyspark_xgb_cls_param_attrs(
estimator: Type[_SparkXGBEstimator], model: Type[_SparkXGBModel]
) -> None:
"""This function automatically infer to xgboost parameters and set them
into corresponding pyspark estimators and models"""
params_dict = estimator._get_xgb_params_default()
def param_value_converter(v: Any) -> Any:
if isinstance(v, np.generic):
# convert numpy scalar values to corresponding python scalar values
return np.array(v).item()
if isinstance(v, dict):
return {k: param_value_converter(nv) for k, nv in v.items()}
if isinstance(v, list):
return [param_value_converter(nv) for nv in v]
return v
def set_param_attrs(attr_name: str, param: Param) -> None:
param.typeConverter = param_value_converter
setattr(estimator, attr_name, param)
setattr(model, attr_name, param)
for name in params_dict.keys():
doc = (
f"Refer to XGBoost doc of "
f"{get_class_name(estimator._xgb_cls())} for this param {name}"
)
param_obj: Param = Param(Params._dummy(), name=name, doc=doc)
set_param_attrs(name, param_obj)
fit_params_dict = estimator._get_fit_params_default()
for name in fit_params_dict.keys():
doc = (
f"Refer to XGBoost doc of {get_class_name(estimator._xgb_cls())}"
f".fit() for this param {name}"
)
if name == "callbacks":
doc += (
"The callbacks can be arbitrary functions. It is saved using cloudpickle "
"which is not a fully self-contained format. It may fail to load with "
"different versions of dependencies."
)
param_obj = Param(Params._dummy(), name=name, doc=doc)
set_param_attrs(name, param_obj)
predict_params_dict = estimator._get_predict_params_default()
for name in predict_params_dict.keys():
doc = (
f"Refer to XGBoost doc of {get_class_name(estimator._xgb_cls())}"
f".predict() for this param {name}"
)
param_obj = Param(Params._dummy(), name=name, doc=doc)
set_param_attrs(name, param_obj)
def _deprecated_use_gpu() -> None:
warnings.warn(
"`use_gpu` is deprecated since 2.0.0, use `device` instead", FutureWarning
)
class SparkXGBRegressor(_SparkXGBEstimator):
"""SparkXGBRegressor is a PySpark ML estimator. It implements the XGBoost regression
    algorithm based on the XGBoost python library, and it can be used in PySpark
    Pipelines and PySpark ML meta algorithms like
- :py:class:`~pyspark.ml.tuning.CrossValidator`/
- :py:class:`~pyspark.ml.tuning.TrainValidationSplit`/
- :py:class:`~pyspark.ml.classification.OneVsRest`
SparkXGBRegressor automatically supports most of the parameters in
:py:class:`xgboost.XGBRegressor` constructor and most of the parameters used in
:py:meth:`xgboost.XGBRegressor.fit` and :py:meth:`xgboost.XGBRegressor.predict`
method.
To enable GPU support, set `device` to `cuda` or `gpu`.
    SparkXGBRegressor doesn't support setting `base_margin` explicitly either, but
    it supports another param called `base_margin_col`; see the doc below for more
    details.
    SparkXGBRegressor doesn't support the `validate_features` and `output_margin`
    params.
    SparkXGBRegressor doesn't support setting the `nthread` xgboost param; instead,
    the `nthread` param for each xgboost worker will be set equal to the
    `spark.task.cpus` config value.
Parameters
----------
features_col:
        When the value is a string, it requires the features column to be of vector
        type. When the value is a list of strings, it requires all the feature
        columns to be of numeric types.
label_col:
Label column name. Default to "label".
prediction_col:
Prediction column name. Default to "prediction"
pred_contrib_col:
Contribution prediction column name.
validation_indicator_col:
For params related to `xgboost.XGBRegressor` training with
evaluation dataset's supervision,
set :py:attr:`xgboost.spark.SparkXGBRegressor.validation_indicator_col`
parameter instead of setting the `eval_set` parameter in `xgboost.XGBRegressor`
fit method.
weight_col:
To specify the weight of the training and validation dataset, set
:py:attr:`xgboost.spark.SparkXGBRegressor.weight_col` parameter instead of setting
`sample_weight` and `sample_weight_eval_set` parameter in `xgboost.XGBRegressor`
fit method.
base_margin_col:
To specify the base margins of the training and validation
dataset, set :py:attr:`xgboost.spark.SparkXGBRegressor.base_margin_col` parameter
instead of setting `base_margin` and `base_margin_eval_set` in the
`xgboost.XGBRegressor` fit method.
num_workers:
        How many XGBoost workers to use for training.
Each XGBoost worker corresponds to one spark task.
use_gpu:
.. deprecated:: 2.0.0
Use `device` instead.
device:
Device for XGBoost workers, available options are `cpu`, `cuda`, and `gpu`.
force_repartition:
        Boolean value to specify whether to force repartitioning the input dataset
        before XGBoost training.
    repartition_random_shuffle:
        Boolean value to specify whether to randomly shuffle the dataset when
        repartitioning is required.
    enable_sparse_data_optim:
        Boolean value to specify whether to enable sparse data optimization. If
        True, the Xgboost DMatrix object will be constructed from a sparse matrix
        instead of a dense matrix.
kwargs:
A dictionary of xgboost parameters, please refer to
https://xgboost.readthedocs.io/en/stable/parameter.html
Note
----
The Parameters chart above contains parameters that need special handling.
For a full list of parameters, see entries with `Param(parent=...` below.
This API is experimental.
Examples
--------
>>> from xgboost.spark import SparkXGBRegressor
>>> from pyspark.ml.linalg import Vectors
>>> df_train = spark.createDataFrame([
... (Vectors.dense(1.0, 2.0, 3.0), 0, False, 1.0),
... (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1, False, 2.0),
... (Vectors.dense(4.0, 5.0, 6.0), 2, True, 1.0),
... (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 3, True, 2.0),
... ], ["features", "label", "isVal", "weight"])
>>> df_test = spark.createDataFrame([
... (Vectors.dense(1.0, 2.0, 3.0), ),
... (Vectors.sparse(3, {1: 1.0, 2: 5.5}), )
... ], ["features"])
>>> xgb_regressor = SparkXGBRegressor(max_depth=5, missing=0.0,
... validation_indicator_col='isVal', weight_col='weight',
... early_stopping_rounds=1, eval_metric='rmse')
>>> xgb_reg_model = xgb_regressor.fit(df_train)
>>> xgb_reg_model.transform(df_test)
"""
@keyword_only
def __init__(
self,
*,
features_col: Union[str, List[str]] = "features",
label_col: str = "label",
prediction_col: str = "prediction",
pred_contrib_col: Optional[str] = None,
validation_indicator_col: Optional[str] = None,
weight_col: Optional[str] = None,
base_margin_col: Optional[str] = None,
num_workers: int = 1,
use_gpu: Optional[bool] = None,
device: Optional[str] = None,
force_repartition: bool = False,
repartition_random_shuffle: bool = False,
enable_sparse_data_optim: bool = False,
**kwargs: Any,
) -> None:
super().__init__()
input_kwargs = self._input_kwargs
if use_gpu:
_deprecated_use_gpu()
self.setParams(**input_kwargs)
@classmethod
def _xgb_cls(cls) -> Type[XGBRegressor]:
return XGBRegressor
@classmethod
def _pyspark_model_cls(cls) -> Type["SparkXGBRegressorModel"]:
return SparkXGBRegressorModel
def _validate_params(self) -> None:
super()._validate_params()
if self.isDefined(self.qid_col):
raise ValueError(
"Spark Xgboost regressor estimator does not support `qid_col` param."
)
class SparkXGBRegressorModel(_SparkXGBModel):
"""
The model returned by :func:`xgboost.spark.SparkXGBRegressor.fit`
.. Note:: This API is experimental.
"""
@classmethod
def _xgb_cls(cls) -> Type[XGBRegressor]:
return XGBRegressor
_set_pyspark_xgb_cls_param_attrs(SparkXGBRegressor, SparkXGBRegressorModel)
class SparkXGBClassifier(_SparkXGBEstimator, HasProbabilityCol, HasRawPredictionCol):
"""SparkXGBClassifier is a PySpark ML estimator. It implements the XGBoost
    classification algorithm based on the XGBoost python library, and it can be used
    in PySpark Pipelines and PySpark ML meta algorithms like
- :py:class:`~pyspark.ml.tuning.CrossValidator`/
- :py:class:`~pyspark.ml.tuning.TrainValidationSplit`/
- :py:class:`~pyspark.ml.classification.OneVsRest`
SparkXGBClassifier automatically supports most of the parameters in
:py:class:`xgboost.XGBClassifier` constructor and most of the parameters used in
:py:meth:`xgboost.XGBClassifier.fit` and :py:meth:`xgboost.XGBClassifier.predict`
method.
To enable GPU support, set `device` to `cuda` or `gpu`.
    SparkXGBClassifier doesn't support setting `base_margin` explicitly either, but
    it supports another param called `base_margin_col`; see the doc below for more
    details.
    SparkXGBClassifier doesn't support setting `output_margin`, but the output
    margin is available from the raw prediction column. See the `raw_prediction_col`
    param doc below for more details.
    SparkXGBClassifier doesn't support the `validate_features` and `output_margin`
    params.
    SparkXGBClassifier doesn't support setting the `nthread` xgboost param; instead,
    the `nthread` param for each xgboost worker will be set equal to the
    `spark.task.cpus` config value.
Parameters
----------
features_col:
        When the value is a string, it requires the features column to be of vector
        type. When the value is a list of strings, it requires all the feature
        columns to be of numeric types.
label_col:
Label column name. Default to "label".
prediction_col:
Prediction column name. Default to "prediction"
probability_col:
Column name for predicted class conditional probabilities. Default to probabilityCol
raw_prediction_col:
The `output_margin=True` is implicitly supported by the
`rawPredictionCol` output column, which is always returned with the predicted margin
values.
pred_contrib_col:
Contribution prediction column name.
validation_indicator_col:
For params related to `xgboost.XGBClassifier` training with
evaluation dataset's supervision,
set :py:attr:`xgboost.spark.SparkXGBClassifier.validation_indicator_col`
parameter instead of setting the `eval_set` parameter in `xgboost.XGBClassifier`
fit method.
weight_col:
To specify the weight of the training and validation dataset, set
:py:attr:`xgboost.spark.SparkXGBClassifier.weight_col` parameter instead of setting
`sample_weight` and `sample_weight_eval_set` parameter in `xgboost.XGBClassifier`
fit method.
base_margin_col:
To specify the base margins of the training and validation
dataset, set :py:attr:`xgboost.spark.SparkXGBClassifier.base_margin_col` parameter
instead of setting `base_margin` and `base_margin_eval_set` in the
`xgboost.XGBClassifier` fit method.
num_workers:
        The number of XGBoost workers used for training.
        Each XGBoost worker corresponds to one Spark task.
use_gpu:
.. deprecated:: 2.0.0
Use `device` instead.
device:
Device for XGBoost workers, available options are `cpu`, `cuda`, and `gpu`.
    force_repartition:
        Boolean value to specify whether to force repartitioning of the input dataset
        before XGBoost training.
    repartition_random_shuffle:
        Boolean value to specify whether to randomly shuffle the dataset when
        repartitioning is required.
    enable_sparse_data_optim:
        Boolean value to specify whether to enable sparse data optimization. If True,
        the XGBoost DMatrix object will be constructed from a sparse matrix instead
        of a dense matrix.
kwargs:
A dictionary of xgboost parameters, please refer to
https://xgboost.readthedocs.io/en/stable/parameter.html
Note
----
The Parameters chart above contains parameters that need special handling.
For a full list of parameters, see entries with `Param(parent=...` below.
This API is experimental.
Examples
--------
>>> from xgboost.spark import SparkXGBClassifier
>>> from pyspark.ml.linalg import Vectors
>>> df_train = spark.createDataFrame([
... (Vectors.dense(1.0, 2.0, 3.0), 0, False, 1.0),
... (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1, False, 2.0),
... (Vectors.dense(4.0, 5.0, 6.0), 0, True, 1.0),
... (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, True, 2.0),
... ], ["features", "label", "isVal", "weight"])
>>> df_test = spark.createDataFrame([
... (Vectors.dense(1.0, 2.0, 3.0), ),
... ], ["features"])
>>> xgb_classifier = SparkXGBClassifier(max_depth=5, missing=0.0,
... validation_indicator_col='isVal', weight_col='weight',
... early_stopping_rounds=1, eval_metric='logloss')
>>> xgb_clf_model = xgb_classifier.fit(df_train)
>>> xgb_clf_model.transform(df_test).show()
"""
@keyword_only
def __init__(
self,
*,
features_col: Union[str, List[str]] = "features",
label_col: str = "label",
prediction_col: str = "prediction",
probability_col: str = "probability",
raw_prediction_col: str = "rawPrediction",
pred_contrib_col: Optional[str] = None,
validation_indicator_col: Optional[str] = None,
weight_col: Optional[str] = None,
base_margin_col: Optional[str] = None,
num_workers: int = 1,
use_gpu: Optional[bool] = None,
device: Optional[str] = None,
force_repartition: bool = False,
repartition_random_shuffle: bool = False,
enable_sparse_data_optim: bool = False,
**kwargs: Any,
) -> None:
super().__init__()
# The default 'objective' param value comes from sklearn `XGBClassifier` ctor,
# but in pyspark we will automatically set objective param depending on
# binary or multinomial input dataset, and we need to remove the fixed default
# param value as well to avoid causing ambiguity.
input_kwargs = self._input_kwargs
if use_gpu:
_deprecated_use_gpu()
self.setParams(**input_kwargs)
self._setDefault(objective=None)
@classmethod
def _xgb_cls(cls) -> Type[XGBClassifier]:
return XGBClassifier
@classmethod
def _pyspark_model_cls(cls) -> Type["SparkXGBClassifierModel"]:
return SparkXGBClassifierModel
def _validate_params(self) -> None:
super()._validate_params()
if self.isDefined(self.qid_col):
raise ValueError(
"Spark Xgboost classifier estimator does not support `qid_col` param."
)
if self.getOrDefault("objective"): # pylint: disable=no-member
raise ValueError(
"Setting custom 'objective' param is not allowed in 'SparkXGBClassifier'."
)
class SparkXGBClassifierModel(_ClassificationModel):
"""
The model returned by :func:`xgboost.spark.SparkXGBClassifier.fit`
.. Note:: This API is experimental.
"""
@classmethod
def _xgb_cls(cls) -> Type[XGBClassifier]:
return XGBClassifier
_set_pyspark_xgb_cls_param_attrs(SparkXGBClassifier, SparkXGBClassifierModel)
class SparkXGBRanker(_SparkXGBEstimator):
"""SparkXGBRanker is a PySpark ML estimator. It implements the XGBoost
ranking algorithm based on XGBoost python library, and it can be used in
PySpark Pipeline and PySpark ML meta algorithms like
:py:class:`~pyspark.ml.tuning.CrossValidator`/
:py:class:`~pyspark.ml.tuning.TrainValidationSplit`/
:py:class:`~pyspark.ml.classification.OneVsRest`
SparkXGBRanker automatically supports most of the parameters in
:py:class:`xgboost.XGBRanker` constructor and most of the parameters used in
:py:meth:`xgboost.XGBRanker.fit` and :py:meth:`xgboost.XGBRanker.predict` method.
To enable GPU support, set `device` to `cuda` or `gpu`.
    SparkXGBRanker doesn't support setting `base_margin` explicitly either, but it
    supports another param called `base_margin_col`. See the doc below for more details.
    SparkXGBRanker doesn't support setting `output_margin`, but the output margin can
    be obtained from the raw prediction column. See the `raw_prediction_col` param doc
    below for more details.
    SparkXGBRanker doesn't support the `validate_features` and `output_margin` params.
    SparkXGBRanker doesn't support setting the `nthread` xgboost param; instead, the
    `nthread` param for each xgboost worker will be set equal to the `spark.task.cpus`
    config value.
Parameters
----------
features_col:
        When the value is a string, it requires the features column to be of vector type.
        When the value is a list of strings, it requires all the feature columns to be numeric types.
label_col:
Label column name. Default to "label".
prediction_col:
Prediction column name. Default to "prediction"
pred_contrib_col:
Contribution prediction column name.
validation_indicator_col:
For params related to `xgboost.XGBRanker` training with
evaluation dataset's supervision,
set :py:attr:`xgboost.spark.SparkXGBRanker.validation_indicator_col`
parameter instead of setting the `eval_set` parameter in :py:class:`xgboost.XGBRanker`
fit method.
weight_col:
To specify the weight of the training and validation dataset, set
:py:attr:`xgboost.spark.SparkXGBRanker.weight_col` parameter instead of setting
`sample_weight` and `sample_weight_eval_set` parameter in :py:class:`xgboost.XGBRanker`
fit method.
base_margin_col:
To specify the base margins of the training and validation
dataset, set :py:attr:`xgboost.spark.SparkXGBRanker.base_margin_col` parameter
instead of setting `base_margin` and `base_margin_eval_set` in the
:py:class:`xgboost.XGBRanker` fit method.
qid_col:
Query id column name.
num_workers:
        The number of XGBoost workers used for training.
        Each XGBoost worker corresponds to one Spark task.
use_gpu:
.. deprecated:: 2.0.0
Use `device` instead.
device:
Device for XGBoost workers, available options are `cpu`, `cuda`, and `gpu`.
    force_repartition:
        Boolean value to specify whether to force repartitioning of the input dataset
        before XGBoost training.
    repartition_random_shuffle:
        Boolean value to specify whether to randomly shuffle the dataset when
        repartitioning is required.
    enable_sparse_data_optim:
        Boolean value to specify whether to enable sparse data optimization. If True,
        the XGBoost DMatrix object will be constructed from a sparse matrix instead
        of a dense matrix.
kwargs:
A dictionary of xgboost parameters, please refer to
https://xgboost.readthedocs.io/en/stable/parameter.html
.. Note:: The Parameters chart above contains parameters that need special handling.
For a full list of parameters, see entries with `Param(parent=...` below.
.. Note:: This API is experimental.
Examples
--------
>>> from xgboost.spark import SparkXGBRanker
>>> from pyspark.ml.linalg import Vectors
>>> ranker = SparkXGBRanker(qid_col="qid")
>>> df_train = spark.createDataFrame(
... [
... (Vectors.dense(1.0, 2.0, 3.0), 0, 0),
... (Vectors.dense(4.0, 5.0, 6.0), 1, 0),
... (Vectors.dense(9.0, 4.0, 8.0), 2, 0),
... (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 0, 1),
... (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, 1),
... (Vectors.sparse(3, {1: 8.0, 2: 9.5}), 2, 1),
... ],
... ["features", "label", "qid"],
... )
>>> df_test = spark.createDataFrame(
... [
... (Vectors.dense(1.5, 2.0, 3.0), 0),
... (Vectors.dense(4.5, 5.0, 6.0), 0),
... (Vectors.dense(9.0, 4.5, 8.0), 0),
... (Vectors.sparse(3, {1: 1.0, 2: 6.0}), 1),
... (Vectors.sparse(3, {1: 6.0, 2: 7.0}), 1),
... (Vectors.sparse(3, {1: 8.0, 2: 10.5}), 1),
... ],
... ["features", "qid"],
... )
>>> model = ranker.fit(df_train)
>>> model.transform(df_test).show()
"""
@keyword_only
def __init__(
self,
*,
features_col: Union[str, List[str]] = "features",
label_col: str = "label",
prediction_col: str = "prediction",
pred_contrib_col: Optional[str] = None,
validation_indicator_col: Optional[str] = None,
weight_col: Optional[str] = None,
base_margin_col: Optional[str] = None,
qid_col: Optional[str] = None,
num_workers: int = 1,
use_gpu: Optional[bool] = None,
device: Optional[str] = None,
force_repartition: bool = False,
repartition_random_shuffle: bool = False,
enable_sparse_data_optim: bool = False,
**kwargs: Any,
) -> None:
super().__init__()
input_kwargs = self._input_kwargs
if use_gpu:
_deprecated_use_gpu()
self.setParams(**input_kwargs)
@classmethod
def _xgb_cls(cls) -> Type[XGBRanker]:
return XGBRanker
@classmethod
def _pyspark_model_cls(cls) -> Type["SparkXGBRankerModel"]:
return SparkXGBRankerModel
def _validate_params(self) -> None:
super()._validate_params()
if not self.isDefined(self.qid_col):
raise ValueError(
"Spark Xgboost ranker estimator requires setting `qid_col` param."
)
class SparkXGBRankerModel(_SparkXGBModel):
"""
The model returned by :func:`xgboost.spark.SparkXGBRanker.fit`
.. Note:: This API is experimental.
"""
@classmethod
def _xgb_cls(cls) -> Type[XGBRanker]:
return XGBRanker
_set_pyspark_xgb_cls_param_attrs(SparkXGBRanker, SparkXGBRankerModel)
| 23,434
| 37.544408
| 100
|
py
|
xgboost
|
xgboost-master/python-package/packager/build_config.py
|
"""Build configuration"""
import dataclasses
from typing import Any, Dict, List, Optional
@dataclasses.dataclass
class BuildConfiguration: # pylint: disable=R0902
"""Configurations use when building libxgboost"""
# Whether to hide C++ symbols in libxgboost.so
hide_cxx_symbols: bool = True
# Whether to enable OpenMP
use_openmp: bool = True
# Whether to enable CUDA
use_cuda: bool = False
# Whether to enable NCCL
use_nccl: bool = False
# Whether to enable HDFS
use_hdfs: bool = False
# Whether to enable Azure Storage
use_azure: bool = False
# Whether to enable AWS S3
use_s3: bool = False
# Whether to enable the dense parser plugin
plugin_dense_parser: bool = False
# Special option: See explanation below
use_system_libxgboost: bool = False
def _set_config_setting(self, config_settings: Dict[str, Any]) -> None:
for field_name in config_settings:
setattr(
self,
field_name,
(config_settings[field_name].lower() in ["true", "1", "on"]),
)
def update(self, config_settings: Optional[Dict[str, Any]]) -> None:
"""Parse config_settings from Pip (or other PEP 517 frontend)"""
if config_settings is not None:
self._set_config_setting(config_settings)
def get_cmake_args(self) -> List[str]:
"""Convert build configuration to CMake args"""
cmake_args = []
for field_name in [x.name for x in dataclasses.fields(self)]:
if field_name in ["use_system_libxgboost"]:
continue
cmake_option = field_name.upper()
cmake_value = "ON" if getattr(self, field_name) is True else "OFF"
cmake_args.append(f"-D{cmake_option}={cmake_value}")
return cmake_args
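# A minimal, hedged usage sketch (illustrative only; the settings passed below
# are assumptions for demonstration, not values any PEP 517 frontend is
# guaranteed to send):
if __name__ == "__main__":
    config = BuildConfiguration()
    # Frontends forward --config-settings as a {str: str} mapping; truthy
    # values are the strings "true", "1", or "on" (case-insensitive).
    config.update({"use_cuda": "true", "use_openmp": "0"})
    # Prints CMake flags such as ['-DHIDE_CXX_SYMBOLS=ON', '-DUSE_OPENMP=OFF',
    # '-DUSE_CUDA=ON', ...]; use_system_libxgboost is deliberately skipped.
    print(config.get_cmake_args())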
| 1,840
| 34.403846
| 78
|
py
|
xgboost
|
xgboost-master/python-package/packager/sdist.py
|
"""
Functions for building sdist
"""
import logging
import pathlib
from .util import copy_with_logging, copytree_with_logging
def copy_cpp_src_tree(
cpp_src_dir: pathlib.Path, target_dir: pathlib.Path, logger: logging.Logger
) -> None:
"""Copy C++ source tree into build directory"""
for subdir in [
"src",
"include",
"dmlc-core",
"gputreeshap",
"rabit",
"cmake",
"plugin",
]:
copytree_with_logging(cpp_src_dir / subdir, target_dir / subdir, logger=logger)
for filename in ["CMakeLists.txt", "LICENSE"]:
copy_with_logging(cpp_src_dir.joinpath(filename), target_dir, logger=logger)
| 678
| 23.25
| 87
|
py
|
xgboost
|
xgboost-master/python-package/packager/pep517.py
|
"""
Custom build backend for XGBoost Python package.
Builds source distribution and binary wheels, following PEP 517 / PEP 660.
Reuses components of Hatchling (https://github.com/pypa/hatch/tree/master/backend) for the sake
of brevity.
"""
import dataclasses
import logging
import os
import pathlib
import tempfile
from contextlib import contextmanager
from typing import Any, Dict, Iterator, Optional, Union
import hatchling.build
from .build_config import BuildConfiguration
from .nativelib import locate_local_libxgboost, locate_or_build_libxgboost
from .sdist import copy_cpp_src_tree
from .util import copy_with_logging, copytree_with_logging
@contextmanager
def cd(path: Union[str, pathlib.Path]) -> Iterator[str]: # pylint: disable=C0103
"""
Temporarily change working directory.
TODO(hcho3): Remove this once we adopt Python 3.11, which implements contextlib.chdir.
"""
path = str(path)
path = os.path.realpath(path)
cwd = os.getcwd()
os.chdir(path)
try:
yield path
finally:
os.chdir(cwd)
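# Hedged illustration (comment only): `cd` restores the original working
# directory even if the body raises, e.g.
#     with cd("/tmp") as p:
#         ...  # os.getcwd() == os.path.realpath("/tmp") inside the block
#     # back in the previous directory here, exception or not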
TOPLEVEL_DIR = pathlib.Path(__file__).parent.parent.absolute().resolve()
logging.basicConfig(level=logging.INFO)
# Aliases
get_requires_for_build_sdist = hatchling.build.get_requires_for_build_sdist
get_requires_for_build_wheel = hatchling.build.get_requires_for_build_wheel
get_requires_for_build_editable = hatchling.build.get_requires_for_build_editable
def build_wheel(
wheel_directory: str,
config_settings: Optional[Dict[str, Any]] = None,
metadata_directory: Optional[str] = None,
) -> str:
"""Build a wheel"""
logger = logging.getLogger("xgboost.packager.build_wheel")
build_config = BuildConfiguration()
build_config.update(config_settings)
logger.info("Parsed build configuration: %s", dataclasses.asdict(build_config))
# Create tempdir with Python package + libxgboost
with tempfile.TemporaryDirectory() as td:
td_path = pathlib.Path(td)
build_dir = td_path / "libbuild"
build_dir.mkdir()
workspace = td_path / "whl_workspace"
workspace.mkdir()
logger.info("Copying project files to temporary directory %s", str(workspace))
copy_with_logging(TOPLEVEL_DIR / "pyproject.toml", workspace, logger=logger)
copy_with_logging(TOPLEVEL_DIR / "hatch_build.py", workspace, logger=logger)
copy_with_logging(TOPLEVEL_DIR / "README.rst", workspace, logger=logger)
pkg_path = workspace / "xgboost"
copytree_with_logging(TOPLEVEL_DIR / "xgboost", pkg_path, logger=logger)
lib_path = pkg_path / "lib"
lib_path.mkdir()
libxgboost = locate_or_build_libxgboost(
TOPLEVEL_DIR, build_dir=build_dir, build_config=build_config
)
if not build_config.use_system_libxgboost:
copy_with_logging(libxgboost, lib_path, logger=logger)
with cd(workspace):
wheel_name = hatchling.build.build_wheel(
wheel_directory, config_settings, metadata_directory
)
return wheel_name
def build_sdist(
sdist_directory: str,
config_settings: Optional[Dict[str, Any]] = None,
) -> str:
"""Build a source distribution"""
logger = logging.getLogger("xgboost.packager.build_sdist")
if config_settings:
raise NotImplementedError(
"XGBoost's custom build backend doesn't support config_settings option "
f"when building sdist. {config_settings=}"
)
cpp_src_dir = TOPLEVEL_DIR.parent
if not cpp_src_dir.joinpath("CMakeLists.txt").exists():
raise RuntimeError(f"Did not find CMakeLists.txt from {cpp_src_dir}")
# Create tempdir with Python package + C++ sources
with tempfile.TemporaryDirectory() as td:
td_path = pathlib.Path(td)
workspace = td_path / "sdist_workspace"
workspace.mkdir()
logger.info("Copying project files to temporary directory %s", str(workspace))
copy_with_logging(TOPLEVEL_DIR / "pyproject.toml", workspace, logger=logger)
copy_with_logging(TOPLEVEL_DIR / "hatch_build.py", workspace, logger=logger)
copy_with_logging(TOPLEVEL_DIR / "README.rst", workspace, logger=logger)
copytree_with_logging(
TOPLEVEL_DIR / "xgboost", workspace / "xgboost", logger=logger
)
copytree_with_logging(
TOPLEVEL_DIR / "packager", workspace / "packager", logger=logger
)
temp_cpp_src_dir = workspace / "cpp_src"
copy_cpp_src_tree(cpp_src_dir, target_dir=temp_cpp_src_dir, logger=logger)
with cd(workspace):
sdist_name = hatchling.build.build_sdist(sdist_directory, config_settings)
return sdist_name
def build_editable(
wheel_directory: str,
config_settings: Optional[Dict[str, Any]] = None,
metadata_directory: Optional[str] = None,
) -> str:
"""Build an editable installation. We mostly delegate to Hatchling."""
logger = logging.getLogger("xgboost.packager.build_editable")
if config_settings:
raise NotImplementedError(
"XGBoost's custom build backend doesn't support config_settings option "
f"when building editable installation. {config_settings=}"
)
if locate_local_libxgboost(TOPLEVEL_DIR, logger=logger) is None:
raise RuntimeError(
"To use the editable installation, first build libxgboost with CMake. "
"See https://xgboost.readthedocs.io/en/latest/build.html for detailed instructions."
)
return hatchling.build.build_editable(
wheel_directory, config_settings, metadata_directory
)
| 5,643
| 34.496855
| 96
|
py
|
xgboost
|
xgboost-master/python-package/packager/nativelib.py
|
"""
Functions for building libxgboost
"""
import logging
import os
import pathlib
import shutil
import subprocess
import sys
from platform import system
from typing import Optional
from .build_config import BuildConfiguration
def _lib_name() -> str:
"""Return platform dependent shared object name."""
if system() in ["Linux", "OS400"] or system().upper().endswith("BSD"):
name = "libxgboost.so"
elif system() == "Darwin":
name = "libxgboost.dylib"
elif system() == "Windows":
name = "xgboost.dll"
else:
raise NotImplementedError(f"System {system()} not supported")
return name
def build_libxgboost(
cpp_src_dir: pathlib.Path,
build_dir: pathlib.Path,
build_config: BuildConfiguration,
) -> pathlib.Path:
"""Build libxgboost in a temporary directory and obtain the path to built libxgboost"""
logger = logging.getLogger("xgboost.packager.build_libxgboost")
if not cpp_src_dir.is_dir():
raise RuntimeError(f"Expected {cpp_src_dir} to be a directory")
logger.info(
"Building %s from the C++ source files in %s...", _lib_name(), str(cpp_src_dir)
)
def _build(*, generator: str) -> None:
cmake_cmd = [
"cmake",
str(cpp_src_dir),
generator,
"-DKEEP_BUILD_ARTIFACTS_IN_BINARY_DIR=ON",
]
cmake_cmd.extend(build_config.get_cmake_args())
# Flag for cross-compiling for Apple Silicon
# We use environment variable because it's the only way to pass down custom flags
# through the cibuildwheel package, which calls `pip wheel` command.
if "CIBW_TARGET_OSX_ARM64" in os.environ:
cmake_cmd.append("-DCMAKE_OSX_ARCHITECTURES=arm64")
logger.info("CMake args: %s", str(cmake_cmd))
subprocess.check_call(cmake_cmd, cwd=build_dir)
if system() == "Windows":
subprocess.check_call(
["cmake", "--build", ".", "--config", "Release"], cwd=build_dir
)
else:
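            # `build_tool` is a closure variable: it is assigned in the
            # non-Windows branch further below before _build() is invoked.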
nproc = os.cpu_count()
assert build_tool is not None
subprocess.check_call([build_tool, f"-j{nproc}"], cwd=build_dir)
if system() == "Windows":
supported_generators = (
"-GVisual Studio 17 2022",
"-GVisual Studio 16 2019",
"-GVisual Studio 15 2017",
"-GMinGW Makefiles",
)
for generator in supported_generators:
try:
_build(generator=generator)
logger.info(
"Successfully built %s using generator %s", _lib_name(), generator
)
break
except subprocess.CalledProcessError as e:
logger.info(
"Tried building with generator %s but failed with exception %s",
generator,
str(e),
)
# Empty build directory
shutil.rmtree(build_dir)
build_dir.mkdir()
else:
raise RuntimeError(
"None of the supported generators produced a successful build!"
f"Supported generators: {supported_generators}"
)
else:
build_tool = "ninja" if shutil.which("ninja") else "make"
generator = "-GNinja" if build_tool == "ninja" else "-GUnix Makefiles"
try:
_build(generator=generator)
except subprocess.CalledProcessError as e:
logger.info("Failed to build with OpenMP. Exception: %s", str(e))
build_config.use_openmp = False
_build(generator=generator)
return build_dir / "lib" / _lib_name()
def locate_local_libxgboost(
toplevel_dir: pathlib.Path,
logger: logging.Logger,
) -> Optional[pathlib.Path]:
"""
Locate libxgboost from the local project directory's lib/ subdirectory.
"""
libxgboost = toplevel_dir.parent / "lib" / _lib_name()
if libxgboost.exists():
logger.info("Found %s at %s", libxgboost.name, str(libxgboost.parent))
return libxgboost
return None
def locate_or_build_libxgboost(
toplevel_dir: pathlib.Path,
build_dir: pathlib.Path,
build_config: BuildConfiguration,
) -> pathlib.Path:
"""Locate libxgboost; if not exist, build it"""
logger = logging.getLogger("xgboost.packager.locate_or_build_libxgboost")
if build_config.use_system_libxgboost:
# Find libxgboost from system prefix
sys_base_prefix = pathlib.Path(sys.base_prefix).absolute().resolve()
libxgboost_sys = sys_base_prefix / "lib" / _lib_name()
if not libxgboost_sys.exists():
raise RuntimeError(
f"use_system_libxgboost was specified but {_lib_name()} is "
f"not found in {libxgboost_sys.parent}"
)
logger.info("Using system XGBoost: %s", str(libxgboost_sys))
return libxgboost_sys
libxgboost = locate_local_libxgboost(toplevel_dir, logger=logger)
if libxgboost is not None:
return libxgboost
if toplevel_dir.joinpath("cpp_src").exists():
# Source distribution; all C++ source files to be found in cpp_src/
cpp_src_dir = toplevel_dir.joinpath("cpp_src")
else:
# Probably running "pip install ." from python-package/
cpp_src_dir = toplevel_dir.parent
if not cpp_src_dir.joinpath("CMakeLists.txt").exists():
raise RuntimeError(f"Did not find CMakeLists.txt from {cpp_src_dir}")
return build_libxgboost(cpp_src_dir, build_dir=build_dir, build_config=build_config)
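# Hedged summary (comment only) of the resolution order implemented above:
#   1. build_config.use_system_libxgboost -> look under {sys.base_prefix}/lib/
#   2. a previously built library in the repository's lib/ directory
#   3. otherwise build with CMake from cpp_src/ (sdist layout) or the repo root.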
| 5,623
| 34.371069
| 91
|
py
|
xgboost
|
xgboost-master/python-package/packager/util.py
|
"""
Utility functions for implementing PEP 517 backend
"""
import logging
import pathlib
import shutil
def copytree_with_logging(
src: pathlib.Path, dest: pathlib.Path, logger: logging.Logger
) -> None:
"""Call shutil.copytree() with logging"""
logger.info("Copying %s -> %s", str(src), str(dest))
shutil.copytree(src, dest)
def copy_with_logging(
src: pathlib.Path, dest: pathlib.Path, logger: logging.Logger
) -> None:
"""Call shutil.copy() with logging"""
if dest.is_dir():
logger.info("Copying %s -> %s", str(src), str(dest / src.name))
else:
logger.info("Copying %s -> %s", str(src), str(dest))
shutil.copy(src, dest)
| 679
| 25.153846
| 71
|
py
|
xgboost
|
xgboost-master/python-package/packager/__init__.py
| 0
| 0
| 0
|
py
|
|
xgboost
|
xgboost-master/jvm-packages/create_jni.py
|
#!/usr/bin/env python
import errno
import argparse
import glob
import os
import platform
import shutil
import subprocess
import sys
from contextlib import contextmanager
# Normalize sys.platform: older Pythons report "linux2"/"linux3", while
# Python 3.3+ reports "linux".
if sys.platform.startswith("linux"):
    sys.platform = "linux"
CONFIG = {
"USE_OPENMP": "ON",
"USE_HDFS": "OFF",
"USE_AZURE": "OFF",
"USE_S3": "OFF",
"USE_CUDA": "OFF",
"USE_NCCL": "OFF",
"JVM_BINDINGS": "ON",
"LOG_CAPI_INVOCATION": "OFF"
}
@contextmanager
def cd(path):
path = normpath(path)
cwd = os.getcwd()
os.chdir(path)
print("cd " + path)
try:
yield path
finally:
os.chdir(cwd)
def maybe_makedirs(path):
path = normpath(path)
print("mkdir -p " + path)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def run(command, **kwargs):
print(command)
subprocess.check_call(command, shell=True, **kwargs)
def cp(source, target):
source = normpath(source)
target = normpath(target)
print("cp {0} {1}".format(source, target))
shutil.copy(source, target)
def normpath(path):
"""Normalize UNIX path to a native path."""
normalized = os.path.join(*path.split("/"))
if os.path.isabs(path):
return os.path.abspath("/") + normalized
else:
return normalized
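# Hedged mini-example (comment only; the path is illustrative): on Windows,
# normpath("demo/CLI/regression") returns r"demo\CLI\regression", while on
# POSIX systems the path comes back unchanged.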
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--log-capi-invocation', type=str, choices=['ON', 'OFF'], default='OFF')
parser.add_argument('--use-cuda', type=str, choices=['ON', 'OFF'], default='OFF')
cli_args = parser.parse_args()
if sys.platform == "darwin":
        # Enable if your compiler supports OpenMP.
CONFIG["USE_OPENMP"] = "OFF"
os.environ["JAVA_HOME"] = subprocess.check_output(
"/usr/libexec/java_home").strip().decode()
print("building Java wrapper")
with cd(".."):
build_dir = 'build-gpu' if cli_args.use_cuda == 'ON' else 'build'
maybe_makedirs(build_dir)
with cd(build_dir):
if sys.platform == "win32":
# Force x64 build on Windows.
maybe_generator = ' -A x64'
else:
maybe_generator = ""
if sys.platform == "linux":
maybe_parallel_build = " -- -j $(nproc)"
else:
maybe_parallel_build = ""
if cli_args.log_capi_invocation == 'ON':
CONFIG['LOG_CAPI_INVOCATION'] = 'ON'
if cli_args.use_cuda == 'ON':
CONFIG['USE_CUDA'] = 'ON'
CONFIG['USE_NCCL'] = 'ON'
args = ["-D{0}:BOOL={1}".format(k, v) for k, v in CONFIG.items()]
            # if environment variable RABIT_MOCK is set
if os.getenv("RABIT_MOCK", None) is not None:
args.append("-DRABIT_MOCK:BOOL=ON")
            # if environment variable GPU_ARCH_FLAG is set
gpu_arch_flag = os.getenv("GPU_ARCH_FLAG", None)
if gpu_arch_flag is not None:
args.append("%s" % gpu_arch_flag)
lib_dir = os.path.join(os.pardir, 'lib')
if os.path.exists(lib_dir):
shutil.rmtree(lib_dir)
run("cmake .. " + " ".join(args) + maybe_generator)
run("cmake --build . --config Release" + maybe_parallel_build)
with cd("demo/CLI/regression"):
run(f'"{sys.executable}" mapfeat.py')
run(f'"{sys.executable}" mknfold.py machine.txt 1')
xgboost4j = 'xgboost4j-gpu' if cli_args.use_cuda == 'ON' else 'xgboost4j'
xgboost4j_spark = 'xgboost4j-spark-gpu' if cli_args.use_cuda == 'ON' else 'xgboost4j-spark'
print("copying native library")
library_name, os_folder = {
"Windows": ("xgboost4j.dll", "windows"),
"Darwin": ("libxgboost4j.dylib", "macos"),
"Linux": ("libxgboost4j.so", "linux"),
"SunOS": ("libxgboost4j.so", "solaris"),
}[platform.system()]
arch_folder = {
"x86_64": "x86_64", # on Linux & macOS x86_64
"amd64": "x86_64", # on Windows x86_64
"i86pc": "x86_64", # on Solaris x86_64
"sun4v": "sparc", # on Solaris sparc
"arm64": "aarch64", # on macOS & Windows ARM 64-bit
"aarch64": "aarch64"
}[platform.machine().lower()]
output_folder = "{}/src/main/resources/lib/{}/{}".format(xgboost4j, os_folder, arch_folder)
maybe_makedirs(output_folder)
cp("../lib/" + library_name, output_folder)
print("copying pure-Python tracker")
cp("../python-package/xgboost/tracker.py", "{}/src/main/resources".format(xgboost4j))
print("copying train/test files")
maybe_makedirs("{}/src/test/resources".format(xgboost4j_spark))
with cd("../demo/CLI/regression"):
run(f'"{sys.executable}" mapfeat.py')
run(f'"{sys.executable}" mknfold.py machine.txt 1')
for file in glob.glob("../demo/CLI/regression/machine.txt.t*"):
cp(file, "{}/src/test/resources".format(xgboost4j_spark))
for file in glob.glob("../demo/data/agaricus.*"):
cp(file, "{}/src/test/resources".format(xgboost4j_spark))
maybe_makedirs("{}/src/test/resources".format(xgboost4j))
for file in glob.glob("../demo/data/agaricus.*"):
cp(file, "{}/src/test/resources".format(xgboost4j))
| 5,347
| 31.023952
| 96
|
py
|
xgboost
|
xgboost-master/jvm-packages/xgboost4j-tester/generate_pom.py
|
import sys
pom_template = """
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-tester_{scala_binary_version}</artifactId>
<version>1.0-SNAPSHOT</version>
<name>xgboost4j-tester</name>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<maven.compiler.source>{maven_compiler_source}</maven.compiler.source>
<maven.compiler.target>{maven_compiler_target}</maven.compiler.target>
<junit.version>4.13.2</junit.version>
<spark.version>{spark_version}</spark.version>
<scala.version>{scala_version}</scala.version>
<scalatest.version>3.2.15</scalatest.version>
<scala.binary.version>{scala_binary_version}</scala.binary.version>
<kryo.version>5.5.0</kryo.version>
</properties>
<dependencies>
<dependency>
<groupId>com.esotericsoftware</groupId>
<artifactId>kryo</artifactId>
<version>${{kryo.version}}</version>
</dependency>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-compiler</artifactId>
<version>${{scala.version}}</version>
</dependency>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-reflect</artifactId>
<version>${{scala.version}}</version>
</dependency>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${{scala.version}}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>1.2</version>
</dependency>
<dependency>
<groupId>org.scalatest</groupId>
<artifactId>scalatest_${{scala.binary.version}}</artifactId>
<version>${{scalatest.version}}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_${{scala.binary.version}}</artifactId>
<version>${{spark.version}}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_${{scala.binary.version}}</artifactId>
<version>${{spark.version}}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-mllib_${{scala.binary.version}}</artifactId>
<version>${{spark.version}}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${{junit.version}}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j_${{scala.binary.version}}</artifactId>
<version>{xgboost4j_version}</version>
</dependency>
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j_${{scala.binary.version}}</artifactId>
<version>{xgboost4j_version}</version>
<classifier>tests</classifier>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-spark_${{scala.binary.version}}</artifactId>
<version>{xgboost4j_version}</version>
</dependency>
<dependency>
<groupId>ml.dmlc</groupId>
<artifactId>xgboost4j-example_${{scala.binary.version}}</artifactId>
<version>{xgboost4j_version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
<archive>
<manifest>
<mainClass>ml.dmlc.xgboost4j.tester.App</mainClass>
</manifest>
</archive>
</configuration>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<dependenciesToScan>
<dependency>ml.dmlc:xgboost4j_${{scala.binary.version}}</dependency>
</dependenciesToScan>
</configuration>
</plugin>
</plugins>
</build>
</project>
"""
if __name__ == '__main__':
if len(sys.argv) != 7:
print('Usage: {} [xgboost4j version] [maven compiler source level] [maven compiler target level] [spark version] [scala version] [scala binary version]'.format(sys.argv[0]))
sys.exit(1)
with open('pom.xml', 'w') as f:
print(pom_template.format(xgboost4j_version=sys.argv[1],
maven_compiler_source=sys.argv[2],
maven_compiler_target=sys.argv[3],
spark_version=sys.argv[4],
scala_version=sys.argv[5],
scala_binary_version=sys.argv[6]), file=f)
| 5,462
| 33.575949
| 177
|
py
|
xgboost
|
xgboost-master/jvm-packages/xgboost4j-tester/get_iris.py
|
import numpy as np
import pandas
from sklearn.datasets import load_iris
X, y = load_iris(return_X_y=True)
y = y.astype(np.int32)
df = pandas.DataFrame(data=X, columns=['sepal length', 'sepal width', 'petal length', 'petal width'])
class_id_to_name = {0:'Iris-setosa', 1:'Iris-versicolor', 2:'Iris-virginica'}
df['class'] = np.vectorize(class_id_to_name.get)(y)
df.to_csv('./iris.csv', float_format='%.1f', header=False, index=False)
| 434
| 38.545455
| 101
|
py
|
xgboost
|
xgboost-master/doc/sphinx_util.py
|
# -*- coding: utf-8 -*-
"""Helper utility function for customization."""
import os
import subprocess
import sys
READTHEDOCS_BUILD = (os.environ.get('READTHEDOCS', None) is not None)
if not os.path.exists('web-data'):
subprocess.call('rm -rf web-data;' +
'git clone https://github.com/dmlc/web-data', shell = True)
else:
subprocess.call('cd web-data; git pull', shell=True)
sys.stderr.write('READTHEDOCS=%s\n' % (READTHEDOCS_BUILD))
| 457
| 27.625
| 77
|
py
|
xgboost
|
xgboost-master/doc/conf.py
|
# -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 23 19:40:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import re
import shutil
import subprocess
import sys
import tarfile
import urllib.request
import warnings
from subprocess import call
from urllib.error import HTTPError
from sh.contrib import git
CURR_PATH = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
PROJECT_ROOT = os.path.normpath(os.path.join(CURR_PATH, os.path.pardir))
TMP_DIR = os.path.join(CURR_PATH, "tmp")
DOX_DIR = "doxygen"
def run_doxygen():
"""Run the doxygen make command in the designated folder."""
curdir = os.path.normpath(os.path.abspath(os.path.curdir))
if os.path.exists(TMP_DIR):
print(f"Delete directory {TMP_DIR}")
shutil.rmtree(TMP_DIR)
else:
print(f"Create directory {TMP_DIR}")
os.mkdir(TMP_DIR)
try:
os.chdir(PROJECT_ROOT)
if not os.path.exists(DOX_DIR):
os.mkdir(DOX_DIR)
os.chdir(os.path.join(PROJECT_ROOT, DOX_DIR))
print(
"Build doxygen at {}".format(
os.path.join(PROJECT_ROOT, DOX_DIR, "doc_doxygen")
)
)
subprocess.check_call(["cmake", "..", "-DBUILD_C_DOC=ON", "-GNinja"])
subprocess.check_call(["ninja", "doc_doxygen"])
src = os.path.join(PROJECT_ROOT, DOX_DIR, "doc_doxygen", "html")
dest = os.path.join(TMP_DIR, "dev")
print(f"Copy directory {src} -> {dest}")
shutil.copytree(src, dest)
except OSError as e:
sys.stderr.write("doxygen execution failed: %s" % e)
finally:
os.chdir(curdir)
def is_readthedocs_build():
if os.environ.get("READTHEDOCS", None) == "True":
return True
warnings.warn(
"Skipping Doxygen build... You won't have documentation for C/C++ functions. "
"Set environment variable READTHEDOCS=True if you want to build Doxygen. "
"(If you do opt in, make sure to install Doxygen, Graphviz, CMake, and C++ compiler "
"on your system.)"
)
return False
if is_readthedocs_build():
run_doxygen()
git_branch = os.getenv("SPHINX_GIT_BRANCH", default=None)
if not git_branch:
# If SPHINX_GIT_BRANCH environment variable is not given, run git
# to determine branch name
git_branch = [
re.sub(r"origin/", "", x.lstrip(" "))
for x in str(git.branch("-r", "--contains", "HEAD")).rstrip("\n").split("\n")
]
git_branch = [x for x in git_branch if "HEAD" not in x]
else:
git_branch = [git_branch]
print("git_branch = {}".format(git_branch[0]))
try:
filename, _ = urllib.request.urlretrieve(
f"https://s3-us-west-2.amazonaws.com/xgboost-docs/{git_branch[0]}.tar.bz2"
)
if not os.path.exists(TMP_DIR):
print(f"Create directory {TMP_DIR}")
os.mkdir(TMP_DIR)
jvm_doc_dir = os.path.join(TMP_DIR, "jvm")
if os.path.exists(jvm_doc_dir):
print(f"Delete directory {jvm_doc_dir}")
shutil.rmtree(jvm_doc_dir)
print(f"Create directory {jvm_doc_dir}")
os.mkdir(jvm_doc_dir)
with tarfile.open(filename, "r:bz2") as t:
t.extractall(jvm_doc_dir)
except HTTPError:
print("JVM doc not found. Skipping...")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
libpath = os.path.join(PROJECT_ROOT, "python-package/")
sys.path.insert(0, libpath)
sys.path.insert(0, CURR_PATH)
# -- General configuration ------------------------------------------------
# General information about the project.
project = "xgboost"
author = "%s developers" % project
copyright = "2022, %s" % author
github_doc_root = "https://github.com/dmlc/xgboost/tree/master/doc/"
os.environ["XGBOOST_BUILD_DOC"] = "1"
# Version information.
import xgboost # NOQA
version = xgboost.__version__
release = xgboost.__version__
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
"matplotlib.sphinxext.plot_directive",
"sphinxcontrib.jquery",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx.ext.intersphinx",
"sphinx_gallery.gen_gallery",
"breathe",
"myst_parser",
]
sphinx_gallery_conf = {
# path to your example scripts
"examples_dirs": ["../demo/guide-python", "../demo/dask", "../demo/aft_survival"],
# path to where to save gallery generated output
"gallery_dirs": [
"python/examples",
"python/dask-examples",
"python/survival-examples",
],
"matplotlib_animations": True,
}
autodoc_typehints = "description"
graphviz_output_format = "png"
plot_formats = [("svg", 300), ("png", 100), ("hires.png", 300)]
plot_html_show_source_link = False
plot_html_show_formats = False
# Breathe extension variables
breathe_projects = {}
if is_readthedocs_build():
breathe_projects = {
"xgboost": os.path.join(PROJECT_ROOT, DOX_DIR, "doc_doxygen/xml")
}
breathe_default_project = "xgboost"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
autoclass_content = "both"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
html_extra_path = []
if is_readthedocs_build():
html_extra_path = [TMP_DIR]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_options = {"logo_only": True}
html_logo = "https://raw.githubusercontent.com/dmlc/dmlc.github.io/master/img/logo-m/xgboost.png"
html_css_files = ["css/custom.css"]
html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "searchbox.html"]}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "%s.tex" % project, project, author, "manual"),
]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.8", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"sklearn": ("https://scikit-learn.org/stable", None),
"dask": ("https://docs.dask.org/en/stable/", None),
"distributed": ("https://distributed.dask.org/en/stable/", None),
"pyspark": ("https://spark.apache.org/docs/latest/api/python/", None),
}
def setup(app):
app.add_css_file("custom.css")
| 9,206
| 31.419014
| 97
|
py
|
missingpy
|
missingpy-master/setup.py
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="missingpy",
version="0.2.0",
author="Ashim Bhattarai",
description="Missing Data Imputation for Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/epsilon-machine/missingpy",
packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Operating System :: OS Independent",
    ],
)
| 616
| 28.380952
| 75
|
py
|
missingpy
|
missingpy-master/missingpy/pairwise_external.py
|
# This file is a modification of sklearn.metrics.pairwise
# Modifications by Ashim Bhattarai
"""
New BSD License
Copyright (c) 2007–2018 The scikit-learn developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of the Scikit-learn Developers nor the names of
its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
from __future__ import division
from functools import partial
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import issparse
from sklearn.metrics.pairwise import _VALID_METRICS, _return_float_dtype
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.utils import check_array
from .utils import masked_euclidean_distances
_MASKED_METRICS = ['masked_euclidean']
_VALID_METRICS += ['masked_euclidean']
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == missing_values."""
if value_to_mask == "NaN" or np.isnan(value_to_mask):
return np.isnan(X)
else:
return X == value_to_mask
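# Hedged mini-example (comment only; the array is an assumption):
#   _get_mask(np.array([[1.0, np.nan], [3.0, 4.0]]), np.nan)
# returns array([[False, True], [False, False]]).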
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None,
accept_sparse='csr', force_all_finite=True,
copy=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
accept_sparse : string, boolean or list/tuple of strings
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
force_all_finite : bool
Whether to raise an error on np.inf and np.nan in X (or Y if it exists)
copy : bool
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
X = check_array(X, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
force_all_finite = False if callable(metric) else True
X, Y = check_pairwise_arrays(X, Y, force_all_finite=force_all_finite)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
        # Make symmetric
        # NB: out += out.T would produce incorrect results here, because the
        # in-place addition reads from the same buffer it writes to (out.T is
        # a view of out, not a copy).
        out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS['masked_euclidean'] = masked_euclidean_distances
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix
inputs.
Also, ['masked_euclidean'] but it does not yet support sparse matrices.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if
metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
See also
--------
    pairwise_distances_chunked : performs the same calculation as this function,
but returns a generator of chunks of the distance matrix, in order to
limit memory usage.
paired_distances : Computes the distances between corresponding
elements of two arrays
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric in _MASKED_METRICS or callable(metric):
missing_values = kwds.get("missing_values") if kwds.get(
"missing_values") is not None else np.nan
if np.all(_get_mask(X.data if issparse(X) else X, missing_values)):
raise ValueError(
"One or more samples(s) only have missing values.")
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
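# A minimal, hedged usage sketch (illustrative only; the sample array is an
# assumption for demonstration): pairwise distances over data with missing
# entries via the 'masked_euclidean' metric registered above.
if __name__ == "__main__":
    X_demo = np.array([[3.0, np.nan, 5.0],
                       [1.0, 0.0, 0.0],
                       [3.3, np.nan, 5.1]])
    # Distances use only the coordinates observed in both rows, rescaled by
    # the ratio of total to observed coordinates (see
    # utils.masked_euclidean_distances for the exact weighting).
    print(pairwise_distances(X_demo, metric="masked_euclidean"))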
| 13,219
| 40.835443
| 79
|
py
|
missingpy
|
missingpy-master/missingpy/missforest.py
|
"""MissForest Imputer for Missing Data"""
# Author: Ashim Bhattarai
# License: GNU General Public License v3 (GPLv3)
import warnings
import numpy as np
from scipy.stats import mode
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted, check_array
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from .pairwise_external import _get_mask
__all__ = [
'MissForest',
]
class MissForest(BaseEstimator, TransformerMixin):
"""Missing value imputation using Random Forests.
MissForest imputes missing values using Random Forests in an iterative
fashion. By default, the imputer begins imputing missing values of the
column (which is expected to be a variable) with the smallest number of
missing values -- let's call this the candidate column.
The first step involves filling any missing values of the remaining,
non-candidate, columns with an initial guess, which is the column mean for
columns representing numerical variables and the column mode for columns
representing categorical variables. After that, the imputer fits a random
forest model with the candidate column as the outcome variable and the
remaining columns as the predictors over all rows where the candidate
column values are not missing.
After the fit, the missing rows of the candidate column are
imputed using the prediction from the fitted Random Forest. The
rows of the non-candidate columns act as the input data for the fitted
model.
Following this, the imputer moves on to the next candidate column with the
second smallest number of missing values from among the non-candidate
columns in the first round. The process repeats itself for each column
with a missing value, possibly over multiple iterations or epochs for
each column, until the stopping criterion is met.
The stopping criterion is governed by the "difference" between the imputed
arrays over successive iterations. For numerical variables (num_vars_),
the difference is defined as follows:
sum((X_new[:, num_vars_] - X_old[:, num_vars_]) ** 2) /
sum((X_new[:, num_vars_]) ** 2)
    For categorical variables (cat_vars_), the difference is defined as follows:
        sum(X_new[:, cat_vars_] != X_old[:, cat_vars_]) / n_cat_missing
where X_new is the newly imputed array, X_old is the array imputed in the
previous round, n_cat_missing is the total number of categorical
values that are missing, and the sum() is performed both across rows
and columns. Following [1], the stopping criterion is considered to have
been met when difference between X_new and X_old increases for the first
time for both types of variables (if available).
Parameters
----------
NOTE: Most parameter definitions below are taken verbatim from the
Scikit-Learn documentation at [2] and [3].
max_iter : int, optional (default = 10)
The maximum iterations of the imputation process. Each column with a
missing value is imputed exactly once in a given iteration.
decreasing : boolean, optional (default = False)
If set to True, columns are sorted according to decreasing number of
missing values. In other words, imputation will move from imputing
        columns with the largest number of missing values to columns with the
        fewest missing values.
missing_values : np.nan, integer, optional (default = np.nan)
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed.
copy : boolean, optional (default = True)
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible.
criterion : tuple, optional (default = ('mse', 'gini'))
        The function to measure the quality of a split. The first element of
the tuple is for the Random Forest Regressor (for imputing numerical
variables) while the second element is for the Random Forest
Classifier (for imputing categorical variables).
n_estimators : integer, optional (default=100)
The number of trees in the forest.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or \
None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
NOTE: This parameter is only applicable for Random Forest Classifier
objects (i.e., for categorical variables).
Attributes
----------
statistics_ : Dictionary of length two
The first element is an array with the mean of each numerical feature
being imputed while the second element is an array of modes of
categorical features being imputed (if available, otherwise it
will be None).
References
----------
* [1] Stekhoven, Daniel J., and Peter Bühlmann. "MissForest—non-parametric
missing value imputation for mixed-type data." Bioinformatics 28.1
      (2012): 112-118.
* [2] https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.
RandomForestRegressor.html#sklearn.ensemble.RandomForestRegressor
* [3] https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.
RandomForestClassifier.html#sklearn.ensemble.RandomForestClassifier
Examples
--------
>>> from missingpy import MissForest
>>> nan = float("NaN")
>>> X = [[1, 2, nan], [3, 4, 3], [nan, 6, 5], [8, 8, 7]]
>>> imputer = MissForest(random_state=1337)
>>> imputer.fit_transform(X)
Iteration: 0
Iteration: 1
Iteration: 2
array([[1. , 2. , 3.92 ],
[3. , 4. , 3. ],
[2.71, 6. , 5. ],
[8. , 8. , 7. ]])
"""
def __init__(self, max_iter=10, decreasing=False, missing_values=np.nan,
copy=True, n_estimators=100, criterion=('mse', 'gini'),
max_depth=None, min_samples_split=2, min_samples_leaf=1,
min_weight_fraction_leaf=0.0, max_features='auto',
max_leaf_nodes=None, min_impurity_decrease=0.0,
bootstrap=True, oob_score=False, n_jobs=-1, random_state=None,
verbose=0, warm_start=False, class_weight=None):
self.max_iter = max_iter
self.decreasing = decreasing
self.missing_values = missing_values
self.copy = copy
self.n_estimators = n_estimators
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def _miss_forest(self, Ximp, mask):
"""The missForest algorithm"""
# Count missing per column
col_missing_count = mask.sum(axis=0)
# Get col and row indices for missing
missing_rows, missing_cols = np.where(mask)
if self.num_vars_ is not None:
# Only keep indices for numerical vars
keep_idx_num = np.in1d(missing_cols, self.num_vars_)
missing_num_rows = missing_rows[keep_idx_num]
missing_num_cols = missing_cols[keep_idx_num]
# Make initial guess for missing values
col_means = np.full(Ximp.shape[1], fill_value=np.nan)
col_means[self.num_vars_] = self.statistics_.get('col_means')
Ximp[missing_num_rows, missing_num_cols] = np.take(
col_means, missing_num_cols)
# Reg criterion
reg_criterion = self.criterion if type(self.criterion) == str \
else self.criterion[0]
# Instantiate regression model
rf_regressor = RandomForestRegressor(
n_estimators=self.n_estimators,
criterion=reg_criterion,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
min_impurity_decrease=self.min_impurity_decrease,
bootstrap=self.bootstrap,
oob_score=self.oob_score,
n_jobs=self.n_jobs,
random_state=self.random_state,
verbose=self.verbose,
warm_start=self.warm_start)
# If needed, repeat for categorical variables
if self.cat_vars_ is not None:
# Calculate total number of missing categorical values (used later)
n_catmissing = np.sum(mask[:, self.cat_vars_])
# Only keep indices for categorical vars
keep_idx_cat = np.in1d(missing_cols, self.cat_vars_)
missing_cat_rows = missing_rows[keep_idx_cat]
missing_cat_cols = missing_cols[keep_idx_cat]
# Make initial guess for missing values
col_modes = np.full(Ximp.shape[1], fill_value=np.nan)
col_modes[self.cat_vars_] = self.statistics_.get('col_modes')
            Ximp[missing_cat_rows, missing_cat_cols] = np.take(
                col_modes, missing_cat_cols)
            # Classification criterion
clf_criterion = self.criterion if type(self.criterion) == str \
else self.criterion[1]
# Instantiate classification model
rf_classifier = RandomForestClassifier(
n_estimators=self.n_estimators,
criterion=clf_criterion,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
min_impurity_decrease=self.min_impurity_decrease,
bootstrap=self.bootstrap,
oob_score=self.oob_score,
n_jobs=self.n_jobs,
random_state=self.random_state,
verbose=self.verbose,
warm_start=self.warm_start,
class_weight=self.class_weight)
# 2. misscount_idx: sorted indices of cols in X based on missing count
misscount_idx = np.argsort(col_missing_count)
# Reverse order if decreasing is set to True
if self.decreasing is True:
misscount_idx = misscount_idx[::-1]
# 3. While new_gammas < old_gammas & self.iter_count_ < max_iter loop:
self.iter_count_ = 0
gamma_new = 0
gamma_old = np.inf
gamma_newcat = 0
gamma_oldcat = np.inf
col_index = np.arange(Ximp.shape[1])
while (
gamma_new < gamma_old or gamma_newcat < gamma_oldcat) and \
self.iter_count_ < self.max_iter:
# 4. store previously imputed matrix
Ximp_old = np.copy(Ximp)
if self.iter_count_ != 0:
gamma_old = gamma_new
gamma_oldcat = gamma_newcat
# 5. loop
for s in misscount_idx:
# Column indices other than the one being imputed
s_prime = np.delete(col_index, s)
# Get indices of rows where 's' is observed and missing
obs_rows = np.where(~mask[:, s])[0]
mis_rows = np.where(mask[:, s])[0]
# If no missing, then skip
if len(mis_rows) == 0:
continue
# Get observed values of 's'
yobs = Ximp[obs_rows, s]
# Get 'X' for both observed and missing 's' column
xobs = Ximp[np.ix_(obs_rows, s_prime)]
xmis = Ximp[np.ix_(mis_rows, s_prime)]
# 6. Fit a random forest over observed and predict the missing
if self.cat_vars_ is not None and s in self.cat_vars_:
rf_classifier.fit(X=xobs, y=yobs)
# 7. predict ymis(s) using xmis(x)
ymis = rf_classifier.predict(xmis)
# 8. update imputed matrix using predicted matrix ymis(s)
Ximp[mis_rows, s] = ymis
else:
rf_regressor.fit(X=xobs, y=yobs)
# 7. predict ymis(s) using xmis(x)
ymis = rf_regressor.predict(xmis)
# 8. update imputed matrix using predicted matrix ymis(s)
Ximp[mis_rows, s] = ymis
# 9. Update gamma (stopping criterion)
if self.cat_vars_ is not None:
                gamma_newcat = np.sum(
                    Ximp[:, self.cat_vars_] !=
                    Ximp_old[:, self.cat_vars_]) / n_catmissing
            if self.num_vars_ is not None:
                gamma_new = (np.sum((Ximp[:, self.num_vars_] -
                                     Ximp_old[:, self.num_vars_]) ** 2) /
                             np.sum(Ximp[:, self.num_vars_] ** 2))
print("Iteration:", self.iter_count_)
self.iter_count_ += 1
return Ximp_old
def fit(self, X, y=None, cat_vars=None):
"""Fit the imputer on X.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
cat_vars : int or array of ints, optional (default = None)
An int or an array containing column indices of categorical
variable(s)/feature(s) present in the dataset X.
``None`` if there are no categorical variables in the dataset.
Returns
-------
self : object
Returns self.
"""
# Check data integrity and calling arguments
force_all_finite = False if self.missing_values in ["NaN",
np.nan] else True
X = check_array(X, accept_sparse=False, dtype=np.float64,
force_all_finite=force_all_finite, copy=self.copy)
# Check for +/- inf
if np.any(np.isinf(X)):
raise ValueError("+/- inf values are not supported.")
# Check if any column has all missing
mask = _get_mask(X, self.missing_values)
if np.any(mask.sum(axis=0) >= (X.shape[0])):
raise ValueError("One or more columns have all rows missing.")
# Check cat_vars type and convert if necessary
if cat_vars is not None:
if type(cat_vars) == int:
cat_vars = [cat_vars]
elif type(cat_vars) == list or type(cat_vars) == np.ndarray:
if np.array(cat_vars).dtype != int:
raise ValueError(
"cat_vars needs to be either an int or an array "
"of ints.")
else:
raise ValueError("cat_vars needs to be either an int or an array "
"of ints.")
# Identify numerical variables
num_vars = np.setdiff1d(np.arange(X.shape[1]), cat_vars)
num_vars = num_vars if len(num_vars) > 0 else None
# First replace missing values with NaN if it is something else
if self.missing_values not in ['NaN', np.nan]:
X[np.where(X == self.missing_values)] = np.nan
# Now, make initial guess for missing values
        col_means = np.nanmean(
            X[:, num_vars], axis=0) if num_vars is not None else None
col_modes = mode(
X[:, cat_vars], axis=0, nan_policy='omit')[0] if cat_vars is not \
None else None
self.cat_vars_ = cat_vars
self.num_vars_ = num_vars
self.statistics_ = {"col_means": col_means, "col_modes": col_modes}
return self
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : {array-like}, shape = [n_samples, n_features]
The input data to complete.
Returns
-------
X : {array-like}, shape = [n_samples, n_features]
The imputed dataset.
"""
# Confirm whether fit() has been called
check_is_fitted(self, ["cat_vars_", "num_vars_", "statistics_"])
# Check data integrity
force_all_finite = False if self.missing_values in ["NaN",
np.nan] else True
X = check_array(X, accept_sparse=False, dtype=np.float64,
force_all_finite=force_all_finite, copy=self.copy)
# Check for +/- inf
if np.any(np.isinf(X)):
raise ValueError("+/- inf values are not supported.")
# Check if any column has all missing
mask = _get_mask(X, self.missing_values)
if np.any(mask.sum(axis=0) >= (X.shape[0])):
raise ValueError("One or more columns have all rows missing.")
# Get fitted X col count and ensure correct dimension
n_cols_fit_X = (0 if self.num_vars_ is None else len(self.num_vars_)) \
+ (0 if self.cat_vars_ is None else len(self.cat_vars_))
_, n_cols_X = X.shape
if n_cols_X != n_cols_fit_X:
raise ValueError("Incompatible dimension between the fitted "
"dataset and the one to be transformed.")
# Check if anything is actually missing and if not return original X
mask = _get_mask(X, self.missing_values)
if not mask.sum() > 0:
warnings.warn("No missing value located; returning original "
"dataset.")
return X
# row_total_missing = mask.sum(axis=1)
# if not np.any(row_total_missing):
# return X
# Call missForest function to impute missing
X = self._miss_forest(X, mask)
# Return imputed dataset
return X
def fit_transform(self, X, y=None, **fit_params):
"""Fit MissForest and impute all missing values in X.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
X : {array-like}, shape (n_samples, n_features)
Returns imputed dataset.
"""
return self.fit(X, **fit_params).transform(X)
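
# --- Editorial sketch (not part of the original file) -----------------------
# Hand-checkable illustration of the stopping criterion described in the
# class docstring, assuming one numerical column (index 0) and one
# categorical column (index 1); all values below are made up.
#
#     import numpy as np
#     X_old = np.array([[1.0, 0.0], [2.0, 1.0]])  # imputed at iteration t
#     X_new = np.array([[1.5, 0.0], [2.0, 0.0]])  # imputed at iteration t + 1
#     num_vars_, cat_vars_, n_cat_missing = [0], [1], 1
#     gamma_num = (np.sum((X_new[:, num_vars_] - X_old[:, num_vars_]) ** 2)
#                  / np.sum(X_new[:, num_vars_] ** 2))   # 0.25 / 6.25 = 0.04
#     gamma_cat = (np.sum(X_new[:, cat_vars_] != X_old[:, cat_vars_])
#                  / n_cat_missing)                      # 1 / 1 = 1.0
#     # Iteration stops the first time both differences increase.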
| 24,673
| 43.298025
| 137
|
py
|
missingpy
|
missingpy-master/missingpy/knnimpute.py
|
"""KNN Imputer for Missing Data"""
# Author: Ashim Bhattarai
# License: GNU General Public License v3 (GPLv3)
import warnings
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.validation import FLOAT_DTYPES
from sklearn.neighbors.base import _check_weights
from sklearn.neighbors.base import _get_weights
from .pairwise_external import pairwise_distances
from .pairwise_external import _get_mask
from .pairwise_external import _MASKED_METRICS
__all__ = [
'KNNImputer',
]
class KNNImputer(BaseEstimator, TransformerMixin):
"""Imputation for completing missing values using k-Nearest Neighbors.
Each sample's missing values are imputed using values from ``n_neighbors``
nearest neighbors found in the training set. Each missing feature is then
imputed as the average, either weighted or unweighted, of these neighbors.
Note that if a sample has more than one feature missing, then the
neighbors for that sample can be different depending on the particular
feature being imputed. Finally, where the number of donor neighbors is
less than ``n_neighbors``, the training set average for that feature is
used during imputation.
Parameters
----------
missing_values : integer or "NaN", optional (default = "NaN")
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For missing values encoded as
``np.nan``, use the string value "NaN".
n_neighbors : int, optional (default = 5)
Number of neighboring samples to use for imputation.
weights : str or callable, optional (default = "uniform")
Weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
          greater influence than neighbors which are farther away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
metric : str or callable, optional (default = "masked_euclidean")
Distance metric for searching neighbors. Possible values:
- 'masked_euclidean'
- [callable] : a user-defined function which conforms to the
definition of _pairwise_callable(X, Y, metric, **kwds). In other
words, the function accepts two arrays, X and Y, and a
``missing_values`` keyword in **kwds and returns a scalar distance
value.
row_max_missing : float, optional (default = 0.5)
The maximum fraction of columns (i.e. features) that can be missing
before the sample is excluded from nearest neighbor imputation. It
        means that such rows will not be considered potential donors in
``fit()``, and in ``transform()`` their missing feature values will be
imputed to be the column mean for the entire dataset.
col_max_missing : float, optional (default = 0.8)
The maximum fraction of rows (or samples) that can be missing
for any feature beyond which an error is raised.
copy : boolean, optional (default = True)
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible. Note that, if metric is
"masked_euclidean" and copy=False then missing_values in the
input matrix X will be overwritten with zeros.
Attributes
----------
statistics_ : 1-D array of length {n_features}
The 1-D array contains the mean of each feature calculated using
observed (i.e. non-missing) values. This is used for imputing
missing values in samples that are either excluded from nearest
neighbors search because they have too many ( > row_max_missing)
missing features or because all of the sample's k-nearest neighbors
(i.e., the potential donors) also have the relevant feature value
missing.
References
----------
* Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor
Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing
value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17
no. 6, 2001 Pages 520-525.
Examples
--------
>>> from missingpy import KNNImputer
>>> nan = float("NaN")
>>> X = [[1, 2, nan], [3, 4, 3], [nan, 6, 5], [8, 8, 7]]
>>> imputer = KNNImputer(n_neighbors=2, weights="uniform")
>>> imputer.fit_transform(X)
array([[1. , 2. , 4. ],
[3. , 4. , 3. ],
[5.5, 6. , 5. ],
[8. , 8. , 7. ]])
"""
def __init__(self, missing_values="NaN", n_neighbors=5,
weights="uniform", metric="masked_euclidean",
row_max_missing=0.5, col_max_missing=0.8, copy=True):
self.missing_values = missing_values
self.n_neighbors = n_neighbors
self.weights = weights
self.metric = metric
self.row_max_missing = row_max_missing
self.col_max_missing = col_max_missing
self.copy = copy
def _impute(self, dist, X, fitted_X, mask, mask_fx):
"""Helper function to find and impute missing values"""
# For each column, find and impute
n_rows_X, n_cols_X = X.shape
for c in range(n_cols_X):
if not np.any(mask[:, c], axis=0):
continue
# Row index for receivers and potential donors (pdonors)
receivers_row_idx = np.where(mask[:, c])[0]
pdonors_row_idx = np.where(~mask_fx[:, c])[0]
# Impute using column mean if n_neighbors are not available
if len(pdonors_row_idx) < self.n_neighbors:
warnings.warn("Insufficient number of neighbors! "
"Filling in column mean.")
X[receivers_row_idx, c] = self.statistics_[c]
continue
# Get distance from potential donors
dist_pdonors = dist[receivers_row_idx][:, pdonors_row_idx]
dist_pdonors = dist_pdonors.reshape(-1,
len(pdonors_row_idx))
# Argpartition to separate actual donors from the rest
pdonors_idx = np.argpartition(
dist_pdonors, self.n_neighbors - 1, axis=1)
# Get final donors row index from pdonors
donors_idx = pdonors_idx[:, :self.n_neighbors]
# Get weights or None
dist_pdonors_rows = np.arange(len(donors_idx))[:, None]
weight_matrix = _get_weights(
dist_pdonors[
dist_pdonors_rows, donors_idx], self.weights)
donor_row_idx_ravel = donors_idx.ravel()
# Retrieve donor values and calculate kNN score
fitted_X_temp = fitted_X[pdonors_row_idx]
donors = fitted_X_temp[donor_row_idx_ravel, c].reshape(
(-1, self.n_neighbors))
donors_mask = _get_mask(donors, self.missing_values)
donors = np.ma.array(donors, mask=donors_mask)
# Final imputation
imputed = np.ma.average(donors, axis=1,
weights=weight_matrix)
X[receivers_row_idx, c] = imputed.data
return X
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : object
Returns self.
"""
# Check data integrity and calling arguments
force_all_finite = False if self.missing_values in ["NaN",
np.nan] else True
if not force_all_finite:
if self.metric not in _MASKED_METRICS and not callable(
self.metric):
raise ValueError(
"The selected metric does not support NaN values.")
X = check_array(X, accept_sparse=False, dtype=np.float64,
force_all_finite=force_all_finite, copy=self.copy)
self.weights = _check_weights(self.weights)
# Check for +/- inf
if np.any(np.isinf(X)):
raise ValueError("+/- inf values are not allowed.")
# Check if % missing in any column > col_max_missing
mask = _get_mask(X, self.missing_values)
if np.any(mask.sum(axis=0) > (X.shape[0] * self.col_max_missing)):
raise ValueError("Some column(s) have more than {}% missing values"
.format(self.col_max_missing * 100))
X_col_means = np.ma.array(X, mask=mask).mean(axis=0).data
# Check if % missing in any row > row_max_missing
bad_rows = mask.sum(axis=1) > (mask.shape[1] * self.row_max_missing)
if np.any(bad_rows):
warnings.warn(
"There are rows with more than {0}% missing values. These "
"rows are not included as donor neighbors."
.format(self.row_max_missing * 100))
# Remove rows that have more than row_max_missing % missing
X = X[~bad_rows, :]
# Check if sufficient neighboring samples available
if X.shape[0] < self.n_neighbors:
raise ValueError("There are only %d samples, but n_neighbors=%d."
% (X.shape[0], self.n_neighbors))
self.fitted_X_ = X
self.statistics_ = X_col_means
return self
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : {array-like}, shape = [n_samples, n_features]
The input data to complete.
Returns
-------
X : {array-like}, shape = [n_samples, n_features]
The imputed dataset.
"""
check_is_fitted(self, ["fitted_X_", "statistics_"])
force_all_finite = False if self.missing_values in ["NaN",
np.nan] else True
X = check_array(X, accept_sparse=False, dtype=FLOAT_DTYPES,
force_all_finite=force_all_finite, copy=self.copy)
# Check for +/- inf
if np.any(np.isinf(X)):
raise ValueError("+/- inf values are not allowed in data to be "
"transformed.")
# Get fitted data and ensure correct dimension
n_rows_fit_X, n_cols_fit_X = self.fitted_X_.shape
n_rows_X, n_cols_X = X.shape
if n_cols_X != n_cols_fit_X:
raise ValueError("Incompatible dimension between the fitted "
"dataset and the one to be transformed.")
mask = _get_mask(X, self.missing_values)
row_total_missing = mask.sum(axis=1)
if not np.any(row_total_missing):
return X
# Check for excessive missingness in rows
bad_rows = row_total_missing > (mask.shape[1] * self.row_max_missing)
if np.any(bad_rows):
warnings.warn(
"There are rows with more than {0}% missing values. The "
"missing features in these rows are imputed with column means."
.format(self.row_max_missing * 100))
X_bad = X[bad_rows, :]
X = X[~bad_rows, :]
mask = mask[~bad_rows]
row_total_missing = mask.sum(axis=1)
        row_has_missing = row_total_missing.astype(bool)
if np.any(row_has_missing):
# Mask for fitted_X
mask_fx = _get_mask(self.fitted_X_, self.missing_values)
# Pairwise distances between receivers and fitted samples
dist = np.empty((len(X), len(self.fitted_X_)))
dist[row_has_missing] = pairwise_distances(
X[row_has_missing], self.fitted_X_, metric=self.metric,
squared=False, missing_values=self.missing_values)
# Find and impute missing
X = self._impute(dist, X, self.fitted_X_, mask, mask_fx)
# Merge bad rows to X and mean impute their missing values
if np.any(bad_rows):
bad_missing_index = np.where(_get_mask(X_bad, self.missing_values))
X_bad[bad_missing_index] = np.take(self.statistics_,
bad_missing_index[1])
X_merged = np.empty((n_rows_X, n_cols_X))
X_merged[bad_rows, :] = X_bad
X_merged[~bad_rows, :] = X
X = X_merged
return X
def fit_transform(self, X, y=None, **fit_params):
"""Fit KNNImputer and impute all missing values in X.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
X : {array-like}, shape (n_samples, n_features)
Returns imputed dataset.
"""
return self.fit(X).transform(X)
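
# --- Editorial sketch (not part of the original file) -----------------------
# The donor-selection step of ``_impute`` above, reduced to one receiver and
# plain numpy. ``dists`` holds made-up distances from the receiver to four
# potential donors; ``np.argpartition`` places the ``n_neighbors`` smallest
# distances first without fully sorting, exactly as in the method.
#
#     import numpy as np
#     n_neighbors = 2
#     dists = np.array([3.0, 1.0, 4.0, 2.0])      # receiver -> donors
#     donor_vals = np.array([10.0, 20.0, 30.0, 40.0])
#     donors = np.argpartition(dists, n_neighbors - 1)[:n_neighbors]
#     imputed = donor_vals[donors].mean()          # donors 1 and 3 -> 30.0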
| 13,456
| 39.902736
| 79
|
py
|
missingpy
|
missingpy-master/missingpy/utils.py
|
"""Utility Functions"""
# Author: Ashim Bhattarai
# License: BSD 3 clause
import numpy as np
def masked_euclidean_distances(X, Y=None, squared=False,
missing_values="NaN", copy=True):
"""Calculates euclidean distances in the presence of missing values
Computes the euclidean distance between each pair of samples (rows) in X
and Y, where Y=X is assumed if Y=None.
When calculating the distance between a pair of samples, this formulation
essentially zero-weights feature coordinates with a missing value in either
sample and scales up the weight of the remaining coordinates:
dist(x,y) = sqrt(weight * sq. distance from non-missing coordinates)
where,
weight = Total # of coordinates / # of non-missing coordinates
Note that if all the coordinates are missing or if there are no common
non-missing coordinates then NaN is returned for that pair.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
squared : boolean, optional
Return squared Euclidean distances.
missing_values : "NaN" or integer, optional
Representation of missing value
copy : boolean, optional
Make and use a deep copy of X and Y (if Y exists)
Returns
-------
distances : {array}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from missingpy.utils import masked_euclidean_distances
>>> nan = float("NaN")
>>> X = [[0, 1], [1, nan]]
>>> # distance between rows of X
>>> masked_euclidean_distances(X, X)
array([[0. , 1.41421356],
[1.41421356, 0. ]])
>>> # get distance to origin
>>> masked_euclidean_distances(X, [[0, 0]])
array([[1. ],
[1.41421356]])
References
----------
* John K. Dixon, "Pattern Recognition with Partly Missing Data",
IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:
10, pp. 617 - 621, Oct. 1979.
http://ieeexplore.ieee.org/abstract/document/4310090/
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
# Import here to prevent circular import
from .pairwise_external import _get_mask, check_pairwise_arrays
# NOTE: force_all_finite=False allows not only NaN but also +/- inf
X, Y = check_pairwise_arrays(X, Y, accept_sparse=False,
force_all_finite=False, copy=copy)
if (np.any(np.isinf(X)) or
(Y is not X and np.any(np.isinf(Y)))):
raise ValueError(
"+/- Infinite values are not allowed.")
# Get missing mask for X and Y.T
mask_X = _get_mask(X, missing_values)
YT = Y.T
mask_YT = mask_X.T if Y is X else _get_mask(YT, missing_values)
# Check if any rows have only missing value
if np.any(mask_X.sum(axis=1) == X.shape[1])\
or (Y is not X and np.any(mask_YT.sum(axis=0) == Y.shape[1])):
raise ValueError("One or more rows only contain missing values.")
# else:
if missing_values not in ["NaN", np.nan] and (
np.any(np.isnan(X)) or (Y is not X and np.any(np.isnan(Y)))):
raise ValueError(
"NaN values present but missing_value = {0}".format(
missing_values))
# Get mask of non-missing values set Y.T's missing to zero.
# Further, casting the mask to int to be used in formula later.
not_YT = (~mask_YT).astype(np.int32)
YT[mask_YT] = 0
# Get X's mask of non-missing values and set X's missing to zero
not_X = (~mask_X).astype(np.int32)
X[mask_X] = 0
# Calculate distances
# The following formula derived by:
# Shreya Bhattarai <shreya.bhattarai@gmail.com>
distances = (
(X.shape[1] / (np.dot(not_X, not_YT))) *
(np.dot(X * X, not_YT) - 2 * (np.dot(X, YT)) +
np.dot(not_X, YT * YT)))
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
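
# --- Editorial sketch (not part of the original file) -----------------------
# Hand-checking the weighting formula in the docstring on one pair of rows.
# For x = [0, 1] and y = [1, NaN] only coordinate 0 is jointly observed, so
# weight = 2 / 1 and dist = sqrt(2 * (0 - 1) ** 2) = sqrt(2) ~= 1.41421356,
# matching the doctest above.
#
#     import numpy as np
#     x, y = np.array([0., 1.]), np.array([1., np.nan])
#     both = ~(np.isnan(x) | np.isnan(y))      # jointly observed coordinates
#     weight = len(x) / both.sum()             # 2 / 1
#     print(np.sqrt(weight * np.sum((x[both] - y[both]) ** 2)))  # 1.4142...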
| 4,360
| 33.888
| 79
|
py
|
missingpy
|
missingpy-master/missingpy/__init__.py
|
from .knnimpute import KNNImputer
from .missforest import MissForest
__all__ = ['KNNImputer', 'MissForest']
| 109
| 21
| 38
|
py
|
missingpy
|
missingpy-master/missingpy/tests/test_knnimpute.py
|
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from missingpy import KNNImputer
from missingpy.pairwise_external import masked_euclidean_distances
from missingpy.pairwise_external import pairwise_distances
def test_knn_imputation_shape():
# Verify the shapes of the imputed matrix for different weights and
# number of neighbors.
n_rows = 10
n_cols = 2
X = np.random.rand(n_rows, n_cols)
X[0, 0] = np.nan
for weights in ['uniform', 'distance']:
for n_neighbors in range(1, 6):
imputer = KNNImputer(n_neighbors=n_neighbors, weights=weights)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (n_rows, n_cols))
def test_knn_imputation_zero():
# Test imputation when missing_values == 0
missing_values = 0
n_neighbors = 2
imputer = KNNImputer(missing_values=missing_values,
n_neighbors=n_neighbors,
weights="uniform")
# Test with missing_values=0 when NaN present
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
msg = "Input contains NaN, infinity or a value too large for %r." % X.dtype
assert_raise_message(ValueError, msg, imputer.fit, X)
# Test with % zeros in column > col_max_missing
X = np.array([
[1, 0, 0, 0, 5],
[2, 1, 0, 2, 3],
[3, 2, 0, 0, 0],
[4, 6, 0, 5, 13],
])
msg = "Some column(s) have more than {}% missing values".format(
imputer.col_max_missing * 100)
assert_raise_message(ValueError, msg, imputer.fit, X)
def test_knn_imputation_zero_p2():
# Test with an imputable matrix and also compare with missing_values="NaN"
X_zero = np.array([
[1, 0, 1, 1, 1.],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 0],
[6, 6, 0, 6, 6],
])
X_nan = np.array([
[1, np.nan, 1, 1, 1.],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, np.nan],
[6, 6, np.nan, 6, 6],
])
statistics_mean = np.nanmean(X_nan, axis=0)
X_imputed = np.array([
[1, 2.5, 1, 1, 1.],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 1.5],
[6, 6, 2.5, 6, 6],
])
imputer_zero = KNNImputer(missing_values=0, n_neighbors=2,
weights="uniform")
imputer_nan = KNNImputer(missing_values="NaN",
n_neighbors=2,
weights="uniform")
assert_array_equal(imputer_zero.fit_transform(X_zero), X_imputed)
assert_array_equal(imputer_zero.statistics_, statistics_mean)
assert_array_equal(imputer_zero.fit_transform(X_zero),
imputer_nan.fit_transform(X_nan))
def test_knn_imputation_default():
# Test imputation with default parameter values
# Test with an imputable matrix
X = np.array([
[1, 0, 0, 1],
[2, 1, 2, np.nan],
[3, 2, 3, np.nan],
[np.nan, 4, 5, 5],
[6, np.nan, 6, 7],
[8, 8, 8, 8],
[16, 15, 18, 19],
])
statistics_mean = np.nanmean(X, axis=0)
X_imputed = np.array([
[1, 0, 0, 1],
[2, 1, 2, 8],
[3, 2, 3, 8],
[4, 4, 5, 5],
[6, 3, 6, 7],
[8, 8, 8, 8],
[16, 15, 18, 19],
])
imputer = KNNImputer()
assert_array_equal(imputer.fit_transform(X), X_imputed)
assert_array_equal(imputer.statistics_, statistics_mean)
# Test with % missing in row > row_max_missing
X = np.array([
[1, 0, 0, 1],
[2, 1, 2, np.nan],
[3, 2, 3, np.nan],
[np.nan, 4, 5, 5],
[6, np.nan, 6, 7],
[8, 8, 8, 8],
[19, 19, 19, 19],
[np.nan, np.nan, np.nan, 19],
])
statistics_mean = np.nanmean(X, axis=0)
r7c0, r7c1, r7c2, _ = statistics_mean
X_imputed = np.array([
[1, 0, 0, 1],
[2, 1, 2, 8],
[3, 2, 3, 8],
[4, 4, 5, 5],
[6, 3, 6, 7],
[8, 8, 8, 8],
[19, 19, 19, 19],
[r7c0, r7c1, r7c2, 19],
])
imputer = KNNImputer()
assert_array_almost_equal(imputer.fit_transform(X), X_imputed, decimal=6)
assert_array_almost_equal(imputer.statistics_, statistics_mean, decimal=6)
# Test with all neighboring donors also having missing feature values
X = np.array([
[1, 0, 0, np.nan],
[2, 1, 2, np.nan],
[3, 2, 3, np.nan],
[4, 4, 5, np.nan],
[6, 7, 6, np.nan],
[8, 8, 8, np.nan],
[20, 20, 20, 20],
[22, 22, 22, 22]
])
statistics_mean = np.nanmean(X, axis=0)
X_imputed = np.array([
[1, 0, 0, 21],
[2, 1, 2, 21],
[3, 2, 3, 21],
[4, 4, 5, 21],
[6, 7, 6, 21],
[8, 8, 8, 21],
[20, 20, 20, 20],
[22, 22, 22, 22]
])
imputer = KNNImputer()
assert_array_equal(imputer.fit_transform(X), X_imputed)
assert_array_equal(imputer.statistics_, statistics_mean)
# Test when data in fit() and transform() are different
X = np.array([
[0, 0],
[np.nan, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 16]
])
statistics_mean = np.nanmean(X, axis=0)
Y = np.array([
[1, 0],
[3, 2],
[4, np.nan]
])
Y_imputed = np.array([
[1, 0],
[3, 2],
[4, 4.8]
])
imputer = KNNImputer()
assert_array_equal(imputer.fit(X).transform(Y), Y_imputed)
assert_array_equal(imputer.statistics_, statistics_mean)
def test_default_with_invalid_input():
# Test imputation with default values and invalid input
# Test with % missing in a column > col_max_missing
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
[np.nan, 7, 0, 7, 8],
[np.nan, 8, 0, 8, 9],
])
imputer = KNNImputer()
msg = "Some column(s) have more than {}% missing values".format(
imputer.col_max_missing * 100)
assert_raise_message(ValueError, msg, imputer.fit, X)
# Test with insufficient number of neighbors
X = np.array([
[1, 1, 1, 2, np.nan],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[6, 6, 2, 5, 13],
])
msg = "There are only %d samples, but n_neighbors=%d." % \
(X.shape[0], imputer.n_neighbors)
assert_raise_message(ValueError, msg, imputer.fit, X)
# Test with inf present
X = np.array([
[np.inf, 1, 1, 2, np.nan],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[np.nan, 6, 0, 5, 13],
[np.nan, 7, 0, 7, 8],
[6, 6, 2, 5, 7],
])
msg = "+/- inf values are not allowed."
assert_raise_message(ValueError, msg, KNNImputer().fit, X)
# Test with inf present in matrix passed in transform()
X = np.array([
[np.inf, 1, 1, 2, np.nan],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[np.nan, 6, 0, 5, 13],
[np.nan, 7, 0, 7, 8],
[6, 6, 2, 5, 7],
])
X_fit = np.array([
[0, 1, 1, 2, np.nan],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[np.nan, 6, 0, 5, 13],
[np.nan, 7, 0, 7, 8],
[6, 6, 2, 5, 7],
])
msg = "+/- inf values are not allowed in data to be transformed."
assert_raise_message(ValueError, msg, KNNImputer().fit(X_fit).transform, X)
def test_knn_n_neighbors():
X = np.array([
[0, 0],
[np.nan, 2],
[4, 3],
[5, np.nan],
[7, 7],
[np.nan, 8],
[14, 13]
])
statistics_mean = np.nanmean(X, axis=0)
# Test with 1 neighbor
X_imputed_1NN = np.array([
[0, 0],
[4, 2],
[4, 3],
[5, 3],
[7, 7],
[7, 8],
[14, 13]
])
n_neighbors = 1
imputer = KNNImputer(n_neighbors=n_neighbors)
assert_array_equal(imputer.fit_transform(X), X_imputed_1NN)
assert_array_equal(imputer.statistics_, statistics_mean)
# Test with 6 neighbors
X = np.array([
[0, 0],
[np.nan, 2],
[4, 3],
[5, np.nan],
[7, 7],
[np.nan, 8],
[14, 13]
])
X_imputed_6NN = np.array([
[0, 0],
[6, 2],
[4, 3],
[5, 5.5],
[7, 7],
[6, 8],
[14, 13]
])
n_neighbors = 6
imputer = KNNImputer(n_neighbors=6)
imputer_plus1 = KNNImputer(n_neighbors=n_neighbors + 1)
assert_array_equal(imputer.fit_transform(X), X_imputed_6NN)
assert_array_equal(imputer.statistics_, statistics_mean)
assert_array_equal(imputer.fit_transform(X), imputer_plus1.fit(
X).transform(X))
def test_weight_uniform():
X = np.array([
[0, 0],
[np.nan, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
# Test with "uniform" weight (or unweighted)
X_imputed_uniform = np.array([
[0, 0],
[5, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
imputer = KNNImputer(weights="uniform")
assert_array_equal(imputer.fit_transform(X), X_imputed_uniform)
# Test with "callable" weight
def no_weight(dist=None):
return None
imputer = KNNImputer(weights=no_weight)
assert_array_equal(imputer.fit_transform(X), X_imputed_uniform)
def test_weight_distance():
X = np.array([
[0, 0],
[np.nan, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
# Test with "distance" weight
# Get distance of "n_neighbors" neighbors of row 1
dist_matrix = pairwise_distances(X, metric="masked_euclidean")
index = np.argsort(dist_matrix)[1, 1:6]
dist = dist_matrix[1, index]
weights = 1 / dist
values = X[index, 0]
imputed = np.dot(values, weights) / np.sum(weights)
# Manual calculation
X_imputed_distance1 = np.array([
[0, 0],
[3.850394, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
# NearestNeighbor calculation
X_imputed_distance2 = np.array([
[0, 0],
[imputed, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
imputer = KNNImputer(weights="distance")
assert_array_almost_equal(imputer.fit_transform(X), X_imputed_distance1,
decimal=6)
assert_array_almost_equal(imputer.fit_transform(X), X_imputed_distance2,
decimal=6)
# Test with weights = "distance" and n_neighbors=2
X = np.array([
[np.nan, 0, 0],
[2, 1, 2],
[3, 2, 3],
[4, 5, 5],
])
statistics_mean = np.nanmean(X, axis=0)
X_imputed = np.array([
[2.3828, 0, 0],
[2, 1, 2],
[3, 2, 3],
[4, 5, 5],
])
imputer = KNNImputer(n_neighbors=2, weights="distance")
assert_array_almost_equal(imputer.fit_transform(X), X_imputed,
decimal=4)
assert_array_equal(imputer.statistics_, statistics_mean)
# Test with varying missingness patterns
X = np.array([
[1, 0, 0, 1],
[0, np.nan, 1, np.nan],
[1, 1, 1, np.nan],
[0, 1, 0, 0],
[0, 0, 0, 0],
[1, 0, 1, 1],
[10, 10, 10, 10],
])
statistics_mean = np.nanmean(X, axis=0)
# Get weights of donor neighbors
dist = masked_euclidean_distances(X)
r1c1_nbor_dists = dist[1, [0, 2, 3, 4, 5]]
r1c3_nbor_dists = dist[1, [0, 3, 4, 5, 6]]
r1c1_nbor_wt = (1/r1c1_nbor_dists)
r1c3_nbor_wt = (1 / r1c3_nbor_dists)
r2c3_nbor_dists = dist[2, [0, 3, 4, 5, 6]]
r2c3_nbor_wt = 1/r2c3_nbor_dists
# Collect donor values
col1_donor_values = np.ma.masked_invalid(X[[0, 2, 3, 4, 5], 1]).copy()
col3_donor_values = np.ma.masked_invalid(X[[0, 3, 4, 5, 6], 3]).copy()
# Final imputed values
r1c1_imp = np.ma.average(col1_donor_values, weights=r1c1_nbor_wt)
r1c3_imp = np.ma.average(col3_donor_values, weights=r1c3_nbor_wt)
r2c3_imp = np.ma.average(col3_donor_values, weights=r2c3_nbor_wt)
print(r1c1_imp, r1c3_imp, r2c3_imp)
X_imputed = np.array([
[1, 0, 0, 1],
[0, r1c1_imp, 1, r1c3_imp],
[1, 1, 1, r2c3_imp],
[0, 1, 0, 0],
[0, 0, 0, 0],
[1, 0, 1, 1],
[10, 10, 10, 10],
])
imputer = KNNImputer(weights="distance")
assert_array_almost_equal(imputer.fit_transform(X), X_imputed, decimal=6)
assert_array_equal(imputer.statistics_, statistics_mean)
def test_metric_type():
X = np.array([
[0, 0],
[np.nan, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
# Test with a metric type without NaN support
imputer = KNNImputer(metric="euclidean")
bad_metric_msg = "The selected metric does not support NaN values."
assert_raise_message(ValueError, bad_metric_msg, imputer.fit, X)
def test_callable_metric():
# Define callable metric that returns the l1 norm:
def custom_callable(x, y, missing_values="NaN", squared=False):
x = np.ma.array(x, mask=np.isnan(x))
y = np.ma.array(y, mask=np.isnan(y))
dist = np.nansum(np.abs(x-y))
return dist
X = np.array([
[4, 3, 3, np.nan],
[6, 9, 6, 9],
[4, 8, 6, 9],
[np.nan, 9, 11, 10.]
])
X_imputed = np.array([
[4, 3, 3, 9],
[6, 9, 6, 9],
[4, 8, 6, 9],
[5, 9, 11, 10.]
])
imputer = KNNImputer(n_neighbors=2, metric=custom_callable)
assert_array_equal(imputer.fit_transform(X), X_imputed)
def test_complete_features():
# Test with use_complete=True
X = np.array([
[0, np.nan, 0, np.nan],
[1, 1, 1, np.nan],
[2, 2, np.nan, 2],
[3, 3, 3, 3],
[4, 4, 4, 4],
[5, 5, 5, 5],
[6, 6, 6, 6],
[np.nan, 7, 7, 7]
])
r0c1 = np.mean(X[1:6, 1])
r0c3 = np.mean(X[2:-1, -1])
r1c3 = np.mean(X[2:-1, -1])
r2c2 = np.nanmean(X[:6, 2])
r7c0 = np.mean(X[2:-1, 0])
X_imputed = np.array([
[0, r0c1, 0, r0c3],
[1, 1, 1, r1c3],
[2, 2, r2c2, 2],
[3, 3, 3, 3],
[4, 4, 4, 4],
[5, 5, 5, 5],
[6, 6, 6, 6],
[r7c0, 7, 7, 7]
])
imputer_comp = KNNImputer()
assert_array_almost_equal(imputer_comp.fit_transform(X), X_imputed)
def test_complete_features_weighted():
# Test with use_complete=True
X = np.array([
[0, 0, 0, np.nan],
[1, 1, 1, np.nan],
[2, 2, np.nan, 2],
[3, 3, 3, 3],
[4, 4, 4, 4],
[5, 5, 5, 5],
[6, 6, 6, 6],
[np.nan, 7, 7, 7]
])
dist = pairwise_distances(X,
metric="masked_euclidean",
squared=False)
# Calculate weights
r0c3_w = 1.0 / dist[0, 2:-1]
r1c3_w = 1.0 / dist[1, 2:-1]
r2c2_w = 1.0 / dist[2, (0, 1, 3, 4, 5)]
r7c0_w = 1.0 / dist[7, 2:7]
# Calculate weighted averages
r0c3 = np.average(X[2:-1, -1], weights=r0c3_w)
r1c3 = np.average(X[2:-1, -1], weights=r1c3_w)
r2c2 = np.average(X[(0, 1, 3, 4, 5), 2], weights=r2c2_w)
r7c0 = np.average(X[2:7, 0], weights=r7c0_w)
X_imputed = np.array([
[0, 0, 0, r0c3],
[1, 1, 1, r1c3],
[2, 2, r2c2, 2],
[3, 3, 3, 3],
[4, 4, 4, 4],
[5, 5, 5, 5],
[6, 6, 6, 6],
[r7c0, 7, 7, 7]
])
imputer_comp_wt = KNNImputer(weights="distance")
assert_array_almost_equal(imputer_comp_wt.fit_transform(X), X_imputed)
| 17,527
| 27.924092
| 79
|
py
|
missingpy
|
missingpy-master/missingpy/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
missingpy
|
missingpy-master/missingpy/tests/test_missforest.py
|
import numpy as np
from scipy.stats import mode
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from missingpy import MissForest
def gen_array(n_rows=20, n_cols=5, missingness=0.2, min_val=0, max_val=10,
missing_values=np.nan, rand_seed=1337):
"""Generate an array with NaNs"""
rand_gen = np.random.RandomState(seed=rand_seed)
X = rand_gen.randint(
min_val, max_val, n_rows * n_cols).reshape(n_rows, n_cols).astype(
        float)
# Introduce NaNs if missingness > 0
if missingness > 0:
# If missingness >= 1 then use it as approximate (see below) count
if missingness >= 1:
n_missing = missingness
else:
# If missingness is between (0, 1] then use it as approximate %
# of total cells that are NaNs
n_missing = int(np.ceil(missingness * n_rows * n_cols))
# Generate row, col index pairs and introduce NaNs
# NOTE: Below does not account for repeated index pairs so NaN
# count/percentage might be less than specified in function call
nan_row_idx = rand_gen.randint(0, n_rows, n_missing)
nan_col_idx = rand_gen.randint(0, n_cols, n_missing)
X[nan_row_idx, nan_col_idx] = missing_values
return X
def test_missforest_imputation_shape():
# Verify the shapes of the imputed matrix
n_rows = 10
n_cols = 2
X = gen_array(n_rows, n_cols)
imputer = MissForest()
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (n_rows, n_cols))
def test_missforest_zero():
# Test imputation when missing_values == 0
missing_values = 0
imputer = MissForest(missing_values=missing_values,
random_state=0)
# Test with missing_values=0 when NaN present
X = gen_array(min_val=0)
msg = "Input contains NaN, infinity or a value too large for %r." % X.dtype
assert_raise_message(ValueError, msg, imputer.fit, X)
# Test with all zeroes in a column
X = np.array([
[1, 0, 0, 0, 5],
[2, 1, 0, 2, 3],
[3, 2, 0, 0, 0],
[4, 6, 0, 5, 13],
])
msg = "One or more columns have all rows missing."
assert_raise_message(ValueError, msg, imputer.fit, X)
def test_missforest_zero_part2():
# Test with an imputable matrix and compare with missing_values="NaN"
X_zero = gen_array(min_val=1, missing_values=0)
X_nan = gen_array(min_val=1, missing_values=np.nan)
statistics_mean = np.nanmean(X_nan, axis=0)
imputer_zero = MissForest(missing_values=0, random_state=1337)
imputer_nan = MissForest(missing_values=np.nan, random_state=1337)
assert_array_equal(imputer_zero.fit_transform(X_zero),
imputer_nan.fit_transform(X_nan))
assert_array_equal(imputer_zero.statistics_.get("col_means"),
statistics_mean)
def test_missforest_numerical_single():
# Test imputation with default parameter values
# Test with a single missing value
df = np.array([
[1, 0, 0, 1],
[2, 1, 2, 2],
[3, 2, 3, 2],
[np.nan, 4, 5, 5],
[6, 7, 6, 7],
[8, 8, 8, 8],
[16, 15, 18, 19],
])
statistics_mean = np.nanmean(df, axis=0)
y = df[:, 0]
X = df[:, 1:]
good_rows = np.where(~np.isnan(y))[0]
bad_rows = np.where(np.isnan(y))[0]
rf = RandomForestRegressor(n_estimators=10, random_state=1337)
rf.fit(X=X[good_rows], y=y[good_rows])
pred_val = rf.predict(X[bad_rows])
df_imputed = np.array([
[1, 0, 0, 1],
[2, 1, 2, 2],
[3, 2, 3, 2],
[pred_val, 4, 5, 5],
[6, 7, 6, 7],
[8, 8, 8, 8],
[16, 15, 18, 19],
])
imputer = MissForest(n_estimators=10, random_state=1337)
assert_array_equal(imputer.fit_transform(df), df_imputed)
assert_array_equal(imputer.statistics_.get('col_means'), statistics_mean)
def test_missforest_numerical_multiple():
# Test with two missing values for multiple iterations
df = np.array([
[1, 0, np.nan, 1],
[2, 1, 2, 2],
[3, 2, 3, 2],
[np.nan, 4, 5, 5],
[6, 7, 6, 7],
[8, 8, 8, 8],
[16, 15, 18, 19],
])
statistics_mean = np.nanmean(df, axis=0)
n_rows, n_cols = df.shape
# Fit missforest and transform
imputer = MissForest(random_state=1337)
df_imp1 = imputer.fit_transform(df)
# Get iterations used by missforest above
max_iter = imputer.iter_count_
# Get NaN mask
nan_mask = np.isnan(df)
nan_rows, nan_cols = np.where(nan_mask)
# Make initial guess for missing values
df_imp2 = df.copy()
df_imp2[nan_rows, nan_cols] = np.take(statistics_mean, nan_cols)
# Loop for max_iter count over the columns with NaNs
for _ in range(max_iter):
for c in nan_cols:
# Identify all other columns (i.e. predictors)
not_c = np.setdiff1d(np.arange(n_cols), c)
# Identify rows with NaN and those without in 'c'
y = df_imp2[:, c]
X = df_imp2[:, not_c]
good_rows = np.where(~nan_mask[:, c])[0]
bad_rows = np.where(nan_mask[:, c])[0]
# Fit model and predict
rf = RandomForestRegressor(n_estimators=100, random_state=1337)
rf.fit(X=X[good_rows], y=y[good_rows])
pred_val = rf.predict(X[bad_rows])
# Fill in values
df_imp2[bad_rows, c] = pred_val
assert_array_equal(df_imp1, df_imp2)
assert_array_equal(imputer.statistics_.get('col_means'), statistics_mean)
def test_missforest_categorical_single():
# Test imputation with default parameter values
# Test with a single missing value
df = np.array([
[0, 0, 0, 1],
[0, 1, 2, 2],
[0, 2, 3, 2],
[np.nan, 4, 5, 5],
[1, 7, 6, 7],
[1, 8, 8, 8],
[1, 15, 18, 19],
])
y = df[:, 0]
X = df[:, 1:]
good_rows = np.where(~np.isnan(y))[0]
bad_rows = np.where(np.isnan(y))[0]
rf = RandomForestClassifier(n_estimators=10, random_state=1337)
rf.fit(X=X[good_rows], y=y[good_rows])
pred_val = rf.predict(X[bad_rows])
df_imputed = np.array([
[0, 0, 0, 1],
[0, 1, 2, 2],
[0, 2, 3, 2],
[pred_val, 4, 5, 5],
[1, 7, 6, 7],
[1, 8, 8, 8],
[1, 15, 18, 19],
])
imputer = MissForest(n_estimators=10, random_state=1337)
assert_array_equal(imputer.fit_transform(df, cat_vars=0), df_imputed)
assert_array_equal(imputer.fit_transform(df, cat_vars=[0]), df_imputed)
def test_missforest_categorical_multiple():
# Test with two missing values for multiple iterations
df = np.array([
[0, 0, np.nan, 1],
[0, 1, 1, 2],
[0, 2, 1, 2],
[np.nan, 4, 1, 5],
[1, 7, 0, 7],
[1, 8, 0, 8],
[1, 15, 0, 19],
[1, 18, 0, 17],
])
cat_vars = [0, 2]
statistics_mode = mode(df, axis=0, nan_policy='omit').mode[0]
n_rows, n_cols = df.shape
# Fit missforest and transform
imputer = MissForest(random_state=1337)
df_imp1 = imputer.fit_transform(df, cat_vars=cat_vars)
# Get iterations used by missforest above
max_iter = imputer.iter_count_
# Get NaN mask
nan_mask = np.isnan(df)
nan_rows, nan_cols = np.where(nan_mask)
# Make initial guess for missing values
df_imp2 = df.copy()
df_imp2[nan_rows, nan_cols] = np.take(statistics_mode, nan_cols)
# Loop for max_iter count over the columns with NaNs
for _ in range(max_iter):
for c in nan_cols:
# Identify all other columns (i.e. predictors)
not_c = np.setdiff1d(np.arange(n_cols), c)
# Identify rows with NaN and those without in 'c'
y = df_imp2[:, c]
X = df_imp2[:, not_c]
good_rows = np.where(~nan_mask[:, c])[0]
bad_rows = np.where(nan_mask[:, c])[0]
# Fit model and predict
rf = RandomForestClassifier(n_estimators=100, random_state=1337)
rf.fit(X=X[good_rows], y=y[good_rows])
pred_val = rf.predict(X[bad_rows])
# Fill in values
df_imp2[bad_rows, c] = pred_val
assert_array_equal(df_imp1, df_imp2)
assert_array_equal(imputer.statistics_.get('col_modes')[0],
statistics_mode[cat_vars])
def test_missforest_mixed_multiple():
# Test with mixed data type
df = np.array([
[np.nan, 0, 0, 1],
[0, 1, 2, 2],
[0, 2, 3, 2],
[1, 4, 5, 5],
[1, 7, 6, 7],
[1, 8, 8, 8],
[1, 15, 18, np.nan],
])
n_rows, n_cols = df.shape
cat_vars = [0]
num_vars = np.setdiff1d(range(n_cols), cat_vars)
statistics_mode = mode(df, axis=0, nan_policy='omit').mode[0]
statistics_mean = np.nanmean(df, axis=0)
# Fit missforest and transform
imputer = MissForest(random_state=1337)
df_imp1 = imputer.fit_transform(df, cat_vars=cat_vars)
# Get iterations used by missforest above
max_iter = imputer.iter_count_
# Get NaN mask
nan_mask = np.isnan(df)
nan_rows, nan_cols = np.where(nan_mask)
# Make initial guess for missing values
df_imp2 = df.copy()
df_imp2[0, 0] = statistics_mode[0]
df_imp2[6, 3] = statistics_mean[3]
# Loop for max_iter count over the columns with NaNs
for _ in range(max_iter):
for c in nan_cols:
# Identify all other columns (i.e. predictors)
not_c = np.setdiff1d(np.arange(n_cols), c)
# Identify rows with NaN and those without in 'c'
y = df_imp2[:, c]
X = df_imp2[:, not_c]
good_rows = np.where(~nan_mask[:, c])[0]
bad_rows = np.where(nan_mask[:, c])[0]
# Fit model and predict
if c in cat_vars:
rf = RandomForestClassifier(n_estimators=100,
random_state=1337)
else:
rf = RandomForestRegressor(n_estimators=100,
random_state=1337)
rf.fit(X=X[good_rows], y=y[good_rows])
pred_val = rf.predict(X[bad_rows])
# Fill in values
df_imp2[bad_rows, c] = pred_val
assert_array_equal(df_imp1, df_imp2)
assert_array_equal(imputer.statistics_.get('col_means'),
statistics_mean[num_vars])
assert_array_equal(imputer.statistics_.get('col_modes')[0],
statistics_mode[cat_vars])
def test_statistics_fit_transform():
# Test statistics_ when data in fit() and transform() are different
X = np.array([
[1, 0, 0, 1],
[2, 1, 2, 2],
[3, 2, 3, 2],
[np.nan, 4, 5, 5],
[6, 7, 6, 7],
[8, 8, 8, 8],
[16, 15, 18, 19],
])
statistics_mean = np.nanmean(X, axis=0)
Y = np.array([
[0, 0, 0, 0],
[2, 2, 2, 1],
[3, 2, 3, 2],
[np.nan, 4, 5, 5],
[6, 7, 6, 7],
[9, 9, 8, 8],
[16, 15, 18, 19],
])
imputer = MissForest()
imputer.fit(X).transform(Y)
assert_array_equal(imputer.statistics_.get('col_means'), statistics_mean)
def test_default_with_invalid_input():
# Test imputation with default values and invalid input
# Test with all rows missing in a column
X = np.array([
[np.nan, 0, 0, 1],
[np.nan, 1, 2, np.nan],
[np.nan, 2, 3, np.nan],
[np.nan, 4, 5, 5],
])
imputer = MissForest(random_state=1337)
msg = "One or more columns have all rows missing."
assert_raise_message(ValueError, msg, imputer.fit, X)
# Test with inf present
X = np.array([
[np.inf, 1, 1, 2, np.nan],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[np.nan, 6, 0, 5, 13],
[np.nan, 7, 0, 7, 8],
[6, 6, 2, 5, 7],
])
msg = "+/- inf values are not supported."
assert_raise_message(ValueError, msg, MissForest().fit, X)
# Test with inf present in matrix passed in transform()
X = np.array([
[np.inf, 1, 1, 2, np.nan],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[np.nan, 6, 0, 5, 13],
[np.nan, 7, 0, 7, 8],
[6, 6, 2, 5, 7],
])
X_fit = np.array([
[0, 1, 1, 2, np.nan],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[np.nan, 6, 0, 5, 13],
[np.nan, 7, 0, 7, 8],
[6, 6, 2, 5, 7],
])
msg = "+/- inf values are not supported."
assert_raise_message(ValueError, msg, MissForest().fit(X_fit).transform, X)
| 13,725
| 32.478049
| 79
|
py
|
basho_bench
|
basho_bench-master/priv/results-browser.py
|
#!/usr/bin/env python
import SimpleHTTPServer
import SocketServer
import logging
import cgi
import base64
import argparse
import os
class ServerHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
logging.warning("======= GET STARTED =======")
logging.warning(self.headers)
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
logging.warning("======= POST STARTED =======")
length = self.headers['content-length']
data = self.rfile.read(int(length))
        with open(os.path.join(".", "summary.png"), 'wb') as fh:
fh.write(base64.b64decode(data.decode()))
self.send_response(200)
def startServer(host, port):
httpd = SocketServer.TCPServer((host, port), ServerHandler)
print 'Serving at: http://{host}:{port}'.format(host=host, port=port)
httpd.serve_forever()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Results generator')
parser.add_argument('--port', '-p', type=int, help='Port for results generator to bind to', default=8080, required=False)
parser.add_argument('--host', type=str, help='Host for results generator to bind to', default='localhost', required=False)
args = parser.parse_args()
startServer(args.host, args.port)
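
# --- Editorial sketch (not part of the original file) -----------------------
# A hypothetical client for the POST handler above, written against the same
# Python 2 stdlib as the server: it base64-encodes a local PNG and uploads
# it, and the server writes the decoded bytes to ./summary.png. Assumes the
# server runs on its defaults (localhost:8080) and 'my_plot.png' exists.
#
#     import base64, urllib2
#     with open('my_plot.png', 'rb') as fh:
#         payload = base64.b64encode(fh.read())
#     urllib2.urlopen('http://localhost:8080/', data=payload).read()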
| 1,280
| 32.710526
| 124
|
py
|
deep_direct_stat
|
deep_direct_stat-master/setup.py
|
from setuptools import setup, find_packages
setup(
name="datasets",
version=0.1,
description="Scripts to load preprocessed datasets (PASCAL3D+, CAVIAR, TownCentre, IDIAP)",
author="Sergey Prokudin",
author_email="sergey.prokudin@gmail.com",
packages=["datasets"],
)
setup(
name="utils",
version=0.1,
description="Misc utils for the project (converters, von Mises losses, etc.)",
author="Sergey Prokudin",
author_email="sergey.prokudin@gmail.com",
packages=["utils"],
)
setup(
name="models",
version=0.1,
description="Keras models for object orientation prediction",
author="Sergey Prokudin",
author_email="sergey.prokudin@gmail.com",
packages=["models"],
)
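# Note: invoking "python setup.py install" runs all three setup() calls in
# sequence, installing the datasets, utils and models packages one after another.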
| 731
| 25.142857
| 95
|
py
|
deep_direct_stat
|
deep_direct_stat-master/view_estimation/run_evaluation.py
|
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.dirname(BASE_DIR))
from global_variables import *
from evaluation_helper import *
cls_names = g_shape_names
# img_name_file_list = [os.path.join(g_real_images_voc12val_det_bbox_folder, name+'.txt') for name in cls_names]
# det_bbox_mat_file_list = [os.path.join(g_detection_results_folder, x.rstrip()) for x in open(g_rcnn_detection_bbox_mat_filelist)]
# result_folder = os.path.join(BASE_DIR, 'avp_test_results')
# test_avp_nv(cls_names, img_name_file_list, det_bbox_mat_file_list, result_folder)
img_name_file_list = [os.path.join(g_real_images_voc12val_easy_gt_bbox_folder, name+'.txt') for name in cls_names]
view_label_folder = g_real_images_voc12val_easy_gt_bbox_folder
result_folder = os.path.join(BASE_DIR, 'vp_test_results')
test_vp_acc(cls_names, img_name_file_list, result_folder, view_label_folder)
| 940
| 43.809524
| 131
|
py
|
deep_direct_stat
|
deep_direct_stat-master/view_estimation/prepare_training_data.py
|
#!/usr/bin/python
'''
Prepare Training Data
Running this program will populate the following folders:
g_syn_images_lmdb_folder
with img-label files and g_syn_images_lmdb_pathname_prefix+[_label,_image] LMDBs
g_real_images_voc12train_all_gt_bbox_folder
with cropped images and img-label files and g_real_images_voc12train_all_gt_bbox_lmdb_prefix+[_label,_image] LMDBs
'''
import os
import sys
from data_prep_helper import *
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.dirname(BASE_DIR))
from global_variables import *
if __name__ == '__main__':
# ----------------------------------
# ---- SYNTHESIZED IMAGES ----------
# ----------------------------------
if not os.path.exists(g_syn_images_lmdb_folder):
os.mkdir(g_syn_images_lmdb_folder)
# get image filenames and labels, separated to train/test sets
for idx, synset in enumerate(g_shape_synsets):
name = g_shape_names[idx]
get_one_category_image_label_file(synset, os.path.join(g_syn_images_lmdb_folder, name+'_train.txt'), os.path.join(g_syn_images_lmdb_folder, name+'_test.txt'))
for keyword in ['train', 'test']:
# combine filenames&labels from all 12 classes (shuffled)
input_file_list = [os.path.join(g_syn_images_lmdb_folder, '%s_%s.txt' % (name, keyword)) for name in g_shape_names]
output_file = os.path.join(g_syn_images_lmdb_folder, 'all_%s.txt' % (keyword))
combine_files(input_file_list, output_file)
# generate LMDB
generate_image_view_lmdb(output_file, '%s_%s' % (g_syn_images_lmdb_pathname_prefix, keyword))
# ----------------------------------
# ---- VOC12 TRAIN SET -------------
# ----------------------------------
# prepare voc12train gt bbox images and its LMDB
matlab_cmd = "addpath('%s'); prepare_voc12_imgs('train','%s',struct('flip',%d,'aug_n',%d,'jitter_IoU',%d,'difficult',1,'truncated',1,'occluded',1));" % (BASE_DIR, g_real_images_voc12train_all_gt_bbox_folder, g_real_images_voc12train_flip, g_real_images_voc12train_aug_n, g_real_images_voc12train_jitter_IoU)
    print(matlab_cmd)
os.system('%s -nodisplay -r "try %s ; catch; end; quit;"' % (g_matlab_executable_path, matlab_cmd))
if not os.path.exists(g_real_images_lmdb_folder):
os.mkdir(g_real_images_lmdb_folder)
# generate lmdb
input_file_list = [os.path.join(g_real_images_voc12train_all_gt_bbox_folder,name+'.txt') for name in g_shape_names]
output_file = os.path.join(g_real_images_voc12train_all_gt_bbox_folder, 'all.txt')
combine_files(input_file_list, output_file)
generate_image_view_lmdb(output_file, g_real_images_voc12train_all_gt_bbox_lmdb_prefix)
| 2,744
| 42.571429
| 311
|
py
|
deep_direct_stat
|
deep_direct_stat-master/view_estimation/prepare_testing_data.py
|
#!/usr/bin/python
'''
Prepare Testing Data
prepare filelist and LMDB for real images
running this program will populate the following folders:
g_real_images_voc12val_det_bbox_folder
g_real_images_voc12val_easy_gt_bbox_folder
'''
import os
import sys
from data_prep_helper import *
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.dirname(BASE_DIR))
from global_variables import *
if __name__ == '__main__':
# prepare voc12val det bbox (from rcnn) images
matlab_cmd = "addpath('%s'); prepare_voc12val_det_imgs('%s','%s', 0);" % (BASE_DIR, g_real_images_voc12val_det_bbox_folder, g_rcnn_detection_bbox_mat_filelist)
    print(matlab_cmd)
os.system('%s -nodisplay -r "try %s ; catch; end; quit;"' % (g_matlab_executable_path, matlab_cmd))
# prepare voc12val gt bbox easy (no trunction/occlusion) images
matlab_cmd = "addpath('%s'); prepare_voc12_imgs('val','%s',struct('flip',0,'aug_n',1,'jitter_IoU',1,'difficult',0,'truncated',0,'occluded',0));" % (BASE_DIR, g_real_images_voc12val_easy_gt_bbox_folder)
    print(matlab_cmd)
os.system('%s -nodisplay -r "try %s ; catch; end; quit;"' % (g_matlab_executable_path, matlab_cmd))
| 1,226
| 33.083333
| 205
|
py
|
deep_direct_stat
|
deep_direct_stat-master/models/single_density.py
|
import tensorflow as tf
import keras
import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Input, Dense, Dropout, Flatten, Activation, Lambda
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.layers.merge import concatenate
from utils.angles import deg2bit, bit2deg, rad2bit
from utils.losses import mad_loss_tf, cosine_loss_tf, von_mises_loss_tf, von_mises_log_likelihood_tf
from utils.losses import von_mises_log_likelihood_np, von_mises_neg_log_likelihood_keras
from utils.losses import maad_from_deg
from scipy.stats import sem
def vgg_model(n_outputs=1, final_layer=False, l2_normalize_final=False,
image_height=50, image_width=50, n_channels=3,
conv_dropout_val=0.2, fc_dropout_val=0.5, fc_layer_size=512):
model = Sequential(name='VGG')
model.add(Conv2D(24, kernel_size=(3, 3),
activation=None,
input_shape=[image_height, image_width, n_channels]))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(24, (3, 3), activation=None))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(48, (3, 3), activation=None))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(48, (3, 3), activation=None))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), activation=None))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), activation=None))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(conv_dropout_val))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(fc_dropout_val))
if final_layer:
model.add(Dense(n_outputs, activation=None))
if l2_normalize_final:
model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))
return model
class DegreeVGG:
def __init__(self,
image_height=50,
image_width=50,
n_channels=3,
n_outputs=1,
predict_kappa=False,
fixed_kappa_value=1.0):
self.image_height = image_height
self.image_width = image_width
self.n_channels = n_channels
self.X = Input(shape=[image_height, image_width, self.n_channels])
self.vgg_x = vgg_model(n_outputs=1,
final_layer=True,
image_height=self.image_height,
image_width=self.image_width,
n_channels=self.n_channels)(self.X)
self.model = Model(self.X, self.vgg_x)
def evaluate(self, x, ytrue_deg, data_part):
ypreds_deg = np.squeeze(self.model.predict(x))
loss = maad_from_deg(ypreds_deg, ytrue_deg)
results = dict()
results['maad_loss'] = float(np.mean(loss))
results['maad_loss_sem'] = float(sem(loss, axis=None))
print("MAAD error (%s) : %f ± %fSEM" % (data_part,
results['maad_loss'],
results['maad_loss_sem']))
return results
class BiternionVGG:
def __init__(self,
image_height=50,
image_width=50,
n_channels=3,
loss_type='cosine',
predict_kappa=False,
fixed_kappa_value=1.0,
**kwargs):
self.image_height = image_height
self.image_width = image_width
self.n_channels = n_channels
self.predict_kappa = predict_kappa
self.fixed_kappa_value = fixed_kappa_value
self.hyp_params = kwargs
self.n_u = kwargs.get('n_hidden_units', 8)
self.learning_rate = kwargs.get('learning_rate', 1.0e-3)
self.beta1 = kwargs.get('beta1', 0.9)
self.beta2 = kwargs.get('beta2', 0.999)
self.epsilon = kwargs.get('epsilon', 1.0e-7)
self.conv_dropout = kwargs.get('conv_dropout', 0.2)
self.fc_dropout = kwargs.get('fc_dropout', 0.5)
self.vgg_fc_layer_size = kwargs.get('vgg_fc_layer_size', 512)
self.loss_type = loss_type
self.loss = self._pick_loss()
self.X = Input(shape=[image_height, image_width, self.n_channels])
vgg_x = vgg_model(final_layer=False,
image_height=self.image_height,
image_width=self.image_width,
n_channels=self.n_channels,
conv_dropout_val=self.conv_dropout,
fc_dropout_val=self.fc_dropout,
fc_layer_size=self.vgg_fc_layer_size)(self.X)
self.y_pred = Lambda(lambda x: K.l2_normalize(x, axis=1))(Dense(2)(vgg_x))
if self.predict_kappa:
self.kappa_pred = Lambda(lambda x: K.abs(x))(Dense(1)(vgg_x))
self.model = Model(self.X, concatenate([self.y_pred, self.kappa_pred]))
else:
self.model = Model(self.X, self.y_pred)
self.optimizer = keras.optimizers.Adam(lr=self.learning_rate,
beta_1=self.beta1,
beta_2=self.beta2,
epsilon=self.epsilon)
self.model.compile(optimizer=self.optimizer, loss=self.loss)
def _pick_loss(self):
if self.loss_type == 'cosine':
print("using cosine loss..")
loss = cosine_loss_tf
elif self.loss_type == 'von_mises':
print("using von-mises loss..")
loss = von_mises_loss_tf
elif self.loss_type == 'mad':
print("using mad loss..")
loss = mad_loss_tf
elif self.loss_type == 'vm_likelihood':
print("using likelihood loss..")
if self.predict_kappa:
loss = von_mises_neg_log_likelihood_keras
else:
def _von_mises_neg_log_likelihood_keras_fixed(y_true, y_pred):
mu_pred = y_pred[:, 0:2]
kappa_pred = tf.ones([tf.shape(y_pred[:, 2:])[0], 1])*self.fixed_kappa_value
return -K.mean(von_mises_log_likelihood_tf(y_true, mu_pred, kappa_pred))
loss = _von_mises_neg_log_likelihood_keras_fixed
else:
raise ValueError("loss should be 'mad','cosine','von_mises' or 'vm_likelihood'")
return loss
def fit(self, train_data, val_data, n_epochs, batch_size, callbacks=None):
        xtr, ytr_bit = train_data
xval, yval_bit = val_data
self.model.fit(xtr, ytr_bit,
batch_size=batch_size,
epochs=n_epochs,
validation_data=(xval, yval_bit),
callbacks=callbacks)
if not self.predict_kappa:
self.finetune_kappa(xval, yval_bit)
return
def save_weights(self, path):
self.model.save_weights(path)
def load_weights(self, path):
self.model.load_weights(path)
def finetune_kappa(self, x, y_bit, max_kappa=1000.0, verbose=False):
ytr_preds_bit = self.model.predict(x)[:, 0:2]
kappa_vals = np.arange(0, max_kappa, 1.0)
log_likelihoods = np.zeros(kappa_vals.shape)
for i, kappa_val in enumerate(kappa_vals):
kappa_preds = np.ones([x.shape[0], 1]) * kappa_val
log_likelihoods[i] = np.mean(von_mises_log_likelihood_np(y_bit, ytr_preds_bit, kappa_preds))
if verbose:
print("kappa: %f, log-likelihood: %f" % (kappa_val, log_likelihoods[i]))
max_ix = np.argmax(log_likelihoods)
self.fixed_kappa_value = kappa_vals[max_ix]
if verbose:
print("best kappa : %f" % self.fixed_kappa_value)
return self.fixed_kappa_value
def evaluate(self, x, ytrue_deg, data_part, return_per_image=False):
ytrue_bit = deg2bit(ytrue_deg)
ypreds = self.model.predict(x)
ypreds_bit = ypreds[:, 0:2]
ypreds_deg = bit2deg(ypreds_bit)
if self.predict_kappa:
kappa_preds = ypreds[:, 2:]
else:
kappa_preds = np.ones([ytrue_deg.shape[0], 1]) * self.fixed_kappa_value
loss = maad_from_deg(ypreds_deg, ytrue_deg)
results = dict()
results['maad_loss'] = float(np.mean(loss))
results['maad_loss_sem'] = float(sem(loss))
print("MAAD error (%s) : %f pm %fSEM" % (data_part,
results['maad_loss'],
results['maad_loss_sem']))
results['mean_kappa'] = float(np.mean(kappa_preds))
results['std_kappa'] = float(np.std(kappa_preds))
log_likelihoods = von_mises_log_likelihood_np(ytrue_bit, ypreds_bit, kappa_preds)
results['log_likelihood_mean'] = float(np.mean(log_likelihoods))
results['log_likelihood_sem'] = float(sem(log_likelihoods, axis=None))
print("log-likelihood (%s) : %f pm %fSEM" % (data_part,
results['log_likelihood_mean'],
results['log_likelihood_sem']))
if return_per_image:
results['point_preds'] = bit2deg(deg2bit(ypreds_deg))
results['maad'] = loss
results['log_likelihood'] = log_likelihoods
return results
def pdf(self, x, x_vals):
n_images = x.shape[0]
x_vals_tiled = np.ones(n_images)
preds = self.model.predict(x)
mu_preds_bit = preds[:, 0:2]
if self.predict_kappa:
kappa_preds = preds[:, 2:]
else:
kappa_preds = np.ones([x.shape[0], 1]) * self.fixed_kappa_value
log_likelihoods = np.zeros([n_images, len(x_vals)])
for xid, xval in enumerate(x_vals):
x_bit = rad2bit(x_vals_tiled*xval)
log_likelihoods[:, xid] = np.exp(np.squeeze(von_mises_log_likelihood_np(x_bit, mu_preds_bit, kappa_preds)))
return log_likelihoods
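# Usage sketch (hypothetical random data with the 50x50 RGB shape assumed above):
if __name__ == '__main__':
    x = np.random.rand(16, 50, 50, 3)
    y_deg = np.random.uniform(0, 360, 16)
    model = BiternionVGG(loss_type='cosine')
    model.fit((x, deg2bit(y_deg)), (x, deg2bit(y_deg)), n_epochs=1, batch_size=8)
    model.evaluate(x, y_deg, 'train')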
| 10,409
| 35.271777
| 119
|
py
|
deep_direct_stat
|
deep_direct_stat-master/models/finite_mixture.py
|
import tensorflow as tf
import keras
import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Input, Dense, Dropout, Flatten, Activation, Lambda
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.layers.merge import concatenate
from utils.angles import deg2bit, bit2deg_multi, rad2bit, bit2deg
from utils.losses import maad_from_deg, von_mises_log_likelihood_np, von_mises_log_likelihood_tf
from scipy.stats import sem
from utils.sampling import sample_von_mises_mixture_multi
from utils.losses import maximum_expected_utility
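# a biternion encodes an angle as its (cos, sin) pair, hence 2 outputs per angle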
N_BITERNION_OUTPUT = 2
def vgg_model(n_outputs=1, final_layer=False, l2_normalize_final=False,
image_height=50, image_width=50,
conv_dropout_val=0.2, fc_dropout_val=0.5, fc_layer_size=512):
model = Sequential()
model.add(Conv2D(24, kernel_size=(3, 3),
activation=None,
input_shape=[image_height, image_width, 3]))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(24, (3, 3), activation=None))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(48, (3, 3), activation=None))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(48, (3, 3), activation=None))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), activation=None))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), activation=None))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(conv_dropout_val))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(fc_dropout_val))
if final_layer:
model.add(Dense(n_outputs, activation=None))
if l2_normalize_final:
model.add(Lambda(lambda x: K.l2_normalize(x, axis=1)))
return model
class BiternionVGGMixture:
def __init__(self,
image_height=50,
image_width=50,
n_channels=3,
**kwargs):
self.image_height = image_height
self.image_width = image_width
self.n_channels = n_channels
self.hyp_params = kwargs
self.n_components = kwargs.get('n_components', 8)
self.learning_rate = kwargs.get('learning_rate', 1.0e-3)
self.beta1 = kwargs.get('beta1', 0.9)
self.beta2 = kwargs.get('beta2', 0.999)
self.epsilon = kwargs.get('epsilon', 1.0e-7)
self.conv_dropout = kwargs.get('conv_dropout', 0.2)
self.fc_dropout = kwargs.get('fc_dropout', 0.5)
self.vgg_fc_layer_size = kwargs.get('vgg_fc_layer_size', 512)
self.mix_fc_layer_size = kwargs.get('mix_fc_layer_size', 512)
self.X = Input(shape=[image_height, image_width, 3])
vgg_x = vgg_model(final_layer=False,
image_height=self.image_height,
image_width=self.image_width,
conv_dropout_val=self.conv_dropout,
fc_dropout_val=self.fc_dropout,
fc_layer_size=self.vgg_fc_layer_size)(self.X)
mu_preds = []
for i in range(0, self.n_components):
mu_pred = Dense(N_BITERNION_OUTPUT)(Dense(self.mix_fc_layer_size)(vgg_x))
mu_pred_normalized = Lambda(lambda x: K.l2_normalize(x, axis=1))(mu_pred)
# mu_pred_norm_reshaped = Lambda(lambda x: K.reshape(x, [-1, 1, N_BITERNION_OUTPUT]))(mu_pred_normalized)
mu_preds.append(mu_pred_normalized)
self.mu_preds = concatenate(mu_preds)
self.kappa_preds = Lambda(lambda x: K.abs(x))(Dense(self.n_components)(Dense(self.mix_fc_layer_size)(vgg_x)))
# kappa_preds = Lambda(lambda x: K.reshape(x, [-1, self.n_components, 1]))(kappa_preds)
self.component_probs = Lambda(lambda x: K.softmax(x))(Dense(self.n_components)(Dense(self.mix_fc_layer_size)(vgg_x)))
# self.component_probs = Lambda(lambda x: K.reshape(x, [-1, self.n_components, 1]))(component_probs)
self.y_pred = concatenate([self.mu_preds, self.kappa_preds, self.component_probs])
self.model = Model(inputs=self.X, outputs=self.y_pred)
self.optimizer = keras.optimizers.Adam(lr=self.learning_rate,
beta_1=self.beta1,
beta_2=self.beta2,
epsilon=self.epsilon)
self.model.compile(optimizer=self.optimizer, loss=self._neg_mean_vmm_loglikelihood_tf)
def fit(self, train_data, val_data, n_epochs, batch_size, callbacks=None):
        xtr, ytr_bit = train_data
xval, yval_bit = val_data
self.model.fit(xtr, ytr_bit,
batch_size=batch_size,
epochs=n_epochs,
validation_data=(xval, yval_bit),
callbacks=callbacks)
return
def save_weights(self, path):
self.model.save_weights(path)
def load_weights(self, path):
self.model.load_weights(path)
def parse_output_tf(self, y_preds):
mu_preds = K.reshape(y_preds[:, 0:self.n_components*N_BITERNION_OUTPUT],
[-1, self.n_components, N_BITERNION_OUTPUT])
kappa_ptr = self.n_components*N_BITERNION_OUTPUT
kappa_preds = K.reshape(y_preds[:, kappa_ptr:kappa_ptr+self.n_components], [-1, self.n_components, 1])
cprobs_ptr = kappa_ptr + self.n_components
component_probs = K.reshape(y_preds[:, cprobs_ptr:cprobs_ptr+self.n_components], [-1, self.n_components])
return mu_preds, kappa_preds, component_probs
def parse_output_np(self, y_preds):
mu_preds = np.reshape(y_preds[:, 0:self.n_components*N_BITERNION_OUTPUT],
[-1, self.n_components, N_BITERNION_OUTPUT])
kappa_ptr = self.n_components*N_BITERNION_OUTPUT
kappa_preds = np.reshape(y_preds[:, kappa_ptr:kappa_ptr+self.n_components], [-1, self.n_components, 1])
cprobs_ptr = kappa_ptr + self.n_components
component_probs = np.reshape(y_preds[:, cprobs_ptr:cprobs_ptr+self.n_components], [-1, self.n_components])
return mu_preds, kappa_preds, component_probs
def pdf(self, x, x_vals):
""" Compute probability density function on a circle given images
Parameters
----------
x: numpy array of shape [n_images, image_width, image_height, n_channels]
angles in biternion (cos, sin) representation that will be used to compute likelihood
x_vals: numpy array of shape [n_points]
angles (in rads) at which pdf values were computed
Returns
-------
pdfs: numpy array of shape [n_images, n_components, n_points]
array containing pdf values for each CVAE sample on circle [0, 2pi] for each values
acc_pdf: numpy array of shape [n_images, n_points]
array containing accumulated pdf value on circle [0, 2pi] for each values
"""
n_images = x.shape[0]
x_vals_tiled = np.ones(n_images)
preds = self.model.predict(x)
mu_preds, kappa_preds, component_probs = self.parse_output_np(preds)
component_probs = np.tile(component_probs.reshape([n_images, self.n_components, 1]), [1, 1, len(x_vals)])
vm_pdfs = np.zeros([n_images, self.n_components, len(x_vals)])
for xid, xval in enumerate(x_vals):
for cid in range(0, self.n_components):
x_bit = rad2bit(x_vals_tiled*xval)
vm_pdfs[:, cid, xid] = np.exp(np.squeeze(von_mises_log_likelihood_np(x_bit,
mu_preds[:, cid, :],
kappa_preds[:, cid])))
acc_pdf = np.sum((component_probs*vm_pdfs), axis=1)
return vm_pdfs, acc_pdf, component_probs
def _von_mises_mixture_log_likelihood_np(self, y_true, y_pred):
component_log_likelihoods = []
mu, kappa, comp_probs = self.parse_output_np(y_pred)
comp_probs = np.squeeze(comp_probs)
for cid in range(0, self.n_components):
component_log_likelihoods.append(von_mises_log_likelihood_np(y_true, mu[:, cid], kappa[:, cid]))
component_log_likelihoods = np.concatenate(component_log_likelihoods, axis=1)
log_likelihoods = np.log(np.sum(comp_probs*np.exp(component_log_likelihoods), axis=1))
return log_likelihoods
def _von_mises_mixture_log_likelihood_tf(self, y_true, y_pred):
component_log_likelihoods = []
mu, kappa, comp_probs = self.parse_output_tf(y_pred)
for cid in range(0, self.n_components):
component_log_likelihoods.append(von_mises_log_likelihood_tf(y_true, mu[:, cid], kappa[:, cid]))
component_log_likelihoods = tf.concat(component_log_likelihoods, axis=1, name='component_likelihoods')
log_likelihoods = tf.log(tf.reduce_sum(comp_probs*tf.exp(component_log_likelihoods), axis=1))
return log_likelihoods
def _neg_mean_vmm_loglikelihood_tf(self, y_true, y_pred):
log_likelihoods = self._von_mises_mixture_log_likelihood_tf(y_true, y_pred)
return -tf.reduce_mean(log_likelihoods)
def evaluate(self, x, ytrue_deg, data_part, return_per_image=False):
ytrue_bit = deg2bit(ytrue_deg)
ypreds = self.model.predict(x)
results = dict()
vmmix_mu, vmmix_kappas, vmmix_probs = self.parse_output_np(ypreds)
vmmix_mu_rad = np.deg2rad(bit2deg_multi(vmmix_mu))
samples = sample_von_mises_mixture_multi(vmmix_mu_rad, vmmix_kappas, vmmix_probs, n_samples=100)
point_preds = maximum_expected_utility(np.rad2deg(samples))
maad_errs = maad_from_deg(point_preds, ytrue_deg)
results['maad_loss'] = float(np.mean(maad_errs))
results['maad_sem'] = float(sem(maad_errs))
log_likelihoods = self._von_mises_mixture_log_likelihood_np(ytrue_bit, ypreds)
results['log_likelihood_mean'] = float(np.mean(log_likelihoods))
results['log_likelihood_sem'] = float(sem(log_likelihoods, axis=None))
print("MAAD error (%s) : %f pm %fSEM" % (data_part,
results['maad_loss'],
results['maad_sem']))
print("log-likelihood (%s) : %f pm %fSEM" % (data_part,
results['log_likelihood_mean'],
results['log_likelihood_sem']))
if return_per_image:
results['point_preds'] = bit2deg(deg2bit(point_preds))
results['maad'] = maad_errs
results['log_likelihood'] = log_likelihoods
return results
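# Usage sketch (hypothetical random data; evaluates the mixture pdf on an angular grid):
if __name__ == '__main__':
    x = np.random.rand(4, 50, 50, 3)
    model = BiternionVGGMixture(n_components=4)
    theta = np.arange(0, 2 * np.pi, 0.1)
    vm_pdfs, acc_pdf, component_probs = model.pdf(x, theta)
    print(acc_pdf.shape)  # -> (4, 63)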
| 11,224
| 38.111498
| 125
|
py
|
deep_direct_stat
|
deep_direct_stat-master/models/infinite_mixture.py
|
import tensorflow as tf
import keras
import numpy as np
import os
from scipy import stats
from scipy.misc import imresize
from keras import backend as K
from keras.models import Sequential
from keras.layers import Input, Dense, Dropout, Flatten, Activation, Lambda, GlobalAveragePooling2D
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.layers.merge import concatenate
from keras.optimizers import Adam
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.applications.densenet import DenseNet169
from keras.applications.mobilenetv2 import MobileNetV2
from keras.callbacks import EarlyStopping, ModelCheckpoint
from utils.custom_keras_callbacks import ModelCheckpointEveryNBatch
from utils.losses import maad_from_deg, maximum_expected_utility
from utils.losses import cosine_loss_tf, von_mises_log_likelihood_tf
from utils.losses import von_mises_log_likelihood_np
from utils.angles import bit2deg, rad2bit, bit2rad
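# probability density of the uniform distribution on the circle, ~1 / (2 * pi)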
P_UNIFORM = 0.15916927
class BiternionMixture:
def __init__(self,
input_shape=[224, 224, 3],
debug=False,
backbone_cnn='inception',
backbone_weights='imagenet',
learning_rate=1.0e-4,
z_size=8,
n_samples=5,
hlayer_size=512,
noise_std=1.0,
gammas=[1.0e-1, 1.0e-1, 1.0e-1]):
self.input_shape = input_shape
self.learning_rate = learning_rate
self.set_gammas(gammas)
self.hlayer_size = hlayer_size
self.z_size = z_size
self.n_samples = n_samples
self.noise_std = noise_std
self.n_sample_outputs = 9
self.backbone_weights = backbone_weights
if debug:
x_in = Input(shape=input_shape)
x = Flatten(input_shape=input_shape)(x_in)
x = Dense(128, activation='relu')(x)
else:
if backbone_cnn == 'inception':
backbone_model = InceptionResNetV2(weights=self.backbone_weights, include_top=False,
input_shape=input_shape)
elif backbone_cnn == 'densenet':
backbone_model = DenseNet169(weights=self.backbone_weights, include_top=False,
input_shape=input_shape)
            elif backbone_cnn == 'mobilenet':
                backbone_model = MobileNetV2(weights=self.backbone_weights, include_top=False,
                                             input_shape=input_shape)
            else:
                raise ValueError("unknown backbone_cnn: '%s', expected "
                                 "'inception', 'densenet' or 'mobilenet'" % backbone_cnn)
x = backbone_model.output
x = GlobalAveragePooling2D()(x)
        x = Dense(z_size*16, activation='relu')(x)
        x = Dense(z_size*4, activation='relu')(x)
        x = Dense(z_size, activation='relu')(x)
az_mean, az_kappa = self.decoder_seq("azimuth")
el_mean, el_kappa = self.decoder_seq("elevation")
ti_mean, ti_kappa = self.decoder_seq("tilt")
z_lst = []
x_z_lst = []
x_z_decoded_lst = []
for k in range(0, n_samples):
z_lst.append(Lambda(self._sample_z)(x))
x_z_lst.append(concatenate([x, z_lst[k]]))
kth_preds = concatenate([az_mean(x_z_lst[k]), az_kappa(x_z_lst[k]),
el_mean(x_z_lst[k]), el_kappa(x_z_lst[k]),
ti_mean(x_z_lst[k]), ti_kappa(x_z_lst[k])])
x_z_decoded_lst.append(kth_preds)
y_pred = concatenate(x_z_decoded_lst)
if debug:
self.model = Model(x_in, y_pred, name='bi')
else:
self.model = Model(backbone_model.input, y_pred, name='BiternionInception')
opt = Adam(lr=learning_rate)
self.model.compile(optimizer=opt, loss=self._mc_loss)
def _mc_loss(self, y_target, y_pred):
az_target, el_target, ti_target = self.unpack_target(y_target)
sample_az_likelihoods = []
sample_el_likelihoods = []
sample_ti_likelihoods = []
n_feat = 9
for sid in range(0, self.n_samples):
az_mean, az_kappa, el_mean, el_kappa, ti_mean, ti_kappa = \
self.unpack_sample_preds(y_pred[:, sid * n_feat:sid * n_feat + n_feat])
sample_az_likelihoods.append(K.exp(von_mises_log_likelihood_tf(az_target,
az_mean,
az_kappa)))
sample_el_likelihoods.append(K.exp(von_mises_log_likelihood_tf(el_target,
el_mean,
el_kappa)))
sample_ti_likelihoods.append(K.exp(von_mises_log_likelihood_tf(ti_target,
ti_mean,
ti_kappa)))
az_likelihood = -K.log(P_UNIFORM * self.az_gamma +
(1 - self.az_gamma) * K.mean(concatenate(sample_az_likelihoods), axis=1))
el_likelihood = -K.log(P_UNIFORM * self.el_gamma +
(1 - self.el_gamma) * K.mean(concatenate(sample_el_likelihoods), axis=1))
ti_likelihood = -K.log(P_UNIFORM * self.ti_gamma +
(1 - self.ti_gamma) * K.mean(concatenate(sample_ti_likelihoods), axis=1))
return az_likelihood+el_likelihood+ti_likelihood
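    # _sample_z draws Gaussian noise shaped like the feature vector; concatenating
    # it with the features gives each of the n_samples decoder passes a different
    # stochastic input, i.e. one Monte Carlo draw from the implicit mixture.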
def _sample_z(self, x):
return K.random_normal(shape=K.shape(x), mean=0., stddev=self.noise_std)
def decoder_seq(self, name):
decoder_seq = Sequential(name='decoder_%s'%name)
decoder_seq.add(Dense(self.hlayer_size, activation='relu', input_shape=[self.z_size*2]))
decoder_seq.add(Dense(self.hlayer_size, activation='relu'))
decoder_mean = Sequential()
decoder_mean.add(decoder_seq)
decoder_mean.add(Dense(128, activation='relu'))
decoder_mean.add(Dense(2, activation='linear'))
decoder_mean.add(Lambda(lambda x: K.l2_normalize(x, axis=1), name='%s_mean' % name))
decoder_kappa = Sequential()
decoder_kappa.add(decoder_seq)
decoder_kappa.add(Dense(128, activation='relu'))
decoder_kappa.add((Dense(1, activation='linear')))
decoder_kappa.add(Lambda(lambda x: K.abs(x), name='%s_kappa' % name))
return decoder_mean, decoder_kappa
def set_gammas(self, gammas):
self.az_gamma = gammas[0]
self.el_gamma = gammas[1]
self.ti_gamma = gammas[2]
def unpack_sample_preds(self, y_pred):
az_mean = y_pred[:, 0:2]
az_kappa = y_pred[:, 2:3]
el_mean = y_pred[:, 3:5]
el_kappa = y_pred[:, 5:6]
ti_mean = y_pred[:, 6:8]
ti_kappa = y_pred[:, 8:9]
return az_mean, az_kappa, el_mean, el_kappa, ti_mean, ti_kappa
def unpack_all_preds(self, y_pred):
az_means = []
az_kappas = []
el_means = []
el_kappas = []
ti_means = []
ti_kappas = []
n_feat = 9
for sid in range(0, self.n_samples):
az_mean, az_kappa, el_mean, el_kappa, ti_mean, ti_kappa = \
self.unpack_sample_preds(y_pred[:, sid * n_feat:sid * n_feat + n_feat])
az_means.append(az_mean)
az_kappas.append(az_kappa)
el_means.append(el_mean)
el_kappas.append(el_kappa)
ti_means.append(ti_mean)
ti_kappas.append(ti_kappa)
return az_means, az_kappas, el_means, el_kappas, ti_means, ti_kappas
def unpack_target(self, y_target):
az_target = y_target[:, 0:2]
el_target = y_target[:, 2:4]
ti_target = y_target[:, 4:6]
return az_target, el_target, ti_target
def fit(self, x, y, validation_data, ckpt_path, epochs=1, batch_size=32, patience=5):
early_stop_cb = EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=1, mode='auto')
model_ckpt = ModelCheckpoint(ckpt_path, monitor='val_loss', verbose=1,
save_best_only=True, save_weights_only=True)
self.model.fit(x, y, validation_data=validation_data,
epochs=epochs,
batch_size=batch_size,
callbacks=[early_stop_cb, model_ckpt])
self.model.load_weights(ckpt_path)
def predict(self, x):
""" Predict orientation angles (azimuth, elevation, tilt) from images, in degrees
"""
y_pred = self.model.predict(np.asarray(x))
az_preds_bit, az_preds_kappa, el_preds_bit, el_preds_kappa, ti_preds_bit, ti_preds_kappa = \
self.unpack_all_preds(y_pred)
az_preds_deg, el_preds_deg, ti_preds_deg = self.convert_to_deg_preds(y_pred)
return az_preds_deg, el_preds_deg, ti_preds_deg
def log_likelihood(self, y_true_bit, y_preds_bit, kappa_preds, gamma, angle='', verbose=1):
likelihoods = np.hstack([np.exp(von_mises_log_likelihood_np(y_true_bit, y_preds_bit[sid], kappa_preds[sid]))
for sid in range(0, self.n_samples)])
vm_lls = np.log(P_UNIFORM*gamma +
(1-gamma)*np.mean(likelihoods, axis=1))
vm_ll_mean = np.mean(vm_lls)
vm_ll_sem = stats.sem(vm_lls)
if verbose:
print("Log-likelihood %s : %2.2f+-%2.2fSE" % (angle, vm_ll_mean, vm_ll_sem))
return vm_lls, vm_ll_mean, vm_ll_sem
def maad(self, y_true_deg, y_pred_deg, angle='', verbose=1):
""" Compute Mean Absolute Angular Deviation between ground truth and predictions (in degrees)
"""
aads = maad_from_deg(y_true_deg, y_pred_deg)
maad = np.mean(aads)
sem = stats.sem(aads)
if verbose:
print("MAAD %s : %2.2f+-%2.2fSE" % (angle, maad, sem))
return aads, maad, sem
def convert_to_deg_preds(self, y_pred):
az_preds_bit, az_preds_kappa, el_preds_bit, el_preds_kappa, ti_preds_bit, ti_preds_kappa = self.unpack_all_preds(y_pred)
az_preds_deg_lst = np.vstack([bit2deg(az_preds_bit[sid]) for sid in range(0, self.n_samples)]).T
az_preds_deg = maximum_expected_utility(az_preds_deg_lst)
el_preds_deg_lst = np.vstack([bit2deg(el_preds_bit[sid]) for sid in range(0, self.n_samples)]).T
el_preds_deg = maximum_expected_utility(el_preds_deg_lst)
ti_preds_deg_lst = np.vstack([bit2deg(ti_preds_bit[sid]) for sid in range(0, self.n_samples)]).T
ti_preds_deg = maximum_expected_utility(ti_preds_deg_lst)
return az_preds_deg, el_preds_deg, ti_preds_deg
def evaluate(self, x, y_true, verbose=1, return_full=False):
y_pred = self.model.predict(np.asarray(x))
az_preds_bit, az_preds_kappa, el_preds_bit, el_preds_kappa, ti_preds_bit, ti_preds_kappa = \
self.unpack_all_preds(y_pred)
az_preds_deg, el_preds_deg, ti_preds_deg = self.convert_to_deg_preds(y_pred)
az_true_bit, el_true_bit, ti_true_bit = self.unpack_target(y_true)
az_true_deg = bit2deg(az_true_bit)
el_true_deg = bit2deg(el_true_bit)
ti_true_deg = bit2deg(ti_true_bit)
az_aads, az_maad, az_sem = self.maad(az_true_deg, az_preds_deg, 'azimuth', verbose=verbose)
el_aads, el_maad, el_sem = self.maad(el_true_deg, el_preds_deg, 'elevation', verbose=verbose)
ti_aads, ti_maad, ti_sem = self.maad(ti_true_deg, ti_preds_deg, 'tilt', verbose=verbose)
az_lls, az_ll_mean, az_ll_sem = self.log_likelihood(az_true_bit, az_preds_bit, az_preds_kappa, self.az_gamma,
'azimuth', verbose=verbose)
el_lls, el_ll_mean, el_ll_sem = self.log_likelihood(el_true_bit, el_preds_bit, el_preds_kappa, self.el_gamma,
'elevation', verbose=verbose)
        ti_lls, ti_ll_mean, ti_ll_sem = self.log_likelihood(ti_true_bit, ti_preds_bit, ti_preds_kappa, self.ti_gamma,
                                                            'tilt', verbose=verbose)
lls = az_lls + el_lls + ti_lls
ll_mean = np.mean(lls)
ll_sem = stats.sem(lls)
        maad_mean = np.mean([az_maad, el_maad, ti_maad])
        maad_sem = stats.sem(np.concatenate([az_aads, el_aads, ti_aads]))
        print("MAAD TOTAL: %2.2f+-%2.2fSE" % (maad_mean, maad_sem))
print("Log-likelihood TOTAL: %2.2f+-%2.2fSE" % (ll_mean, ll_sem))
if return_full:
return maad_mean, ll_mean, ll_sem, lls
else:
return maad_mean, ll_mean, ll_sem
def save_detections_for_official_eval(self, x, save_path):
# det path example: '/home/sprokudin/RenderForCNN/view_estimation/vp_test_results/aeroplane_pred_view.txt'
y_pred = self.model.predict(np.asarray(x))
az_preds_bit, az_preds_kappa, el_preds_bit, el_preds_kappa, ti_preds_bit, ti_preds_kappa = \
self.unpack_sample_preds(y_pred)
az_preds_deg, el_preds_deg, ti_preds_deg = self.convert_to_deg_preds(y_pred)
y_pred = np.vstack([az_preds_deg, el_preds_deg, ti_preds_deg]).T
np.savetxt(save_path, y_pred, delimiter=' ', fmt='%i')
print("evaluation data saved to %s" % save_path)
return
def train_finetune_eval(self, x_train, y_train, x_val, y_val, x_test, y_test,
ckpt_path, batch_size=32, patience=10, epochs=200):
self.fit(x_train, y_train, [x_val, y_val], epochs=epochs,
ckpt_path=ckpt_path, patience=patience, batch_size=batch_size)
print("EVALUATING ON TRAIN")
train_maad, train_ll, train_ll_sem = self.evaluate(x_train, y_train)
print("EVALUATING ON VALIDAITON")
val_maad, val_ll, val_ll_sem = self.evaluate(x_val, y_val)
print("EVALUATING ON TEST")
test_maad, test_ll, test_ll_sem = self.evaluate(x_test, y_test)
return train_maad, train_ll, val_maad, val_ll, test_maad, test_ll
def pdf(self, x, gamma=1.0e-1, angle='azimuth', step=0.01):
"""
:param x: input images
:param gamma: weight of a default uniform distribution added to mixture
:param angle: azimuth, elevation or tilt
:param step: step of pdf
:return: points at (0, 2pi) and corresponding pdf values
"""
vals = np.arange(0, 2*np.pi, step)
n_images = x.shape[0]
x_vals_tiled = np.ones(n_images)
az_preds_bit, az_preds_kappa, el_preds_bit, el_preds_kappa, ti_preds_bit, ti_preds_kappa = \
self.unpack_all_preds(self.model.predict(x))
if angle == 'azimuth':
mu_preds_bit = az_preds_bit
kappa_preds = az_preds_kappa
gamma = self.az_gamma
elif angle == 'elevation':
mu_preds_bit = el_preds_bit
kappa_preds = el_preds_kappa
gamma = self.el_gamma
elif angle == 'tilt':
mu_preds_bit = ti_preds_bit
kappa_preds = ti_preds_kappa
gamma = self.ti_gamma
pdf_vals = np.zeros([n_images, len(vals)])
for xid, xval in enumerate(vals):
x_bit = rad2bit(x_vals_tiled*xval)
pdf_vals[:, xid] = np.exp(self.log_likelihood(x_bit,
mu_preds_bit, kappa_preds, gamma, angle=angle, verbose=0)[0])
return vals, pdf_vals
def plot_pdf(self, vals, pdf_vals, ax=None, target=None, predicted=None, step=1.0e-2):
import matplotlib.pyplot as plt
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
x = np.arange(0, 2*np.pi, step)
xticks = [0., .5*np.pi, np.pi, 1.5*np.pi, 2*np.pi]
xticks_labels = ["$0$", r"$\frac{\pi}{2}$", r"$\pi$", r"$\frac{3\pi}{2}$", r"$2\pi$"]
ax.set_xticks(xticks)
ax.set_xticklabels(xticks_labels)
ax.plot(vals, pdf_vals, label='pdf')
# mu = np.sum(pdf_vals*vals*step)
# ax.axvline(mu, c='blue', label='mean')
if target is not None:
ax.axvline(target, c='orange', label='ground truth')
if predicted is not None:
ax.axvline(predicted, c='darkblue', label='predicted value')
ax.set_xlim((0, 2*np.pi))
ax.set_ylim(0, 1.0)
ax.legend(loc=4)
return
def visualize_detections(self, x, y_true=None, kappa=1.0):
import matplotlib.pyplot as plt
n_images = x.shape[0]
y_pred = self.model.predict(x)
az_preds_bit, az_preds_kappa, el_preds_bit, el_preds_kappa, ti_preds_bit, ti_preds_kappa = \
self.unpack_sample_preds(y_pred)
az_preds_deg, el_preds_deg, ti_preds_deg = self.convert_to_deg_preds(y_pred)
az_preds_rad = np.deg2rad(az_preds_deg)
if y_true is not None:
az_true_bit, el_true_bit, ti_true_bit = self.unpack_target(y_true)
az_true_rad = bit2rad(az_true_bit)
xvals, pdf_vals = self.pdf(x, gamma=self.az_gamma)
for i in range(0, n_images):
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
axs[0].imshow(x[i])
if y_true is not None:
self.plot_pdf(xvals, pdf_vals[i], target=az_true_rad[i], predicted=az_preds_rad[i], ax=axs[1])
else:
self.plot_pdf(xvals, pdf_vals[i], ax=axs[1])
fig.show()
return
def make_halo(self, img, standard_size=[224, 224], black_canvas=False):
img_halo = np.copy(img)
lx, ly = img.shape[0:2]
X, Y = np.ogrid[0:lx, 0:ly]
mask = (X - lx / 2) ** 2 + (Y - ly / 2) ** 2 > lx * ly / 4
if black_canvas:
img_halo[mask] = 0
else:
img_halo[mask] = 255
img_halo = imresize(img_halo, size=standard_size)
return img_halo
def frame_image(self, img, frame_width, black_canvas=False):
b = frame_width # border size in pixel
ny, nx = img.shape[0], img.shape[1] # resolution / number of pixels in x and y
if black_canvas:
framed_img = np.zeros((b+ny+b, b+nx+b, img.shape[2]), dtype='uint8')*255
else:
framed_img = np.ones((b+ny+b, b+nx+b, img.shape[2]), dtype='uint8')*255
for i in range(0, 3):
framed_img[b:-b, b:-b,i] = img[:,:,i]
return framed_img
def plot_pdf_circle(self, img, xvals, pdf, ypred_rad=None, ytrue_rad=None, show_legend=True,
theta_zero_location='E', show_ticks=True, pdf_scaler=15.0, pdf_color='green',
pred_color='blue', fontsize=10):
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 4))
ax_img = fig.add_subplot(1, 1, 1, frameon=False)
ax_pdf = fig.add_subplot(1, 1, 1, projection='polar')
ax_img.axis("off")
img_halo = self.frame_image(self.make_halo(img, standard_size=[224, 224]), frame_width=90)
ax_img.imshow(img_halo)
ax_pdf.axvline(0, ymin=0.54, color='white', linewidth=3, linestyle='dashed')
ax_pdf.set_yticks([])
ax_pdf.set_xticks(([]))
if show_ticks:
ax_pdf.set_xticklabels(["0°", "%d°" % np.rad2deg(ypred_rad)], fontsize=fontsize)
if (ypred_rad is not None) and (ytrue_rad is not None):
ax_pdf.set_xticks(([0, ypred_rad, ytrue_rad]))
ax_pdf.set_xticklabels(["0°", "%d°" % np.rad2deg(ypred_rad), "%d°" % np.rad2deg(ytrue_rad)],
fontsize=fontsize)
elif ytrue_rad is not None:
ax_pdf.set_xticks(([0, ytrue_rad]))
ax_pdf.set_xticklabels(["0°", "%d°" % np.rad2deg(ytrue_rad)], fontsize=fontsize)
elif ypred_rad is not None:
ax_pdf.set_xticks(([0, ypred_rad]))
ax_pdf.set_xticklabels(["0°", "%d°" % np.rad2deg(ypred_rad)], fontsize=fontsize)
else:
ax_pdf.set_xticklabels([])
ax_pdf.set_ylim(0, 20)
ax_pdf.patch.set_alpha(0.1)
ax_pdf.set_theta_zero_location(theta_zero_location)
margin = 10.2
border = 0.8
ax_pdf.fill_between(xvals, np.ones(xvals.shape[0])*(margin+border), pdf*pdf_scaler+margin+border,
color=pdf_color, alpha=0.5, label='$p_{\\theta}(\phi | \mathbf{x})$')
if ytrue_rad is not None:
ax_pdf.axvline(ytrue_rad, ymin=0.54, color='orange', linewidth=4, label='ground truth')
if ypred_rad is not None:
ax_pdf.axvline(ypred_rad, ls='dashed', ymin=0.54, color=pred_color, linewidth=4, label='prediction')
if show_legend:
ax_pdf.legend(fontsize=fontsize, loc=1, framealpha=1.0)
return fig
def visualize_detections_on_circle(self, x, y_true=None, show_legend=True, save_figs=False, save_path=None):
n_images = x.shape[0]
if save_figs:
if not os.path.exists(save_path):
os.makedirs(save_path)
y_pred = self.model.predict(x)
az_preds_bit, az_preds_kappa, el_preds_bit, el_preds_kappa, ti_preds_bit, ti_preds_kappa = \
self.unpack_sample_preds(y_pred)
az_preds_deg, el_preds_deg, ti_preds_deg = self.convert_to_deg_preds(y_pred)
az_preds_rad = np.deg2rad(az_preds_deg)
if y_true is not None:
az_true_bit, el_true_bit, ti_true_bit = self.unpack_target(y_true)
az_true_rad = bit2rad(az_true_bit)
else:
az_true_rad = list(None for i in range(0, n_images))
xvals, pdf_vals = self.pdf(x, gamma=self.az_gamma)
for i in range(0, n_images):
if i > 0:
show_legend = False
fig = self.plot_pdf_circle(x[i], xvals, pdf_vals[i],
ypred_rad=az_preds_rad[i],
ytrue_rad=az_true_rad[i],
theta_zero_location='N',
show_ticks=True,
pdf_color='blue',
pred_color='blue',
show_legend=show_legend)
if save_figs:
fig_save_path = os.path.join(save_path, 'frame_%d.png' % i)
print("saving frame detections to %s" % fig_save_path)
fig.savefig(fig_save_path)
fig.show()
return
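# Usage sketch (debug=True keeps the network tiny and avoids backbone weights):
if __name__ == '__main__':
    model = BiternionMixture(input_shape=[32, 32, 3], debug=True, n_samples=2)
    x = np.random.rand(4, 32, 32, 3).astype('float32')
    az_deg, el_deg, ti_deg = model.predict(x)
    print(az_deg, el_deg, ti_deg)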
| 22,658
| 39.753597
| 128
|
py
|