repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
xgboost
xgboost-master/tests/python/test_cli.py
import json import os import platform import subprocess import tempfile import numpy import xgboost from xgboost import testing as tm class TestCLI: template = ''' booster = gbtree objective = reg:squarederror eta = 1.0 gamma = 1.0 seed = {seed} min_child_weight = 0 max_depth = 3 task = {task} model_in = {model_in} model_out = {model_out} test_path = {test_path} name_pred = {name_pred} model_dir = {model_dir} num_round = 10 data = {data_path} eval[test] = {data_path} ''' PROJECT_ROOT = tm.project_root(__file__) def get_exe(self): if platform.system() == 'Windows': exe = 'xgboost.exe' else: exe = 'xgboost' exe = os.path.join(self.PROJECT_ROOT, exe) assert os.path.exists(exe) return exe def test_cli_model(self): data_path = "{root}/demo/data/agaricus.txt.train?format=libsvm".format( root=self.PROJECT_ROOT) exe = self.get_exe() seed = 1994 with tempfile.TemporaryDirectory() as tmpdir: model_out_cli = os.path.join( tmpdir, 'test_load_cli_model-cli.json') model_out_py = os.path.join( tmpdir, 'test_cli_model-py.json') config_path = os.path.join( tmpdir, 'test_load_cli_model.conf') train_conf = self.template.format(data_path=data_path, seed=seed, task='train', model_in='NULL', model_out=model_out_cli, test_path='NULL', name_pred='NULL', model_dir='NULL') with open(config_path, 'w') as fd: fd.write(train_conf) subprocess.run([exe, config_path]) predict_out = os.path.join(tmpdir, 'test_load_cli_model-prediction') predict_conf = self.template.format(task='pred', seed=seed, data_path=data_path, model_in=model_out_cli, model_out='NULL', test_path=data_path, name_pred=predict_out, model_dir='NULL') with open(config_path, 'w') as fd: fd.write(predict_conf) subprocess.run([exe, config_path]) cli_predt = numpy.loadtxt(predict_out) parameters = { 'booster': 'gbtree', 'objective': 'reg:squarederror', 'eta': 1.0, 'gamma': 1.0, 'seed': seed, 'min_child_weight': 0, 'max_depth': 3 } data = xgboost.DMatrix(data_path) booster = xgboost.train(parameters, data, num_boost_round=10) # CLI 
model doesn't contain feature info. booster.feature_names = None booster.feature_types = None booster.set_attr(best_iteration=None) booster.save_model(model_out_py) py_predt = booster.predict(data) numpy.testing.assert_allclose(cli_predt, py_predt) cli_model = xgboost.Booster(model_file=model_out_cli) cli_predt = cli_model.predict(data) numpy.testing.assert_allclose(cli_predt, py_predt) with open(model_out_cli, 'rb') as fd: cli_model_bin = fd.read() with open(model_out_py, 'rb') as fd: py_model_bin = fd.read() assert hash(cli_model_bin) == hash(py_model_bin) def test_cli_help(self): exe = self.get_exe() completed = subprocess.run([exe], stdout=subprocess.PIPE) error_msg = completed.stdout.decode('utf-8') ret = completed.returncode assert ret == 1 assert error_msg.find('Usage') != -1 assert error_msg.find('eval[NAME]') != -1 completed = subprocess.run([exe, '-V'], stdout=subprocess.PIPE) msg = completed.stdout.decode('utf-8') assert msg.find('XGBoost') != -1 v = xgboost.__version__ if v.find('dev') != -1: assert msg.split(':')[1].strip() == v.split('-')[0] elif v.find('rc') != -1: assert msg.split(':')[1].strip() == v.split('rc')[0] else: assert msg.split(':')[1].strip() == v def test_cli_model_json(self): exe = self.get_exe() data_path = "{root}/demo/data/agaricus.txt.train?format=libsvm".format( root=self.PROJECT_ROOT) seed = 1994 with tempfile.TemporaryDirectory() as tmpdir: model_out_cli = os.path.join( tmpdir, 'test_load_cli_model-cli.json') config_path = os.path.join(tmpdir, 'test_load_cli_model.conf') train_conf = self.template.format(data_path=data_path, seed=seed, task='train', model_in='NULL', model_out=model_out_cli, test_path='NULL', name_pred='NULL', model_dir='NULL') with open(config_path, 'w') as fd: fd.write(train_conf) subprocess.run([exe, config_path]) with open(model_out_cli, 'r') as fd: model = json.load(fd) assert model['learner']['gradient_booster']['name'] == 'gbtree' def test_cli_save_model(self): '''Test save on final round''' exe = 
self.get_exe() data_path = "{root}/demo/data/agaricus.txt.train?format=libsvm".format( root=self.PROJECT_ROOT) seed = 1994 with tempfile.TemporaryDirectory() as tmpdir: model_out_cli = os.path.join(tmpdir, '0010.model') config_path = os.path.join(tmpdir, 'test_load_cli_model.conf') train_conf = self.template.format(data_path=data_path, seed=seed, task='train', model_in='NULL', model_out='NULL', test_path='NULL', name_pred='NULL', model_dir=tmpdir) with open(config_path, 'w') as fd: fd.write(train_conf) subprocess.run([exe, config_path]) assert os.path.exists(model_out_cli)
7,100
35.603093
79
py
xgboost
xgboost-master/tests/python/test_plotting.py
import json import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm try: import matplotlib matplotlib.use('Agg') from graphviz import Source from matplotlib.axes import Axes except ImportError: pass pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(), tm.no_graphviz())) class TestPlotting: def test_plotting(self): m, _ = tm.load_agaricus(__file__) booster = xgb.train({'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}, m, num_boost_round=2) ax = xgb.plot_importance(booster) assert isinstance(ax, Axes) assert ax.get_title() == 'Feature importance' assert ax.get_xlabel() == 'F score' assert ax.get_ylabel() == 'Features' assert len(ax.patches) == 4 ax = xgb.plot_importance(booster, color='r', title='t', xlabel='x', ylabel='y') assert isinstance(ax, Axes) assert ax.get_title() == 't' assert ax.get_xlabel() == 'x' assert ax.get_ylabel() == 'y' assert len(ax.patches) == 4 for p in ax.patches: assert p.get_facecolor() == (1.0, 0, 0, 1.0) # red ax = xgb.plot_importance(booster, color=['r', 'r', 'b', 'b'], title=None, xlabel=None, ylabel=None) assert isinstance(ax, Axes) assert ax.get_title() == '' assert ax.get_xlabel() == '' assert ax.get_ylabel() == '' assert len(ax.patches) == 4 assert ax.patches[0].get_facecolor() == (1.0, 0, 0, 1.0) # red assert ax.patches[1].get_facecolor() == (1.0, 0, 0, 1.0) # red assert ax.patches[2].get_facecolor() == (0, 0, 1.0, 1.0) # blue assert ax.patches[3].get_facecolor() == (0, 0, 1.0, 1.0) # blue g = xgb.to_graphviz(booster, num_trees=0) assert isinstance(g, Source) ax = xgb.plot_tree(booster, num_trees=0) assert isinstance(ax, Axes) def test_importance_plot_lim(self): np.random.seed(1) dm = xgb.DMatrix(np.random.randn(100, 100), label=[0, 1] * 50) bst = xgb.train({}, dm) assert len(bst.get_fscore()) == 71 ax = xgb.plot_importance(bst) assert ax.get_xlim() == (0., 11.) assert ax.get_ylim() == (-1., 71.) 
ax = xgb.plot_importance(bst, xlim=(0, 5), ylim=(10, 71)) assert ax.get_xlim() == (0., 5.) assert ax.get_ylim() == (10., 71.) def run_categorical(self, tree_method: str) -> None: X, y = tm.make_categorical(1000, 31, 19, onehot=False) reg = xgb.XGBRegressor( enable_categorical=True, n_estimators=10, tree_method=tree_method ) reg.fit(X, y) trees = reg.get_booster().get_dump(dump_format="json") for tree in trees: j_tree = json.loads(tree) assert "leaf" in j_tree.keys() or isinstance( j_tree["split_condition"], list ) graph = xgb.to_graphviz(reg, num_trees=len(j_tree) - 1) assert isinstance(graph, Source) ax = xgb.plot_tree(reg, num_trees=len(j_tree) - 1) assert isinstance(ax, Axes) @pytest.mark.skipif(**tm.no_pandas()) def test_categorical(self) -> None: self.run_categorical("approx")
3,420
34.268041
77
py
xgboost
xgboost-master/tests/python/test_callback.py
import json import os import tempfile from contextlib import nullcontext from typing import Union import pytest import xgboost as xgb from xgboost import testing as tm # We use the dataset for tests. pytestmark = pytest.mark.skipif(**tm.no_sklearn()) class TestCallbacks: @classmethod def setup_class(cls): from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) cls.X = X cls.y = y split = int(X.shape[0]*0.8) cls.X_train = X[: split, ...] cls.y_train = y[: split, ...] cls.X_valid = X[split:, ...] cls.y_valid = y[split:, ...] def run_evaluation_monitor( self, D_train: xgb.DMatrix, D_valid: xgb.DMatrix, rounds: int, verbose_eval: Union[bool, int] ): def check_output(output: str) -> None: if int(verbose_eval) == 1: # Should print each iteration info assert len(output.split('\n')) == rounds elif int(verbose_eval) > rounds: # Should print first and latest iteration info assert len(output.split('\n')) == 2 else: # Should print info by each period additionaly to first and latest # iteration num_periods = rounds // int(verbose_eval) # Extra information is required for latest iteration is_extra_info_required = num_periods * int(verbose_eval) < (rounds - 1) assert len(output.split('\n')) == ( 1 + num_periods + int(is_extra_info_required) ) evals_result: xgb.callback.TrainingCallback.EvalsLog = {} params = {'objective': 'binary:logistic', 'eval_metric': 'error'} with tm.captured_output() as (out, err): xgb.train( params, D_train, evals=[(D_train, 'Train'), (D_valid, 'Valid')], num_boost_round=rounds, evals_result=evals_result, verbose_eval=verbose_eval, ) output: str = out.getvalue().strip() check_output(output) with tm.captured_output() as (out, err): xgb.cv(params, D_train, num_boost_round=rounds, verbose_eval=verbose_eval) output = out.getvalue().strip() check_output(output) def test_evaluation_monitor(self): D_train = xgb.DMatrix(self.X_train, self.y_train) D_valid = xgb.DMatrix(self.X_valid, self.y_valid) evals_result = {} rounds = 10 
xgb.train({'objective': 'binary:logistic', 'eval_metric': 'error'}, D_train, evals=[(D_train, 'Train'), (D_valid, 'Valid')], num_boost_round=rounds, evals_result=evals_result, verbose_eval=True) assert len(evals_result['Train']['error']) == rounds assert len(evals_result['Valid']['error']) == rounds self.run_evaluation_monitor(D_train, D_valid, rounds, True) self.run_evaluation_monitor(D_train, D_valid, rounds, 2) self.run_evaluation_monitor(D_train, D_valid, rounds, 4) self.run_evaluation_monitor(D_train, D_valid, rounds, rounds + 1) def test_early_stopping(self): D_train = xgb.DMatrix(self.X_train, self.y_train) D_valid = xgb.DMatrix(self.X_valid, self.y_valid) evals_result = {} rounds = 30 early_stopping_rounds = 5 booster = xgb.train({'objective': 'binary:logistic', 'eval_metric': 'error'}, D_train, evals=[(D_train, 'Train'), (D_valid, 'Valid')], num_boost_round=rounds, evals_result=evals_result, verbose_eval=True, early_stopping_rounds=early_stopping_rounds) dump = booster.get_dump(dump_format='json') assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 # No early stopping, best_iteration should be set to last epoch booster = xgb.train({'objective': 'binary:logistic', 'eval_metric': 'error'}, D_train, evals=[(D_train, 'Train'), (D_valid, 'Valid')], num_boost_round=10, evals_result=evals_result, verbose_eval=True) assert booster.num_boosted_rounds() - 1 == booster.best_iteration def test_early_stopping_custom_eval(self): D_train = xgb.DMatrix(self.X_train, self.y_train) D_valid = xgb.DMatrix(self.X_valid, self.y_valid) early_stopping_rounds = 5 booster = xgb.train({'objective': 'binary:logistic', 'eval_metric': 'error', 'tree_method': 'hist'}, D_train, evals=[(D_train, 'Train'), (D_valid, 'Valid')], feval=tm.eval_error_metric, num_boost_round=1000, early_stopping_rounds=early_stopping_rounds, verbose_eval=False) dump = booster.get_dump(dump_format='json') assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 def 
test_early_stopping_customize(self): D_train = xgb.DMatrix(self.X_train, self.y_train) D_valid = xgb.DMatrix(self.X_valid, self.y_valid) early_stopping_rounds = 5 early_stop = xgb.callback.EarlyStopping(rounds=early_stopping_rounds, metric_name='CustomErr', data_name='Train') # Specify which dataset and which metric should be used for early stopping. booster = xgb.train( {'objective': 'binary:logistic', 'eval_metric': ['error', 'rmse'], 'tree_method': 'hist'}, D_train, evals=[(D_train, 'Train'), (D_valid, 'Valid')], feval=tm.eval_error_metric, num_boost_round=1000, callbacks=[early_stop], verbose_eval=False) dump = booster.get_dump(dump_format='json') assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 assert len(early_stop.stopping_history['Train']['CustomErr']) == len(dump) rounds = 100 early_stop = xgb.callback.EarlyStopping( rounds=early_stopping_rounds, metric_name='CustomErr', data_name='Train', min_delta=100, save_best=True, ) booster = xgb.train( { 'objective': 'binary:logistic', 'eval_metric': ['error', 'rmse'], 'tree_method': 'hist' }, D_train, evals=[(D_train, 'Train'), (D_valid, 'Valid')], feval=tm.eval_error_metric, num_boost_round=rounds, callbacks=[early_stop], verbose_eval=False ) # No iteration can be made with min_delta == 100 assert booster.best_iteration == 0 assert booster.num_boosted_rounds() == 1 def test_early_stopping_skl(self): from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) early_stopping_rounds = 5 cls = xgb.XGBClassifier( early_stopping_rounds=early_stopping_rounds, eval_metric='error' ) cls.fit(X, y, eval_set=[(X, y)]) booster = cls.get_booster() dump = booster.get_dump(dump_format='json') assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 def test_early_stopping_custom_eval_skl(self): from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) early_stopping_rounds = 5 early_stop = 
xgb.callback.EarlyStopping(rounds=early_stopping_rounds) cls = xgb.XGBClassifier( eval_metric=tm.eval_error_metric_skl, callbacks=[early_stop] ) cls.fit(X, y, eval_set=[(X, y)]) booster = cls.get_booster() dump = booster.get_dump(dump_format='json') assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 def test_early_stopping_save_best_model(self): from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) n_estimators = 100 early_stopping_rounds = 5 early_stop = xgb.callback.EarlyStopping(rounds=early_stopping_rounds, save_best=True) cls = xgb.XGBClassifier( n_estimators=n_estimators, eval_metric=tm.eval_error_metric_skl, callbacks=[early_stop] ) cls.fit(X, y, eval_set=[(X, y)]) booster = cls.get_booster() dump = booster.get_dump(dump_format='json') assert len(dump) == booster.best_iteration + 1 early_stop = xgb.callback.EarlyStopping(rounds=early_stopping_rounds, save_best=True) cls = xgb.XGBClassifier( booster='gblinear', n_estimators=10, eval_metric=tm.eval_error_metric_skl ) with pytest.raises(ValueError): cls.fit(X, y, eval_set=[(X, y)], callbacks=[early_stop]) # No error early_stop = xgb.callback.EarlyStopping(rounds=early_stopping_rounds, save_best=False) xgb.XGBClassifier( booster='gblinear', n_estimators=10, eval_metric=tm.eval_error_metric_skl ).fit(X, y, eval_set=[(X, y)], callbacks=[early_stop]) def test_early_stopping_continuation(self): from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) cls = xgb.XGBClassifier(eval_metric=tm.eval_error_metric_skl) early_stopping_rounds = 5 early_stop = xgb.callback.EarlyStopping( rounds=early_stopping_rounds, save_best=True ) with pytest.warns(UserWarning): cls.fit(X, y, eval_set=[(X, y)], callbacks=[early_stop]) booster = cls.get_booster() assert booster.num_boosted_rounds() == booster.best_iteration + 1 with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, 'model.json') cls.save_model(path) cls = 
xgb.XGBClassifier() cls.load_model(path) assert cls._Booster is not None early_stopping_rounds = 3 cls.set_params(eval_metric=tm.eval_error_metric_skl) cls.fit(X, y, eval_set=[(X, y)], early_stopping_rounds=early_stopping_rounds) booster = cls.get_booster() assert booster.num_boosted_rounds() == \ booster.best_iteration + early_stopping_rounds + 1 def test_deprecated(self): from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) early_stopping_rounds = 5 early_stop = xgb.callback.EarlyStopping( rounds=early_stopping_rounds, save_best=True ) clf = xgb.XGBClassifier( eval_metric=tm.eval_error_metric_skl, callbacks=[early_stop] ) with pytest.raises(ValueError, match=r".*set_params.*"): clf.fit(X, y, eval_set=[(X, y)], callbacks=[early_stop]) def run_eta_decay(self, tree_method): """Test learning rate scheduler, used by both CPU and GPU tests.""" scheduler = xgb.callback.LearningRateScheduler dtrain, dtest = tm.load_agaricus(__file__) watchlist = [(dtest, 'eval'), (dtrain, 'train')] num_round = 4 warning_check = nullcontext() # learning_rates as a list # init eta with 0 to check whether learning_rates work param = {'max_depth': 2, 'eta': 0, 'verbosity': 0, 'objective': 'binary:logistic', 'eval_metric': 'error', 'tree_method': tree_method} evals_result = {} with warning_check: bst = xgb.train(param, dtrain, num_round, watchlist, callbacks=[scheduler([ 0.8, 0.7, 0.6, 0.5 ])], evals_result=evals_result) eval_errors_0 = list(map(float, evals_result['eval']['error'])) assert isinstance(bst, xgb.core.Booster) # validation error should decrease, if eta > 0 assert eval_errors_0[0] > eval_errors_0[-1] # init learning_rate with 0 to check whether learning_rates work param = {'max_depth': 2, 'learning_rate': 0, 'verbosity': 0, 'objective': 'binary:logistic', 'eval_metric': 'error', 'tree_method': tree_method} evals_result = {} with warning_check: bst = xgb.train(param, dtrain, num_round, watchlist, callbacks=[scheduler( [0.8, 0.7, 0.6, 0.5])], 
evals_result=evals_result) eval_errors_1 = list(map(float, evals_result['eval']['error'])) assert isinstance(bst, xgb.core.Booster) # validation error should decrease, if learning_rate > 0 assert eval_errors_1[0] > eval_errors_1[-1] # check if learning_rates override default value of eta/learning_rate param = { 'max_depth': 2, 'verbosity': 0, 'objective': 'binary:logistic', 'eval_metric': 'error', 'tree_method': tree_method } evals_result = {} with warning_check: bst = xgb.train(param, dtrain, num_round, watchlist, callbacks=[scheduler( [0, 0, 0, 0] )], evals_result=evals_result) eval_errors_2 = list(map(float, evals_result['eval']['error'])) assert isinstance(bst, xgb.core.Booster) # validation error should not decrease, if eta/learning_rate = 0 assert eval_errors_2[0] == eval_errors_2[-1] # learning_rates as a customized decay function def eta_decay(ithround, num_boost_round=num_round): return num_boost_round / (ithround + 1) evals_result = {} with warning_check: bst = xgb.train(param, dtrain, num_round, watchlist, callbacks=[ scheduler(eta_decay) ], evals_result=evals_result) eval_errors_3 = list(map(float, evals_result['eval']['error'])) assert isinstance(bst, xgb.core.Booster) assert eval_errors_3[0] == eval_errors_2[0] for i in range(1, len(eval_errors_0)): assert eval_errors_3[i] != eval_errors_2[i] with warning_check: xgb.cv(param, dtrain, num_round, callbacks=[scheduler(eta_decay)]) def run_eta_decay_leaf_output(self, tree_method: str, objective: str) -> None: # check decay has effect on leaf output. 
num_round = 4 scheduler = xgb.callback.LearningRateScheduler dtrain, dtest = tm.load_agaricus(__file__) watchlist = [(dtest, 'eval'), (dtrain, 'train')] param = { "max_depth": 2, "objective": objective, "eval_metric": "error", "tree_method": tree_method, } if objective == "reg:quantileerror": param["quantile_alpha"] = 0.3 def eta_decay_0(i): return num_round / (i + 1) bst0 = xgb.train( param, dtrain, num_round, watchlist, callbacks=[scheduler(eta_decay_0)], ) def eta_decay_1(i: int) -> float: if i > 1: return 5.0 return num_round / (i + 1) bst1 = xgb.train( param, dtrain, num_round, watchlist, callbacks=[scheduler(eta_decay_1)], ) bst_json0 = bst0.save_raw(raw_format="json") bst_json1 = bst1.save_raw(raw_format="json") j0 = json.loads(bst_json0) j1 = json.loads(bst_json1) tree_2th_0 = j0["learner"]["gradient_booster"]["model"]["trees"][2] tree_2th_1 = j1["learner"]["gradient_booster"]["model"]["trees"][2] assert tree_2th_0["base_weights"] == tree_2th_1["base_weights"] assert tree_2th_0["split_conditions"] == tree_2th_1["split_conditions"] tree_3th_0 = j0["learner"]["gradient_booster"]["model"]["trees"][3] tree_3th_1 = j1["learner"]["gradient_booster"]["model"]["trees"][3] assert tree_3th_0["base_weights"] != tree_3th_1["base_weights"] assert tree_3th_0["split_conditions"] != tree_3th_1["split_conditions"] @pytest.mark.parametrize("tree_method", ["hist", "approx", "approx"]) def test_eta_decay(self, tree_method): self.run_eta_decay(tree_method) @pytest.mark.parametrize( "tree_method,objective", [ ("hist", "binary:logistic"), ("hist", "reg:absoluteerror"), ("hist", "reg:quantileerror"), ("approx", "binary:logistic"), ("approx", "reg:absoluteerror"), ("approx", "reg:quantileerror"), ], ) def test_eta_decay_leaf_output(self, tree_method: str, objective: str) -> None: self.run_eta_decay_leaf_output(tree_method, objective) def test_check_point(self): from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) m = xgb.DMatrix(X, y) with 
tempfile.TemporaryDirectory() as tmpdir: check_point = xgb.callback.TrainingCheckPoint( directory=tmpdir, iterations=1, name="model" ) xgb.train( {"objective": "binary:logistic"}, m, num_boost_round=10, verbose_eval=False, callbacks=[check_point], ) for i in range(1, 10): assert os.path.exists(os.path.join(tmpdir, "model_" + str(i) + ".json")) check_point = xgb.callback.TrainingCheckPoint( directory=tmpdir, iterations=1, as_pickle=True, name="model" ) xgb.train( {"objective": "binary:logistic"}, m, num_boost_round=10, verbose_eval=False, callbacks=[check_point], ) for i in range(1, 10): assert os.path.exists(os.path.join(tmpdir, "model_" + str(i) + ".pkl")) def test_callback_list(self): X, y = tm.data.get_california_housing() m = xgb.DMatrix(X, y) callbacks = [xgb.callback.EarlyStopping(rounds=10)] for i in range(4): xgb.train( {"objective": "reg:squarederror", "eval_metric": "rmse"}, m, evals=[(m, "Train")], num_boost_round=1, verbose_eval=True, callbacks=callbacks, ) assert len(callbacks) == 1
19,424
39.72327
89
py
xgboost
xgboost-master/tests/python/test_tracker.py
import re import sys import numpy as np import pytest import xgboost as xgb from xgboost import RabitTracker, collective from xgboost import testing as tm if sys.platform.startswith("win"): pytest.skip("Skipping dask tests on Windows", allow_module_level=True) def test_rabit_tracker(): tracker = RabitTracker(host_ip="127.0.0.1", n_workers=1) tracker.start(1) with xgb.collective.CommunicatorContext(**tracker.worker_envs()): ret = xgb.collective.broadcast("test1234", 0) assert str(ret) == "test1234" def run_rabit_ops(client, n_workers): from xgboost.dask import CommunicatorContext, _get_dask_config, _get_rabit_args workers = tm.get_client_workers(client) rabit_args = client.sync(_get_rabit_args, len(workers), _get_dask_config(), client) assert not collective.is_distributed() n_workers_from_dask = len(workers) assert n_workers == n_workers_from_dask def local_test(worker_id): with CommunicatorContext(**rabit_args): a = 1 assert collective.is_distributed() a = np.array([a]) reduced = collective.allreduce(a, collective.Op.SUM) assert reduced[0] == n_workers worker_id = np.array([worker_id]) reduced = collective.allreduce(worker_id, collective.Op.MAX) assert reduced == n_workers - 1 return 1 futures = client.map(local_test, range(len(workers)), workers=workers) results = client.gather(futures) assert sum(results) == n_workers @pytest.mark.skipif(**tm.no_dask()) def test_rabit_ops(): from distributed import Client, LocalCluster n_workers = 3 with LocalCluster(n_workers=n_workers) as cluster: with Client(cluster) as client: run_rabit_ops(client, n_workers) @pytest.mark.skipif(**tm.no_ipv6()) @pytest.mark.skipif(**tm.no_dask()) def test_rabit_ops_ipv6(): import dask from distributed import Client, LocalCluster n_workers = 3 with dask.config.set({"xgboost.scheduler_address": "[::1]"}): with LocalCluster(n_workers=n_workers, host="[::1]") as cluster: with Client(cluster) as client: run_rabit_ops(client, n_workers) def test_rank_assignment() -> None: from distributed import 
Client, LocalCluster def local_test(worker_id): with xgb.dask.CommunicatorContext(**args) as ctx: task_id = ctx["DMLC_TASK_ID"] matched = re.search(".*-([0-9]).*", task_id) rank = xgb.collective.get_rank() # As long as the number of workers is lesser than 10, rank and worker id # should be the same assert rank == int(matched.group(1)) with LocalCluster(n_workers=8) as cluster: with Client(cluster) as client: workers = tm.get_client_workers(client) args = client.sync( xgb.dask._get_rabit_args, len(workers), None, client, ) futures = client.map(local_test, range(len(workers)), workers=workers) client.gather(futures)
3,138
31.030612
87
py
xgboost
xgboost-master/tests/python/test_ranking.py
import itertools import json import os import shutil from typing import Optional import numpy as np import pytest from hypothesis import given, note, settings from scipy.sparse import csr_matrix import xgboost from xgboost import testing as tm from xgboost.testing.data import RelDataCV, simulate_clicks, sort_ltr_samples from xgboost.testing.params import lambdarank_parameter_strategy def test_ndcg_custom_gain(): def ndcg_gain(y: np.ndarray) -> np.ndarray: return np.exp2(y.astype(np.float64)) - 1.0 X, y, q, w = tm.make_ltr(n_samples=1024, n_features=4, n_query_groups=3, max_rel=3) y_gain = ndcg_gain(y) byxgb = xgboost.XGBRanker(tree_method="hist", ndcg_exp_gain=True, n_estimators=10) byxgb.fit( X, y, qid=q, sample_weight=w, eval_set=[(X, y)], eval_qid=(q,), sample_weight_eval_set=(w,), verbose=True, ) byxgb_json = json.loads(byxgb.get_booster().save_raw(raw_format="json")) bynp = xgboost.XGBRanker(tree_method="hist", ndcg_exp_gain=False, n_estimators=10) bynp.fit( X, y_gain, qid=q, sample_weight=w, eval_set=[(X, y_gain)], eval_qid=(q,), sample_weight_eval_set=(w,), verbose=True, ) bynp_json = json.loads(bynp.get_booster().save_raw(raw_format="json")) # Remove the difference in parameter for comparison byxgb_json["learner"]["objective"]["lambdarank_param"]["ndcg_exp_gain"] = "0" assert byxgb.evals_result() == bynp.evals_result() assert byxgb_json == bynp_json def test_ranking_with_unweighted_data(): Xrow = np.array([1, 2, 6, 8, 11, 14, 16, 17]) Xcol = np.array([0, 0, 1, 1, 2, 2, 3, 3]) X = csr_matrix((np.ones(shape=8), (Xrow, Xcol)), shape=(20, 4)) y = np.array([0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0]) group = np.array([5, 5, 5, 5], dtype=np.uint) dtrain = xgboost.DMatrix(X, label=y) dtrain.set_group(group) params = {'eta': 1, 'tree_method': 'exact', 'objective': 'rank:pairwise', 'eval_metric': ['auc', 'aucpr'], 'max_depth': 1} evals_result = {} bst = xgboost.train(params, dtrain, 10, evals=[(dtrain, 
'train')], evals_result=evals_result) auc_rec = evals_result['train']['auc'] assert all(p <= q for p, q in zip(auc_rec, auc_rec[1:])) auc_rec = evals_result['train']['aucpr'] assert all(p <= q for p, q in zip(auc_rec, auc_rec[1:])) def test_ranking_with_weighted_data(): Xrow = np.array([1, 2, 6, 8, 11, 14, 16, 17]) Xcol = np.array([0, 0, 1, 1, 2, 2, 3, 3]) X = csr_matrix((np.ones(shape=8), (Xrow, Xcol)), shape=(20, 4)) y = np.array([0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0]) weights = np.array([1.0, 2.0, 3.0, 4.0]) group = np.array([5, 5, 5, 5], dtype=np.uint) dtrain = xgboost.DMatrix(X, label=y, weight=weights) dtrain.set_group(group) params = {'eta': 1, 'tree_method': 'exact', 'objective': 'rank:pairwise', 'eval_metric': ['auc', 'aucpr'], 'max_depth': 1} evals_result = {} bst = xgboost.train(params, dtrain, 10, evals=[(dtrain, 'train')], evals_result=evals_result) auc_rec = evals_result['train']['auc'] assert all(p <= q for p, q in zip(auc_rec, auc_rec[1:])) auc_rec = evals_result['train']['aucpr'] assert all(p <= q for p, q in zip(auc_rec, auc_rec[1:])) for i in range(1, 11): pred = bst.predict(dtrain, iteration_range=(0, i)) # is_sorted[i]: is i-th group correctly sorted by the ranking predictor? is_sorted = [] for k in range(0, 20, 5): ind = np.argsort(-pred[k:k+5]) z = y[ind+k] is_sorted.append(all(i >= j for i, j in zip(z, z[1:]))) # Since we give weights 1, 2, 3, 4 to the four query groups, # the ranking predictor will first try to correctly sort the last query group # before correctly sorting other groups. 
assert all(p <= q for p, q in zip(is_sorted, is_sorted[1:])) def test_error_msg() -> None: X, y, qid, w = tm.make_ltr(10, 2, 2, 2) ranker = xgboost.XGBRanker() with pytest.raises(ValueError, match=r"equal to the number of query groups"): ranker.fit(X, y, qid=qid, sample_weight=y) @given(lambdarank_parameter_strategy) @settings(deadline=None, print_blob=True) def test_lambdarank_parameters(params): if params["objective"] == "rank:map": rel = 1 else: rel = 4 X, y, q, w = tm.make_ltr(4096, 3, 13, rel) ranker = xgboost.XGBRanker(tree_method="hist", n_estimators=64, **params) ranker.fit(X, y, qid=q, sample_weight=w, eval_set=[(X, y)], eval_qid=[q]) for k, v in ranker.evals_result()["validation_0"].items(): note(v) assert v[-1] >= v[0] assert ranker.n_features_in_ == 3 @pytest.mark.skipif(**tm.no_pandas()) @pytest.mark.skipif(**tm.no_sklearn()) def test_unbiased() -> None: import pandas as pd from sklearn.model_selection import train_test_split X, y, q, w = tm.make_ltr(8192, 2, n_query_groups=6, max_rel=4) X, Xe, y, ye, q, qe = train_test_split(X, y, q, test_size=0.2, random_state=3) X = csr_matrix(X) Xe = csr_matrix(Xe) data = RelDataCV((X, y, q), (Xe, ye, qe), max_rel=4) train, _ = simulate_clicks(data) x, c, y, q = sort_ltr_samples( train.X, train.y, train.qid, train.click, train.pos ) df: Optional[pd.DataFrame] = None class Position(xgboost.callback.TrainingCallback): def after_training(self, model) -> bool: nonlocal df config = json.loads(model.save_config()) ti_plus = np.array(config["learner"]["objective"]["ti+"]) tj_minus = np.array(config["learner"]["objective"]["tj-"]) df = pd.DataFrame({"ti+": ti_plus, "tj-": tj_minus}) return model ltr = xgboost.XGBRanker( n_estimators=8, tree_method="hist", lambdarank_unbiased=True, lambdarank_num_pair_per_sample=12, lambdarank_pair_method="topk", objective="rank:ndcg", callbacks=[Position()], boost_from_average=0, ) ltr.fit(x, c, qid=q, eval_set=[(x, c)], eval_qid=[q]) assert df is not None # normalized 
np.testing.assert_allclose(df["ti+"].iloc[0], 1.0) np.testing.assert_allclose(df["tj-"].iloc[0], 1.0) # less biased on low ranks. assert df["ti+"].iloc[-1] < df["ti+"].iloc[0] class TestRanking: @classmethod def setup_class(cls): """ Download and setup the test fixtures """ cls.dpath = 'demo/rank/' (x_train, y_train, qid_train, x_test, y_test, qid_test, x_valid, y_valid, qid_valid) = tm.data.get_mq2008(cls.dpath) # instantiate the matrices cls.dtrain = xgboost.DMatrix(x_train, y_train) cls.dvalid = xgboost.DMatrix(x_valid, y_valid) cls.dtest = xgboost.DMatrix(x_test, y_test) # set the group counts from the query IDs cls.dtrain.set_group([len(list(items)) for _key, items in itertools.groupby(qid_train)]) cls.dtest.set_group([len(list(items)) for _key, items in itertools.groupby(qid_test)]) cls.dvalid.set_group([len(list(items)) for _key, items in itertools.groupby(qid_valid)]) # save the query IDs for testing cls.qid_train = qid_train cls.qid_test = qid_test cls.qid_valid = qid_valid # model training parameters cls.params = {'objective': 'rank:pairwise', 'booster': 'gbtree', 'eval_metric': ['ndcg'] } @classmethod def teardown_class(cls): """ Cleanup test artifacts from download and unpacking :return: """ zip_f = cls.dpath + "MQ2008.zip" if os.path.exists(zip_f): os.remove(zip_f) directory = cls.dpath + "MQ2008" if os.path.exists(directory): shutil.rmtree(directory) def test_training(self): """ Train an XGBoost ranking model """ # specify validations set to watch performance watchlist = [(self.dtest, 'eval'), (self.dtrain, 'train')] bst = xgboost.train(self.params, self.dtrain, num_boost_round=2500, early_stopping_rounds=10, evals=watchlist) assert bst.best_score > 0.98 def test_cv(self): """ Test cross-validation with a group specified """ cv = xgboost.cv(self.params, self.dtrain, num_boost_round=2500, early_stopping_rounds=10, nfold=10, as_pandas=False) assert isinstance(cv, dict) assert set(cv.keys()) == { 'test-ndcg-mean', 'train-ndcg-mean', 'test-ndcg-std', 
'train-ndcg-std' }, "CV results dict key mismatch." def test_cv_no_shuffle(self): """ Test cross-validation with a group specified """ cv = xgboost.cv(self.params, self.dtrain, num_boost_round=2500, early_stopping_rounds=10, shuffle=False, nfold=10, as_pandas=False) assert isinstance(cv, dict) assert len(cv) == 4 def test_get_group(self): """ Retrieve the group number from the dmatrix """ # test the new getter self.dtrain.get_uint_info('group_ptr') for d, qid in [(self.dtrain, self.qid_train), (self.dvalid, self.qid_valid), (self.dtest, self.qid_test)]: # size of each group group_sizes = np.array([len(list(items)) for _key, items in itertools.groupby(qid)]) # indexes of group boundaries group_limits = d.get_uint_info('group_ptr') assert len(group_limits) == len(group_sizes)+1 assert np.array_equal(np.diff(group_limits), group_sizes) assert np.array_equal( group_sizes, np.diff(d.get_uint_info('group_ptr'))) assert np.array_equal(group_sizes, np.diff(d.get_uint_info('group_ptr'))) assert np.array_equal(group_limits, d.get_uint_info('group_ptr'))
10,581
35.743056
87
py
xgboost
xgboost-master/tests/python/test_model_compatibility.py
import copy import json import os import urllib.request import zipfile import generate_models as gm import pytest import xgboost from xgboost import testing as tm def run_model_param_check(config): assert config['learner']['learner_model_param']['num_feature'] == str(4) assert config['learner']['learner_train_param']['booster'] == 'gbtree' def run_booster_check(booster, name): config = json.loads(booster.save_config()) run_model_param_check(config) if name.find('cls') != -1: assert (len(booster.get_dump()) == gm.kForests * gm.kRounds * gm.kClasses) assert float( config['learner']['learner_model_param']['base_score']) == 0.5 assert config['learner']['learner_train_param'][ 'objective'] == 'multi:softmax' elif name.find('logitraw') != -1: assert len(booster.get_dump()) == gm.kForests * gm.kRounds assert config['learner']['learner_model_param']['num_class'] == str(0) assert config['learner']['learner_train_param']['objective'] == 'binary:logitraw' elif name.find('logit') != -1: assert len(booster.get_dump()) == gm.kForests * gm.kRounds assert config['learner']['learner_model_param']['num_class'] == str(0) assert config['learner']['learner_train_param'][ 'objective'] == 'binary:logistic' elif name.find('ltr') != -1: assert config['learner']['learner_train_param'][ 'objective'] == 'rank:ndcg' else: assert name.find('reg') != -1 assert len(booster.get_dump()) == gm.kForests * gm.kRounds assert float( config['learner']['learner_model_param']['base_score']) == 0.5 assert config['learner']['learner_train_param'][ 'objective'] == 'reg:squarederror' def run_scikit_model_check(name, path): if name.find('reg') != -1: reg = xgboost.XGBRegressor() reg.load_model(path) config = json.loads(reg.get_booster().save_config()) if name.find('0.90') != -1: assert config['learner']['learner_train_param'][ 'objective'] == 'reg:linear' else: assert config['learner']['learner_train_param'][ 'objective'] == 'reg:squarederror' assert (len(reg.get_booster().get_dump()) == gm.kRounds * 
gm.kForests) run_model_param_check(config) elif name.find('cls') != -1: cls = xgboost.XGBClassifier() cls.load_model(path) if name.find('0.90') == -1: assert len(cls.classes_) == gm.kClasses assert cls.n_classes_ == gm.kClasses assert (len(cls.get_booster().get_dump()) == gm.kRounds * gm.kForests * gm.kClasses), path config = json.loads(cls.get_booster().save_config()) assert config['learner']['learner_train_param'][ 'objective'] == 'multi:softprob', path run_model_param_check(config) elif name.find('ltr') != -1: ltr = xgboost.XGBRanker() ltr.load_model(path) assert (len(ltr.get_booster().get_dump()) == gm.kRounds * gm.kForests) config = json.loads(ltr.get_booster().save_config()) assert config['learner']['learner_train_param'][ 'objective'] == 'rank:ndcg' run_model_param_check(config) elif name.find('logitraw') != -1: logit = xgboost.XGBClassifier() logit.load_model(path) assert (len(logit.get_booster().get_dump()) == gm.kRounds * gm.kForests) config = json.loads(logit.get_booster().save_config()) assert config['learner']['learner_train_param']['objective'] == 'binary:logitraw' elif name.find('logit') != -1: logit = xgboost.XGBClassifier() logit.load_model(path) assert (len(logit.get_booster().get_dump()) == gm.kRounds * gm.kForests) config = json.loads(logit.get_booster().save_config()) assert config['learner']['learner_train_param'][ 'objective'] == 'binary:logistic' else: assert False @pytest.mark.skipif(**tm.no_sklearn()) def test_model_compatibility(): """Test model compatibility, can only be run on CI as others don't have the credentials. 
""" path = os.path.dirname(os.path.abspath(__file__)) path = os.path.join(path, "models") if not os.path.exists(path): zip_path, _ = urllib.request.urlretrieve( "https://xgboost-ci-jenkins-artifacts.s3-us-west-2" + ".amazonaws.com/xgboost_model_compatibility_test.zip" ) with zipfile.ZipFile(zip_path, "r") as z: z.extractall(path) models = [ os.path.join(root, f) for root, subdir, files in os.walk(path) for f in files if f != "version" ] assert models for path in models: name = os.path.basename(path) if name.startswith("xgboost-"): booster = xgboost.Booster(model_file=path) run_booster_check(booster, name) # Do full serialization. booster = copy.copy(booster) run_booster_check(booster, name) elif name.startswith("xgboost_scikit"): run_scikit_model_check(name, path) else: assert False
5,321
36.744681
89
py
xgboost
xgboost-master/tests/python/test_with_sklearn.py
import json import os import pickle import random import tempfile import warnings from typing import Callable, Optional import numpy as np import pytest from sklearn.utils.estimator_checks import parametrize_with_checks import xgboost as xgb from xgboost import testing as tm from xgboost.testing.ranking import run_ranking_qid_df from xgboost.testing.shared import get_feature_weights, validate_data_initialization from xgboost.testing.updater import get_basescore rng = np.random.RandomState(1994) pytestmark = [pytest.mark.skipif(**tm.no_sklearn()), tm.timeout(30)] def test_binary_classification(): from sklearn.datasets import load_digits from sklearn.model_selection import KFold digits = load_digits(n_class=2) y = digits['target'] X = digits['data'] kf = KFold(n_splits=2, shuffle=True, random_state=rng) for cls in (xgb.XGBClassifier, xgb.XGBRFClassifier): for train_index, test_index in kf.split(X, y): clf = cls(random_state=42) xgb_model = clf.fit(X[train_index], y[train_index], eval_metric=['auc', 'logloss']) preds = xgb_model.predict(X[test_index]) labels = y[test_index] err = sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds)) assert err < 0.1 @pytest.mark.parametrize("objective", ["multi:softmax", "multi:softprob"]) def test_multiclass_classification(objective): from sklearn.datasets import load_iris from sklearn.model_selection import KFold def check_pred(preds, labels, output_margin): if output_margin: err = sum( 1 for i in range(len(preds)) if preds[i].argmax() != labels[i] ) / float(len(preds)) else: err = sum(1 for i in range(len(preds)) if preds[i] != labels[i]) / float( len(preds) ) assert err < 0.4 X, y = load_iris(return_X_y=True) kf = KFold(n_splits=2, shuffle=True, random_state=rng) for train_index, test_index in kf.split(X, y): xgb_model = xgb.XGBClassifier(objective=objective).fit( X[train_index], y[train_index] ) assert xgb_model.get_booster().num_boosted_rounds() == 100 preds = xgb_model.predict(X[test_index]) 
# test other params in XGBClassifier().fit preds2 = xgb_model.predict( X[test_index], output_margin=True, iteration_range=(0, 1) ) preds3 = xgb_model.predict( X[test_index], output_margin=True, iteration_range=None ) preds4 = xgb_model.predict( X[test_index], output_margin=False, iteration_range=(0, 1) ) labels = y[test_index] check_pred(preds, labels, output_margin=False) check_pred(preds2, labels, output_margin=True) check_pred(preds3, labels, output_margin=True) check_pred(preds4, labels, output_margin=False) cls = xgb.XGBClassifier(n_estimators=4).fit(X, y) assert cls.n_classes_ == 3 proba = cls.predict_proba(X) assert proba.shape[0] == X.shape[0] assert proba.shape[1] == cls.n_classes_ # custom objective, the default is multi:softprob so no transformation is required. cls = xgb.XGBClassifier(n_estimators=4, objective=tm.softprob_obj(3)).fit(X, y) proba = cls.predict_proba(X) assert proba.shape[0] == X.shape[0] assert proba.shape[1] == cls.n_classes_ def test_best_iteration(): from sklearn.datasets import load_iris X, y = load_iris(return_X_y=True) def train(booster: str, forest: Optional[int]) -> None: rounds = 4 cls = xgb.XGBClassifier( n_estimators=rounds, num_parallel_tree=forest, booster=booster ).fit( X, y, eval_set=[(X, y)], early_stopping_rounds=3 ) assert cls.best_iteration == rounds - 1 # best_iteration is used by default, assert that under gblinear it's # automatically ignored due to being 0. 
cls.predict(X) num_parallel_tree = 4 train('gbtree', num_parallel_tree) train('dart', num_parallel_tree) train('gblinear', None) def test_ranking(): # generate random data x_train = np.random.rand(1000, 10) y_train = np.random.randint(5, size=1000) train_group = np.repeat(50, 20) x_valid = np.random.rand(200, 10) y_valid = np.random.randint(5, size=200) valid_group = np.repeat(50, 4) x_test = np.random.rand(100, 10) params = { "tree_method": "exact", "objective": "rank:pairwise", "learning_rate": 0.1, "gamma": 1.0, "min_child_weight": 0.1, "max_depth": 6, "n_estimators": 4, } model = xgb.sklearn.XGBRanker(**params) model.fit( x_train, y_train, group=train_group, eval_set=[(x_valid, y_valid)], eval_group=[valid_group], ) assert model.evals_result() pred = model.predict(x_test) train_data = xgb.DMatrix(x_train, y_train) valid_data = xgb.DMatrix(x_valid, y_valid) test_data = xgb.DMatrix(x_test) train_data.set_group(train_group) assert train_data.get_label().shape[0] == x_train.shape[0] valid_data.set_group(valid_group) params_orig = { "tree_method": "exact", "objective": "rank:pairwise", "eta": 0.1, "gamma": 1.0, "min_child_weight": 0.1, "max_depth": 6, } xgb_model_orig = xgb.train( params_orig, train_data, num_boost_round=4, evals=[(valid_data, "validation")] ) pred_orig = xgb_model_orig.predict(test_data) np.testing.assert_almost_equal(pred, pred_orig) def test_ranking_metric() -> None: from sklearn.metrics import roc_auc_score X, y, qid, w = tm.make_ltr(512, 4, 3, 1) # use auc for test as ndcg_score in sklearn works only on label gain instead of exp # gain. # note that the auc in sklearn is different from the one in XGBoost. The one in # sklearn compares the number of mis-classified docs, while the one in xgboost # compares the number of mis-classified pairs. 
ltr = xgb.XGBRanker( eval_metric=roc_auc_score, n_estimators=10, tree_method="hist", max_depth=2, objective="rank:pairwise", ) ltr.fit( X, y, qid=qid, sample_weight=w, eval_set=[(X, y)], eval_qid=[qid], sample_weight_eval_set=[w], verbose=True, ) results = ltr.evals_result() assert results["validation_0"]["roc_auc_score"][-1] > 0.6 @pytest.mark.skipif(**tm.no_pandas()) def test_ranking_qid_df(): import pandas as pd run_ranking_qid_df(pd, "hist") def test_stacking_regression(): from sklearn.datasets import load_diabetes from sklearn.ensemble import RandomForestRegressor, StackingRegressor from sklearn.linear_model import RidgeCV from sklearn.model_selection import train_test_split X, y = load_diabetes(return_X_y=True) estimators = [ ('gbm', xgb.sklearn.XGBRegressor(objective='reg:squarederror')), ('lr', RidgeCV()) ] reg = StackingRegressor( estimators=estimators, final_estimator=RandomForestRegressor(n_estimators=10, random_state=42) ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) reg.fit(X_train, y_train).score(X_test, y_test) def test_stacking_classification(): from sklearn.datasets import load_iris from sklearn.ensemble import StackingClassifier from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import LinearSVC X, y = load_iris(return_X_y=True) estimators = [ ('gbm', xgb.sklearn.XGBClassifier()), ('svr', make_pipeline(StandardScaler(), LinearSVC(random_state=42))) ] clf = StackingClassifier( estimators=estimators, final_estimator=LogisticRegression() ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) clf.fit(X_train, y_train).score(X_test, y_test) @pytest.mark.skipif(**tm.no_pandas()) def test_feature_importances_weight(): from sklearn.datasets import load_digits digits = load_digits(n_class=2) y = digits["target"] X = digits["data"] xgb_model = 
xgb.XGBClassifier( random_state=0, tree_method="exact", learning_rate=0.1, importance_type="weight", base_score=0.5, ).fit(X, y) exp = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.00833333, 0., 0., 0., 0., 0., 0., 0., 0., 0.025, 0.14166667, 0., 0., 0., 0., 0., 0., 0.00833333, 0.25833333, 0., 0., 0., 0., 0.03333334, 0.03333334, 0., 0.32499999, 0., 0., 0., 0., 0.05, 0.06666667, 0., 0., 0., 0., 0., 0., 0., 0.04166667, 0., 0., 0., 0., 0., 0., 0., 0.00833333, 0., 0., 0., 0., 0.], dtype=np.float32) np.testing.assert_almost_equal(xgb_model.feature_importances_, exp) # numeric columns import pandas as pd y = pd.Series(digits['target']) X = pd.DataFrame(digits['data']) xgb_model = xgb.XGBClassifier( random_state=0, tree_method="exact", learning_rate=0.1, base_score=.5, importance_type="weight" ).fit(X, y) np.testing.assert_almost_equal(xgb_model.feature_importances_, exp) xgb_model = xgb.XGBClassifier( random_state=0, tree_method="exact", learning_rate=0.1, importance_type="weight", base_score=.5, ).fit(X, y) np.testing.assert_almost_equal(xgb_model.feature_importances_, exp) with pytest.raises(ValueError): xgb_model.set_params(importance_type="foo") xgb_model.feature_importances_ X, y = load_digits(n_class=3, return_X_y=True) cls = xgb.XGBClassifier(booster="gblinear", n_estimators=4) cls.fit(X, y) assert cls.feature_importances_.shape[0] == X.shape[1] assert cls.feature_importances_.shape[1] == 3 with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, "model.json") cls.save_model(path) with open(path, "r") as fd: model = json.load(fd) weights = np.array( model["learner"]["gradient_booster"]["model"]["weights"] ).reshape((cls.n_features_in_ + 1, 3)) weights = weights[:-1, ...] 
np.testing.assert_allclose( weights / weights.sum(), cls.feature_importances_, rtol=1e-6 ) with pytest.raises(ValueError): cls.set_params(importance_type="cover") cls.feature_importances_ @pytest.mark.skipif(**tm.no_pandas()) def test_feature_importances_gain(): from sklearn.datasets import load_digits digits = load_digits(n_class=2) y = digits['target'] X = digits['data'] xgb_model = xgb.XGBClassifier( random_state=0, tree_method="exact", learning_rate=0.1, importance_type="gain", base_score=0.5, ).fit(X, y) exp = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.00326159, 0., 0., 0., 0., 0., 0., 0., 0., 0.00297238, 0.00988034, 0., 0., 0., 0., 0., 0., 0.03512521, 0.41123885, 0., 0., 0., 0., 0.01326332, 0.00160674, 0., 0.4206952, 0., 0., 0., 0., 0.00616747, 0.01237546, 0., 0., 0., 0., 0., 0., 0., 0.08240705, 0., 0., 0., 0., 0., 0., 0., 0.00100649, 0., 0., 0., 0., 0.], dtype=np.float32) np.testing.assert_almost_equal(xgb_model.feature_importances_, exp) # numeric columns import pandas as pd y = pd.Series(digits['target']) X = pd.DataFrame(digits['data']) xgb_model = xgb.XGBClassifier( random_state=0, tree_method="exact", learning_rate=0.1, importance_type="gain", base_score=0.5, ).fit(X, y) np.testing.assert_almost_equal(xgb_model.feature_importances_, exp) xgb_model = xgb.XGBClassifier( random_state=0, tree_method="exact", learning_rate=0.1, importance_type="gain", base_score=0.5, ).fit(X, y) np.testing.assert_almost_equal(xgb_model.feature_importances_, exp) # no split can be found cls = xgb.XGBClassifier(min_child_weight=1000, tree_method="hist", n_estimators=1) cls.fit(X, y) assert np.all(cls.feature_importances_ == 0) def test_select_feature(): from sklearn.datasets import load_digits from sklearn.feature_selection import SelectFromModel digits = load_digits(n_class=2) y = digits['target'] X = digits['data'] cls = xgb.XGBClassifier() cls.fit(X, y) selector = SelectFromModel(cls, prefit=True, max_features=1) X_selected = selector.transform(X) assert 
X_selected.shape[1] == 1 def test_num_parallel_tree(): from sklearn.datasets import load_diabetes reg = xgb.XGBRegressor(n_estimators=4, num_parallel_tree=4, tree_method="hist") X, y = load_diabetes(return_X_y=True) bst = reg.fit(X=X, y=y) dump = bst.get_booster().get_dump(dump_format="json") assert len(dump) == 16 reg = xgb.XGBRFRegressor(n_estimators=4) bst = reg.fit(X=X, y=y) dump = bst.get_booster().get_dump(dump_format="json") assert len(dump) == 4 config = json.loads(bst.get_booster().save_config()) assert ( int( config["learner"]["gradient_booster"]["gbtree_model_param"][ "num_parallel_tree" ] ) == 4 ) def test_regression(): from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_squared_error from sklearn.model_selection import KFold X, y = fetch_california_housing(return_X_y=True) kf = KFold(n_splits=2, shuffle=True, random_state=rng) for train_index, test_index in kf.split(X, y): xgb_model = xgb.XGBRegressor().fit(X[train_index], y[train_index]) preds = xgb_model.predict(X[test_index]) # test other params in XGBRegressor().fit preds2 = xgb_model.predict( X[test_index], output_margin=True, iteration_range=(0, 3) ) preds3 = xgb_model.predict( X[test_index], output_margin=True, iteration_range=None ) preds4 = xgb_model.predict( X[test_index], output_margin=False, iteration_range=(0, 3) ) labels = y[test_index] assert mean_squared_error(preds, labels) < 25 assert mean_squared_error(preds2, labels) < 350 assert mean_squared_error(preds3, labels) < 25 assert mean_squared_error(preds4, labels) < 350 with pytest.raises(AttributeError, match="feature_names_in_"): xgb_model.feature_names_in_ def run_housing_rf_regression(tree_method): from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_squared_error from sklearn.model_selection import KFold X, y = fetch_california_housing(return_X_y=True) kf = KFold(n_splits=2, shuffle=True, random_state=rng) for train_index, test_index in kf.split(X, y): xgb_model = 
xgb.XGBRFRegressor(random_state=42, tree_method=tree_method).fit( X[train_index], y[train_index] ) preds = xgb_model.predict(X[test_index]) labels = y[test_index] assert mean_squared_error(preds, labels) < 35 rfreg = xgb.XGBRFRegressor() with pytest.raises(NotImplementedError): rfreg.fit(X, y, early_stopping_rounds=10) def test_rf_regression(): run_housing_rf_regression("hist") @pytest.mark.parametrize("tree_method", ["exact", "hist", "approx"]) def test_parameter_tuning(tree_method: str) -> None: from sklearn.datasets import fetch_california_housing from sklearn.model_selection import GridSearchCV X, y = fetch_california_housing(return_X_y=True) reg = xgb.XGBRegressor(learning_rate=0.1, tree_method=tree_method) grid_cv = GridSearchCV( reg, {"max_depth": [2, 4], "n_estimators": [50, 200]}, cv=2, verbose=1 ) grid_cv.fit(X, y) assert grid_cv.best_score_ < 0.7 assert grid_cv.best_params_ == { "n_estimators": 200, "max_depth": 4 if tree_method == "exact" else 2, } def test_regression_with_custom_objective(): from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_squared_error from sklearn.model_selection import KFold def objective_ls(y_true, y_pred): grad = (y_pred - y_true) hess = np.ones(len(y_true)) return grad, hess X, y = fetch_california_housing(return_X_y=True) kf = KFold(n_splits=2, shuffle=True, random_state=rng) for train_index, test_index in kf.split(X, y): xgb_model = xgb.XGBRegressor(objective=objective_ls).fit( X[train_index], y[train_index] ) preds = xgb_model.predict(X[test_index]) labels = y[test_index] assert mean_squared_error(preds, labels) < 25 # Test that the custom objective function is actually used class XGBCustomObjectiveException(Exception): pass def dummy_objective(y_true, y_pred): raise XGBCustomObjectiveException() xgb_model = xgb.XGBRegressor(objective=dummy_objective) np.testing.assert_raises(XGBCustomObjectiveException, xgb_model.fit, X, y) def test_classification_with_custom_objective(): from 
sklearn.datasets import load_digits from sklearn.model_selection import KFold def logregobj(y_true, y_pred): y_pred = 1.0 / (1.0 + np.exp(-y_pred)) grad = y_pred - y_true hess = y_pred * (1.0 - y_pred) return grad, hess digits = load_digits(n_class=2) y = digits['target'] X = digits['data'] kf = KFold(n_splits=2, shuffle=True, random_state=rng) for train_index, test_index in kf.split(X, y): xgb_model = xgb.XGBClassifier(objective=logregobj) xgb_model.fit(X[train_index], y[train_index]) preds = xgb_model.predict(X[test_index]) labels = y[test_index] err = sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds)) assert err < 0.1 # Test that the custom objective function is actually used class XGBCustomObjectiveException(Exception): pass def dummy_objective(y_true, y_preds): raise XGBCustomObjectiveException() xgb_model = xgb.XGBClassifier(objective=dummy_objective) np.testing.assert_raises( XGBCustomObjectiveException, xgb_model.fit, X, y ) cls = xgb.XGBClassifier(n_estimators=1) cls.fit(X, y) is_called = [False] def wrapped(y, p): is_called[0] = True return logregobj(y, p) cls.set_params(objective=wrapped) cls.predict(X) # no throw cls.fit(X, y) assert is_called[0] def run_sklearn_api(booster, error, n_est): from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target, train_size=120, test_size=0.2) classifier = xgb.XGBClassifier(booster=booster, n_estimators=n_est) classifier.fit(tr_d, tr_l) preds = classifier.predict(te_d) labels = te_l err = sum([1 for p, l in zip(preds, labels) if p != l]) * 1.0 / len(te_l) assert err < error def test_sklearn_api(): run_sklearn_api("gbtree", 0.2, 10) run_sklearn_api("gblinear", 0.5, 100) @pytest.mark.skipif(**tm.no_matplotlib()) @pytest.mark.skipif(**tm.no_graphviz()) def test_sklearn_plotting(): from sklearn.datasets import load_iris iris = load_iris() classifier = 
xgb.XGBClassifier() classifier.fit(iris.data, iris.target) import matplotlib matplotlib.use('Agg') from graphviz import Source from matplotlib.axes import Axes ax = xgb.plot_importance(classifier) assert isinstance(ax, Axes) assert ax.get_title() == 'Feature importance' assert ax.get_xlabel() == 'F score' assert ax.get_ylabel() == 'Features' assert len(ax.patches) == 4 g = xgb.to_graphviz(classifier, num_trees=0) assert isinstance(g, Source) ax = xgb.plot_tree(classifier, num_trees=0) assert isinstance(ax, Axes) @pytest.mark.skipif(**tm.no_pandas()) def test_sklearn_nfolds_cv(): from sklearn.datasets import load_digits from sklearn.model_selection import StratifiedKFold digits = load_digits(n_class=3) X = digits['data'] y = digits['target'] dm = xgb.DMatrix(X, label=y) params = { 'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'multi:softprob', 'num_class': 3 } seed = 2016 nfolds = 5 skf = StratifiedKFold(n_splits=nfolds, shuffle=True, random_state=seed) cv1 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds, seed=seed, as_pandas=True) cv2 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds, folds=skf, seed=seed, as_pandas=True) cv3 = xgb.cv(params, dm, num_boost_round=10, nfold=nfolds, stratified=True, seed=seed, as_pandas=True) assert cv1.shape[0] == cv2.shape[0] and cv2.shape[0] == cv3.shape[0] assert cv2.iloc[-1, 0] == cv3.iloc[-1, 0] @pytest.mark.skipif(**tm.no_pandas()) def test_split_value_histograms(): from sklearn.datasets import load_digits digits_2class = load_digits(n_class=2) X = digits_2class["data"] y = digits_2class["target"] dm = xgb.DMatrix(X, label=y) params = { "max_depth": 6, "eta": 0.01, "verbosity": 0, "objective": "binary:logistic", "base_score": 0.5, } gbdt = xgb.train(params, dm, num_boost_round=10) assert gbdt.get_split_value_histogram("not_there", as_pandas=True).shape[0] == 0 assert gbdt.get_split_value_histogram("not_there", as_pandas=False).shape[0] == 0 assert gbdt.get_split_value_histogram("f28", bins=0).shape[0] == 1 
assert gbdt.get_split_value_histogram("f28", bins=1).shape[0] == 1 assert gbdt.get_split_value_histogram("f28", bins=2).shape[0] == 2 assert gbdt.get_split_value_histogram("f28", bins=5).shape[0] == 2 assert gbdt.get_split_value_histogram("f28", bins=None).shape[0] == 2 def test_sklearn_random_state(): clf = xgb.XGBClassifier(random_state=402) assert clf.get_xgb_params()['random_state'] == 402 clf = xgb.XGBClassifier(random_state=401) assert clf.get_xgb_params()['random_state'] == 401 random_state = np.random.RandomState(seed=403) clf = xgb.XGBClassifier(random_state=random_state) assert isinstance(clf.get_xgb_params()['random_state'], int) def test_sklearn_n_jobs(): clf = xgb.XGBClassifier(n_jobs=1) assert clf.get_xgb_params()['n_jobs'] == 1 clf = xgb.XGBClassifier(n_jobs=2) assert clf.get_xgb_params()['n_jobs'] == 2 def test_parameters_access(): from sklearn import datasets params = {"updater": "grow_gpu_hist", "subsample": 0.5, "n_jobs": -1} clf = xgb.XGBClassifier(n_estimators=1000, **params) assert clf.get_params()["updater"] == "grow_gpu_hist" assert clf.get_params()["subsample"] == 0.5 assert clf.get_params()["n_estimators"] == 1000 clf = xgb.XGBClassifier(n_estimators=1, nthread=4) X, y = datasets.load_iris(return_X_y=True) clf.fit(X, y) config = json.loads(clf.get_booster().save_config()) assert int(config["learner"]["generic_param"]["nthread"]) == 4 clf.set_params(nthread=16) config = json.loads(clf.get_booster().save_config()) assert int(config["learner"]["generic_param"]["nthread"]) == 16 clf.predict(X) config = json.loads(clf.get_booster().save_config()) assert int(config["learner"]["generic_param"]["nthread"]) == 16 clf = xgb.XGBClassifier(n_estimators=2) assert clf.tree_method is None assert clf.get_params()["tree_method"] is None clf.fit(X, y) assert clf.get_params()["tree_method"] is None def save_load(clf: xgb.XGBClassifier) -> xgb.XGBClassifier: with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, "model.json") 
clf.save_model(path) clf = xgb.XGBClassifier() clf.load_model(path) return clf def get_tm(clf: xgb.XGBClassifier) -> str: tm = json.loads(clf.get_booster().save_config())["learner"]["gradient_booster"][ "gbtree_train_param" ]["tree_method"] return tm assert get_tm(clf) == "auto" # Kept as auto, immutable since 2.0 clf = pickle.loads(pickle.dumps(clf)) assert clf.tree_method is None assert clf.n_estimators == 2 assert clf.get_params()["tree_method"] is None assert clf.get_params()["n_estimators"] == 2 assert get_tm(clf) == "auto" # preserved for pickle clf = save_load(clf) assert clf.tree_method is None assert clf.n_estimators is None assert clf.get_params()["tree_method"] is None assert clf.get_params()["n_estimators"] is None assert get_tm(clf) == "auto" # discarded for save/load_model clf.set_params(tree_method="hist") assert clf.get_params()["tree_method"] == "hist" clf = pickle.loads(pickle.dumps(clf)) assert clf.get_params()["tree_method"] == "hist" clf = save_load(clf) assert clf.get_params()["tree_method"] is None def test_kwargs_error(): params = {'updater': 'grow_gpu_hist', 'subsample': .5, 'n_jobs': -1} with pytest.raises(TypeError): clf = xgb.XGBClassifier(n_jobs=1000, **params) assert isinstance(clf, xgb.XGBClassifier) def test_kwargs_grid_search(): from sklearn import datasets from sklearn.model_selection import GridSearchCV params = {'tree_method': 'hist'} clf = xgb.XGBClassifier(n_estimators=1, learning_rate=1.0, **params) assert clf.get_params()['tree_method'] == 'hist' # 'max_leaves' is not a default argument of XGBClassifier # Check we can still do grid search over this parameter search_params = {'max_leaves': range(2, 5)} grid_cv = GridSearchCV(clf, search_params, cv=5) iris = datasets.load_iris() grid_cv.fit(iris.data, iris.target) # Expect unique results for each parameter value # This confirms sklearn is able to successfully update the parameter means = grid_cv.cv_results_['mean_test_score'] assert len(means) == len(set(means)) def 
test_sklearn_clone(): from sklearn.base import clone clf = xgb.XGBClassifier(n_jobs=2) clf.n_jobs = -1 clone(clf) def test_sklearn_get_default_params(): from sklearn.datasets import load_digits digits_2class = load_digits(n_class=2) X = digits_2class["data"] y = digits_2class["target"] cls = xgb.XGBClassifier() assert cls.get_params()["base_score"] is None cls.fit(X[:4, ...], y[:4, ...]) base_score = get_basescore(cls) np.testing.assert_equal(base_score, 0.5) def run_validation_weights(model): from sklearn.datasets import make_hastie_10_2 # prepare training and test data X, y = make_hastie_10_2(n_samples=2000, random_state=42) labels, y = np.unique(y, return_inverse=True) X_train, X_test = X[:1600], X[1600:] y_train, y_test = y[:1600], y[1600:] # instantiate model param_dist = {'objective': 'binary:logistic', 'n_estimators': 2, 'random_state': 123} clf = model(**param_dist) # train it using instance weights only in the training set weights_train = np.random.choice([1, 2], len(X_train)) clf.fit(X_train, y_train, sample_weight=weights_train, eval_set=[(X_test, y_test)], eval_metric='logloss', verbose=False) # evaluate logloss metric on test set *without* using weights evals_result_without_weights = clf.evals_result() logloss_without_weights = evals_result_without_weights[ "validation_0"]["logloss"] # now use weights for the test set np.random.seed(0) weights_test = np.random.choice([1, 2], len(X_test)) clf.fit(X_train, y_train, sample_weight=weights_train, eval_set=[(X_test, y_test)], sample_weight_eval_set=[weights_test], eval_metric='logloss', verbose=False) evals_result_with_weights = clf.evals_result() logloss_with_weights = evals_result_with_weights["validation_0"]["logloss"] # check that the logloss in the test set is actually different when using # weights than when not using them assert all((logloss_with_weights[i] != logloss_without_weights[i] for i in [0, 1])) with pytest.raises(ValueError): # length of eval set and sample weight doesn't match. 
clf.fit(X_train, y_train, sample_weight=weights_train, eval_set=[(X_train, y_train), (X_test, y_test)], sample_weight_eval_set=[weights_train]) with pytest.raises(ValueError): cls = xgb.XGBClassifier() cls.fit(X_train, y_train, sample_weight=weights_train, eval_set=[(X_train, y_train), (X_test, y_test)], sample_weight_eval_set=[weights_train]) def test_validation_weights(): run_validation_weights(xgb.XGBModel) run_validation_weights(xgb.XGBClassifier) def save_load_model(model_path): from sklearn.datasets import load_digits from sklearn.model_selection import KFold digits = load_digits(n_class=2) y = digits['target'] X = digits['data'] kf = KFold(n_splits=2, shuffle=True, random_state=rng) for train_index, test_index in kf.split(X, y): xgb_model = xgb.XGBClassifier().fit(X[train_index], y[train_index]) xgb_model.save_model(model_path) xgb_model = xgb.XGBClassifier() xgb_model.load_model(model_path) assert isinstance(xgb_model.classes_, np.ndarray) np.testing.assert_equal(xgb_model.classes_, np.array([0, 1])) assert isinstance(xgb_model._Booster, xgb.Booster) preds = xgb_model.predict(X[test_index]) labels = y[test_index] err = sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds)) assert err < 0.1 assert xgb_model.get_booster().attr('scikit_learn') is None # test native booster preds = xgb_model.predict(X[test_index], output_margin=True) booster = xgb.Booster(model_file=model_path) predt_1 = booster.predict(xgb.DMatrix(X[test_index]), output_margin=True) assert np.allclose(preds, predt_1) with pytest.raises(TypeError): xgb_model = xgb.XGBModel() xgb_model.load_model(model_path) def test_save_load_model(): with tempfile.TemporaryDirectory() as tempdir: model_path = os.path.join(tempdir, 'digits.model') save_load_model(model_path) with tempfile.TemporaryDirectory() as tempdir: model_path = os.path.join(tempdir, 'digits.model.json') save_load_model(model_path) from sklearn.datasets import load_digits from sklearn.model_selection 
import train_test_split with tempfile.TemporaryDirectory() as tempdir: model_path = os.path.join(tempdir, 'digits.model.ubj') digits = load_digits(n_class=2) y = digits['target'] X = digits['data'] booster = xgb.train({'tree_method': 'hist', 'objective': 'binary:logistic'}, dtrain=xgb.DMatrix(X, y), num_boost_round=4) predt_0 = booster.predict(xgb.DMatrix(X)) booster.save_model(model_path) cls = xgb.XGBClassifier() cls.load_model(model_path) proba = cls.predict_proba(X) assert proba.shape[0] == X.shape[0] assert proba.shape[1] == 2 # binary predt_1 = cls.predict_proba(X)[:, 1] assert np.allclose(predt_0, predt_1) cls = xgb.XGBModel() cls.load_model(model_path) predt_1 = cls.predict(X) assert np.allclose(predt_0, predt_1) # mclass X, y = load_digits(n_class=10, return_X_y=True) # small test_size to force early stop X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.01, random_state=1 ) clf = xgb.XGBClassifier( n_estimators=64, tree_method="hist", early_stopping_rounds=2 ) clf.fit(X_train, y_train, eval_set=[(X_test, y_test)]) score = clf.best_score clf.save_model(model_path) clf = xgb.XGBClassifier() clf.load_model(model_path) assert clf.classes_.size == 10 np.testing.assert_equal(clf.classes_, np.arange(10)) assert clf.n_classes_ == 10 assert clf.best_iteration == 27 assert clf.best_score == score def test_RFECV(): from sklearn.datasets import load_breast_cancer, load_diabetes, load_iris from sklearn.feature_selection import RFECV # Regression X, y = load_diabetes(return_X_y=True) bst = xgb.XGBRegressor(booster='gblinear', learning_rate=0.1, n_estimators=10, objective='reg:squarederror', random_state=0, verbosity=0) rfecv = RFECV( estimator=bst, step=1, cv=3, scoring='neg_mean_squared_error') rfecv.fit(X, y) # Binary classification X, y = load_breast_cancer(return_X_y=True) bst = xgb.XGBClassifier(booster='gblinear', learning_rate=0.1, n_estimators=10, objective='binary:logistic', random_state=0, verbosity=0) rfecv = RFECV(estimator=bst, 
step=0.5, cv=3, scoring='roc_auc') rfecv.fit(X, y) # Multi-class classification X, y = load_iris(return_X_y=True) bst = xgb.XGBClassifier(base_score=0.4, booster='gblinear', learning_rate=0.1, n_estimators=10, objective='multi:softprob', random_state=0, reg_alpha=0.001, reg_lambda=0.01, scale_pos_weight=0.5, verbosity=0) rfecv = RFECV(estimator=bst, step=0.5, cv=3, scoring='neg_log_loss') rfecv.fit(X, y) X[0:4, :] = np.nan # verify scikit_learn doesn't throw with nan reg = xgb.XGBRegressor() rfecv = RFECV(estimator=reg) rfecv.fit(X, y) cls = xgb.XGBClassifier() rfecv = RFECV(estimator=cls, step=0.5, cv=3, scoring='neg_mean_squared_error') rfecv.fit(X, y) def test_XGBClassifier_resume(): from sklearn.datasets import load_breast_cancer from sklearn.metrics import log_loss with tempfile.TemporaryDirectory() as tempdir: model1_path = os.path.join(tempdir, 'test_XGBClassifier.model') model1_booster_path = os.path.join(tempdir, 'test_XGBClassifier.booster') X, Y = load_breast_cancer(return_X_y=True) model1 = xgb.XGBClassifier( learning_rate=0.3, random_state=0, n_estimators=8) model1.fit(X, Y) pred1 = model1.predict(X) log_loss1 = log_loss(pred1, Y) # file name of stored xgb model model1.save_model(model1_path) model2 = xgb.XGBClassifier( learning_rate=0.3, random_state=0, n_estimators=8) model2.fit(X, Y, xgb_model=model1_path) pred2 = model2.predict(X) log_loss2 = log_loss(pred2, Y) assert np.any(pred1 != pred2) assert log_loss1 > log_loss2 # file name of 'Booster' instance Xgb model model1.get_booster().save_model(model1_booster_path) model2 = xgb.XGBClassifier( learning_rate=0.3, random_state=0, n_estimators=8) model2.fit(X, Y, xgb_model=model1_booster_path) pred2 = model2.predict(X) log_loss2 = log_loss(pred2, Y) assert np.any(pred1 != pred2) assert log_loss1 > log_loss2 def test_constraint_parameters(): reg = xgb.XGBRegressor(interaction_constraints="[[0, 1], [2, 3, 4]]") X = np.random.randn(10, 10) y = np.random.randn(10) reg.fit(X, y) config = 
json.loads(reg.get_booster().save_config()) assert ( config["learner"]["gradient_booster"]["tree_train_param"][ "interaction_constraints" ] == "[[0, 1], [2, 3, 4]]" ) @pytest.mark.filterwarnings("error") def test_parameter_validation(): reg = xgb.XGBRegressor(foo="bar", verbosity=1) X = np.random.randn(10, 10) y = np.random.randn(10) with pytest.warns(Warning, match="foo"): reg.fit(X, y) reg = xgb.XGBRegressor( n_estimators=2, missing=3, importance_type="gain", verbosity=1 ) X = np.random.randn(10, 10) y = np.random.randn(10) with warnings.catch_warnings(): reg.fit(X, y) def test_deprecate_position_arg(): from sklearn.datasets import load_digits X, y = load_digits(return_X_y=True, n_class=2) w = y with pytest.warns(FutureWarning): xgb.XGBRegressor(3, learning_rate=0.1) model = xgb.XGBRegressor(n_estimators=1) with pytest.warns(FutureWarning): model.fit(X, y, w) with pytest.warns(FutureWarning): xgb.XGBClassifier(1) model = xgb.XGBClassifier(n_estimators=1) with pytest.warns(FutureWarning): model.fit(X, y, w) with pytest.warns(FutureWarning): xgb.XGBRanker('rank:ndcg', learning_rate=0.1) model = xgb.XGBRanker(n_estimators=1) group = np.repeat(1, X.shape[0]) with pytest.warns(FutureWarning): model.fit(X, y, group) with pytest.warns(FutureWarning): xgb.XGBRFRegressor(1, learning_rate=0.1) model = xgb.XGBRFRegressor(n_estimators=1) with pytest.warns(FutureWarning): model.fit(X, y, w) model = xgb.XGBRFClassifier(n_estimators=1) with pytest.warns(FutureWarning): model.fit(X, y, w) @pytest.mark.skipif(**tm.no_pandas()) def test_pandas_input(): import pandas as pd from sklearn.calibration import CalibratedClassifierCV rng = np.random.RandomState(1994) kRows = 100 kCols = 6 X = rng.randint(low=0, high=2, size=kRows * kCols) X = X.reshape(kRows, kCols) df = pd.DataFrame(X) feature_names = [] for i in range(1, kCols): feature_names += ["k" + str(i)] df.columns = ["status"] + feature_names target = df["status"] train = df.drop(columns=["status"]) model = xgb.XGBClassifier() 
model.fit(train, target) np.testing.assert_equal(model.feature_names_in_, np.array(feature_names)) columns = list(train.columns) random.shuffle(columns, lambda: 0.1) df_incorrect = df[columns] with pytest.raises(ValueError): model.predict(df_incorrect) clf_isotonic = CalibratedClassifierCV(model, cv="prefit", method="isotonic") clf_isotonic.fit(train, target) assert isinstance( clf_isotonic.calibrated_classifiers_[0].estimator, xgb.XGBClassifier ) np.testing.assert_allclose(np.array(clf_isotonic.classes_), np.array([0, 1])) train_ser = train["k1"] assert isinstance(train_ser, pd.Series) model = xgb.XGBClassifier(n_estimators=8) model.fit(train_ser, target, eval_set=[(train_ser, target)]) assert tm.non_increasing(model.evals_result()["validation_0"]["logloss"]) @pytest.mark.parametrize("tree_method", ["approx", "hist"]) def test_feature_weights(tree_method): kRows = 512 kCols = 64 X = rng.randn(kRows, kCols) y = rng.randn(kRows) fw = np.ones(shape=(kCols,)) for i in range(kCols): fw[i] *= float(i) parser_path = os.path.join(tm.demo_dir(__file__), "json-model", "json_parser.py") poly_increasing = get_feature_weights( X, y, fw, parser_path, tree_method, xgb.XGBRegressor ) fw = np.ones(shape=(kCols,)) for i in range(kCols): fw[i] *= float(kCols - i) poly_decreasing = get_feature_weights( X, y, fw, parser_path, tree_method, xgb.XGBRegressor ) # Approxmated test, this is dependent on the implementation of random # number generator in std library. assert poly_increasing[0] > 0.08 assert poly_decreasing[0] < -0.08 def run_boost_from_prediction_binary(tree_method, X, y, as_frame: Optional[Callable]): """ Parameters ---------- as_frame: A callable function to convert margin into DataFrame, useful for different df implementations. 
""" model_0 = xgb.XGBClassifier( learning_rate=0.3, random_state=0, n_estimators=4, tree_method=tree_method ) model_0.fit(X=X, y=y) margin = model_0.predict(X, output_margin=True) if as_frame is not None: margin = as_frame(margin) model_1 = xgb.XGBClassifier( learning_rate=0.3, random_state=0, n_estimators=4, tree_method=tree_method ) model_1.fit(X=X, y=y, base_margin=margin) predictions_1 = model_1.predict(X, base_margin=margin) cls_2 = xgb.XGBClassifier( learning_rate=0.3, random_state=0, n_estimators=8, tree_method=tree_method ) cls_2.fit(X=X, y=y) predictions_2 = cls_2.predict(X) np.testing.assert_allclose(predictions_1, predictions_2) def run_boost_from_prediction_multi_clasas( estimator, tree_method, X, y, as_frame: Optional[Callable] ): # Multi-class model_0 = estimator( learning_rate=0.3, random_state=0, n_estimators=4, tree_method=tree_method ) model_0.fit(X=X, y=y) margin = model_0.get_booster().inplace_predict(X, predict_type="margin") if as_frame is not None: margin = as_frame(margin) model_1 = estimator( learning_rate=0.3, random_state=0, n_estimators=4, tree_method=tree_method ) model_1.fit(X=X, y=y, base_margin=margin) predictions_1 = model_1.get_booster().predict( xgb.DMatrix(X, base_margin=margin), output_margin=True ) model_2 = estimator( learning_rate=0.3, random_state=0, n_estimators=8, tree_method=tree_method ) model_2.fit(X=X, y=y) predictions_2 = model_2.get_booster().inplace_predict(X, predict_type="margin") if hasattr(predictions_1, "get"): predictions_1 = predictions_1.get() if hasattr(predictions_2, "get"): predictions_2 = predictions_2.get() np.testing.assert_allclose(predictions_1, predictions_2, atol=1e-6) @pytest.mark.parametrize("tree_method", ["hist", "approx", "exact"]) def test_boost_from_prediction(tree_method): import pandas as pd from sklearn.datasets import load_breast_cancer, load_iris, make_regression X, y = load_breast_cancer(return_X_y=True) run_boost_from_prediction_binary(tree_method, X, y, None) 
run_boost_from_prediction_binary(tree_method, X, y, pd.DataFrame) X, y = load_iris(return_X_y=True) run_boost_from_prediction_multi_clasas(xgb.XGBClassifier, tree_method, X, y, None) run_boost_from_prediction_multi_clasas( xgb.XGBClassifier, tree_method, X, y, pd.DataFrame ) X, y = make_regression(n_samples=100, n_targets=4) run_boost_from_prediction_multi_clasas(xgb.XGBRegressor, tree_method, X, y, None) def test_estimator_type(): assert xgb.XGBClassifier._estimator_type == "classifier" assert xgb.XGBRFClassifier._estimator_type == "classifier" assert xgb.XGBRegressor._estimator_type == "regressor" assert xgb.XGBRFRegressor._estimator_type == "regressor" assert xgb.XGBRanker._estimator_type == "ranker" from sklearn.datasets import load_digits X, y = load_digits(n_class=2, return_X_y=True) cls = xgb.XGBClassifier(n_estimators=2).fit(X, y) with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, "cls.json") cls.save_model(path) reg = xgb.XGBRegressor() with pytest.raises(TypeError): reg.load_model(path) cls = xgb.XGBClassifier() cls.load_model(path) # no error def test_multilabel_classification() -> None: from sklearn.datasets import make_multilabel_classification X, y = make_multilabel_classification( n_samples=32, n_classes=5, n_labels=3, random_state=0 ) clf = xgb.XGBClassifier(tree_method="hist") clf.fit(X, y) booster = clf.get_booster() learner = json.loads(booster.save_config())["learner"] assert int(learner["learner_model_param"]["num_target"]) == 5 np.testing.assert_allclose(clf.predict(X), y) predt = (clf.predict_proba(X) > 0.5).astype(np.int64) np.testing.assert_allclose(clf.predict(X), predt) assert predt.dtype == np.int64 y = y.tolist() clf.fit(X, y) np.testing.assert_allclose(clf.predict(X), predt) def test_data_initialization() -> None: from sklearn.datasets import load_digits X, y = load_digits(return_X_y=True) validate_data_initialization(xgb.QuantileDMatrix, xgb.XGBClassifier, X, y) @parametrize_with_checks([xgb.XGBRegressor()]) def 
test_estimator_reg(estimator, check): if os.environ["PYTEST_CURRENT_TEST"].find("check_supervised_y_no_nan") != -1: # The test uses float64 and requires the error message to contain: # # "value too large for dtype(float64)", # # while XGBoost stores values as float32. But XGBoost does verify the label # internally, so we replace this test with custom check. rng = np.random.RandomState(888) X = rng.randn(10, 5) y = np.full(10, np.inf) with pytest.raises( ValueError, match="contains NaN, infinity or a value too large" ): estimator.fit(X, y) return if os.environ["PYTEST_CURRENT_TEST"].find("check_estimators_overwrite_params") != -1: # A hack to pass the scikit-learn parameter mutation tests. XGBoost regressor # returns actual internal default values for parameters in `get_params`, but those # are set as `None` in sklearn interface to avoid duplication. So we fit a dummy # model and obtain the default parameters here for the mutation tests. from sklearn.datasets import make_regression X, y = make_regression(n_samples=2, n_features=1) estimator.set_params(**xgb.XGBRegressor().fit(X, y).get_params()) check(estimator) def test_categorical(): X, y = tm.make_categorical(n_samples=32, n_features=2, n_categories=3, onehot=False) ft = ["c"] * X.shape[1] reg = xgb.XGBRegressor( feature_types=ft, max_cat_to_onehot=1, enable_categorical=True, ) reg.fit(X.values, y, eval_set=[(X.values, y)]) from_cat = reg.evals_result()["validation_0"]["rmse"] predt_cat = reg.predict(X.values) assert reg.get_booster().feature_types == ft with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, "model.json") reg.save_model(path) reg = xgb.XGBRegressor() reg.load_model(path) assert reg.feature_types == ft onehot, y = tm.make_categorical( n_samples=32, n_features=2, n_categories=3, onehot=True ) reg = xgb.XGBRegressor() reg.fit(onehot, y, eval_set=[(onehot, y)]) from_enc = reg.evals_result()["validation_0"]["rmse"] predt_enc = reg.predict(onehot) np.testing.assert_allclose(from_cat, 
from_enc) np.testing.assert_allclose(predt_cat, predt_enc) def test_evaluation_metric(): from sklearn.datasets import load_diabetes, load_digits from sklearn.metrics import mean_absolute_error X, y = load_diabetes(return_X_y=True) n_estimators = 16 with tm.captured_output() as (out, err): reg = xgb.XGBRegressor( tree_method="hist", eval_metric=mean_absolute_error, n_estimators=n_estimators, ) reg.fit(X, y, eval_set=[(X, y)]) lines = out.getvalue().strip().split('\n') assert len(lines) == n_estimators for line in lines: assert line.find("mean_absolute_error") != -1 def metric(predt: np.ndarray, Xy: xgb.DMatrix): y = Xy.get_label() return "m", np.abs(predt - y).sum() with pytest.warns(UserWarning): reg = xgb.XGBRegressor( tree_method="hist", n_estimators=1, ) reg.fit(X, y, eval_set=[(X, y)], eval_metric=metric) def merror(y_true: np.ndarray, predt: np.ndarray): n_samples = y_true.shape[0] assert n_samples == predt.size errors = np.zeros(y_true.shape[0]) errors[y != predt] = 1.0 return np.sum(errors) / n_samples X, y = load_digits(n_class=10, return_X_y=True) clf = xgb.XGBClassifier( tree_method="hist", eval_metric=merror, n_estimators=16, objective="multi:softmax" ) clf.fit(X, y, eval_set=[(X, y)]) custom = clf.evals_result() clf = xgb.XGBClassifier( tree_method="hist", eval_metric="merror", n_estimators=16, objective="multi:softmax" ) clf.fit(X, y, eval_set=[(X, y)]) internal = clf.evals_result() np.testing.assert_allclose( custom["validation_0"]["merror"], internal["validation_0"]["merror"], atol=1e-6 ) clf = xgb.XGBRFClassifier( tree_method="hist", n_estimators=16, objective=tm.softprob_obj(10), eval_metric=merror, ) with pytest.raises(AssertionError): # shape check inside the `merror` function clf.fit(X, y, eval_set=[(X, y)]) def test_weighted_evaluation_metric(): from sklearn.datasets import make_hastie_10_2 from sklearn.metrics import log_loss X, y = make_hastie_10_2(n_samples=2000, random_state=42) labels, y = np.unique(y, return_inverse=True) X_train, X_test 
= X[:1600], X[1600:] y_train, y_test = y[:1600], y[1600:] weights_eval_set = np.random.choice([1, 2], len(X_test)) np.random.seed(0) weights_train = np.random.choice([1, 2], len(X_train)) clf = xgb.XGBClassifier( tree_method="hist", eval_metric=log_loss, n_estimators=16, objective="binary:logistic", ) clf.fit(X_train, y_train, sample_weight=weights_train, eval_set=[(X_test, y_test)], sample_weight_eval_set=[weights_eval_set]) custom = clf.evals_result() clf = xgb.XGBClassifier( tree_method="hist", eval_metric="logloss", n_estimators=16, objective="binary:logistic" ) clf.fit(X_train, y_train, sample_weight=weights_train, eval_set=[(X_test, y_test)], sample_weight_eval_set=[weights_eval_set]) internal = clf.evals_result() np.testing.assert_allclose( custom["validation_0"]["log_loss"], internal["validation_0"]["logloss"], atol=1e-6 )
51,130
32.462696
95
py
xgboost
xgboost-master/tests/python/test_basic.py
import json import os import tempfile from pathlib import Path import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm dpath = 'demo/data/' rng = np.random.RandomState(1994) class TestBasic: def test_compat(self): from xgboost.compat import lazy_isinstance a = np.array([1, 2, 3]) assert lazy_isinstance(a, 'numpy', 'ndarray') assert not lazy_isinstance(a, 'numpy', 'dataframe') def test_basic(self): dtrain, dtest = tm.load_agaricus(__file__) param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'} # specify validations set to watch performance watchlist = [(dtrain, 'train')] num_round = 2 bst = xgb.train(param, dtrain, num_round, watchlist, verbose_eval=True) preds = bst.predict(dtrain) labels = dtrain.get_label() err = sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds)) # error must be smaller than 10% assert err < 0.1 preds = bst.predict(dtest) labels = dtest.get_label() err = sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds)) # error must be smaller than 10% assert err < 0.1 with tempfile.TemporaryDirectory() as tmpdir: dtest_path = os.path.join(tmpdir, 'dtest.dmatrix') # save dmatrix into binary buffer dtest.save_binary(dtest_path) # save model model_path = os.path.join(tmpdir, 'model.booster') bst.save_model(model_path) # load model and data in bst2 = xgb.Booster(model_file=model_path) dtest2 = xgb.DMatrix(dtest_path) preds2 = bst2.predict(dtest2) # assert they are the same assert np.sum(np.abs(preds2 - preds)) == 0 def test_metric_config(self): # Make sure that the metric configuration happens in booster so the # string `['error', 'auc']` doesn't get passed down to core. 
dtrain, dtest = tm.load_agaricus(__file__) param = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic', 'eval_metric': ['error', 'auc']} watchlist = [(dtest, 'eval'), (dtrain, 'train')] num_round = 2 booster = xgb.train(param, dtrain, num_round, watchlist) predt_0 = booster.predict(dtrain) with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, 'model.json') booster.save_model(path) booster = xgb.Booster(params=param, model_file=path) predt_1 = booster.predict(dtrain) np.testing.assert_allclose(predt_0, predt_1) def test_multiclass(self): dtrain, dtest = tm.load_agaricus(__file__) param = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'num_class': 2} # specify validations set to watch performance watchlist = [(dtest, 'eval'), (dtrain, 'train')] num_round = 2 bst = xgb.train(param, dtrain, num_round, watchlist) # this is prediction preds = bst.predict(dtest) labels = dtest.get_label() err = sum(1 for i in range(len(preds)) if preds[i] != labels[i]) / float(len(preds)) # error must be smaller than 10% assert err < 0.1 with tempfile.TemporaryDirectory() as tmpdir: dtest_path = os.path.join(tmpdir, 'dtest.buffer') model_path = os.path.join(tmpdir, 'xgb.model') # save dmatrix into binary buffer dtest.save_binary(dtest_path) # save model bst.save_model(model_path) # load model and data in bst2 = xgb.Booster(model_file=model_path) dtest2 = xgb.DMatrix(dtest_path) preds2 = bst2.predict(dtest2) # assert they are the same assert np.sum(np.abs(preds2 - preds)) == 0 def test_dump(self): data = np.random.randn(100, 2) target = np.array([0, 1] * 50) features = ['Feature1', 'Feature2'] dm = xgb.DMatrix(data, label=target, feature_names=features) params = {'objective': 'binary:logistic', 'eval_metric': 'logloss', 'eta': 0.3, 'max_depth': 1} bst = xgb.train(params, dm, num_boost_round=1) # number of feature importances should == number of features dump1 = bst.get_dump() assert len(dump1) == 1, 'Expected only 1 tree to be dumped.' 
len(dump1[0].splitlines()) == 3, 'Expected 1 root and 2 leaves - 3 lines in dump.' dump2 = bst.get_dump(with_stats=True) assert dump2[0].count('\n') == 3, 'Expected 1 root and 2 leaves - 3 lines in dump.' msg = 'Expected more info when with_stats=True is given.' assert dump2[0].find('\n') > dump1[0].find('\n'), msg dump3 = bst.get_dump(dump_format="json") dump3j = json.loads(dump3[0]) assert dump3j['nodeid'] == 0, 'Expected the root node on top.' dump4 = bst.get_dump(dump_format="json", with_stats=True) dump4j = json.loads(dump4[0]) assert 'gain' in dump4j, "Expected 'gain' to be dumped in JSON." with pytest.raises(ValueError): bst.get_dump(fmap="foo") def test_feature_score(self): rng = np.random.RandomState(0) data = rng.randn(100, 2) target = np.array([0, 1] * 50) features = ["F0"] with pytest.raises(ValueError): xgb.DMatrix(data, label=target, feature_names=features) params = {"objective": "binary:logistic"} dm = xgb.DMatrix(data, label=target, feature_names=["F0", "F1"]) booster = xgb.train(params, dm, num_boost_round=1) # no error since feature names might be assigned before the booster seeing data # and booster doesn't known about the actual number of features. 
booster.feature_names = ["F0"] with pytest.raises(ValueError): booster.get_fscore() booster.feature_names = None # Use JSON to make sure the output has native Python type scores = json.loads(json.dumps(booster.get_fscore())) np.testing.assert_allclose(scores["f0"], 6.0) def test_load_file_invalid(self): with pytest.raises(xgb.core.XGBoostError): xgb.Booster(model_file='incorrect_path') with pytest.raises(xgb.core.XGBoostError): xgb.Booster(model_file=u'不正なパス') def test_dmatrix_numpy_init_omp(self): rows = [1000, 11326, 15000] cols = 50 for row in rows: X = np.random.randn(row, cols) y = np.random.randn(row).astype('f') dm = xgb.DMatrix(X, y, nthread=0) np.testing.assert_array_equal(dm.get_label(), y) assert dm.num_row() == row assert dm.num_col() == cols dm = xgb.DMatrix(X, y, nthread=10) np.testing.assert_array_equal(dm.get_label(), y) assert dm.num_row() == row assert dm.num_col() == cols def test_cv(self): dm, _ = tm.load_agaricus(__file__) params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic'} # return np.ndarray cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=False) assert isinstance(cv, dict) assert len(cv) == (4) def test_cv_no_shuffle(self): dm, _ = tm.load_agaricus(__file__) params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic'} # return np.ndarray cv = xgb.cv(params, dm, num_boost_round=10, shuffle=False, nfold=10, as_pandas=False) assert isinstance(cv, dict) assert len(cv) == (4) def test_cv_explicit_fold_indices(self): dm, _ = tm.load_agaricus(__file__) params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic'} folds = [ # Train Test ([1, 3], [5, 8]), ([7, 9], [23, 43]), ] # return np.ndarray cv = xgb.cv(params, dm, num_boost_round=10, folds=folds, as_pandas=False) assert isinstance(cv, dict) assert len(cv) == (4) @pytest.mark.skipif(**tm.skip_s390x()) def test_cv_explicit_fold_indices_labels(self): params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 
'objective': 'reg:squarederror'} N = 100 F = 3 dm = xgb.DMatrix(data=np.random.randn(N, F), label=np.arange(N)) folds = [ # Train Test ([1, 3], [5, 8]), ([7, 9], [23, 43, 11]), ] # Use callback to log the test labels in each fold class Callback(xgb.callback.TrainingCallback): def __init__(self) -> None: super().__init__() def after_iteration( self, model, epoch: int, evals_log: xgb.callback.TrainingCallback.EvalsLog ): print([fold.dtest.get_label() for fold in model.cvfolds]) cb = Callback() # Run cross validation and capture standard out to test callback result with tm.captured_output() as (out, err): xgb.cv( params, dm, num_boost_round=1, folds=folds, callbacks=[cb], as_pandas=False ) output = out.getvalue().strip() solution = ('[array([5., 8.], dtype=float32), array([23., 43., 11.],' + ' dtype=float32)]') assert output == solution class TestBasicPathLike: """Unit tests using pathlib.Path for file interaction.""" def test_DMatrix_init_from_path(self): """Initialization from the data path.""" dtrain, _ = tm.load_agaricus(__file__) assert dtrain.num_row() == 6513 assert dtrain.num_col() == 127 def test_DMatrix_save_to_path(self): """Saving to a binary file using pathlib from a DMatrix.""" data = np.random.randn(100, 2) target = np.array([0, 1] * 50) features = ['Feature1', 'Feature2'] dm = xgb.DMatrix(data, label=target, feature_names=features) # save, assert exists, remove file binary_path = Path("dtrain.bin") dm.save_binary(binary_path) assert binary_path.exists() Path.unlink(binary_path) def test_Booster_init_invalid_path(self): """An invalid model_file path should raise XGBoostError.""" with pytest.raises(xgb.core.XGBoostError): xgb.Booster(model_file=Path("invalidpath")) def test_Booster_save_and_load(self): """Saving and loading model files from paths.""" save_path = Path("saveload.model") data = np.random.randn(100, 2) target = np.array([0, 1] * 50) features = ['Feature1', 'Feature2'] dm = xgb.DMatrix(data, label=target, feature_names=features) params = 
{'objective': 'binary:logistic', 'eval_metric': 'logloss', 'eta': 0.3, 'max_depth': 1} bst = xgb.train(params, dm, num_boost_round=1) # save, assert exists bst.save_model(save_path) assert save_path.exists() def dump_assertions(dump): """Assertions for the expected dump from Booster""" assert len(dump) == 1, 'Exepcted only 1 tree to be dumped.' assert len(dump[0].splitlines()) == 3, 'Expected 1 root and 2 leaves - 3 lines.' # load the model again using Path bst2 = xgb.Booster(model_file=save_path) dump2 = bst2.get_dump() dump_assertions(dump2) # load again using load_model bst3 = xgb.Booster() bst3.load_model(save_path) dump3 = bst3.get_dump() dump_assertions(dump3) # remove file Path.unlink(save_path)
12,151
35.93617
92
py
xgboost
xgboost-master/tests/python/test_with_modin.py
import numpy as np import pytest from test_dmatrix import set_base_margin_info import xgboost as xgb from xgboost import testing as tm try: import modin.pandas as md except ImportError: pass pytestmark = pytest.mark.skipif(**tm.no_modin()) class TestModin: @pytest.mark.xfail def test_modin(self): df = md.DataFrame([[1, 2., True], [2, 3., False]], columns=['a', 'b', 'c']) dm = xgb.DMatrix(df, label=md.Series([1, 2])) assert dm.feature_names == ['a', 'b', 'c'] assert dm.feature_types == ['int', 'float', 'i'] assert dm.num_row() == 2 assert dm.num_col() == 3 np.testing.assert_array_equal(dm.get_label(), np.array([1, 2])) # overwrite feature_names and feature_types dm = xgb.DMatrix(df, label=md.Series([1, 2]), feature_names=['x', 'y', 'z'], feature_types=['q', 'q', 'q']) assert dm.feature_names == ['x', 'y', 'z'] assert dm.feature_types == ['q', 'q', 'q'] assert dm.num_row() == 2 assert dm.num_col() == 3 # incorrect dtypes df = md.DataFrame([[1, 2., 'x'], [2, 3., 'y']], columns=['a', 'b', 'c']) with pytest.raises(ValueError): xgb.DMatrix(df) # numeric columns df = md.DataFrame([[1, 2., True], [2, 3., False]]) dm = xgb.DMatrix(df, label=md.Series([1, 2])) assert dm.feature_names == ['0', '1', '2'] assert dm.feature_types == ['int', 'float', 'i'] assert dm.num_row() == 2 assert dm.num_col() == 3 np.testing.assert_array_equal(dm.get_label(), np.array([1, 2])) df = md.DataFrame([[1, 2., 1], [2, 3., 1]], columns=[4, 5, 6]) dm = xgb.DMatrix(df, label=md.Series([1, 2])) assert dm.feature_names == ['4', '5', '6'] assert dm.feature_types == ['int', 'float', 'int'] assert dm.num_row() == 2 assert dm.num_col() == 3 df = md.DataFrame({'A': ['X', 'Y', 'Z'], 'B': [1, 2, 3]}) dummies = md.get_dummies(df) # B A_X A_Y A_Z # 0 1 1 0 0 # 1 2 0 1 0 # 2 3 0 0 1 result, _, _ = xgb.data._transform_pandas_df(dummies, enable_categorical=False) exp = np.array([[1., 1., 0., 0.], [2., 0., 1., 0.], [3., 0., 0., 1.]]) np.testing.assert_array_equal(result, exp) dm = xgb.DMatrix(dummies) assert 
dm.feature_names == ['B', 'A_X', 'A_Y', 'A_Z'] assert dm.feature_types == ['int', 'int', 'int', 'int'] assert dm.num_row() == 3 assert dm.num_col() == 4 df = md.DataFrame({'A=1': [1, 2, 3], 'A=2': [4, 5, 6]}) dm = xgb.DMatrix(df) assert dm.feature_names == ['A=1', 'A=2'] assert dm.feature_types == ['int', 'int'] assert dm.num_row() == 3 assert dm.num_col() == 2 df_int = md.DataFrame([[1, 1.1], [2, 2.2]], columns=[9, 10]) dm_int = xgb.DMatrix(df_int) df_range = md.DataFrame([[1, 1.1], [2, 2.2]], columns=range(9, 11, 1)) dm_range = xgb.DMatrix(df_range) assert dm_int.feature_names == ['9', '10'] # assert not "9 " assert dm_int.feature_names == dm_range.feature_names # test MultiIndex as columns df = md.DataFrame( [ (1, 2, 3, 4, 5, 6), (6, 5, 4, 3, 2, 1) ], columns=md.MultiIndex.from_tuples(( ('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2), ('b', 3), )) ) dm = xgb.DMatrix(df) assert dm.feature_names == ['a 1', 'a 2', 'a 3', 'b 1', 'b 2', 'b 3'] assert dm.feature_types == ['int', 'int', 'int', 'int', 'int', 'int'] assert dm.num_row() == 2 assert dm.num_col() == 6 def test_modin_label(self): # label must be a single column df = md.DataFrame({'A': ['X', 'Y', 'Z'], 'B': [1, 2, 3]}) with pytest.raises(ValueError): xgb.data._transform_pandas_df(df, False, None, None, 'label', 'float') # label must be supported dtype df = md.DataFrame({'A': np.array(['a', 'b', 'c'], dtype=object)}) with pytest.raises(ValueError): xgb.data._transform_pandas_df(df, False, None, None, 'label', 'float') df = md.DataFrame({'A': np.array([1, 2, 3], dtype=int)}) result, _, _ = xgb.data._transform_pandas_df(df, False, None, None, 'label', 'float') np.testing.assert_array_equal(result, np.array([[1.], [2.], [3.]], dtype=float)) dm = xgb.DMatrix(np.random.randn(3, 2), label=df) assert dm.num_row() == 3 assert dm.num_col() == 2 def test_modin_weight(self): kRows = 32 kCols = 8 X = np.random.randn(kRows, kCols) y = np.random.randn(kRows) w = np.random.uniform(size=kRows).astype(np.float32) w_pd = 
md.DataFrame(w) data = xgb.DMatrix(X, y, w_pd) assert data.num_row() == kRows assert data.num_col() == kCols np.testing.assert_array_equal(data.get_weight(), w) def test_base_margin(self): set_base_margin_info(md.DataFrame, xgb.DMatrix, "hist")
5,475
36.506849
82
py
xgboost
xgboost-master/tests/python/test_dt.py
import numpy as np import pytest import xgboost as xgb dt = pytest.importorskip("datatable") pd = pytest.importorskip("pandas") class TestDataTable: def test_dt(self) -> None: df = pd.DataFrame([[1, 2.0, True], [2, 3.0, False]], columns=["a", "b", "c"]) dtable = dt.Frame(df) labels = dt.Frame([1, 2]) dm = xgb.DMatrix(dtable, label=labels) assert dm.feature_names == ["a", "b", "c"] assert dm.feature_types == ["int", "float", "i"] assert dm.num_row() == 2 assert dm.num_col() == 3 np.testing.assert_array_equal(np.array([1, 2]), dm.get_label()) # overwrite feature_names dm = xgb.DMatrix(dtable, label=pd.Series([1, 2]), feature_names=["x", "y", "z"]) assert dm.feature_names == ["x", "y", "z"] assert dm.num_row() == 2 assert dm.num_col() == 3 # incorrect dtypes df = pd.DataFrame([[1, 2.0, "x"], [2, 3.0, "y"]], columns=["a", "b", "c"]) dtable = dt.Frame(df) with pytest.raises(ValueError): xgb.DMatrix(dtable) df = pd.DataFrame({"A=1": [1, 2, 3], "A=2": [4, 5, 6]}) dtable = dt.Frame(df) dm = xgb.DMatrix(dtable) assert dm.feature_names == ["A=1", "A=2"] assert dm.feature_types == ["int", "int"] assert dm.num_row() == 3 assert dm.num_col() == 2
1,376
31.785714
88
py
xgboost
xgboost-master/tests/python/test_pickling.py
import json import os import pickle import numpy as np import xgboost as xgb kRows = 100 kCols = 10 def generate_data(): X = np.random.randn(kRows, kCols) y = np.random.randn(kRows) return X, y class TestPickling: def run_model_pickling(self, xgb_params) -> str: X, y = generate_data() dtrain = xgb.DMatrix(X, y) bst = xgb.train(xgb_params, dtrain) dump_0 = bst.get_dump(dump_format='json') assert dump_0 config_0 = bst.save_config() filename = 'model.pkl' with open(filename, 'wb') as fd: pickle.dump(bst, fd) with open(filename, 'rb') as fd: bst = pickle.load(fd) with open(filename, 'wb') as fd: pickle.dump(bst, fd) with open(filename, 'rb') as fd: bst = pickle.load(fd) assert bst.get_dump(dump_format='json') == dump_0 if os.path.exists(filename): os.remove(filename) config_1 = bst.save_config() assert config_0 == config_1 return json.loads(config_0) def test_model_pickling_json(self): def check(config): tree_param = config["learner"]["gradient_booster"]["tree_train_param"] subsample = tree_param["subsample"] assert float(subsample) == 0.5 params = {"nthread": 8, "tree_method": "hist", "subsample": 0.5} config = self.run_model_pickling(params) check(config) params = {"nthread": 8, "tree_method": "exact", "subsample": 0.5} config = self.run_model_pickling(params) check(config)
1,611
24.1875
82
py
xgboost
xgboost-master/tests/python/test_config.py
import multiprocessing from concurrent.futures import ThreadPoolExecutor import pytest import xgboost as xgb @pytest.mark.parametrize("verbosity_level", [0, 1, 2, 3]) def test_global_config_verbosity(verbosity_level): def get_current_verbosity(): return xgb.get_config()["verbosity"] old_verbosity = get_current_verbosity() with xgb.config_context(verbosity=verbosity_level): new_verbosity = get_current_verbosity() assert new_verbosity == verbosity_level assert old_verbosity == get_current_verbosity() @pytest.mark.parametrize("use_rmm", [False, True]) def test_global_config_use_rmm(use_rmm): def get_current_use_rmm_flag(): return xgb.get_config()["use_rmm"] old_use_rmm_flag = get_current_use_rmm_flag() with xgb.config_context(use_rmm=use_rmm): new_use_rmm_flag = get_current_use_rmm_flag() assert new_use_rmm_flag == use_rmm assert old_use_rmm_flag == get_current_use_rmm_flag() def test_nested_config(): with xgb.config_context(verbosity=3): assert xgb.get_config()["verbosity"] == 3 with xgb.config_context(verbosity=2): assert xgb.get_config()["verbosity"] == 2 with xgb.config_context(verbosity=1): assert xgb.get_config()["verbosity"] == 1 assert xgb.get_config()["verbosity"] == 2 assert xgb.get_config()["verbosity"] == 3 with xgb.config_context(verbosity=3): assert xgb.get_config()["verbosity"] == 3 with xgb.config_context(verbosity=None): assert xgb.get_config()["verbosity"] == 3 # None has no effect verbosity = xgb.get_config()["verbosity"] xgb.set_config(verbosity=2) assert xgb.get_config()["verbosity"] == 2 with xgb.config_context(verbosity=3): assert xgb.get_config()["verbosity"] == 3 xgb.set_config(verbosity=verbosity) # reset def test_thread_safty(): n_threads = multiprocessing.cpu_count() futures = [] with ThreadPoolExecutor(max_workers=n_threads) as executor: for i in range(256): f = executor.submit(test_nested_config) futures.append(f) for f in futures: f.result()
2,195
32.272727
75
py
xgboost
xgboost-master/tests/python/generate_models.py
import os import numpy as np import xgboost kRounds = 2 kRows = 1000 kCols = 4 kForests = 2 kMaxDepth = 2 kClasses = 3 X = np.random.randn(kRows, kCols) w = np.random.uniform(size=kRows) version = xgboost.__version__ np.random.seed(1994) target_dir = 'models' def booster_bin(model): return os.path.join(target_dir, 'xgboost-' + version + '.' + model + '.bin') def booster_json(model): return os.path.join(target_dir, 'xgboost-' + version + '.' + model + '.json') def skl_bin(model): return os.path.join(target_dir, 'xgboost_scikit-' + version + '.' + model + '.bin') def skl_json(model): return os.path.join(target_dir, 'xgboost_scikit-' + version + '.' + model + '.json') def generate_regression_model(): print('Regression') y = np.random.randn(kRows) data = xgboost.DMatrix(X, label=y, weight=w) booster = xgboost.train({'tree_method': 'hist', 'num_parallel_tree': kForests, 'max_depth': kMaxDepth}, num_boost_round=kRounds, dtrain=data) booster.save_model(booster_bin('reg')) booster.save_model(booster_json('reg')) reg = xgboost.XGBRegressor(tree_method='hist', num_parallel_tree=kForests, max_depth=kMaxDepth, n_estimators=kRounds) reg.fit(X, y, w) reg.save_model(skl_bin('reg')) reg.save_model(skl_json('reg')) def generate_logistic_model(): print('Logistic') y = np.random.randint(0, 2, size=kRows) assert y.max() == 1 and y.min() == 0 for objective, name in [('binary:logistic', 'logit'), ('binary:logitraw', 'logitraw')]: data = xgboost.DMatrix(X, label=y, weight=w) booster = xgboost.train({'tree_method': 'hist', 'num_parallel_tree': kForests, 'max_depth': kMaxDepth, 'objective': objective}, num_boost_round=kRounds, dtrain=data) booster.save_model(booster_bin(name)) booster.save_model(booster_json(name)) reg = xgboost.XGBClassifier(tree_method='hist', num_parallel_tree=kForests, max_depth=kMaxDepth, n_estimators=kRounds, objective=objective) reg.fit(X, y, w) reg.save_model(skl_bin(name)) reg.save_model(skl_json(name)) def generate_classification_model(): print('Classification') 
y = np.random.randint(0, kClasses, size=kRows) data = xgboost.DMatrix(X, label=y, weight=w) booster = xgboost.train({'num_class': kClasses, 'tree_method': 'hist', 'num_parallel_tree': kForests, 'max_depth': kMaxDepth}, num_boost_round=kRounds, dtrain=data) booster.save_model(booster_bin('cls')) booster.save_model(booster_json('cls')) cls = xgboost.XGBClassifier(tree_method='hist', num_parallel_tree=kForests, max_depth=kMaxDepth, n_estimators=kRounds) cls.fit(X, y, w) cls.save_model(skl_bin('cls')) cls.save_model(skl_json('cls')) def generate_ranking_model(): print('Learning to Rank') y = np.random.randint(5, size=kRows) w = np.random.uniform(size=20) g = np.repeat(50, 20) data = xgboost.DMatrix(X, y, weight=w) data.set_group(g) booster = xgboost.train({'objective': 'rank:ndcg', 'num_parallel_tree': kForests, 'tree_method': 'hist', 'max_depth': kMaxDepth}, num_boost_round=kRounds, dtrain=data) booster.save_model(booster_bin('ltr')) booster.save_model(booster_json('ltr')) ranker = xgboost.sklearn.XGBRanker(n_estimators=kRounds, tree_method='hist', objective='rank:ndcg', max_depth=kMaxDepth, num_parallel_tree=kForests) ranker.fit(X, y, g, sample_weight=w) ranker.save_model(skl_bin('ltr')) ranker.save_model(skl_json('ltr')) def write_versions(): versions = {'numpy': np.__version__, 'xgboost': version} with open(os.path.join(target_dir, 'version'), 'w') as fd: fd.write(str(versions)) if __name__ == '__main__': if not os.path.exists(target_dir): os.mkdir(target_dir) generate_regression_model() generate_logistic_model() generate_classification_model() generate_ranking_model() write_versions()
4,998
31.673203
91
py
xgboost
xgboost-master/tests/python/test_with_pandas.py
from typing import Type import numpy as np import pytest from test_dmatrix import set_base_margin_info import xgboost as xgb from xgboost import testing as tm from xgboost.testing.data import pd_arrow_dtypes, pd_dtypes try: import pandas as pd except ImportError: pass pytestmark = pytest.mark.skipif(**tm.no_pandas()) dpath = 'demo/data/' rng = np.random.RandomState(1994) class TestPandas: def test_pandas(self): df = pd.DataFrame([[1, 2., True], [2, 3., False]], columns=['a', 'b', 'c']) dm = xgb.DMatrix(df, label=pd.Series([1, 2])) assert dm.feature_names == ['a', 'b', 'c'] assert dm.feature_types == ['int', 'float', 'i'] assert dm.num_row() == 2 assert dm.num_col() == 3 np.testing.assert_array_equal(dm.get_label(), np.array([1, 2])) # overwrite feature_names and feature_types dm = xgb.DMatrix(df, label=pd.Series([1, 2]), feature_names=['x', 'y', 'z'], feature_types=['q', 'q', 'q']) assert dm.feature_names == ['x', 'y', 'z'] assert dm.feature_types == ['q', 'q', 'q'] assert dm.num_row() == 2 assert dm.num_col() == 3 # incorrect dtypes df = pd.DataFrame([[1, 2., 'x'], [2, 3., 'y']], columns=['a', 'b', 'c']) with pytest.raises(ValueError): xgb.DMatrix(df) # numeric columns df = pd.DataFrame([[1, 2., True], [2, 3., False]]) dm = xgb.DMatrix(df, label=pd.Series([1, 2])) assert dm.feature_names == ['0', '1', '2'] assert dm.feature_types == ['int', 'float', 'i'] assert dm.num_row() == 2 assert dm.num_col() == 3 np.testing.assert_array_equal(dm.get_label(), np.array([1, 2])) df = pd.DataFrame([[1, 2., 1], [2, 3., 1]], columns=[4, 5, 6]) dm = xgb.DMatrix(df, label=pd.Series([1, 2])) assert dm.feature_names == ['4', '5', '6'] assert dm.feature_types == ['int', 'float', 'int'] assert dm.num_row() == 2 assert dm.num_col() == 3 df = pd.DataFrame({'A': ['X', 'Y', 'Z'], 'B': [1, 2, 3]}) dummies = pd.get_dummies(df) # B A_X A_Y A_Z # 0 1 1 0 0 # 1 2 0 1 0 # 2 3 0 0 1 result, _, _ = xgb.data._transform_pandas_df(dummies, enable_categorical=False) exp = np.array([[1., 1., 0., 0.], 
[2., 0., 1., 0.], [3., 0., 0., 1.]]) np.testing.assert_array_equal(result, exp) dm = xgb.DMatrix(dummies) assert dm.feature_names == ['B', 'A_X', 'A_Y', 'A_Z'] if int(pd.__version__[0]) >= 2: assert dm.feature_types == ['int', 'i', 'i', 'i'] else: assert dm.feature_types == ['int', 'int', 'int', 'int'] assert dm.num_row() == 3 assert dm.num_col() == 4 df = pd.DataFrame({'A=1': [1, 2, 3], 'A=2': [4, 5, 6]}) dm = xgb.DMatrix(df) assert dm.feature_names == ['A=1', 'A=2'] assert dm.feature_types == ['int', 'int'] assert dm.num_row() == 3 assert dm.num_col() == 2 df_int = pd.DataFrame([[1, 1.1], [2, 2.2]], columns=[9, 10]) dm_int = xgb.DMatrix(df_int) df_range = pd.DataFrame([[1, 1.1], [2, 2.2]], columns=range(9, 11, 1)) dm_range = xgb.DMatrix(df_range) assert dm_int.feature_names == ['9', '10'] # assert not "9 " assert dm_int.feature_names == dm_range.feature_names # test MultiIndex as columns df = pd.DataFrame( [ (1, 2, 3, 4, 5, 6), (6, 5, 4, 3, 2, 1) ], columns=pd.MultiIndex.from_tuples(( ('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2), ('b', 3), )) ) dm = xgb.DMatrix(df) assert dm.feature_names == ['a 1', 'a 2', 'a 3', 'b 1', 'b 2', 'b 3'] assert dm.feature_types == ['int', 'int', 'int', 'int', 'int', 'int'] assert dm.num_row() == 2 assert dm.num_col() == 6 # test Index as columns df = pd.DataFrame([[1, 1.1], [2, 2.2]], columns=pd.Index([1, 2])) Xy = xgb.DMatrix(df) np.testing.assert_equal(np.array(Xy.feature_names), np.array(["1", "2"])) def test_slice(self): rng = np.random.RandomState(1994) rows = 100 X = rng.randint(3, 7, size=rows) X = pd.DataFrame({'f0': X}) y = rng.randn(rows) ridxs = [1, 2, 3, 4, 5, 6] m = xgb.DMatrix(X, y) sliced = m.slice(ridxs) assert m.feature_types == sliced.feature_types def test_pandas_categorical(self): rng = np.random.RandomState(1994) rows = 100 X = rng.randint(3, 7, size=rows) X = pd.Series(X, dtype="category") X = pd.DataFrame({'f0': X}) y = rng.randn(rows) m = xgb.DMatrix(X, y, enable_categorical=True) assert m.feature_types[0] 
== 'c' X_0 = ["f", "o", "o"] X_1 = [4, 3, 2] X = pd.DataFrame({"feat_0": X_0, "feat_1": X_1}) X["feat_0"] = X["feat_0"].astype("category") transformed, _, feature_types = xgb.data._transform_pandas_df( X, enable_categorical=True ) assert transformed[:, 0].min() == 0 # test missing value X = pd.DataFrame({"f0": ["a", "b", np.NaN]}) X["f0"] = X["f0"].astype("category") arr, _, _ = xgb.data._transform_pandas_df(X, enable_categorical=True) assert not np.any(arr == -1.0) X = X["f0"] y = y[:X.shape[0]] with pytest.raises(ValueError, match=r".*enable_categorical.*"): xgb.DMatrix(X, y) Xy = xgb.DMatrix(X, y, enable_categorical=True) assert Xy.num_row() == 3 assert Xy.num_col() == 1 def test_pandas_sparse(self): import pandas as pd rows = 100 X = pd.DataFrame( {"A": pd.arrays.SparseArray(np.random.randint(0, 10, size=rows)), "B": pd.arrays.SparseArray(np.random.randn(rows)), "C": pd.arrays.SparseArray(np.random.permutation( [True, False] * (rows // 2)))} ) y = pd.Series(pd.arrays.SparseArray(np.random.randn(rows))) dtrain = xgb.DMatrix(X, y) booster = xgb.train({}, dtrain, num_boost_round=4) predt_sparse = booster.predict(xgb.DMatrix(X)) predt_dense = booster.predict(xgb.DMatrix(X.sparse.to_dense())) np.testing.assert_allclose(predt_sparse, predt_dense) def test_pandas_label(self): # label must be a single column df = pd.DataFrame({'A': ['X', 'Y', 'Z'], 'B': [1, 2, 3]}) with pytest.raises(ValueError): xgb.data._transform_pandas_df(df, False, None, None, 'label', 'float') # label must be supported dtype df = pd.DataFrame({'A': np.array(['a', 'b', 'c'], dtype=object)}) with pytest.raises(ValueError): xgb.data._transform_pandas_df(df, False, None, None, 'label', 'float') df = pd.DataFrame({'A': np.array([1, 2, 3], dtype=int)}) result, _, _ = xgb.data._transform_pandas_df(df, False, None, None, 'label', 'float') np.testing.assert_array_equal(result, np.array([[1.], [2.], [3.]], dtype=float)) dm = xgb.DMatrix(np.random.randn(3, 2), label=df) assert dm.num_row() == 3 assert 
dm.num_col() == 2 def test_pandas_weight(self): kRows = 32 kCols = 8 X = np.random.randn(kRows, kCols) y = np.random.randn(kRows) w = np.random.uniform(size=kRows).astype(np.float32) w_pd = pd.DataFrame(w) data = xgb.DMatrix(X, y, w_pd) assert data.num_row() == kRows assert data.num_col() == kCols np.testing.assert_array_equal(data.get_weight(), w) def test_base_margin(self): set_base_margin_info(pd.DataFrame, xgb.DMatrix, "hist") def test_cv_as_pandas(self): dm, _ = tm.load_agaricus(__file__) params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic', 'eval_metric': 'error'} cv = xgb.cv(params, dm, num_boost_round=10, nfold=10) assert isinstance(cv, pd.DataFrame) exp = pd.Index([u'test-error-mean', u'test-error-std', u'train-error-mean', u'train-error-std']) assert len(cv.columns.intersection(exp)) == 4 # show progress log (result is the same as above) cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, verbose_eval=True) assert isinstance(cv, pd.DataFrame) exp = pd.Index([u'test-error-mean', u'test-error-std', u'train-error-mean', u'train-error-std']) assert len(cv.columns.intersection(exp)) == 4 cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, verbose_eval=True, show_stdv=False) assert isinstance(cv, pd.DataFrame) exp = pd.Index([u'test-error-mean', u'test-error-std', u'train-error-mean', u'train-error-std']) assert len(cv.columns.intersection(exp)) == 4 params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic', 'eval_metric': 'auc'} cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True) assert 'eval_metric' in params assert 'auc' in cv.columns[0] params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic', 'eval_metric': ['auc']} cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True) assert 'eval_metric' in params assert 'auc' in cv.columns[0] params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic', 'eval_metric': ['auc']} cv = 
xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True, early_stopping_rounds=1) assert 'eval_metric' in params assert 'auc' in cv.columns[0] assert cv.shape[0] < 10 params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic'} cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True, metrics='auc') assert 'auc' in cv.columns[0] params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic'} cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True, metrics=['auc']) assert 'auc' in cv.columns[0] params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic', 'eval_metric': ['auc']} cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True, metrics='error') assert 'eval_metric' in params assert 'auc' not in cv.columns[0] assert 'error' in cv.columns[0] cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True, metrics=['error']) assert 'eval_metric' in params assert 'auc' not in cv.columns[0] assert 'error' in cv.columns[0] params = list(params.items()) cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, as_pandas=True, metrics=['error']) assert isinstance(params, list) assert 'auc' not in cv.columns[0] assert 'error' in cv.columns[0] @pytest.mark.parametrize("DMatrixT", [xgb.DMatrix, xgb.QuantileDMatrix]) def test_nullable_type(self, DMatrixT) -> None: from pandas.api.types import is_categorical_dtype for orig, df in pd_dtypes(): if hasattr(df.dtypes, "__iter__"): enable_categorical = any(is_categorical_dtype for dtype in df.dtypes) else: # series enable_categorical = is_categorical_dtype(df.dtype) f0_orig = orig[orig.columns[0]] if isinstance(orig, pd.DataFrame) else orig f0 = df[df.columns[0]] if isinstance(df, pd.DataFrame) else df y_orig = f0_orig.astype(pd.Float32Dtype()).fillna(0) y = f0.astype(pd.Float32Dtype()).fillna(0) m_orig = DMatrixT(orig, enable_categorical=enable_categorical, label=y_orig) # extension types copy = df.copy() m_etype = 
DMatrixT(df, enable_categorical=enable_categorical, label=y) # no mutation assert df.equals(copy) # different from pd.BooleanDtype(), None is converted to False with bool if hasattr(orig.dtypes, "__iter__") and any( dtype == "bool" for dtype in orig.dtypes ): assert not tm.predictor_equal(m_orig, m_etype) else: assert tm.predictor_equal(m_orig, m_etype) np.testing.assert_allclose(m_orig.get_label(), m_etype.get_label()) np.testing.assert_allclose(m_etype.get_label(), y.values.astype(np.float32)) if isinstance(df, pd.DataFrame): f0 = df["f0"] with pytest.raises(ValueError, match="Label contains NaN"): xgb.DMatrix(df, f0, enable_categorical=enable_categorical) @pytest.mark.skipif(**tm.no_arrow()) @pytest.mark.parametrize("DMatrixT", [xgb.DMatrix, xgb.QuantileDMatrix]) def test_pyarrow_type(self, DMatrixT: Type[xgb.DMatrix]) -> None: for orig, df in pd_arrow_dtypes(): f0_orig: pd.Series = orig["f0"] f0 = df["f0"] if f0.dtype.name.startswith("bool"): y = None y_orig = None else: y_orig = f0_orig.fillna(0, inplace=False) y = f0.fillna(0, inplace=False) m_orig = DMatrixT(orig, enable_categorical=True, label=y_orig) m_etype = DMatrixT(df, enable_categorical=True, label=y) assert tm.predictor_equal(m_orig, m_etype) if y is not None: np.testing.assert_allclose(m_orig.get_label(), m_etype.get_label()) np.testing.assert_allclose(m_etype.get_label(), y.values)
14,507
39.188366
88
py
xgboost
xgboost-master/tests/python/test_quantile_dmatrix.py
from typing import Any, Dict, List import numpy as np import pytest from hypothesis import given, settings, strategies from scipy import sparse import xgboost as xgb from xgboost.testing import ( IteratorForTest, make_batches, make_batches_sparse, make_categorical, make_ltr, make_sparse_regression, predictor_equal, ) from xgboost.testing.data import check_inf, np_dtypes class TestQuantileDMatrix: def test_basic(self) -> None: """Checks for np array, list, tuple.""" n_samples = 234 n_features = 8 rng = np.random.default_rng() X = rng.normal(loc=0, scale=3, size=n_samples * n_features).reshape( n_samples, n_features ) y = rng.normal(0, 3, size=n_samples) Xy = xgb.QuantileDMatrix(X, y) assert Xy.num_row() == n_samples assert Xy.num_col() == n_features X = sparse.random(n_samples, n_features, density=0.1, format="csr") Xy = xgb.QuantileDMatrix(X, y) assert Xy.num_row() == n_samples assert Xy.num_col() == n_features X = sparse.random(n_samples, n_features, density=0.8, format="csr") Xy = xgb.QuantileDMatrix(X, y) assert Xy.num_row() == n_samples assert Xy.num_col() == n_features n_samples = 64 data = [] for f in range(n_samples): row = [f] * n_features data.append(row) assert np.array(data).shape == (n_samples, n_features) Xy = xgb.QuantileDMatrix(data, max_bin=256) assert Xy.num_row() == n_samples assert Xy.num_col() == n_features r = np.arange(1.0, n_samples) np.testing.assert_allclose(Xy.get_data().toarray()[1:, 0], r) def test_error(self): from sklearn.model_selection import train_test_split rng = np.random.default_rng(1994) X, y = make_categorical( n_samples=128, n_features=2, n_categories=3, onehot=False ) reg = xgb.XGBRegressor(tree_method="hist", enable_categorical=True) w = rng.uniform(0, 1, size=y.shape[0]) X_train, X_test, y_train, y_test, w_train, w_test = train_test_split( X, y, w, random_state=1994 ) with pytest.raises(ValueError, match="sample weight"): reg.fit( X, y, sample_weight=w_train, eval_set=[(X_test, y_test)], sample_weight_eval_set=[w_test], ) 
with pytest.raises(ValueError, match="sample weight"): reg.fit( X_train, y_train, sample_weight=w, eval_set=[(X_test, y_test)], sample_weight_eval_set=[w_test], ) @pytest.mark.parametrize("sparsity", [0.0, 0.1, 0.8, 0.9]) def test_with_iterator(self, sparsity: float) -> None: n_samples_per_batch = 317 n_features = 8 n_batches = 7 if sparsity == 0.0: it = IteratorForTest( *make_batches(n_samples_per_batch, n_features, n_batches, False), None ) else: it = IteratorForTest( *make_batches_sparse( n_samples_per_batch, n_features, n_batches, sparsity ), None ) Xy = xgb.QuantileDMatrix(it) assert Xy.num_row() == n_samples_per_batch * n_batches assert Xy.num_col() == n_features @pytest.mark.parametrize("sparsity", [0.0, 0.1, 0.5, 0.8, 0.9]) def test_training(self, sparsity: float) -> None: n_samples_per_batch = 317 n_features = 8 n_batches = 7 if sparsity == 0.0: it = IteratorForTest( *make_batches(n_samples_per_batch, n_features, n_batches, False), None ) else: it = IteratorForTest( *make_batches_sparse( n_samples_per_batch, n_features, n_batches, sparsity ), None ) parameters = {"tree_method": "hist", "max_bin": 256} Xy_it = xgb.QuantileDMatrix(it, max_bin=parameters["max_bin"]) from_it = xgb.train(parameters, Xy_it) X, y, w = it.as_arrays() w_it = Xy_it.get_weight() np.testing.assert_allclose(w_it, w) Xy_arr = xgb.DMatrix(X, y, weight=w) from_arr = xgb.train(parameters, Xy_arr) np.testing.assert_allclose(from_arr.predict(Xy_it), from_it.predict(Xy_arr)) y -= y.min() y += 0.01 Xy = xgb.QuantileDMatrix(X, y, weight=w) with pytest.raises(ValueError, match=r"Only.*hist.*"): parameters = { "tree_method": "approx", "max_bin": 256, "objective": "reg:gamma", } xgb.train(parameters, Xy) def run_ref_dmatrix(self, rng: Any, tree_method: str, enable_cat: bool) -> None: n_samples, n_features = 2048, 17 if enable_cat: X, y = make_categorical( n_samples, n_features, n_categories=13, onehot=False ) if tree_method == "gpu_hist": import cudf X = cudf.from_pandas(X) y = cudf.from_pandas(y) 
else: X = rng.normal(loc=0, scale=3, size=n_samples * n_features).reshape( n_samples, n_features ) y = rng.normal(0, 3, size=n_samples) # Use ref Xy = xgb.QuantileDMatrix(X, y, enable_categorical=enable_cat) Xy_valid = xgb.QuantileDMatrix(X, y, ref=Xy, enable_categorical=enable_cat) qdm_results: Dict[str, Dict[str, List[float]]] = {} xgb.train( {"tree_method": tree_method}, Xy, evals=[(Xy, "Train"), (Xy_valid, "valid")], evals_result=qdm_results, ) np.testing.assert_allclose( qdm_results["Train"]["rmse"], qdm_results["valid"]["rmse"] ) # No ref Xy_valid = xgb.QuantileDMatrix(X, y, enable_categorical=enable_cat) qdm_results = {} xgb.train( {"tree_method": tree_method}, Xy, evals=[(Xy, "Train"), (Xy_valid, "valid")], evals_result=qdm_results, ) np.testing.assert_allclose( qdm_results["Train"]["rmse"], qdm_results["valid"]["rmse"] ) # Different number of features Xy = xgb.QuantileDMatrix(X, y, enable_categorical=enable_cat) dXy = xgb.DMatrix(X, y, enable_categorical=enable_cat) n_samples, n_features = 256, 15 X = rng.normal(loc=0, scale=3, size=n_samples * n_features).reshape( n_samples, n_features ) y = rng.normal(0, 3, size=n_samples) with pytest.raises(ValueError, match=r".*features\."): xgb.QuantileDMatrix(X, y, ref=Xy, enable_categorical=enable_cat) # Compare training results n_samples, n_features = 256, 17 if enable_cat: X, y = make_categorical(n_samples, n_features, 13, onehot=False) if tree_method == "gpu_hist": import cudf X = cudf.from_pandas(X) y = cudf.from_pandas(y) else: X = rng.normal(loc=0, scale=3, size=n_samples * n_features).reshape( n_samples, n_features ) y = rng.normal(0, 3, size=n_samples) Xy_valid = xgb.QuantileDMatrix(X, y, ref=Xy, enable_categorical=enable_cat) # use DMatrix as ref Xy_valid_d = xgb.QuantileDMatrix(X, y, ref=dXy, enable_categorical=enable_cat) dXy_valid = xgb.DMatrix(X, y, enable_categorical=enable_cat) qdm_results = {} xgb.train( {"tree_method": tree_method}, Xy, evals=[(Xy, "Train"), (Xy_valid, "valid")], 
evals_result=qdm_results, ) dm_results: Dict[str, Dict[str, List[float]]] = {} xgb.train( {"tree_method": tree_method}, dXy, evals=[(dXy, "Train"), (dXy_valid, "valid"), (Xy_valid_d, "dvalid")], evals_result=dm_results, ) np.testing.assert_allclose( dm_results["Train"]["rmse"], qdm_results["Train"]["rmse"] ) np.testing.assert_allclose( dm_results["valid"]["rmse"], qdm_results["valid"]["rmse"] ) np.testing.assert_allclose( dm_results["dvalid"]["rmse"], qdm_results["valid"]["rmse"] ) def test_ref_dmatrix(self) -> None: rng = np.random.RandomState(1994) self.run_ref_dmatrix(rng, "hist", True) self.run_ref_dmatrix(rng, "hist", False) @pytest.mark.parametrize("sparsity", [0.0, 0.5]) def test_predict(self, sparsity: float) -> None: n_samples, n_features = 256, 4 X, y = make_categorical( n_samples, n_features, n_categories=13, onehot=False, sparsity=sparsity ) Xy = xgb.DMatrix(X, y, enable_categorical=True) booster = xgb.train({"tree_method": "hist"}, Xy) Xy = xgb.DMatrix(X, y, enable_categorical=True) a = booster.predict(Xy) qXy = xgb.QuantileDMatrix(X, y, enable_categorical=True) b = booster.predict(qXy) np.testing.assert_allclose(a, b) def test_ltr(self) -> None: X, y, qid, w = make_ltr(100, 3, 3, 5) Xy_qdm = xgb.QuantileDMatrix(X, y, qid=qid, weight=w) Xy = xgb.DMatrix(X, y, qid=qid, weight=w) xgb.train({"tree_method": "hist", "objective": "rank:ndcg"}, Xy) from_qdm = xgb.QuantileDMatrix(X, weight=w, ref=Xy_qdm) from_dm = xgb.QuantileDMatrix(X, weight=w, ref=Xy) assert predictor_equal(from_qdm, from_dm) def test_check_inf(self) -> None: rng = np.random.default_rng(1994) check_inf(rng) # we don't test empty Quantile DMatrix in single node construction. 
@given( strategies.integers(1, 1000), strategies.integers(1, 100), strategies.fractions(0, 0.99), ) @settings(deadline=None, print_blob=True) def test_to_csr(self, n_samples: int, n_features: int, sparsity: float) -> None: csr, y = make_sparse_regression(n_samples, n_features, sparsity, False) csr = csr.astype(np.float32) qdm = xgb.QuantileDMatrix(data=csr, label=y) ret = qdm.get_data() np.testing.assert_equal(csr.indptr, ret.indptr) np.testing.assert_equal(csr.indices, ret.indices) booster = xgb.train({"tree_method": "hist"}, dtrain=qdm) np.testing.assert_allclose( booster.predict(qdm), booster.predict(xgb.DMatrix(qdm.get_data())) ) def test_dtypes(self) -> None: """Checks for both np array and pd DataFrame.""" n_samples = 128 n_features = 16 for orig, x in np_dtypes(n_samples, n_features): m0 = xgb.QuantileDMatrix(orig) m1 = xgb.QuantileDMatrix(x) assert predictor_equal(m0, m1) # unsupported types for dtype in [ np.string_, np.complex64, np.complex128, ]: X: np.ndarray = np.array(orig, dtype=dtype) with pytest.raises(ValueError): xgb.QuantileDMatrix(X) def test_changed_max_bin(self) -> None: n_samples = 128 n_features = 16 csr, y = make_sparse_regression(n_samples, n_features, 0.5, False) Xy = xgb.QuantileDMatrix(csr, y, max_bin=9) booster = xgb.train({"max_bin": 9}, Xy, num_boost_round=2) Xy = xgb.QuantileDMatrix(csr, y, max_bin=11) with pytest.raises(ValueError, match="consistent"): xgb.train({}, Xy, num_boost_round=2, xgb_model=booster)
11,842
34.142433
86
py
xgboost
xgboost-master/tests/python/test_openmp.py
import os import subprocess import tempfile import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm pytestmark = tm.timeout(10) class TestOMP: def test_omp(self): dtrain, dtest = tm.load_agaricus(__file__) param = {'booster': 'gbtree', 'objective': 'binary:logistic', 'grow_policy': 'depthwise', 'tree_method': 'hist', 'eval_metric': 'error', 'max_depth': 5, 'min_child_weight': 0} watchlist = [(dtest, 'eval'), (dtrain, 'train')] num_round = 5 def run_trial(): res = {} bst = xgb.train(param, dtrain, num_round, watchlist, evals_result=res) metrics = [res['train']['error'][-1], res['eval']['error'][-1]] preds = bst.predict(dtest) return metrics, preds def consist_test(title, n): auc, pred = run_trial() for i in range(n-1): auc2, pred2 = run_trial() try: assert auc == auc2 assert np.array_equal(pred, pred2) except Exception as e: print('-------test %s failed, num_trial: %d-------' % (title, i)) raise e auc, pred = auc2, pred2 return auc, pred print('test approx ...') param['tree_method'] = 'approx' n_trials = 10 param['nthread'] = 1 auc_1, pred_1 = consist_test('approx_thread_1', n_trials) param['nthread'] = 2 auc_2, pred_2 = consist_test('approx_thread_2', n_trials) param['nthread'] = 3 auc_3, pred_3 = consist_test('approx_thread_3', n_trials) assert auc_1 == auc_2 == auc_3 assert np.array_equal(auc_1, auc_2) assert np.array_equal(auc_1, auc_3) print('test hist ...') param['tree_method'] = 'hist' param['nthread'] = 1 auc_1, pred_1 = consist_test('hist_thread_1', n_trials) param['nthread'] = 2 auc_2, pred_2 = consist_test('hist_thread_2', n_trials) param['nthread'] = 3 auc_3, pred_3 = consist_test('hist_thread_3', n_trials) assert auc_1 == auc_2 == auc_3 assert np.array_equal(auc_1, auc_2) assert np.array_equal(auc_1, auc_3) @pytest.mark.skipif(**tm.no_sklearn()) def test_with_omp_thread_limit(self): args = [ "python", os.path.join( os.path.dirname(tm.normpath(__file__)), "with_omp_limit.py" ) ] results = [] with 
tempfile.TemporaryDirectory() as tmpdir: for i in (1, 2, 16): path = os.path.join(tmpdir, str(i)) with open(path, "w") as fd: fd.write("\n") cp = args.copy() cp.append(path) env = os.environ.copy() env["OMP_THREAD_LIMIT"] = str(i) status = subprocess.call(cp, env=env) assert status == 0 with open(path, "r") as fd: results.append(float(fd.read())) for auc in results: np.testing.assert_allclose(auc, results[0])
3,285
29.146789
85
py
xgboost
xgboost-master/tests/python/test_eval_metrics.py
import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm from xgboost.testing.metrics import check_precision_score, check_quantile_error rng = np.random.RandomState(1337) class TestEvalMetrics: xgb_params_01 = { 'verbosity': 0, 'nthread': 1, 'eval_metric': 'error' } xgb_params_02 = { 'verbosity': 0, 'nthread': 1, 'eval_metric': ['error'] } xgb_params_03 = { 'verbosity': 0, 'nthread': 1, 'eval_metric': ['rmse', 'error'] } xgb_params_04 = { 'verbosity': 0, 'nthread': 1, 'eval_metric': ['error', 'rmse'] } def evalerror_01(self, preds, dtrain): labels = dtrain.get_label() return 'error', float(sum(labels != (preds > 0.0))) / len(labels) def evalerror_02(self, preds, dtrain): labels = dtrain.get_label() return [('error', float(sum(labels != (preds > 0.0))) / len(labels))] @pytest.mark.skipif(**tm.no_sklearn()) def evalerror_03(self, preds, dtrain): from sklearn.metrics import mean_squared_error labels = dtrain.get_label() return [('rmse', mean_squared_error(labels, preds)), ('error', float(sum(labels != (preds > 0.0))) / len(labels))] @pytest.mark.skipif(**tm.no_sklearn()) def evalerror_04(self, preds, dtrain): from sklearn.metrics import mean_squared_error labels = dtrain.get_label() return [('error', float(sum(labels != (preds > 0.0))) / len(labels)), ('rmse', mean_squared_error(labels, preds))] @pytest.mark.skipif(**tm.no_sklearn()) def test_eval_metrics(self): try: from sklearn.model_selection import train_test_split except ImportError: from sklearn.cross_validation import train_test_split from sklearn.datasets import load_digits digits = load_digits(n_class=2) X = digits['data'] y = digits['target'] Xt, Xv, yt, yv = train_test_split(X, y, test_size=0.2, random_state=0) dtrain = xgb.DMatrix(Xt, label=yt) dvalid = xgb.DMatrix(Xv, label=yv) watchlist = [(dtrain, 'train'), (dvalid, 'val')] gbdt_01 = xgb.train(self.xgb_params_01, dtrain, num_boost_round=10) gbdt_02 = xgb.train(self.xgb_params_02, dtrain, num_boost_round=10) gbdt_03 = 
xgb.train(self.xgb_params_03, dtrain, num_boost_round=10) assert gbdt_01.predict(dvalid)[0] == gbdt_02.predict(dvalid)[0] assert gbdt_01.predict(dvalid)[0] == gbdt_03.predict(dvalid)[0] gbdt_01 = xgb.train(self.xgb_params_01, dtrain, 10, watchlist, early_stopping_rounds=2) gbdt_02 = xgb.train(self.xgb_params_02, dtrain, 10, watchlist, early_stopping_rounds=2) gbdt_03 = xgb.train(self.xgb_params_03, dtrain, 10, watchlist, early_stopping_rounds=2) gbdt_04 = xgb.train(self.xgb_params_04, dtrain, 10, watchlist, early_stopping_rounds=2) assert gbdt_01.predict(dvalid)[0] == gbdt_02.predict(dvalid)[0] assert gbdt_01.predict(dvalid)[0] == gbdt_03.predict(dvalid)[0] assert gbdt_03.predict(dvalid)[0] != gbdt_04.predict(dvalid)[0] gbdt_01 = xgb.train(self.xgb_params_01, dtrain, 10, watchlist, early_stopping_rounds=2, feval=self.evalerror_01) gbdt_02 = xgb.train(self.xgb_params_02, dtrain, 10, watchlist, early_stopping_rounds=2, feval=self.evalerror_02) gbdt_03 = xgb.train(self.xgb_params_03, dtrain, 10, watchlist, early_stopping_rounds=2, feval=self.evalerror_03) gbdt_04 = xgb.train(self.xgb_params_04, dtrain, 10, watchlist, early_stopping_rounds=2, feval=self.evalerror_04) assert gbdt_01.predict(dvalid)[0] == gbdt_02.predict(dvalid)[0] assert gbdt_01.predict(dvalid)[0] == gbdt_03.predict(dvalid)[0] assert gbdt_03.predict(dvalid)[0] != gbdt_04.predict(dvalid)[0] @pytest.mark.skipif(**tm.no_sklearn()) def test_gamma_deviance(self): from sklearn.metrics import mean_gamma_deviance rng = np.random.RandomState(1994) n_samples = 100 n_features = 30 X = rng.randn(n_samples, n_features) y = rng.randn(n_samples) y = y - y.min() * 100 reg = xgb.XGBRegressor(tree_method="hist", objective="reg:gamma", n_estimators=10) reg.fit(X, y, eval_metric="gamma-deviance") booster = reg.get_booster() score = reg.predict(X) gamma_dev = float(booster.eval(xgb.DMatrix(X, y)).split(":")[1].split(":")[0]) skl_gamma_dev = mean_gamma_deviance(y, score) np.testing.assert_allclose(gamma_dev, skl_gamma_dev, 
rtol=1e-6) @pytest.mark.skipif(**tm.no_sklearn()) def test_gamma_lik(self) -> None: import scipy.stats as stats rng = np.random.default_rng(1994) n_samples = 32 n_features = 10 X = rng.normal(0, 1, size=n_samples * n_features).reshape((n_samples, n_features)) alpha, loc, beta = 5.0, 11.1, 22 y = stats.gamma.rvs(alpha, loc=loc, scale=beta, size=n_samples, random_state=rng) reg = xgb.XGBRegressor(tree_method="hist", objective="reg:gamma", n_estimators=64) reg.fit(X, y, eval_metric="gamma-nloglik", eval_set=[(X, y)]) score = reg.predict(X) booster = reg.get_booster() nloglik = float(booster.eval(xgb.DMatrix(X, y)).split(":")[1].split(":")[0]) # \beta_i = - (1 / \theta_i a) # where \theta_i is the canonical parameter # XGBoost uses the canonical link function of gamma in evaluation function. # so \theta = - (1.0 / y) # dispersion is hardcoded as 1.0, so shape (a in scipy parameter) is also 1.0 beta = - (1.0 / (- (1.0 / y))) # == y nloglik_stats = -stats.gamma.logpdf(score, a=1.0, scale=beta) np.testing.assert_allclose(nloglik, np.mean(nloglik_stats), rtol=1e-3) def run_roc_auc_binary(self, tree_method, n_samples): import numpy as np from sklearn.datasets import make_classification from sklearn.metrics import roc_auc_score rng = np.random.RandomState(1994) n_samples = n_samples n_features = 10 X, y = make_classification( n_samples, n_features, n_informative=n_features, n_redundant=0, random_state=rng ) Xy = xgb.DMatrix(X, y) booster = xgb.train( { "tree_method": tree_method, "eval_metric": "auc", "objective": "binary:logistic", }, Xy, num_boost_round=1, ) score = booster.predict(Xy) skl_auc = roc_auc_score(y, score) auc = float(booster.eval(Xy).split(":")[1]) np.testing.assert_allclose(skl_auc, auc, rtol=1e-6) X = rng.randn(*X.shape) score = booster.predict(xgb.DMatrix(X)) skl_auc = roc_auc_score(y, score) auc = float(booster.eval(xgb.DMatrix(X, y)).split(":")[1]) np.testing.assert_allclose(skl_auc, auc, rtol=1e-6) @pytest.mark.skipif(**tm.no_sklearn()) 
@pytest.mark.parametrize("n_samples", [100, 1000, 10000]) def test_roc_auc(self, n_samples): self.run_roc_auc_binary("hist", n_samples) def run_roc_auc_multi(self, tree_method, n_samples, weighted): import numpy as np from sklearn.datasets import make_classification from sklearn.metrics import roc_auc_score rng = np.random.RandomState(1994) n_samples = n_samples n_features = 10 n_classes = 4 X, y = make_classification( n_samples, n_features, n_informative=n_features, n_redundant=0, n_classes=n_classes, random_state=rng ) if weighted: weights = rng.randn(n_samples) weights -= weights.min() weights /= weights.max() else: weights = None Xy = xgb.DMatrix(X, y, weight=weights) booster = xgb.train( { "tree_method": tree_method, "eval_metric": "auc", "objective": "multi:softprob", "num_class": n_classes, }, Xy, num_boost_round=1, ) score = booster.predict(Xy) skl_auc = roc_auc_score( y, score, average="weighted", sample_weight=weights, multi_class="ovr" ) auc = float(booster.eval(Xy).split(":")[1]) np.testing.assert_allclose(skl_auc, auc, rtol=1e-6) X = rng.randn(*X.shape) score = booster.predict(xgb.DMatrix(X, weight=weights)) skl_auc = roc_auc_score( y, score, average="weighted", sample_weight=weights, multi_class="ovr" ) auc = float(booster.eval(xgb.DMatrix(X, y, weight=weights)).split(":")[1]) np.testing.assert_allclose(skl_auc, auc, rtol=1e-5) @pytest.mark.parametrize( "n_samples,weighted", [(4, False), (100, False), (1000, False), (10000, True)] ) def test_roc_auc_multi(self, n_samples, weighted): self.run_roc_auc_multi("hist", n_samples, weighted) def run_pr_auc_binary(self, tree_method): from sklearn.datasets import make_classification from sklearn.metrics import auc, precision_recall_curve X, y = make_classification(128, 4, n_classes=2, random_state=1994) clf = xgb.XGBClassifier(tree_method=tree_method, n_estimators=1) clf.fit(X, y, eval_metric="aucpr", eval_set=[(X, y)]) evals_result = clf.evals_result()["validation_0"]["aucpr"][-1] y_score = 
clf.predict_proba(X)[:, 1] # get the positive column precision, recall, _ = precision_recall_curve(y, y_score) prauc = auc(recall, precision) # Interpolation results are slightly different from sklearn, but overall should be # similar. np.testing.assert_allclose(prauc, evals_result, rtol=1e-2) clf = xgb.XGBClassifier(tree_method=tree_method, n_estimators=10) clf.fit(X, y, eval_metric="aucpr", eval_set=[(X, y)]) evals_result = clf.evals_result()["validation_0"]["aucpr"][-1] np.testing.assert_allclose(0.99, evals_result, rtol=1e-2) def test_pr_auc_binary(self): self.run_pr_auc_binary("hist") def run_pr_auc_multi(self, tree_method): from sklearn.datasets import make_classification X, y = make_classification( 64, 16, n_informative=8, n_classes=3, random_state=1994 ) clf = xgb.XGBClassifier(tree_method=tree_method, n_estimators=1) clf.fit(X, y, eval_metric="aucpr", eval_set=[(X, y)]) evals_result = clf.evals_result()["validation_0"]["aucpr"][-1] # No available implementation for comparison, just check that XGBoost converges to # 1.0 clf = xgb.XGBClassifier(tree_method=tree_method, n_estimators=10) clf.fit(X, y, eval_metric="aucpr", eval_set=[(X, y)]) evals_result = clf.evals_result()["validation_0"]["aucpr"][-1] np.testing.assert_allclose(1.0, evals_result, rtol=1e-2) def test_pr_auc_multi(self): self.run_pr_auc_multi("hist") def run_pr_auc_ltr(self, tree_method): from sklearn.datasets import make_classification X, y = make_classification(128, 4, n_classes=2, random_state=1994) ltr = xgb.XGBRanker( tree_method=tree_method, n_estimators=16, objective="rank:pairwise" ) groups = np.array([32, 32, 64]) ltr.fit( X, y, group=groups, eval_set=[(X, y)], eval_group=[groups], eval_metric="aucpr", ) results = ltr.evals_result()["validation_0"]["aucpr"] assert results[-1] >= 0.99 def test_pr_auc_ltr(self): self.run_pr_auc_ltr("hist") def test_precision_score(self): check_precision_score("hist") @pytest.mark.skipif(**tm.no_sklearn()) def test_quantile_error(self) -> None: 
check_quantile_error("hist")
12,366
36.935583
90
py
xgboost
xgboost-master/tests/python/test_training_continuation.py
import os import tempfile import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm rng = np.random.RandomState(1337) class TestTrainingContinuation: num_parallel_tree = 3 def generate_parameters(self): xgb_params_01_binary = { 'nthread': 1, } xgb_params_02_binary = { 'nthread': 1, 'num_parallel_tree': self.num_parallel_tree } xgb_params_03_binary = { 'nthread': 1, 'num_class': 5, 'num_parallel_tree': self.num_parallel_tree } return [ xgb_params_01_binary, xgb_params_02_binary, xgb_params_03_binary ] def run_training_continuation(self, xgb_params_01, xgb_params_02, xgb_params_03): from sklearn.datasets import load_digits from sklearn.metrics import mean_squared_error digits_2class = load_digits(n_class=2) digits_5class = load_digits(n_class=5) X_2class = digits_2class['data'] y_2class = digits_2class['target'] X_5class = digits_5class['data'] y_5class = digits_5class['target'] dtrain_2class = xgb.DMatrix(X_2class, label=y_2class) dtrain_5class = xgb.DMatrix(X_5class, label=y_5class) gbdt_01 = xgb.train(xgb_params_01, dtrain_2class, num_boost_round=10) ntrees_01 = len(gbdt_01.get_dump()) assert ntrees_01 == 10 gbdt_02 = xgb.train(xgb_params_01, dtrain_2class, num_boost_round=0) gbdt_02.save_model('xgb_tc.json') gbdt_02a = xgb.train(xgb_params_01, dtrain_2class, num_boost_round=10, xgb_model=gbdt_02) gbdt_02b = xgb.train(xgb_params_01, dtrain_2class, num_boost_round=10, xgb_model="xgb_tc.json") ntrees_02a = len(gbdt_02a.get_dump()) ntrees_02b = len(gbdt_02b.get_dump()) assert ntrees_02a == 10 assert ntrees_02b == 10 res1 = mean_squared_error(y_2class, gbdt_01.predict(dtrain_2class)) res2 = mean_squared_error(y_2class, gbdt_02a.predict(dtrain_2class)) assert res1 == res2 res1 = mean_squared_error(y_2class, gbdt_01.predict(dtrain_2class)) res2 = mean_squared_error(y_2class, gbdt_02b.predict(dtrain_2class)) assert res1 == res2 gbdt_03 = xgb.train(xgb_params_01, dtrain_2class, num_boost_round=3) gbdt_03.save_model('xgb_tc.json') gbdt_03a = 
xgb.train(xgb_params_01, dtrain_2class, num_boost_round=7, xgb_model=gbdt_03) gbdt_03b = xgb.train(xgb_params_01, dtrain_2class, num_boost_round=7, xgb_model="xgb_tc.json") ntrees_03a = len(gbdt_03a.get_dump()) ntrees_03b = len(gbdt_03b.get_dump()) assert ntrees_03a == 10 assert ntrees_03b == 10 os.remove('xgb_tc.json') res1 = mean_squared_error(y_2class, gbdt_03a.predict(dtrain_2class)) res2 = mean_squared_error(y_2class, gbdt_03b.predict(dtrain_2class)) assert res1 == res2 gbdt_04 = xgb.train(xgb_params_02, dtrain_2class, num_boost_round=3) res1 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class)) res2 = mean_squared_error( y_2class, gbdt_04.predict( dtrain_2class, iteration_range=(0, gbdt_04.best_iteration + 1) ) ) assert res1 == res2 gbdt_04 = xgb.train( xgb_params_02, dtrain_2class, num_boost_round=7, xgb_model=gbdt_04 ) res1 = mean_squared_error(y_2class, gbdt_04.predict(dtrain_2class)) res2 = mean_squared_error( y_2class, gbdt_04.predict( dtrain_2class, iteration_range=(0, gbdt_04.best_iteration + 1) ) ) assert res1 == res2 gbdt_05 = xgb.train(xgb_params_03, dtrain_5class, num_boost_round=7) gbdt_05 = xgb.train(xgb_params_03, dtrain_5class, num_boost_round=3, xgb_model=gbdt_05) res1 = gbdt_05.predict(dtrain_5class) res2 = gbdt_05.predict( dtrain_5class, iteration_range=(0, gbdt_05.best_iteration + 1) ) np.testing.assert_almost_equal(res1, res2) @pytest.mark.skipif(**tm.no_sklearn()) def test_training_continuation_json(self): params = self.generate_parameters() self.run_training_continuation(params[0], params[1], params[2]) @pytest.mark.skipif(**tm.no_sklearn()) def test_training_continuation_updaters_json(self): # Picked up from R tests. 
updaters = 'grow_colmaker,prune,refresh' params = self.generate_parameters() for p in params: p['updater'] = updaters self.run_training_continuation(params[0], params[1], params[2]) @pytest.mark.skipif(**tm.no_sklearn()) def test_changed_parameter(self): from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) clf = xgb.XGBClassifier(n_estimators=2) clf.fit(X, y, eval_set=[(X, y)], eval_metric="logloss") assert tm.non_increasing(clf.evals_result()["validation_0"]["logloss"]) with tempfile.TemporaryDirectory() as tmpdir: clf.save_model(os.path.join(tmpdir, "clf.json")) loaded = xgb.XGBClassifier() loaded.load_model(os.path.join(tmpdir, "clf.json")) clf = xgb.XGBClassifier(n_estimators=2) # change metric to error clf.fit(X, y, eval_set=[(X, y)], eval_metric="error") assert tm.non_increasing(clf.evals_result()["validation_0"]["error"])
5,883
34.878049
79
py
xgboost
xgboost-master/tests/python/test_linear.py
from hypothesis import given, note, settings, strategies import xgboost as xgb from xgboost import testing as tm pytestmark = tm.timeout(20) parameter_strategy = strategies.fixed_dictionaries({ 'booster': strategies.just('gblinear'), 'eta': strategies.floats(0.01, 0.25), 'tolerance': strategies.floats(1e-5, 1e-2), 'nthread': strategies.integers(1, 4), }) coord_strategy = strategies.fixed_dictionaries({ 'feature_selector': strategies.sampled_from(['cyclic', 'shuffle', 'greedy', 'thrifty']), 'top_k': strategies.integers(1, 10), }) def train_result(param, dmat, num_rounds): result = {} xgb.train(param, dmat, num_rounds, [(dmat, 'train')], verbose_eval=False, evals_result=result) return result class TestLinear: @given( parameter_strategy, strategies.integers(10, 50), tm.make_dataset_strategy(), coord_strategy ) @settings(deadline=None, max_examples=20, print_blob=True) def test_coordinate(self, param, num_rounds, dataset, coord_param): param['updater'] = 'coord_descent' param.update(coord_param) param = dataset.set_params(param) result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric] note(result) assert tm.non_increasing(result, 5e-4) # Loss is not guaranteed to always decrease because of regularisation parameters # We test a weaker condition that the loss has not increased between the first and last # iteration @given( parameter_strategy, strategies.integers(10, 50), tm.make_dataset_strategy(), coord_strategy, strategies.floats(1e-5, 0.8), strategies.floats(1e-5, 0.8) ) @settings(deadline=None, max_examples=20, print_blob=True) def test_coordinate_regularised(self, param, num_rounds, dataset, coord_param, alpha, lambd): param['updater'] = 'coord_descent' param['alpha'] = alpha param['lambda'] = lambd param.update(coord_param) param = dataset.set_params(param) result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric] note(result) assert tm.non_increasing([result[0], result[-1]]) @given( parameter_strategy, 
strategies.integers(10, 50), tm.make_dataset_strategy() ) @settings(deadline=None, max_examples=20, print_blob=True) def test_shotgun(self, param, num_rounds, dataset): param['updater'] = 'shotgun' param = dataset.set_params(param) result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric] note(result) # shotgun is non-deterministic, so we relax the test by only using first and last # iteration. if len(result) > 2: sampled_result = (result[0], result[-1]) else: sampled_result = result assert tm.non_increasing(sampled_result) @given( parameter_strategy, strategies.integers(10, 50), tm.make_dataset_strategy(), strategies.floats(1e-5, 1.0), strategies.floats(1e-5, 1.0) ) @settings(deadline=None, max_examples=20, print_blob=True) def test_shotgun_regularised(self, param, num_rounds, dataset, alpha, lambd): param['updater'] = 'shotgun' param['alpha'] = alpha param['lambda'] = lambd param = dataset.set_params(param) result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric] note(result) assert tm.non_increasing([result[0], result[-1]])
3,673
35.376238
97
py
xgboost
xgboost-master/tests/python/test_shap.py
import itertools import re import numpy as np import scipy import scipy.special import xgboost as xgb from xgboost import testing as tm class TestSHAP: def test_feature_importances(self) -> None: rng = np.random.RandomState(1994) data = rng.randn(100, 5) target = np.array([0, 1] * 50) features = ["Feature1", "Feature2", "Feature3", "Feature4", "Feature5"] dm = xgb.DMatrix(data, label=target, feature_names=features) params = { "objective": "multi:softprob", "eval_metric": "mlogloss", "eta": 0.3, "num_class": 3, } bst = xgb.train(params, dm, num_boost_round=10) # number of feature importances should == number of features scores1 = bst.get_score() scores2 = bst.get_score(importance_type="weight") scores3 = bst.get_score(importance_type="cover") scores4 = bst.get_score(importance_type="gain") scores5 = bst.get_score(importance_type="total_cover") scores6 = bst.get_score(importance_type="total_gain") assert len(scores1) == len(features) assert len(scores2) == len(features) assert len(scores3) == len(features) assert len(scores4) == len(features) assert len(scores5) == len(features) assert len(scores6) == len(features) # check backwards compatibility of get_fscore fscores = bst.get_fscore() assert scores1 == fscores dtrain, dtest = tm.load_agaricus(__file__) def fn(max_depth: int, num_rounds: int) -> None: # train params = {"max_depth": max_depth, "eta": 1, "verbosity": 0} bst = xgb.train(params, dtrain, num_boost_round=num_rounds) # predict preds = bst.predict(dtest) contribs = bst.predict(dtest, pred_contribs=True) # result should be (number of features + BIAS) * number of rows assert contribs.shape == (dtest.num_row(), dtest.num_col() + 1) # sum of contributions should be same as predictions np.testing.assert_array_almost_equal(np.sum(contribs, axis=1), preds) # for max_depth, num_rounds in itertools.product(range(0, 3), range(1, 5)): # yield fn, max_depth, num_rounds # check that we get the right SHAP values for a basic AND example # 
(https://arxiv.org/abs/1706.06060) X = np.zeros((4, 2)) X[0, :] = 1 X[1, 0] = 1 X[2, 1] = 1 y = np.zeros(4) y[0] = 1 param = {"max_depth": 2, "base_score": 0.0, "eta": 1.0, "lambda": 0} bst = xgb.train(param, xgb.DMatrix(X, label=y), 1) out = bst.predict(xgb.DMatrix(X[0:1, :]), pred_contribs=True) assert out[0, 0] == 0.375 assert out[0, 1] == 0.375 assert out[0, 2] == 0.25 def parse_model(model: xgb.Booster) -> list: trees = [] r_exp = r"([0-9]+):\[f([0-9]+)<([0-9\.e-]+)\] yes=([0-9]+),no=([0-9]+).*cover=([0-9e\.]+)" r_exp_leaf = r"([0-9]+):leaf=([0-9\.e-]+),cover=([0-9e\.]+)" for tree in model.get_dump(with_stats=True): lines = list(tree.splitlines()) trees.append([None for i in range(len(lines))]) for line in lines: match = re.search(r_exp, line) if match is not None: ind = int(match.group(1)) assert trees[-1] is not None while ind >= len(trees[-1]): assert isinstance(trees[-1], list) trees[-1].append(None) trees[-1][ind] = { "yes_ind": int(match.group(4)), "no_ind": int(match.group(5)), "value": None, "threshold": float(match.group(3)), "feature_index": int(match.group(2)), "cover": float(match.group(6)), } else: match = re.search(r_exp_leaf, line) ind = int(match.group(1)) while ind >= len(trees[-1]): trees[-1].append(None) trees[-1][ind] = { "value": float(match.group(2)), "cover": float(match.group(3)), } return trees def exp_value_rec(tree, z, x, i=0): if tree[i]["value"] is not None: return tree[i]["value"] else: ind = tree[i]["feature_index"] if z[ind] == 1: # 1e-6 for numeric error from parsing text dump. 
if x[ind] + 1e-6 <= tree[i]["threshold"]: return exp_value_rec(tree, z, x, tree[i]["yes_ind"]) else: return exp_value_rec(tree, z, x, tree[i]["no_ind"]) else: r_yes = tree[tree[i]["yes_ind"]]["cover"] / tree[i]["cover"] out = exp_value_rec(tree, z, x, tree[i]["yes_ind"]) val = out * r_yes r_no = tree[tree[i]["no_ind"]]["cover"] / tree[i]["cover"] out = exp_value_rec(tree, z, x, tree[i]["no_ind"]) val += out * r_no return val def exp_value(trees, z, x): "E[f(z)|Z_s = X_s]" return np.sum([exp_value_rec(tree, z, x) for tree in trees]) def all_subsets(ss): return itertools.chain( *map(lambda x: itertools.combinations(ss, x), range(0, len(ss) + 1)) ) def shap_value(trees, x, i, cond=None, cond_value=None): M = len(x) z = np.zeros(M) other_inds = list(set(range(M)) - set([i])) if cond is not None: other_inds = list(set(other_inds) - set([cond])) z[cond] = cond_value M -= 1 total = 0.0 for subset in all_subsets(other_inds): if len(subset) > 0: z[list(subset)] = 1 v1 = exp_value(trees, z, x) z[i] = 1 v2 = exp_value(trees, z, x) total += (v2 - v1) / (scipy.special.binom(M - 1, len(subset)) * M) z[i] = 0 z[list(subset)] = 0 return total def shap_values(trees, x): vals = [shap_value(trees, x, i) for i in range(len(x))] vals.append(exp_value(trees, np.zeros(len(x)), x)) return np.array(vals) def interaction_values(trees, x): M = len(x) out = np.zeros((M + 1, M + 1)) for i in range(len(x)): for j in range(len(x)): if i != j: out[i, j] = interaction_value(trees, x, i, j) / 2 svals = shap_values(trees, x) main_effects = svals - out.sum(1) out[np.diag_indices_from(out)] = main_effects return out def interaction_value(trees, x, i, j): M = len(x) z = np.zeros(M) other_inds = list(set(range(M)) - set([i, j])) total = 0.0 for subset in all_subsets(other_inds): if len(subset) > 0: z[list(subset)] = 1 v00 = exp_value(trees, z, x) z[i] = 1 v10 = exp_value(trees, z, x) z[j] = 1 v11 = exp_value(trees, z, x) z[i] = 0 v01 = exp_value(trees, z, x) z[j] = 0 total += (v11 - v01 - v10 + v00) / ( 
scipy.special.binom(M - 2, len(subset)) * (M - 1) ) z[list(subset)] = 0 return total # test a simple and function M = 2 N = 4 X = np.zeros((N, M)) X[0, :] = 1 X[1, 0] = 1 X[2, 1] = 1 y = np.zeros(N) y[0] = 1 param = {"max_depth": 2, "base_score": 0.0, "eta": 1.0, "lambda": 0} bst = xgb.train(param, xgb.DMatrix(X, label=y), 1) brute_force = shap_values(parse_model(bst), X[0, :]) fast_method = bst.predict(xgb.DMatrix(X[0:1, :]), pred_contribs=True) assert np.linalg.norm(brute_force - fast_method[0, :]) < 1e-4 brute_force = interaction_values(parse_model(bst), X[0, :]) fast_method = bst.predict(xgb.DMatrix(X[0:1, :]), pred_interactions=True) assert np.linalg.norm(brute_force - fast_method[0, :, :]) < 1e-4 # test a random function M = 2 N = 4 X = rng.randn(N, M) y = rng.randn(N) param = {"max_depth": 2, "base_score": 0.0, "eta": 1.0, "lambda": 0} bst = xgb.train(param, xgb.DMatrix(X, label=y), 1) brute_force = shap_values(parse_model(bst), X[0, :]) fast_method = bst.predict(xgb.DMatrix(X[0:1, :]), pred_contribs=True) assert np.linalg.norm(brute_force - fast_method[0, :]) < 1e-4 brute_force = interaction_values(parse_model(bst), X[0, :]) fast_method = bst.predict(xgb.DMatrix(X[0:1, :]), pred_interactions=True) assert np.linalg.norm(brute_force - fast_method[0, :, :]) < 1e-4 # test another larger more complex random function M = 5 N = 100 X = rng.randn(N, M) y = rng.randn(N) base_score = 1.0 param = {"max_depth": 5, "base_score": base_score, "eta": 0.1, "gamma": 2.0} bst = xgb.train(param, xgb.DMatrix(X, label=y), 10) brute_force = shap_values(parse_model(bst), X[0, :]) brute_force[-1] += base_score fast_method = bst.predict(xgb.DMatrix(X[0:1, :]), pred_contribs=True) assert np.linalg.norm(brute_force - fast_method[0, :]) < 1e-4 brute_force = interaction_values(parse_model(bst), X[0, :]) brute_force[-1, -1] += base_score fast_method = bst.predict(xgb.DMatrix(X[0:1, :]), pred_interactions=True) assert np.linalg.norm(brute_force - fast_method[0, :, :]) < 1e-4
10,334
38.903475
102
py
xgboost
xgboost-master/tests/python/test_predict.py
"""Tests for running inplace prediction.""" from concurrent.futures import ThreadPoolExecutor import numpy as np import pandas as pd import pytest from scipy import sparse import xgboost as xgb from xgboost import testing as tm from xgboost.testing.data import np_dtypes, pd_dtypes from xgboost.testing.shared import validate_leaf_output def run_threaded_predict(X, rows, predict_func): results = [] per_thread = 20 with ThreadPoolExecutor(max_workers=10) as e: for i in range(0, rows, int(rows / per_thread)): if hasattr(X, "iloc"): predictor = X.iloc[i : i + per_thread, :] else: predictor = X[i : i + per_thread, ...] f = e.submit(predict_func, predictor) results.append(f) for f in results: assert f.result() def run_predict_leaf(device: str) -> np.ndarray: rows = 100 cols = 4 classes = 5 num_parallel_tree = 4 num_boost_round = 10 rng = np.random.RandomState(1994) X = rng.randn(rows, cols) y = rng.randint(low=0, high=classes, size=rows) m = xgb.DMatrix(X, y) booster = xgb.train( { "num_parallel_tree": num_parallel_tree, "num_class": classes, "tree_method": "hist", }, m, num_boost_round=num_boost_round, ) booster.set_param({"device": device}) empty = xgb.DMatrix(np.ones(shape=(0, cols))) empty_leaf = booster.predict(empty, pred_leaf=True) assert empty_leaf.shape[0] == 0 leaf = booster.predict(m, pred_leaf=True, strict_shape=True) assert leaf.shape[0] == rows assert leaf.shape[1] == num_boost_round assert leaf.shape[2] == classes assert leaf.shape[3] == num_parallel_tree validate_leaf_output(leaf, num_parallel_tree) n_iters = 2 sliced = booster.predict( m, pred_leaf=True, iteration_range=(0, n_iters), strict_shape=True, ) first = sliced[0, ...] 
assert np.prod(first.shape) == classes * num_parallel_tree * n_iters # When there's only 1 tree, the output is a 1 dim vector booster = xgb.train({"tree_method": "hist"}, num_boost_round=1, dtrain=m) booster.set_param({"device": device}) assert booster.predict(m, pred_leaf=True).shape == (rows,) return leaf def test_predict_leaf() -> None: run_predict_leaf("cpu") def test_predict_shape(): from sklearn.datasets import fetch_california_housing X, y = fetch_california_housing(return_X_y=True) reg = xgb.XGBRegressor(n_estimators=1) reg.fit(X, y) predt = reg.get_booster().predict(xgb.DMatrix(X), strict_shape=True) assert len(predt.shape) == 2 assert predt.shape[0] == X.shape[0] assert predt.shape[1] == 1 contrib = reg.get_booster().predict( xgb.DMatrix(X), pred_contribs=True, strict_shape=True ) assert len(contrib.shape) == 3 assert contrib.shape[1] == 1 contrib = reg.get_booster().predict( xgb.DMatrix(X), pred_contribs=True, approx_contribs=True ) assert len(contrib.shape) == 2 assert contrib.shape[1] == X.shape[1] + 1 interaction = reg.get_booster().predict( xgb.DMatrix(X), pred_interactions=True, approx_contribs=True ) assert len(interaction.shape) == 3 assert interaction.shape[1] == X.shape[1] + 1 assert interaction.shape[2] == X.shape[1] + 1 interaction = reg.get_booster().predict( xgb.DMatrix(X), pred_interactions=True, approx_contribs=True, strict_shape=True ) assert len(interaction.shape) == 4 assert interaction.shape[1] == 1 assert interaction.shape[2] == X.shape[1] + 1 assert interaction.shape[3] == X.shape[1] + 1 class TestInplacePredict: """Tests for running inplace prediction""" @classmethod def setup_class(cls): cls.rows = 1000 cls.cols = 10 cls.missing = 11 # set to integer for testing cls.rng = np.random.RandomState(1994) cls.X = cls.rng.randn(cls.rows, cls.cols) missing_idx = [i for i in range(0, cls.cols, 4)] cls.X[:, missing_idx] = cls.missing # set to be missing cls.y = cls.rng.randn(cls.rows) dtrain = xgb.DMatrix(cls.X, cls.y) cls.test = 
xgb.DMatrix(cls.X[:10, ...], missing=cls.missing) cls.num_boost_round = 10 cls.booster = xgb.train({"tree_method": "hist"}, dtrain, num_boost_round=10) def test_predict(self): booster = self.booster X = self.X test = self.test predt_from_array = booster.inplace_predict(X[:10, ...], missing=self.missing) predt_from_dmatrix = booster.predict(test) X_obj = X.copy().astype(object) assert X_obj.dtype.hasobject is True assert X.dtype.hasobject is False np.testing.assert_allclose( booster.inplace_predict(X_obj), booster.inplace_predict(X) ) np.testing.assert_allclose(predt_from_dmatrix, predt_from_array) predt_from_array = booster.inplace_predict( X[:10, ...], iteration_range=(0, 4), missing=self.missing ) predt_from_dmatrix = booster.predict(test, iteration_range=(0, 4)) np.testing.assert_allclose(predt_from_dmatrix, predt_from_array) with pytest.raises(ValueError): booster.predict(test, iteration_range=(0, booster.best_iteration + 2)) default = booster.predict(test) range_full = booster.predict(test, iteration_range=(0, self.num_boost_round)) np.testing.assert_allclose(range_full, default) range_full = booster.predict( test, iteration_range=(0, booster.best_iteration + 1) ) np.testing.assert_allclose(range_full, default) def predict_dense(x): inplace_predt = booster.inplace_predict(x) d = xgb.DMatrix(x) copied_predt = booster.predict(d) return np.all(copied_predt == inplace_predt) for i in range(10): run_threaded_predict(X, self.rows, predict_dense) def predict_csr(x): inplace_predt = booster.inplace_predict(sparse.csr_matrix(x)) d = xgb.DMatrix(x) copied_predt = booster.predict(d) return np.all(copied_predt == inplace_predt) for i in range(10): run_threaded_predict(X, self.rows, predict_csr) @pytest.mark.skipif(**tm.no_pandas()) def test_predict_pd(self): X = self.X # construct it in column major style df = pd.DataFrame({str(i): X[:, i] for i in range(X.shape[1])}) booster = self.booster df_predt = booster.inplace_predict(df) arr_predt = booster.inplace_predict(X) 
dmat_predt = booster.predict(xgb.DMatrix(X)) X = df.values X = np.asfortranarray(X) fort_predt = booster.inplace_predict(X) np.testing.assert_allclose(dmat_predt, arr_predt) np.testing.assert_allclose(df_predt, arr_predt) np.testing.assert_allclose(fort_predt, arr_predt) def test_base_margin(self): booster = self.booster base_margin = self.rng.randn(self.rows) from_inplace = booster.inplace_predict(data=self.X, base_margin=base_margin) dtrain = xgb.DMatrix(self.X, self.y, base_margin=base_margin) from_dmatrix = booster.predict(dtrain) np.testing.assert_allclose(from_dmatrix, from_inplace) @pytest.mark.skipif(**tm.no_pandas()) def test_dtypes(self) -> None: for orig, x in np_dtypes(self.rows, self.cols): predt_orig = self.booster.inplace_predict(orig) predt = self.booster.inplace_predict(x) np.testing.assert_allclose(predt, predt_orig) # unsupported types for dtype in [ np.string_, np.complex64, np.complex128, ]: X: np.ndarray = np.array(orig, dtype=dtype) with pytest.raises(ValueError): self.booster.inplace_predict(X) @pytest.mark.skipif(**tm.no_pandas()) def test_pd_dtypes(self) -> None: from pandas.api.types import is_bool_dtype for orig, x in pd_dtypes(): dtypes = orig.dtypes if isinstance(orig, pd.DataFrame) else [orig.dtypes] if isinstance(orig, pd.DataFrame) and is_bool_dtype(dtypes[0]): continue y = np.arange(x.shape[0]) Xy = xgb.DMatrix(orig, y, enable_categorical=True) booster = xgb.train({"tree_method": "hist"}, Xy, num_boost_round=1) predt_orig = booster.inplace_predict(orig) predt = booster.inplace_predict(x) np.testing.assert_allclose(predt, predt_orig)
8,720
31.909434
87
py
xgboost
xgboost-master/tests/python/test_tree_regularization.py
import numpy as np from numpy.testing import assert_approx_equal import xgboost as xgb train_data = xgb.DMatrix(np.array([[1]]), label=np.array([1])) class TestTreeRegularization: def test_alpha(self): params = { "tree_method": "exact", "verbosity": 0, "objective": "reg:squarederror", "eta": 1, "lambda": 0, "alpha": 0.1, "base_score": 0.5, } model = xgb.train(params, train_data, 1) preds = model.predict(train_data) # Default prediction (with no trees) is 0.5 # sum_grad = (0.5 - 1.0) # sum_hess = 1.0 # 0.9 = 0.5 - (sum_grad - alpha * sgn(sum_grad)) / sum_hess assert_approx_equal(preds[0], 0.9) def test_lambda(self): params = { "tree_method": "exact", "verbosity": 0, "objective": "reg:squarederror", "eta": 1, "lambda": 1, "alpha": 0, "base_score": 0.5, } model = xgb.train(params, train_data, 1) preds = model.predict(train_data) # Default prediction (with no trees) is 0.5 # sum_grad = (0.5 - 1.0) # sum_hess = 1.0 # 0.75 = 0.5 - sum_grad / (sum_hess + lambda) assert_approx_equal(preds[0], 0.75) def test_alpha_and_lambda(self): params = { "tree_method": "exact", "verbosity": 1, "objective": "reg:squarederror", "eta": 1, "lambda": 1, "alpha": 0.1, "base_score": 0.5, } model = xgb.train(params, train_data, 1) preds = model.predict(train_data) # Default prediction (with no trees) is 0.5 # sum_grad = (0.5 - 1.0) # sum_hess = 1.0 # 0.7 = 0.5 - (sum_grad - alpha * sgn(sum_grad)) / (sum_hess + lambda) assert_approx_equal(preds[0], 0.7) def test_unlimited_depth(self): x = np.array([[0], [1], [2], [3]]) y = np.array([0, 1, 2, 3]) model = xgb.XGBRegressor( n_estimators=1, eta=1, tree_method="hist", grow_policy="lossguide", reg_lambda=0, max_leaves=128, max_depth=0, ).fit(x, y) assert np.array_equal(model.predict(x), y)
2,356
27.059524
78
py
xgboost
xgboost-master/tests/python/test_collective.py
import multiprocessing import socket import sys import time import numpy as np import pytest import xgboost as xgb from xgboost import RabitTracker, build_info, federated if sys.platform.startswith("win"): pytest.skip("Skipping collective tests on Windows", allow_module_level=True) def run_rabit_worker(rabit_env, world_size): with xgb.collective.CommunicatorContext(**rabit_env): assert xgb.collective.get_world_size() == world_size assert xgb.collective.is_distributed() assert xgb.collective.get_processor_name() == socket.gethostname() ret = xgb.collective.broadcast('test1234', 0) assert str(ret) == 'test1234' ret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM) assert np.array_equal(ret, np.asarray([2, 4, 6])) def test_rabit_communicator(): world_size = 2 tracker = RabitTracker(host_ip='127.0.0.1', n_workers=world_size) tracker.start(world_size) workers = [] for _ in range(world_size): worker = multiprocessing.Process(target=run_rabit_worker, args=(tracker.worker_envs(), world_size)) workers.append(worker) worker.start() for worker in workers: worker.join() assert worker.exitcode == 0 # TODO(rongou): remove this once we remove the rabit api. def run_rabit_api_worker(rabit_env, world_size): with xgb.rabit.RabitContext(rabit_env): assert xgb.rabit.get_world_size() == world_size assert xgb.rabit.is_distributed() assert xgb.rabit.get_processor_name().decode() == socket.gethostname() ret = xgb.rabit.broadcast('test1234', 0) assert str(ret) == 'test1234' ret = xgb.rabit.allreduce(np.asarray([1, 2, 3]), xgb.rabit.Op.SUM) assert np.array_equal(ret, np.asarray([2, 4, 6])) # TODO(rongou): remove this once we remove the rabit api. 
def test_rabit_api(): world_size = 2 tracker = RabitTracker(host_ip='127.0.0.1', n_workers=world_size) tracker.start(world_size) rabit_env = [] for k, v in tracker.worker_envs().items(): rabit_env.append(f"{k}={v}".encode()) workers = [] for _ in range(world_size): worker = multiprocessing.Process(target=run_rabit_api_worker, args=(rabit_env, world_size)) workers.append(worker) worker.start() for worker in workers: worker.join() assert worker.exitcode == 0 def run_federated_worker(port, world_size, rank): with xgb.collective.CommunicatorContext(xgboost_communicator='federated', federated_server_address=f'localhost:{port}', federated_world_size=world_size, federated_rank=rank): assert xgb.collective.get_world_size() == world_size assert xgb.collective.is_distributed() assert xgb.collective.get_processor_name() == f'rank{rank}' ret = xgb.collective.broadcast('test1234', 0) assert str(ret) == 'test1234' ret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM) assert np.array_equal(ret, np.asarray([2, 4, 6])) def test_federated_communicator(): if not build_info()["USE_FEDERATED"]: pytest.skip("XGBoost not built with federated learning enabled") port = 9091 world_size = 2 server = multiprocessing.Process(target=xgb.federated.run_federated_server, args=(port, world_size)) server.start() time.sleep(1) if not server.is_alive(): raise Exception("Error starting Federated Learning server") workers = [] for rank in range(world_size): worker = multiprocessing.Process(target=run_federated_worker, args=(port, world_size, rank)) workers.append(worker) worker.start() for worker in workers: worker.join() assert worker.exitcode == 0 server.terminate()
4,053
36.192661
104
py
xgboost
xgboost-master/tests/python/test_basic_models.py
import json import locale import os import tempfile import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm dpath = tm.data_dir(__file__) rng = np.random.RandomState(1994) def json_model(model_path: str, parameters: dict) -> dict: datasets = pytest.importorskip("sklearn.datasets") X, y = datasets.make_classification(64, n_features=8, n_classes=3, n_informative=6) if parameters.get("objective", None) == "multi:softmax": parameters["num_class"] = 3 dm1 = xgb.DMatrix(X, y) bst = xgb.train(parameters, dm1) bst.save_model(model_path) if model_path.endswith("ubj"): import ubjson with open(model_path, "rb") as ubjfd: model = ubjson.load(ubjfd) else: with open(model_path, 'r') as fd: model = json.load(fd) return model class TestModels: def test_glm(self): param = {'verbosity': 0, 'objective': 'binary:logistic', 'booster': 'gblinear', 'alpha': 0.0001, 'lambda': 1, 'nthread': 1} dtrain, dtest = tm.load_agaricus(__file__) watchlist = [(dtest, 'eval'), (dtrain, 'train')] num_round = 4 bst = xgb.train(param, dtrain, num_round, watchlist) assert isinstance(bst, xgb.core.Booster) preds = bst.predict(dtest) labels = dtest.get_label() err = sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds)) assert err < 0.2 def test_dart(self): dtrain, dtest = tm.load_agaricus(__file__) param = {'max_depth': 5, 'objective': 'binary:logistic', 'eval_metric': 'logloss', 'booster': 'dart', 'verbosity': 1} # specify validations set to watch performance watchlist = [(dtest, 'eval'), (dtrain, 'train')] num_round = 2 bst = xgb.train(param, dtrain, num_round, watchlist) # this is prediction preds = bst.predict(dtest, iteration_range=(0, num_round)) labels = dtest.get_label() err = sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds)) # error must be smaller than 10% assert err < 0.1 with tempfile.TemporaryDirectory() as tmpdir: dtest_path = os.path.join(tmpdir, 'dtest.dmatrix') model_path = 
os.path.join(tmpdir, 'xgboost.model.dart') # save dmatrix into binary buffer dtest.save_binary(dtest_path) model_path = model_path # save model bst.save_model(model_path) # load model and data in bst2 = xgb.Booster(params=param, model_file=model_path) dtest2 = xgb.DMatrix(dtest_path) preds2 = bst2.predict(dtest2, iteration_range=(0, num_round)) # assert they are the same assert np.sum(np.abs(preds2 - preds)) == 0 def my_logloss(preds, dtrain): labels = dtrain.get_label() return 'logloss', np.sum( np.log(np.where(labels, preds, 1 - preds))) # check whether custom evaluation metrics work bst = xgb.train(param, dtrain, num_round, watchlist, feval=my_logloss) preds3 = bst.predict(dtest, iteration_range=(0, num_round)) assert all(preds3 == preds) # check whether sample_type and normalize_type work num_round = 50 param['verbosity'] = 0 param['learning_rate'] = 0.1 param['rate_drop'] = 0.1 preds_list = [] for p in [[p0, p1] for p0 in ['uniform', 'weighted'] for p1 in ['tree', 'forest']]: param['sample_type'] = p[0] param['normalize_type'] = p[1] bst = xgb.train(param, dtrain, num_round, watchlist) preds = bst.predict(dtest, iteration_range=(0, num_round)) err = sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds)) assert err < 0.1 preds_list.append(preds) for ii in range(len(preds_list)): for jj in range(ii + 1, len(preds_list)): assert np.sum(np.abs(preds_list[ii] - preds_list[jj])) > 0 def test_boost_from_prediction(self): # Re-construct dtrain here to avoid modification margined, _ = tm.load_agaricus(__file__) bst = xgb.train({'tree_method': 'hist'}, margined, 1) predt_0 = bst.predict(margined, output_margin=True) margined.set_base_margin(predt_0) bst = xgb.train({'tree_method': 'hist'}, margined, 1) predt_1 = bst.predict(margined) assert np.any(np.abs(predt_1 - predt_0) > 1e-6) dtrain, _ = tm.load_agaricus(__file__) bst = xgb.train({'tree_method': 'hist'}, dtrain, 2) predt_2 = bst.predict(dtrain) assert np.all(np.abs(predt_2 - 
predt_1) < 1e-6) def test_boost_from_existing_model(self): X, _ = tm.load_agaricus(__file__) booster = xgb.train({'tree_method': 'hist'}, X, num_boost_round=4) assert booster.num_boosted_rounds() == 4 booster = xgb.train({'tree_method': 'hist'}, X, num_boost_round=4, xgb_model=booster) assert booster.num_boosted_rounds() == 8 booster = xgb.train({'updater': 'prune', 'process_type': 'update'}, X, num_boost_round=4, xgb_model=booster) # Trees are moved for update, the rounds is reduced. This test is # written for being compatible with current code (1.0.0). If the # behaviour is considered sub-optimal, feel free to change. assert booster.num_boosted_rounds() == 4 def run_custom_objective(self, tree_method=None): param = { 'max_depth': 2, 'eta': 1, 'objective': 'reg:logistic', "tree_method": tree_method } dtrain, dtest = tm.load_agaricus(__file__) watchlist = [(dtest, 'eval'), (dtrain, 'train')] num_round = 10 def logregobj(preds, dtrain): labels = dtrain.get_label() preds = 1.0 / (1.0 + np.exp(-preds)) grad = preds - labels hess = preds * (1.0 - preds) return grad, hess def evalerror(preds, dtrain): labels = dtrain.get_label() preds = 1.0 / (1.0 + np.exp(-preds)) return 'error', float(sum(labels != (preds > 0.5))) / len(labels) # test custom_objective in training bst = xgb.train(param, dtrain, num_round, watchlist, obj=logregobj, feval=evalerror) assert isinstance(bst, xgb.core.Booster) preds = bst.predict(dtest) labels = dtest.get_label() err = sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds)) assert err < 0.1 # test custom_objective in cross-validation xgb.cv(param, dtrain, num_round, nfold=5, seed=0, obj=logregobj, feval=evalerror) # test maximize parameter def neg_evalerror(preds, dtrain): labels = dtrain.get_label() return 'error', float(sum(labels == (preds > 0.0))) / len(labels) bst2 = xgb.train(param, dtrain, num_round, watchlist, logregobj, neg_evalerror, maximize=True) preds2 = bst2.predict(dtest) err2 = sum(1 for i in 
range(len(preds2)) if int(preds2[i] > 0.5) != labels[i]) / float(len(preds2)) assert err == err2 def test_custom_objective(self): self.run_custom_objective() def test_multi_eval_metric(self): dtrain, dtest = tm.load_agaricus(__file__) watchlist = [(dtest, 'eval'), (dtrain, 'train')] param = {'max_depth': 2, 'eta': 0.2, 'verbosity': 1, 'objective': 'binary:logistic'} param['eval_metric'] = ["auc", "logloss", 'error'] evals_result = {} bst = xgb.train(param, dtrain, 4, watchlist, evals_result=evals_result) assert isinstance(bst, xgb.core.Booster) assert len(evals_result['eval']) == 3 assert set(evals_result['eval'].keys()) == {'auc', 'error', 'logloss'} def test_fpreproc(self): param = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic'} num_round = 2 def fpreproc(dtrain, dtest, param): label = dtrain.get_label() ratio = float(np.sum(label == 0)) / np.sum(label == 1) param['scale_pos_weight'] = ratio return (dtrain, dtest, param) dtrain, _ = tm.load_agaricus(__file__) xgb.cv(param, dtrain, num_round, nfold=5, metrics={'auc'}, seed=0, fpreproc=fpreproc) def test_show_stdv(self): param = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic'} num_round = 2 dtrain, _ = tm.load_agaricus(__file__) xgb.cv(param, dtrain, num_round, nfold=5, metrics={'error'}, seed=0, show_stdv=False) def test_prediction_cache(self) -> None: X, y = tm.make_sparse_regression(512, 4, 0.5, as_dense=False) Xy = xgb.DMatrix(X, y) param = {"max_depth": 8} booster = xgb.train(param, Xy, num_boost_round=1) with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, "model.json") booster.save_model(path) predt_0 = booster.predict(Xy) param["max_depth"] = 2 booster = xgb.train(param, Xy, num_boost_round=1) predt_1 = booster.predict(Xy) assert not np.isclose(predt_0, predt_1).all() booster.load_model(path) predt_2 = booster.predict(Xy) np.testing.assert_allclose(predt_0, predt_2) def test_feature_names_validation(self): X = np.random.random((10, 
3)) y = np.random.randint(2, size=(10,)) dm1 = xgb.DMatrix(X, y, feature_names=("a", "b", "c")) dm2 = xgb.DMatrix(X, y) bst = xgb.train([], dm1) bst.predict(dm1) # success with pytest.raises(ValueError): bst.predict(dm2) bst.predict(dm1) # success bst = xgb.train([], dm2) bst.predict(dm2) # success def test_model_binary_io(self): model_path = 'test_model_binary_io.bin' parameters = {'tree_method': 'hist', 'booster': 'gbtree', 'scale_pos_weight': '0.5'} X = np.random.random((10, 3)) y = np.random.random((10,)) dtrain = xgb.DMatrix(X, y) bst = xgb.train(parameters, dtrain, num_boost_round=2) bst.save_model(model_path) bst = xgb.Booster(model_file=model_path) os.remove(model_path) config = json.loads(bst.save_config()) assert float(config['learner']['objective'][ 'reg_loss_param']['scale_pos_weight']) == 0.5 buf = bst.save_raw() from_raw = xgb.Booster() from_raw.load_model(buf) buf_from_raw = from_raw.save_raw() assert buf == buf_from_raw def run_model_json_io(self, parameters: dict, ext: str) -> None: if ext == "ubj" and tm.no_ubjson()["condition"]: pytest.skip(tm.no_ubjson()["reason"]) loc = locale.getpreferredencoding(False) model_path = 'test_model_json_io.' + ext j_model = json_model(model_path, parameters) assert isinstance(j_model['learner'], dict) bst = xgb.Booster(model_file=model_path) bst.save_model(fname=model_path) if ext == "ubj": import ubjson with open(model_path, "rb") as ubjfd: j_model = ubjson.load(ubjfd) else: with open(model_path, 'r') as fd: j_model = json.load(fd) assert isinstance(j_model['learner'], dict) os.remove(model_path) assert locale.getpreferredencoding(False) == loc json_raw = bst.save_raw(raw_format="json") from_jraw = xgb.Booster() from_jraw.load_model(json_raw) ubj_raw = bst.save_raw(raw_format="ubj") from_ubjraw = xgb.Booster() from_ubjraw.load_model(ubj_raw) if parameters.get("multi_strategy", None) != "multi_output_tree": # old binary model is not supported. 
old_from_json = from_jraw.save_raw(raw_format="deprecated") old_from_ubj = from_ubjraw.save_raw(raw_format="deprecated") assert old_from_json == old_from_ubj raw_json = bst.save_raw(raw_format="json") pretty = json.dumps(json.loads(raw_json), indent=2) + "\n\n" bst.load_model(bytearray(pretty, encoding="ascii")) if parameters.get("multi_strategy", None) != "multi_output_tree": # old binary model is not supported. old_from_json = from_jraw.save_raw(raw_format="deprecated") old_from_ubj = from_ubjraw.save_raw(raw_format="deprecated") assert old_from_json == old_from_ubj rng = np.random.default_rng() X = rng.random(size=from_jraw.num_features() * 10).reshape( (10, from_jraw.num_features()) ) predt_from_jraw = from_jraw.predict(xgb.DMatrix(X)) predt_from_bst = bst.predict(xgb.DMatrix(X)) np.testing.assert_allclose(predt_from_jraw, predt_from_bst) @pytest.mark.parametrize("ext", ["json", "ubj"]) def test_model_json_io(self, ext: str) -> None: parameters = {"booster": "gbtree", "tree_method": "hist"} self.run_model_json_io(parameters, ext) parameters = { "booster": "gbtree", "tree_method": "hist", "multi_strategy": "multi_output_tree", "objective": "multi:softmax", } self.run_model_json_io(parameters, ext) parameters = {"booster": "gblinear"} self.run_model_json_io(parameters, ext) parameters = {"booster": "dart", "tree_method": "hist"} self.run_model_json_io(parameters, ext) @pytest.mark.skipif(**tm.no_json_schema()) def test_json_io_schema(self): import jsonschema model_path = 'test_json_schema.json' path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) doc = os.path.join(path, 'doc', 'model.schema') with open(doc, 'r') as fd: schema = json.load(fd) parameters = {'tree_method': 'hist', 'booster': 'gbtree'} jsonschema.validate(instance=json_model(model_path, parameters), schema=schema) os.remove(model_path) parameters = {'tree_method': 'hist', 'booster': 'dart'} jsonschema.validate(instance=json_model(model_path, parameters), 
schema=schema) os.remove(model_path) try: dtrain, _ = tm.load_agaricus(__file__) xgb.train({'objective': 'foo'}, dtrain, num_boost_round=1) except ValueError as e: e_str = str(e) beg = e_str.find('Objective candidate') end = e_str.find('Stack trace') e_str = e_str[beg: end] e_str = e_str.strip() splited = e_str.splitlines() objectives = [s.split(': ')[1] for s in splited] j_objectives = schema['properties']['learner']['properties'][ 'objective']['oneOf'] objectives_from_schema = set() for j_obj in j_objectives: objectives_from_schema.add( j_obj['properties']['name']['const']) objectives = set(objectives) assert objectives == objectives_from_schema @pytest.mark.skipif(**tm.no_json_schema()) def test_json_dump_schema(self): import jsonschema def validate_model(parameters): X = np.random.random((100, 30)) y = np.random.randint(0, 4, size=(100,)) parameters['num_class'] = 4 m = xgb.DMatrix(X, y) booster = xgb.train(parameters, m) dump = booster.get_dump(dump_format='json') for i in range(len(dump)): jsonschema.validate(instance=json.loads(dump[i]), schema=schema) path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) doc = os.path.join(path, 'doc', 'dump.schema') with open(doc, 'r') as fd: schema = json.load(fd) parameters = {'tree_method': 'hist', 'booster': 'gbtree', 'objective': 'multi:softmax'} validate_model(parameters) parameters = {'tree_method': 'hist', 'booster': 'dart', 'objective': 'multi:softmax'} validate_model(parameters) def test_categorical_model_io(self): X, y = tm.make_categorical(256, 16, 71, False) Xy = xgb.DMatrix(X, y, enable_categorical=True) booster = xgb.train({"tree_method": "approx"}, Xy, num_boost_round=16) predt_0 = booster.predict(Xy) with tempfile.TemporaryDirectory() as tempdir: path = os.path.join(tempdir, "model.binary") with pytest.raises(ValueError, match=r".*JSON/UBJSON.*"): booster.save_model(path) path = os.path.join(tempdir, "model.json") booster.save_model(path) booster = 
xgb.Booster(model_file=path) predt_1 = booster.predict(Xy) np.testing.assert_allclose(predt_0, predt_1) path = os.path.join(tempdir, "model.ubj") booster.save_model(path) booster = xgb.Booster(model_file=path) predt_1 = booster.predict(Xy) np.testing.assert_allclose(predt_0, predt_1) @pytest.mark.skipif(**tm.no_sklearn()) def test_attributes(self): from sklearn.datasets import load_iris X, y = load_iris(return_X_y=True) cls = xgb.XGBClassifier(n_estimators=2) cls.fit(X, y, early_stopping_rounds=1, eval_set=[(X, y)]) assert cls.get_booster().best_iteration == cls.n_estimators - 1 assert cls.best_iteration == cls.get_booster().best_iteration with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, "cls.json") cls.save_model(path) cls = xgb.XGBClassifier(n_estimators=2) cls.load_model(path) assert cls.get_booster().best_iteration == cls.n_estimators - 1 assert cls.best_iteration == cls.get_booster().best_iteration def run_slice( self, booster: xgb.Booster, dtrain: xgb.DMatrix, num_parallel_tree: int, num_classes: int, num_boost_round: int ): beg = 3 end = 7 sliced: xgb.Booster = booster[beg:end] assert sliced.feature_types == booster.feature_types sliced_trees = (end - beg) * num_parallel_tree * num_classes assert sliced_trees == len(sliced.get_dump()) sliced_trees = sliced_trees // 2 sliced = booster[beg:end:2] assert sliced_trees == len(sliced.get_dump()) sliced = booster[beg: ...] 
sliced_trees = (num_boost_round - beg) * num_parallel_tree * num_classes assert sliced_trees == len(sliced.get_dump()) sliced = booster[beg:] sliced_trees = (num_boost_round - beg) * num_parallel_tree * num_classes assert sliced_trees == len(sliced.get_dump()) sliced = booster[:end] sliced_trees = end * num_parallel_tree * num_classes assert sliced_trees == len(sliced.get_dump()) sliced = booster[...: end] sliced_trees = end * num_parallel_tree * num_classes assert sliced_trees == len(sliced.get_dump()) with pytest.raises(ValueError, match=r">= 0"): booster[-1:0] # we do not accept empty slice. with pytest.raises(ValueError, match="Empty slice"): booster[1:1] # stop can not be smaller than begin with pytest.raises(ValueError, match=r"Invalid.*"): booster[3:0] with pytest.raises(ValueError, match=r"Invalid.*"): booster[3:-1] # negative step is not supported. with pytest.raises(ValueError, match=r".*>= 1.*"): booster[0:2:-1] # step can not be 0. with pytest.raises(ValueError, match=r".*>= 1.*"): booster[0:2:0] trees = [_ for _ in booster] assert len(trees) == num_boost_round with pytest.raises(TypeError): booster["wrong type"] with pytest.raises(IndexError): booster[: num_boost_round + 1] with pytest.raises(ValueError): booster[1, 2] # too many dims # setitem is not implemented as model is immutable during slicing. with pytest.raises(TypeError): booster[...: end] = booster sliced_0 = booster[1:3] np.testing.assert_allclose( booster.predict(dtrain, iteration_range=(1, 3)), sliced_0.predict(dtrain) ) sliced_1 = booster[3:7] np.testing.assert_allclose( booster.predict(dtrain, iteration_range=(3, 7)), sliced_1.predict(dtrain) ) predt_0 = sliced_0.predict(dtrain, output_margin=True) predt_1 = sliced_1.predict(dtrain, output_margin=True) merged = predt_0 + predt_1 - 0.5 # base score. 
single = booster[1:7].predict(dtrain, output_margin=True) np.testing.assert_allclose(merged, single, atol=1e-6) sliced_0 = booster[1:7:2] # 1,3,5 sliced_1 = booster[2:8:2] # 2,4,6 predt_0 = sliced_0.predict(dtrain, output_margin=True) predt_1 = sliced_1.predict(dtrain, output_margin=True) merged = predt_0 + predt_1 - 0.5 single = booster[1:7].predict(dtrain, output_margin=True) np.testing.assert_allclose(merged, single, atol=1e-6) @pytest.mark.skipif(**tm.no_sklearn()) @pytest.mark.parametrize("booster", ["gbtree", "dart"]) def test_slice(self, booster): from sklearn.datasets import make_classification num_classes = 3 X, y = make_classification( n_samples=1000, n_informative=5, n_classes=num_classes ) dtrain = xgb.DMatrix(data=X, label=y) num_parallel_tree = 4 num_boost_round = 16 total_trees = num_parallel_tree * num_classes * num_boost_round booster = xgb.train( { "num_parallel_tree": num_parallel_tree, "subsample": 0.5, "num_class": num_classes, "booster": booster, "objective": "multi:softprob", }, num_boost_round=num_boost_round, dtrain=dtrain, ) booster.feature_types = ["q"] * X.shape[1] assert len(booster.get_dump()) == total_trees self.run_slice(booster, dtrain, num_parallel_tree, num_classes, num_boost_round) bytesarray = booster.save_raw(raw_format="ubj") booster = xgb.Booster(model_file=bytesarray) self.run_slice(booster, dtrain, num_parallel_tree, num_classes, num_boost_round) bytesarray = booster.save_raw(raw_format="deprecated") booster = xgb.Booster(model_file=bytesarray) self.run_slice(booster, dtrain, num_parallel_tree, num_classes, num_boost_round) def test_slice_multi(self) -> None: from sklearn.datasets import make_classification num_classes = 3 X, y = make_classification( n_samples=1000, n_informative=5, n_classes=num_classes ) Xy = xgb.DMatrix(data=X, label=y) num_parallel_tree = 4 num_boost_round = 16 class ResetStrategy(xgb.callback.TrainingCallback): def after_iteration(self, model, epoch: int, evals_log) -> bool: 
model.set_param({"multi_strategy": "multi_output_tree"}) return False booster = xgb.train( { "num_parallel_tree": num_parallel_tree, "num_class": num_classes, "booster": "gbtree", "objective": "multi:softprob", "multi_strategy": "multi_output_tree", "tree_method": "hist", "base_score": 0, }, num_boost_round=num_boost_round, dtrain=Xy, callbacks=[ResetStrategy()] ) sliced = [t for t in booster] assert len(sliced) == 16 predt0 = booster.predict(Xy, output_margin=True) predt1 = np.zeros(predt0.shape) for t in booster: predt1 += t.predict(Xy, output_margin=True) np.testing.assert_allclose(predt0, predt1, atol=1e-5) @pytest.mark.skipif(**tm.no_pandas()) def test_feature_info(self): import pandas as pd rows = 100 cols = 10 X = rng.randn(rows, cols) y = rng.randn(rows) feature_names = ["test_feature_" + str(i) for i in range(cols)] X_pd = pd.DataFrame(X, columns=feature_names) X_pd[f"test_feature_{3}"] = X_pd.iloc[:, 3].astype(np.int32) Xy = xgb.DMatrix(X_pd, y) assert Xy.feature_types[3] == "int" booster = xgb.train({}, dtrain=Xy, num_boost_round=1) assert booster.feature_names == Xy.feature_names assert booster.feature_names == feature_names assert booster.feature_types == Xy.feature_types with tempfile.TemporaryDirectory() as tmpdir: path = tmpdir + "model.json" booster.save_model(path) booster = xgb.Booster() booster.load_model(path) assert booster.feature_names == Xy.feature_names assert booster.feature_types == Xy.feature_types
26,002
37.183554
88
py
xgboost
xgboost-master/tests/python/with_omp_limit.py
import sys from sklearn.datasets import make_classification from sklearn.metrics import roc_auc_score import xgboost as xgb def run_omp(output_path: str): X, y = make_classification( n_samples=200, n_features=32, n_classes=3, n_informative=8 ) Xy = xgb.DMatrix(X, y, nthread=16) booster = xgb.train( {"num_class": 3, "objective": "multi:softprob", "n_jobs": 16}, Xy, num_boost_round=8, ) score = booster.predict(Xy) auc = roc_auc_score(y, score, average="weighted", multi_class="ovr") with open(output_path, "w") as fd: fd.write(str(auc)) if __name__ == "__main__": out = sys.argv[1] run_omp(out)
683
23.428571
72
py
xgboost
xgboost-master/tests/python/test_demos.py
import os import subprocess import sys import tempfile import pytest import xgboost from xgboost import testing as tm pytestmark = tm.timeout(30) DEMO_DIR = tm.demo_dir(__file__) PYTHON_DEMO_DIR = os.path.join(DEMO_DIR, 'guide-python') CLI_DEMO_DIR = os.path.join(DEMO_DIR, 'CLI') def test_basic_walkthrough(): script = os.path.join(PYTHON_DEMO_DIR, 'basic_walkthrough.py') cmd = ['python', script] with tempfile.TemporaryDirectory() as tmpdir: subprocess.check_call(cmd, cwd=tmpdir) @pytest.mark.skipif(**tm.no_matplotlib()) def test_custom_multiclass_objective(): script = os.path.join(PYTHON_DEMO_DIR, 'custom_softmax.py') cmd = ['python', script, '--plot=0'] subprocess.check_call(cmd) @pytest.mark.skipif(**tm.no_matplotlib()) def test_custom_rmsle_objective(): script = os.path.join(PYTHON_DEMO_DIR, 'custom_rmsle.py') cmd = ['python', script, '--plot=0'] subprocess.check_call(cmd) @pytest.mark.skipif(**tm.no_matplotlib()) def test_feature_weights_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'feature_weights.py') cmd = ['python', script, '--plot=0'] subprocess.check_call(cmd) @pytest.mark.skipif(**tm.no_sklearn()) def test_sklearn_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'sklearn_examples.py') cmd = ['python', script] subprocess.check_call(cmd) assert os.path.exists('best_calif.pkl') os.remove('best_calif.pkl') @pytest.mark.skipif(**tm.no_sklearn()) def test_sklearn_parallel_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'sklearn_parallel.py') cmd = ['python', script] subprocess.check_call(cmd) @pytest.mark.skipif(**tm.no_sklearn()) def test_sklearn_evals_result_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'sklearn_evals_result.py') cmd = ['python', script] subprocess.check_call(cmd) def test_boost_from_prediction_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'boost_from_prediction.py') cmd = ['python', script] subprocess.check_call(cmd) def test_predict_first_ntree_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'predict_first_ntree.py') cmd = ['python', 
script] subprocess.check_call(cmd) def test_individual_trees(): script = os.path.join(PYTHON_DEMO_DIR, 'individual_trees.py') cmd = ['python', script] subprocess.check_call(cmd) def test_predict_leaf_indices_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'predict_leaf_indices.py') cmd = ['python', script] subprocess.check_call(cmd) def test_generalized_linear_model_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'generalized_linear_model.py') cmd = ['python', script] subprocess.check_call(cmd) def test_cross_validation_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'cross_validation.py') cmd = ['python', script] subprocess.check_call(cmd) def test_external_memory_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'external_memory.py') cmd = ['python', script] subprocess.check_call(cmd) def test_evals_result_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'evals_result.py') cmd = ['python', script] subprocess.check_call(cmd) @pytest.mark.skipif(**tm.no_sklearn()) @pytest.mark.skipif(**tm.no_pandas()) def test_aft_demo(): script = os.path.join(DEMO_DIR, 'aft_survival', 'aft_survival_demo.py') cmd = ['python', script] subprocess.check_call(cmd) assert os.path.exists('aft_model.json') os.remove('aft_model.json') @pytest.mark.skipif(**tm.no_matplotlib()) def test_callbacks_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'callbacks.py') cmd = ['python', script, '--plot=0'] subprocess.check_call(cmd) def test_continuation_demo(): script = os.path.join(PYTHON_DEMO_DIR, 'continuation.py') cmd = ['python', script] subprocess.check_call(cmd) @pytest.mark.skipif(**tm.no_sklearn()) @pytest.mark.skipif(**tm.no_matplotlib()) def test_multioutput_reg() -> None: script = os.path.join(PYTHON_DEMO_DIR, "multioutput_regression.py") cmd = ['python', script, "--plot=0"] subprocess.check_call(cmd) @pytest.mark.skipif(**tm.no_sklearn()) def test_quantile_reg() -> None: script = os.path.join(PYTHON_DEMO_DIR, "quantile_regression.py") cmd = ['python', script] subprocess.check_call(cmd) 
@pytest.mark.skipif(**tm.no_ubjson()) def test_json_model() -> None: script = os.path.join(DEMO_DIR, "json-model", "json_parser.py") def run_test(reg: xgboost.XGBRegressor) -> None: with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, "reg.json") reg.save_model(path) cmd = ["python", script, f"--model={path}"] subprocess.check_call(cmd) path = os.path.join(tmpdir, "reg.ubj") reg.save_model(path) cmd = ["python", script, f"--model={path}"] subprocess.check_call(cmd) # numerical X, y = tm.make_sparse_regression(100, 10, 0.5, False) reg = xgboost.XGBRegressor(n_estimators=2, tree_method="hist") reg.fit(X, y) run_test(reg) # categorical X, y = tm.make_categorical( n_samples=1000, n_features=10, n_categories=6, onehot=False, sparsity=0.5, cat_ratio=0.5, shuffle=True, ) reg = xgboost.XGBRegressor( n_estimators=2, tree_method="hist", enable_categorical=True ) reg.fit(X, y) run_test(reg) # - gpu_acceleration is not tested due to covertype dataset is being too huge. # - gamma regression is not tested as it requires running a R script first. # - aft viz is not tested due to ploting is not controlled # - aft tunning is not tested due to extra dependency. 
def test_cli_regression_demo(): reg_dir = os.path.join(CLI_DEMO_DIR, 'regression') script = os.path.join(reg_dir, 'mapfeat.py') cmd = ['python', script] subprocess.check_call(cmd, cwd=reg_dir) script = os.path.join(reg_dir, 'mknfold.py') cmd = ['python', script, 'machine.txt', '1'] subprocess.check_call(cmd, cwd=reg_dir) exe = os.path.join(DEMO_DIR, os.path.pardir, 'xgboost') conf = os.path.join(reg_dir, 'machine.conf') subprocess.check_call([exe, conf], cwd=reg_dir) @pytest.mark.skipif(condition=sys.platform.startswith("win"), reason='Test requires sh execution.') def test_cli_binary_classification(): cls_dir = os.path.join(CLI_DEMO_DIR, 'binary_classification') with tm.DirectoryExcursion(cls_dir, cleanup=True): subprocess.check_call(['./runexp.sh']) os.remove('0002.model') # year prediction is not tested due to data size being too large. # rank is not tested as it requires unrar command.
6,699
28.777778
78
py
xgboost
xgboost-master/tests/python/test_monotone_constraints.py
import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm dpath = 'demo/data/' def is_increasing(y): return np.count_nonzero(np.diff(y) < 0.0) == 0 def is_decreasing(y): return np.count_nonzero(np.diff(y) > 0.0) == 0 def is_correctly_constrained(learner, feature_names=None): n = 100 variable_x = np.linspace(0, 1, n).reshape((n, 1)) fixed_xs_values = np.linspace(0, 1, n) for i in range(n): fixed_x = fixed_xs_values[i] * np.ones((n, 1)) monotonically_increasing_x = np.column_stack((variable_x, fixed_x)) monotonically_increasing_dset = xgb.DMatrix(monotonically_increasing_x, feature_names=feature_names) monotonically_increasing_y = learner.predict( monotonically_increasing_dset ) monotonically_decreasing_x = np.column_stack((fixed_x, variable_x)) monotonically_decreasing_dset = xgb.DMatrix(monotonically_decreasing_x, feature_names=feature_names) monotonically_decreasing_y = learner.predict( monotonically_decreasing_dset ) if not ( is_increasing(monotonically_increasing_y) and is_decreasing(monotonically_decreasing_y) ): return False return True number_of_dpoints = 1000 x1_positively_correlated_with_y = np.random.random(size=number_of_dpoints) x2_negatively_correlated_with_y = np.random.random(size=number_of_dpoints) x = np.column_stack(( x1_positively_correlated_with_y, x2_negatively_correlated_with_y )) zs = np.random.normal(loc=0.0, scale=0.01, size=number_of_dpoints) y = ( 5 * x1_positively_correlated_with_y + np.sin(10 * np.pi * x1_positively_correlated_with_y) - 5 * x2_negatively_correlated_with_y - np.cos(10 * np.pi * x2_negatively_correlated_with_y) + zs ) training_dset = xgb.DMatrix(x, label=y) class TestMonotoneConstraints: def test_monotone_constraints_for_exact_tree_method(self): # first check monotonicity for the 'exact' tree method params_for_constrained_exact_method = { 'tree_method': 'exact', 'verbosity': 1, 'monotone_constraints': '(1, -1)' } constrained_exact_method = xgb.train( params_for_constrained_exact_method, 
training_dset ) assert is_correctly_constrained(constrained_exact_method) @pytest.mark.parametrize( "tree_method,policy", [ ("hist", "depthwise"), ("approx", "depthwise"), ("hist", "lossguide"), ("approx", "lossguide"), ], ) def test_monotone_constraints(self, tree_method: str, policy: str) -> None: params_for_constrained = { "tree_method": tree_method, "grow_policy": policy, "monotone_constraints": "(1, -1)", } constrained = xgb.train(params_for_constrained, training_dset) assert is_correctly_constrained(constrained) def test_monotone_constraints_tuple(self) -> None: params_for_constrained = {"monotone_constraints": (1, -1)} constrained = xgb.train(params_for_constrained, training_dset) assert is_correctly_constrained(constrained) @pytest.mark.parametrize('format', [dict, list]) def test_monotone_constraints_feature_names(self, format): # next check monotonicity when initializing monotone_constraints by feature names params = { 'tree_method': 'hist', 'grow_policy': 'lossguide', 'monotone_constraints': {'feature_0': 1, 'feature_1': -1} } if format == list: params = list(params.items()) with pytest.raises(ValueError): xgb.train(params, training_dset) feature_names = ['feature_0', 'feature_2'] training_dset_w_feature_names = xgb.DMatrix(x, label=y, feature_names=feature_names) with pytest.raises(ValueError): xgb.train(params, training_dset_w_feature_names) feature_names = ['feature_0', 'feature_1'] training_dset_w_feature_names = xgb.DMatrix(x, label=y, feature_names=feature_names) constrained_learner = xgb.train( params, training_dset_w_feature_names ) assert is_correctly_constrained(constrained_learner, feature_names) @pytest.mark.skipif(**tm.no_sklearn()) def test_training_accuracy(self): from sklearn.metrics import accuracy_score dtrain = xgb.DMatrix(dpath + "agaricus.txt.train?indexing_mode=1&format=libsvm") dtest = xgb.DMatrix(dpath + "agaricus.txt.test?indexing_mode=1&format=libsvm") params = {'eta': 1, 'max_depth': 6, 'objective': 'binary:logistic', 
'tree_method': 'hist', 'monotone_constraints': '(1, 0)'} num_boost_round = 5 params['grow_policy'] = 'lossguide' bst = xgb.train(params, dtrain, num_boost_round) pred_dtest = (bst.predict(dtest) < 0.5) assert accuracy_score(dtest.get_label(), pred_dtest) < 0.1 params['grow_policy'] = 'depthwise' bst = xgb.train(params, dtrain, num_boost_round) pred_dtest = (bst.predict(dtest) < 0.5) assert accuracy_score(dtest.get_label(), pred_dtest) < 0.1
5,360
34.269737
92
py
xgboost
xgboost-master/tests/python/test_with_arrow.py
import os import unittest import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm try: import pandas as pd import pyarrow as pa import pyarrow.csv as pc except ImportError: pass pytestmark = pytest.mark.skipif( tm.no_arrow()["condition"] or tm.no_pandas()["condition"], reason=tm.no_arrow()["reason"] + " or " + tm.no_pandas()["reason"], ) dpath = "demo/data/" class TestArrowTable(unittest.TestCase): def test_arrow_table(self): df = pd.DataFrame( [[0, 1, 2.0, 3.0], [1, 2, 3.0, 4.0]], columns=["a", "b", "c", "d"] ) table = pa.Table.from_pandas(df) dm = xgb.DMatrix(table) assert dm.num_row() == 2 assert dm.num_col() == 4 def test_arrow_table_with_label(self): df = pd.DataFrame([[1, 2.0, 3.0], [2, 3.0, 4.0]], columns=["a", "b", "c"]) table = pa.Table.from_pandas(df) label = np.array([0, 1]) dm = xgb.DMatrix(table) dm.set_label(label) assert dm.num_row() == 2 assert dm.num_col() == 3 np.testing.assert_array_equal(dm.get_label(), np.array([0, 1])) def test_arrow_table_from_np(self): coldata = np.array( [[1.0, 1.0, 0.0, 0.0], [2.0, 0.0, 1.0, 0.0], [3.0, 0.0, 0.0, 1.0]] ) cols = list(map(pa.array, coldata)) table = pa.Table.from_arrays(cols, ["a", "b", "c"]) dm = xgb.DMatrix(table) assert dm.num_row() == 4 assert dm.num_col() == 3 def test_arrow_train(self): import pandas as pd rows = 100 X = pd.DataFrame( { "A": np.random.randint(0, 10, size=rows), "B": np.random.randn(rows), "C": np.random.permutation([1, 0] * (rows // 2)), } ) y = pd.Series(np.random.randn(rows)) table = pa.Table.from_pandas(X) dtrain1 = xgb.DMatrix(table) dtrain1.set_label(y) bst1 = xgb.train({}, dtrain1, num_boost_round=10) preds1 = bst1.predict(xgb.DMatrix(X)) dtrain2 = xgb.DMatrix(X, y) bst2 = xgb.train({}, dtrain2, num_boost_round=10) preds2 = bst2.predict(xgb.DMatrix(X)) np.testing.assert_allclose(preds1, preds2) def test_arrow_survival(self): data = os.path.join(tm.data_dir(__file__), "veterans_lung_cancer.csv") table = pc.read_csv(data) y_lower_bound = 
table["Survival_label_lower_bound"] y_upper_bound = table["Survival_label_upper_bound"] X = table.drop(["Survival_label_lower_bound", "Survival_label_upper_bound"]) dtrain = xgb.DMatrix( X, label_lower_bound=y_lower_bound, label_upper_bound=y_upper_bound ) y_np_up = dtrain.get_float_info("label_upper_bound") y_np_low = dtrain.get_float_info("label_lower_bound") np.testing.assert_equal(y_np_up, y_upper_bound.to_pandas().values) np.testing.assert_equal(y_np_low, y_lower_bound.to_pandas().values)
3,020
32.197802
84
py
xgboost
xgboost-master/tests/python/test_early_stopping.py
import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm from xgboost.testing.updater import get_basescore rng = np.random.RandomState(1994) class TestEarlyStopping: @pytest.mark.skipif(**tm.no_sklearn()) def test_early_stopping_nonparallel(self): from sklearn.datasets import load_digits from sklearn.model_selection import train_test_split digits = load_digits(n_class=2) X = digits['data'] y = digits['target'] X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf1 = xgb.XGBClassifier(learning_rate=0.1) clf1.fit(X_train, y_train, early_stopping_rounds=5, eval_metric="auc", eval_set=[(X_test, y_test)]) clf2 = xgb.XGBClassifier(learning_rate=0.1) clf2.fit(X_train, y_train, early_stopping_rounds=4, eval_metric="auc", eval_set=[(X_test, y_test)]) # should be the same assert clf1.best_score == clf2.best_score assert clf1.best_score != 1 # check overfit clf3 = xgb.XGBClassifier( learning_rate=0.1, eval_metric="auc", early_stopping_rounds=10 ) clf3.fit(X_train, y_train, eval_set=[(X_test, y_test)]) base_score = get_basescore(clf3) assert 0.53 > base_score > 0.5 clf3 = xgb.XGBClassifier( learning_rate=0.1, base_score=.5, eval_metric="auc", early_stopping_rounds=10 ) clf3.fit(X_train, y_train, eval_set=[(X_test, y_test)]) assert clf3.best_score == 1 def evalerror(self, preds, dtrain): from sklearn.metrics import mean_squared_error labels = dtrain.get_label() preds = 1.0 / (1.0 + np.exp(-preds)) return 'rmse', mean_squared_error(labels, preds) @staticmethod def assert_metrics_length(cv, expected_length): for key, value in cv.items(): assert len(value) == expected_length @pytest.mark.skipif(**tm.no_sklearn()) def test_cv_early_stopping(self): from sklearn.datasets import load_digits digits = load_digits(n_class=2) X = digits['data'] y = digits['target'] dm = xgb.DMatrix(X, label=y) params = {'max_depth': 2, 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic', 'eval_metric': 'error'} cv = xgb.cv(params, dm, 
num_boost_round=10, nfold=10, early_stopping_rounds=10) self.assert_metrics_length(cv, 10) cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, early_stopping_rounds=5) self.assert_metrics_length(cv, 3) cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, early_stopping_rounds=1) self.assert_metrics_length(cv, 1) cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, feval=self.evalerror, early_stopping_rounds=10) self.assert_metrics_length(cv, 10) cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, feval=self.evalerror, early_stopping_rounds=1) self.assert_metrics_length(cv, 5) cv = xgb.cv(params, dm, num_boost_round=10, nfold=10, feval=self.evalerror, maximize=True, early_stopping_rounds=1) self.assert_metrics_length(cv, 1) @pytest.mark.skipif(**tm.no_sklearn()) @pytest.mark.skipif(**tm.no_pandas()) def test_cv_early_stopping_with_multiple_eval_sets_and_metrics(self): from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) dm = xgb.DMatrix(X, label=y) params = {'objective':'binary:logistic'} metrics = [['auc'], ['error'], ['logloss'], ['logloss', 'auc'], ['logloss', 'error'], ['error', 'logloss']] num_iteration_history = [] # If more than one metrics is given, early stopping should use the last metric for i, m in enumerate(metrics): result = xgb.cv(params, dm, num_boost_round=1000, nfold=5, stratified=True, metrics=m, early_stopping_rounds=20, seed=42) num_iteration_history.append(len(result)) df = result['test-{}-mean'.format(m[-1])] # When early stopping is invoked, the last metric should be as best it can be. if m[-1] == 'auc': assert np.all(df <= df.iloc[-1]) else: assert np.all(df >= df.iloc[-1]) assert num_iteration_history[:3] == num_iteration_history[3:]
4,658
37.825
90
py
xgboost
xgboost-master/tests/python/test_updaters.py
import json from string import ascii_lowercase from typing import Any, Dict, List import numpy as np import pytest from hypothesis import given, note, settings, strategies import xgboost as xgb from xgboost import testing as tm from xgboost.testing.params import ( cat_parameter_strategy, exact_parameter_strategy, hist_multi_parameter_strategy, hist_parameter_strategy, ) from xgboost.testing.updater import ( check_get_quantile_cut, check_init_estimation, check_quantile_loss, ) def train_result(param, dmat, num_rounds): result = {} booster = xgb.train( param, dmat, num_rounds, evals=[(dmat, "train")], verbose_eval=False, evals_result=result, ) assert booster.num_features() == dmat.num_col() assert booster.num_boosted_rounds() == num_rounds assert booster.feature_names == dmat.feature_names assert booster.feature_types == dmat.feature_types return result class TestTreeMethodMulti: @given( exact_parameter_strategy, strategies.integers(1, 20), tm.multi_dataset_strategy ) @settings(deadline=None, print_blob=True) def test_exact(self, param: dict, num_rounds: int, dataset: tm.TestDataset) -> None: if dataset.name.endswith("-l1"): return param["tree_method"] = "exact" param = dataset.set_params(param) result = train_result(param, dataset.get_dmat(), num_rounds) assert tm.non_increasing(result["train"][dataset.metric]) @given( exact_parameter_strategy, hist_parameter_strategy, strategies.integers(1, 20), tm.multi_dataset_strategy, ) @settings(deadline=None, print_blob=True) def test_approx(self, param, hist_param, num_rounds, dataset): param["tree_method"] = "approx" param = dataset.set_params(param) param.update(hist_param) result = train_result(param, dataset.get_dmat(), num_rounds) note(result) assert tm.non_increasing(result["train"][dataset.metric]) @given( exact_parameter_strategy, hist_multi_parameter_strategy, strategies.integers(1, 20), tm.multi_dataset_strategy, ) @settings(deadline=None, print_blob=True) def test_hist( self, param: dict, hist_param: dict, 
num_rounds: int, dataset: tm.TestDataset ) -> None: if dataset.name.endswith("-l1"): return param["tree_method"] = "hist" param = dataset.set_params(param) param.update(hist_param) result = train_result(param, dataset.get_dmat(), num_rounds) note(result) assert tm.non_increasing(result["train"][dataset.metric]) class TestTreeMethod: USE_ONEHOT = np.iinfo(np.int32).max USE_PART = 1 @given( exact_parameter_strategy, strategies.integers(1, 20), tm.make_dataset_strategy() ) @settings(deadline=None, print_blob=True) def test_exact(self, param, num_rounds, dataset): if dataset.name.endswith("-l1"): return param['tree_method'] = 'exact' param = dataset.set_params(param) result = train_result(param, dataset.get_dmat(), num_rounds) assert tm.non_increasing(result['train'][dataset.metric]) @given( exact_parameter_strategy, hist_parameter_strategy, strategies.integers(1, 20), tm.make_dataset_strategy(), ) @settings(deadline=None, print_blob=True) def test_approx(self, param, hist_param, num_rounds, dataset): param["tree_method"] = "approx" param = dataset.set_params(param) param.update(hist_param) result = train_result(param, dataset.get_dmat(), num_rounds) note(result) assert tm.non_increasing(result["train"][dataset.metric]) @pytest.mark.skipif(**tm.no_sklearn()) def test_pruner(self): import sklearn params = {'tree_method': 'exact'} cancer = sklearn.datasets.load_breast_cancer() X = cancer['data'] y = cancer["target"] dtrain = xgb.DMatrix(X, y) booster = xgb.train(params, dtrain=dtrain, num_boost_round=10) grown = str(booster.get_dump()) params = {'updater': 'prune', 'process_type': 'update', 'gamma': '0.2'} booster = xgb.train(params, dtrain=dtrain, num_boost_round=10, xgb_model=booster) after_prune = str(booster.get_dump()) assert grown != after_prune booster = xgb.train(params, dtrain=dtrain, num_boost_round=10, xgb_model=booster) second_prune = str(booster.get_dump()) # Second prune should not change the tree assert after_prune == second_prune @given( 
exact_parameter_strategy, hist_parameter_strategy, strategies.integers(1, 20), tm.make_dataset_strategy() ) @settings(deadline=None, print_blob=True) def test_hist(self, param: dict, hist_param: dict, num_rounds: int, dataset: tm.TestDataset) -> None: param['tree_method'] = 'hist' param = dataset.set_params(param) param.update(hist_param) result = train_result(param, dataset.get_dmat(), num_rounds) note(result) assert tm.non_increasing(result['train'][dataset.metric]) def test_hist_categorical(self): # hist must be same as exact on all-categorial data ag_dtrain, ag_dtest = tm.load_agaricus(__file__) ag_param = {'max_depth': 2, 'tree_method': 'hist', 'eta': 1, 'verbosity': 0, 'objective': 'binary:logistic', 'eval_metric': 'auc'} hist_res = {} exact_res = {} xgb.train( ag_param, ag_dtrain, 10, evals=[(ag_dtrain, "train"), (ag_dtest, "test")], evals_result=hist_res ) ag_param["tree_method"] = "exact" xgb.train( ag_param, ag_dtrain, 10, evals=[(ag_dtrain, "train"), (ag_dtest, "test")], evals_result=exact_res ) assert hist_res['train']['auc'] == exact_res['train']['auc'] assert hist_res['test']['auc'] == exact_res['test']['auc'] @pytest.mark.skipif(**tm.no_sklearn()) def test_hist_degenerate_case(self): # Test a degenerate case where the quantile sketcher won't return any # quantile points for a particular feature (the second feature in # this example). Source: https://github.com/dmlc/xgboost/issues/2943 nan = np.nan param = {'missing': nan, 'tree_method': 'hist'} model = xgb.XGBRegressor(**param) X = np.array([[6.18827160e+05, 1.73000000e+02], [6.37345679e+05, nan], [6.38888889e+05, nan], [6.28086420e+05, nan]]) y = [1000000., 0., 0., 500000.] 
w = [0, 0, 1, 0] model.fit(X, y, sample_weight=w) @given(tm.sparse_datasets_strategy) @settings(deadline=None, print_blob=True) def test_sparse(self, dataset): param = {"tree_method": "hist", "max_bin": 64} hist_result = train_result(param, dataset.get_dmat(), 16) note(hist_result) assert tm.non_increasing(hist_result['train'][dataset.metric]) param = {"tree_method": "approx", "max_bin": 64} approx_result = train_result(param, dataset.get_dmat(), 16) note(approx_result) assert tm.non_increasing(approx_result['train'][dataset.metric]) np.testing.assert_allclose( hist_result["train"]["rmse"], approx_result["train"]["rmse"] ) def run_invalid_category(self, tree_method: str) -> None: rng = np.random.default_rng() # too large X = rng.integers(low=0, high=4, size=1000).reshape(100, 10) y = rng.normal(loc=0, scale=1, size=100) X[13, 7] = np.iinfo(np.int32).max + 1 # Check is performed during sketching. Xy = xgb.DMatrix(X, y, feature_types=["c"] * 10) with pytest.raises(ValueError): xgb.train({"tree_method": tree_method}, Xy) X[13, 7] = 16777216 Xy = xgb.DMatrix(X, y, feature_types=["c"] * 10) with pytest.raises(ValueError): xgb.train({"tree_method": tree_method}, Xy) # mixed positive and negative values X = rng.normal(loc=0, scale=1, size=1000).reshape(100, 10) y = rng.normal(loc=0, scale=1, size=100) Xy = xgb.DMatrix(X, y, feature_types=["c"] * 10) with pytest.raises(ValueError): xgb.train({"tree_method": tree_method}, Xy) if tree_method == "gpu_hist": import cupy as cp X, y = cp.array(X), cp.array(y) with pytest.raises(ValueError): Xy = xgb.QuantileDMatrix(X, y, feature_types=["c"] * 10) def test_invalid_category(self) -> None: self.run_invalid_category("approx") self.run_invalid_category("hist") def run_max_cat(self, tree_method: str) -> None: """Test data with size smaller than number of categories.""" import pandas as pd rng = np.random.default_rng(0) n_cat = 100 n = 5 X = pd.Series( ["".join(rng.choice(list(ascii_lowercase), size=3)) for i in range(n_cat)], 
dtype="category", )[:n].to_frame() reg = xgb.XGBRegressor( enable_categorical=True, tree_method=tree_method, n_estimators=10, ) y = pd.Series(range(n)) reg.fit(X=X, y=y, eval_set=[(X, y)]) assert tm.non_increasing(reg.evals_result()["validation_0"]["rmse"]) @pytest.mark.parametrize("tree_method", ["hist", "approx"]) @pytest.mark.skipif(**tm.no_pandas()) def test_max_cat(self, tree_method) -> None: self.run_max_cat(tree_method) def run_categorical_missing( self, rows: int, cols: int, cats: int, tree_method: str ) -> None: parameters: Dict[str, Any] = {"tree_method": tree_method} cat, label = tm.make_categorical( rows, n_features=cols, n_categories=cats, onehot=False, sparsity=0.5 ) Xy = xgb.DMatrix(cat, label, enable_categorical=True) def run(max_cat_to_onehot: int): # Test with onehot splits parameters["max_cat_to_onehot"] = max_cat_to_onehot evals_result: Dict[str, Dict] = {} booster = xgb.train( parameters, Xy, num_boost_round=16, evals=[(Xy, "Train")], evals_result=evals_result ) assert tm.non_increasing(evals_result["Train"]["rmse"]) y_predt = booster.predict(Xy) rmse = tm.root_mean_square(label, y_predt) np.testing.assert_allclose( rmse, evals_result["Train"]["rmse"][-1], rtol=2e-5 ) # Test with OHE split run(self.USE_ONEHOT) # Test with partition-based split run(self.USE_PART) def run_categorical_ohe( self, rows: int, cols: int, rounds: int, cats: int, tree_method: str ) -> None: onehot, label = tm.make_categorical(rows, cols, cats, True) cat, _ = tm.make_categorical(rows, cols, cats, False) by_etl_results: Dict[str, Dict[str, List[float]]] = {} by_builtin_results: Dict[str, Dict[str, List[float]]] = {} parameters: Dict[str, Any] = { "tree_method": tree_method, # Use one-hot exclusively "max_cat_to_onehot": self.USE_ONEHOT } m = xgb.DMatrix(onehot, label, enable_categorical=False) xgb.train( parameters, m, num_boost_round=rounds, evals=[(m, "Train")], evals_result=by_etl_results, ) m = xgb.DMatrix(cat, label, enable_categorical=True) xgb.train( parameters, m, 
num_boost_round=rounds, evals=[(m, "Train")], evals_result=by_builtin_results, ) # There are guidelines on how to specify tolerance based on considering output # as random variables. But in here the tree construction is extremely sensitive # to floating point errors. An 1e-5 error in a histogram bin can lead to an # entirely different tree. So even though the test is quite lenient, hypothesis # can still pick up falsifying examples from time to time. np.testing.assert_allclose( np.array(by_etl_results["Train"]["rmse"]), np.array(by_builtin_results["Train"]["rmse"]), rtol=1e-3, ) assert tm.non_increasing(by_builtin_results["Train"]["rmse"]) by_grouping: Dict[str, Dict[str, List[float]]] = {} # switch to partition-based splits parameters["max_cat_to_onehot"] = self.USE_PART parameters["reg_lambda"] = 0 m = xgb.DMatrix(cat, label, enable_categorical=True) xgb.train( parameters, m, num_boost_round=rounds, evals=[(m, "Train")], evals_result=by_grouping, ) rmse_oh = by_builtin_results["Train"]["rmse"] rmse_group = by_grouping["Train"]["rmse"] # always better or equal to onehot when there's no regularization. 
for a, b in zip(rmse_oh, rmse_group): assert a >= b parameters["reg_lambda"] = 1.0 by_grouping = {} xgb.train( parameters, m, num_boost_round=32, evals=[(m, "Train")], evals_result=by_grouping, ) assert tm.non_increasing(by_grouping["Train"]["rmse"]), by_grouping @given(strategies.integers(10, 400), strategies.integers(3, 8), strategies.integers(1, 2), strategies.integers(4, 7)) @settings(deadline=None, print_blob=True) @pytest.mark.skipif(**tm.no_pandas()) def test_categorical_ohe( self, rows: int, cols: int, rounds: int, cats: int ) -> None: self.run_categorical_ohe(rows, cols, rounds, cats, "approx") self.run_categorical_ohe(rows, cols, rounds, cats, "hist") @given( tm.categorical_dataset_strategy, exact_parameter_strategy, hist_parameter_strategy, cat_parameter_strategy, strategies.integers(4, 32), strategies.sampled_from(["hist", "approx"]), ) @settings(deadline=None, print_blob=True) @pytest.mark.skipif(**tm.no_pandas()) def test_categorical( self, dataset: tm.TestDataset, exact_parameters: Dict[str, Any], hist_parameters: Dict[str, Any], cat_parameters: Dict[str, Any], n_rounds: int, tree_method: str, ) -> None: cat_parameters.update(exact_parameters) cat_parameters.update(hist_parameters) cat_parameters["tree_method"] = tree_method results = train_result(cat_parameters, dataset.get_dmat(), n_rounds) tm.non_increasing(results["train"]["rmse"]) @given( hist_parameter_strategy, cat_parameter_strategy, strategies.sampled_from(["hist", "approx"]), ) @settings(deadline=None, print_blob=True) def test_categorical_ames_housing( self, hist_parameters: Dict[str, Any], cat_parameters: Dict[str, Any], tree_method: str, ) -> None: cat_parameters.update(hist_parameters) dataset = tm.TestDataset( "ames_housing", tm.data.get_ames_housing, "reg:squarederror", "rmse" ) cat_parameters["tree_method"] = tree_method results = train_result(cat_parameters, dataset.get_dmat(), 16) tm.non_increasing(results["train"]["rmse"]) @given( strategies.integers(10, 400), 
strategies.integers(3, 8), strategies.integers(4, 7) ) @settings(deadline=None, print_blob=True) @pytest.mark.skipif(**tm.no_pandas()) def test_categorical_missing(self, rows, cols, cats): self.run_categorical_missing(rows, cols, cats, "approx") self.run_categorical_missing(rows, cols, cats, "hist") def run_adaptive(self, tree_method, weighted) -> None: rng = np.random.RandomState(1994) from sklearn.datasets import make_regression from sklearn.utils import stats n_samples = 256 X, y = make_regression(n_samples, 16, random_state=rng) if weighted: w = rng.normal(size=n_samples) w -= w.min() Xy = xgb.DMatrix(X, y, weight=w) base_score = stats._weighted_percentile(y, w, percentile=50) else: Xy = xgb.DMatrix(X, y) base_score = np.median(y) booster_0 = xgb.train( { "tree_method": tree_method, "base_score": base_score, "objective": "reg:absoluteerror", }, Xy, num_boost_round=1, ) booster_1 = xgb.train( {"tree_method": tree_method, "objective": "reg:absoluteerror"}, Xy, num_boost_round=1, ) config_0 = json.loads(booster_0.save_config()) config_1 = json.loads(booster_1.save_config()) def get_score(config: Dict) -> float: return float(config["learner"]["learner_model_param"]["base_score"]) assert get_score(config_0) == get_score(config_1) raw_booster = booster_1.save_raw(raw_format="deprecated") booster_2 = xgb.Booster(model_file=raw_booster) config_2 = json.loads(booster_2.save_config()) assert get_score(config_1) == get_score(config_2) raw_booster = booster_1.save_raw(raw_format="ubj") booster_2 = xgb.Booster(model_file=raw_booster) config_2 = json.loads(booster_2.save_config()) assert get_score(config_1) == get_score(config_2) booster_0 = xgb.train( { "tree_method": tree_method, "base_score": base_score + 1.0, "objective": "reg:absoluteerror", }, Xy, num_boost_round=1, ) config_0 = json.loads(booster_0.save_config()) np.testing.assert_allclose(get_score(config_0), get_score(config_1) + 1) evals_result: Dict[str, Dict[str, list]] = {} xgb.train( { "tree_method": 
tree_method, "objective": "reg:absoluteerror", "subsample": 0.8, "eta": 1.0, }, Xy, num_boost_round=10, evals=[(Xy, "Train")], evals_result=evals_result, ) mae = evals_result["Train"]["mae"] assert mae[-1] < 20.0 assert tm.non_increasing(mae) @pytest.mark.skipif(**tm.no_sklearn()) @pytest.mark.parametrize( "tree_method,weighted", [ ("approx", False), ("hist", False), ("approx", True), ("hist", True) ] ) def test_adaptive(self, tree_method, weighted) -> None: self.run_adaptive(tree_method, weighted) def test_init_estimation(self) -> None: check_init_estimation("hist") @pytest.mark.parametrize("weighted", [True, False]) def test_quantile_loss(self, weighted: bool) -> None: check_quantile_loss("hist", weighted) @pytest.mark.skipif(**tm.no_pandas()) @pytest.mark.parametrize("tree_method", ["hist"]) def test_get_quantile_cut(self, tree_method: str) -> None: check_get_quantile_cut(tree_method)
19,708
34.384201
105
py
xgboost
xgboost-master/tests/python/test_with_shap.py
import numpy as np import pytest import xgboost as xgb try: import shap except Exception: shap = None pass pytestmark = pytest.mark.skipif(shap is None, reason="Requires shap package") # xgboost removed ntree_limit in 2.0, which breaks the SHAP package. @pytest.mark.xfail def test_with_shap() -> None: from sklearn.datasets import fetch_california_housing X, y = fetch_california_housing(return_X_y=True) dtrain = xgb.DMatrix(X, label=y) model = xgb.train({"learning_rate": 0.01}, dtrain, 10) explainer = shap.TreeExplainer(model) shap_values = explainer.shap_values(X) margin = model.predict(dtrain, output_margin=True) assert np.allclose( np.sum(shap_values, axis=len(shap_values.shape) - 1), margin - explainer.expected_value, 1e-3, 1e-3, )
832
24.242424
77
py
xgboost
xgboost-master/tests/python/test_parse_tree.py
import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm pytestmark = pytest.mark.skipif(**tm.no_pandas()) dpath = 'demo/data/' rng = np.random.RandomState(1994) class TestTreesToDataFrame: def build_model(self, max_depth, num_round): dtrain, _ = tm.load_agaricus(__file__) param = {'max_depth': max_depth, 'objective': 'binary:logistic', 'verbosity': 1} num_round = num_round bst = xgb.train(param, dtrain, num_round) return bst def parse_dumped_model(self, booster, item_to_get, splitter): item_to_get += '=' txt_dump = booster.get_dump(with_stats=True) tree_list = [tree.split('/n') for tree in txt_dump] split_trees = [tree[0].split(item_to_get)[1:] for tree in tree_list] res = sum([float(line.split(splitter)[0]) for tree in split_trees for line in tree]) return res def test_trees_to_dataframe(self): bst = self.build_model(max_depth=5, num_round=10) gain_from_dump = self.parse_dumped_model(booster=bst, item_to_get='gain', splitter=',') cover_from_dump = self.parse_dumped_model(booster=bst, item_to_get='cover', splitter='\n') # method being tested df = bst.trees_to_dataframe() # test for equality of gains gain_from_df = df[df.Feature != 'Leaf'][['Gain']].sum() assert np.allclose(gain_from_dump, gain_from_df) # test for equality of covers cover_from_df = df.Cover.sum() assert np.allclose(cover_from_dump, cover_from_df) def run_tree_to_df_categorical(self, tree_method: str) -> None: X, y = tm.make_categorical(100, 10, 31, False) Xy = xgb.DMatrix(X, y, enable_categorical=True) booster = xgb.train({"tree_method": tree_method}, Xy, num_boost_round=10) df = booster.trees_to_dataframe() for _, x in df.iterrows(): if x["Feature"] != "Leaf": assert len(x["Category"]) >= 1 def test_tree_to_df_categorical(self) -> None: self.run_tree_to_df_categorical("approx") def run_split_value_histograms(self, tree_method) -> None: X, y = tm.make_categorical(1000, 10, 13, False) reg = xgb.XGBRegressor(tree_method=tree_method, enable_categorical=True) reg.fit(X, 
y) with pytest.raises(ValueError, match="doesn't"): reg.get_booster().get_split_value_histogram("3", bins=5) def test_split_value_histograms(self): self.run_split_value_histograms("approx")
2,770
36.958904
81
py
xgboost
xgboost-master/tests/python-gpu/test_large_input.py
import cupy as cp import numpy as np import pytest import xgboost as xgb # Test for integer overflow or out of memory exceptions def test_large_input(): available_bytes, _ = cp.cuda.runtime.memGetInfo() # 15 GB required_bytes = 1.5e10 if available_bytes < required_bytes: pytest.skip("Not enough memory on this device") n = 1000 m = ((1 << 31) + n - 1) // n assert np.log2(m * n) > 31 X = cp.ones((m, n), dtype=np.float32) y = cp.ones(m) w = cp.ones(m) dmat = xgb.QuantileDMatrix(X, y, weight=w) booster = xgb.train({"tree_method": "gpu_hist", "max_depth": 1}, dmat, 1) del y booster.inplace_predict(X)
670
25.84
77
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_linear.py
import pytest from hypothesis import assume, given, note, settings, strategies import xgboost as xgb from xgboost import testing as tm pytestmark = tm.timeout(10) parameter_strategy = strategies.fixed_dictionaries({ 'booster': strategies.just('gblinear'), 'eta': strategies.floats(0.01, 0.25), 'tolerance': strategies.floats(1e-5, 1e-2), 'nthread': strategies.integers(1, 4), 'feature_selector': strategies.sampled_from(['cyclic', 'shuffle', 'greedy', 'thrifty']), 'top_k': strategies.integers(1, 10), }) def train_result(param, dmat, num_rounds): result = {} booster = xgb.train( param, dmat, num_rounds, [(dmat, 'train')], verbose_eval=False, evals_result=result ) assert booster.num_boosted_rounds() == num_rounds return result class TestGPULinear: @given(parameter_strategy, strategies.integers(10, 50), tm.make_dataset_strategy()) @settings(deadline=None, max_examples=20, print_blob=True) def test_gpu_coordinate(self, param, num_rounds, dataset): assume(len(dataset.y) > 0) param['updater'] = 'gpu_coord_descent' param = dataset.set_params(param) result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric] note(result) assert tm.non_increasing(result) # Loss is not guaranteed to always decrease because of regularisation parameters # We test a weaker condition that the loss has not increased between the first and last # iteration @given( parameter_strategy, strategies.integers(10, 50), tm.make_dataset_strategy(), strategies.floats(1e-5, 0.8), strategies.floats(1e-5, 0.8) ) @settings(deadline=None, max_examples=20, print_blob=True) def test_gpu_coordinate_regularised(self, param, num_rounds, dataset, alpha, lambd): assume(len(dataset.y) > 0) param['updater'] = 'gpu_coord_descent' param['alpha'] = alpha param['lambda'] = lambd param = dataset.set_params(param) result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric] note(result) assert tm.non_increasing([result[0], result[-1]]) @pytest.mark.skipif(**tm.no_cupy()) def 
test_gpu_coordinate_from_cupy(self): # Training linear model is quite expensive, so we don't include it in # test_from_cupy.py import cupy params = {'booster': 'gblinear', 'updater': 'gpu_coord_descent', 'n_estimators': 100} X, y = tm.get_california_housing() cpu_model = xgb.XGBRegressor(**params) cpu_model.fit(X, y) cpu_predt = cpu_model.predict(X) X = cupy.array(X) y = cupy.array(y) gpu_model = xgb.XGBRegressor(**params) gpu_model.fit(X, y) gpu_predt = gpu_model.predict(X) cupy.testing.assert_allclose(cpu_predt, gpu_predt)
2,973
36.175
93
py
xgboost
xgboost-master/tests/python-gpu/test_device_quantile_dmatrix.py
import sys import numpy as np import pytest from hypothesis import given, settings, strategies import xgboost as xgb from xgboost import testing as tm from xgboost.testing.data import check_inf sys.path.append("tests/python") import test_quantile_dmatrix as tqd class TestQuantileDMatrix: cputest = tqd.TestQuantileDMatrix() @pytest.mark.skipif(**tm.no_cupy()) def test_dmatrix_feature_weights(self) -> None: import cupy as cp rng = cp.random.RandomState(1994) data = rng.randn(5, 5) m = xgb.DMatrix(data) feature_weights = rng.uniform(size=5) m.set_info(feature_weights=feature_weights) cp.testing.assert_array_equal( cp.array(m.get_float_info("feature_weights")), feature_weights.astype(np.float32), ) @pytest.mark.skipif(**tm.no_cupy()) def test_dmatrix_cupy_init(self) -> None: import cupy as cp data = cp.random.randn(5, 5) xgb.QuantileDMatrix(data, cp.ones(5, dtype=np.float64)) @pytest.mark.parametrize( "on_device,tree_method", [(True, "hist"), (False, "gpu_hist"), (False, "hist"), (True, "gpu_hist")], ) def test_initialization(self, on_device: bool, tree_method: str) -> None: n_samples, n_features, max_bin = 64, 3, 16 X, y, w = tm.make_batches( n_samples, n_features=n_features, n_batches=1, use_cupy=on_device, ) # Init SparsePage Xy = xgb.DMatrix(X[0], y[0], weight=w[0]) # Init GIDX/Ellpack xgb.train( {"tree_method": tree_method, "max_bin": max_bin}, Xy, num_boost_round=1, ) # query cuts from GIDX/Ellpack qXy = xgb.QuantileDMatrix(X[0], y[0], weight=w[0], max_bin=max_bin, ref=Xy) tm.predictor_equal(Xy, qXy) with pytest.raises(ValueError, match="Inconsistent"): # max_bin changed. xgb.QuantileDMatrix(X[0], y[0], weight=w[0], max_bin=max_bin - 1, ref=Xy) # No error, DMatrix can be modified for different training session. 
xgb.train( {"tree_method": tree_method, "max_bin": max_bin - 1}, Xy, num_boost_round=1, ) # Init Ellpack/GIDX Xy = xgb.QuantileDMatrix(X[0], y[0], weight=w[0], max_bin=max_bin) # Init GIDX/Ellpack xgb.train( {"tree_method": tree_method, "max_bin": max_bin}, Xy, num_boost_round=1, ) # query cuts from GIDX/Ellpack qXy = xgb.QuantileDMatrix(X[0], y[0], weight=w[0], max_bin=max_bin, ref=Xy) tm.predictor_equal(Xy, qXy) with pytest.raises(ValueError, match="Inconsistent"): # max_bin changed. xgb.QuantileDMatrix(X[0], y[0], weight=w[0], max_bin=max_bin - 1, ref=Xy) Xy = xgb.DMatrix(X[0], y[0], weight=w[0]) booster0 = xgb.train( {"tree_method": "hist", "max_bin": max_bin, "max_depth": 4}, Xy, num_boost_round=1, ) booster1 = xgb.train( {"tree_method": "gpu_hist", "max_bin": max_bin, "max_depth": 4}, Xy, num_boost_round=1, ) qXy = xgb.QuantileDMatrix(X[0], y[0], weight=w[0], max_bin=max_bin, ref=Xy) predt0 = booster0.predict(qXy) predt1 = booster1.predict(qXy) np.testing.assert_allclose(predt0, predt1) @pytest.mark.skipif(**tm.no_cupy()) @pytest.mark.parametrize( "tree_method,max_bin", [("hist", 16), ("gpu_hist", 16), ("hist", 64), ("gpu_hist", 64)], ) def test_interoperability(self, tree_method: str, max_bin: int) -> None: import cupy as cp n_samples = 64 n_features = 3 X, y, w = tm.make_batches( n_samples, n_features=n_features, n_batches=1, use_cupy=False ) # from CPU Xy = xgb.QuantileDMatrix(X[0], y[0], weight=w[0], max_bin=max_bin) booster_0 = xgb.train( {"tree_method": tree_method, "max_bin": max_bin}, Xy, num_boost_round=4 ) X[0] = cp.array(X[0]) y[0] = cp.array(y[0]) w[0] = cp.array(w[0]) # from GPU Xy = xgb.QuantileDMatrix(X[0], y[0], weight=w[0], max_bin=max_bin) booster_1 = xgb.train( {"tree_method": tree_method, "max_bin": max_bin}, Xy, num_boost_round=4 ) cp.testing.assert_allclose( booster_0.inplace_predict(X[0]), booster_1.inplace_predict(X[0]) ) with pytest.raises(ValueError, match=r"Only.*hist.*"): xgb.train( {"tree_method": "approx", "max_bin": max_bin}, 
Xy, num_boost_round=4 ) @pytest.mark.skipif(**tm.no_cupy()) def test_metainfo(self) -> None: import cupy as cp rng = cp.random.RandomState(1994) rows = 10 cols = 3 data = rng.randn(rows, cols) labels = rng.randn(rows) fw = rng.randn(rows) fw -= fw.min() m = xgb.QuantileDMatrix(data=data, label=labels, feature_weights=fw) got_fw = m.get_float_info("feature_weights") got_labels = m.get_label() cp.testing.assert_allclose(fw, got_fw) cp.testing.assert_allclose(labels, got_labels) @pytest.mark.skipif(**tm.no_cupy()) @pytest.mark.skipif(**tm.no_cudf()) def test_ref_dmatrix(self) -> None: import cupy as cp rng = cp.random.RandomState(1994) self.cputest.run_ref_dmatrix(rng, "gpu_hist", False) @given( strategies.integers(1, 1000), strategies.integers(1, 100), strategies.fractions(0, 0.99), ) @settings(print_blob=True, deadline=None) def test_to_csr(self, n_samples, n_features, sparsity) -> None: import cupy as cp X, y = tm.make_sparse_regression(n_samples, n_features, sparsity, False) h_X = X.astype(np.float32) csr = h_X h_X = X.toarray() h_X[h_X == 0] = np.nan h_m = xgb.QuantileDMatrix(data=h_X) h_ret = h_m.get_data() d_X = cp.array(h_X) d_m = xgb.QuantileDMatrix(data=d_X, label=y) d_ret = d_m.get_data() np.testing.assert_equal(csr.indptr, d_ret.indptr) np.testing.assert_equal(csr.indices, d_ret.indices) np.testing.assert_equal(h_ret.indptr, d_ret.indptr) np.testing.assert_equal(h_ret.indices, d_ret.indices) booster = xgb.train( {"tree_method": "hist", "device": "cuda:0"}, dtrain=d_m ) np.testing.assert_allclose( booster.predict(d_m), booster.predict(xgb.DMatrix(d_m.get_data())), atol=1e-6, ) def test_ltr(self) -> None: import cupy as cp X, y, qid, w = tm.make_ltr(100, 3, 3, 5) # make sure GPU is used to run sketching. 
cpX = cp.array(X) Xy_qdm = xgb.QuantileDMatrix(cpX, y, qid=qid, weight=w) Xy = xgb.DMatrix(X, y, qid=qid, weight=w) xgb.train({"tree_method": "gpu_hist", "objective": "rank:ndcg"}, Xy) from_dm = xgb.QuantileDMatrix(X, weight=w, ref=Xy) from_qdm = xgb.QuantileDMatrix(X, weight=w, ref=Xy_qdm) assert tm.predictor_equal(from_qdm, from_dm) @pytest.mark.skipif(**tm.no_cupy()) def test_check_inf(self) -> None: import cupy as cp rng = cp.random.default_rng(1994) check_inf(rng)
7,477
30.821277
85
py
xgboost
xgboost-master/tests/python-gpu/conftest.py
import pytest from xgboost import testing as tm def has_rmm(): return tm.no_rmm()["condition"] @pytest.fixture(scope="session", autouse=True) def setup_rmm_pool(request, pytestconfig): tm.setup_rmm_pool(request, pytestconfig) def pytest_addoption(parser: pytest.Parser) -> None: parser.addoption( "--use-rmm-pool", action="store_true", default=False, help="Use RMM pool" ) def pytest_collection_modifyitems(config, items): if config.getoption("--use-rmm-pool"): blocklist = [ "python-gpu/test_gpu_demos.py::test_dask_training", "python-gpu/test_gpu_prediction.py::TestGPUPredict::test_shap", "python-gpu/test_gpu_linear.py::TestGPULinear", ] skip_mark = pytest.mark.skip( reason="This test is not run when --use-rmm-pool flag is active" ) for item in items: if any(item.nodeid.startswith(x) for x in blocklist): item.add_marker(skip_mark)
992
28.205882
81
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_data_iterator.py
import sys import pytest from hypothesis import given, settings, strategies from xgboost.testing import no_cupy sys.path.append("tests/python") from test_data_iterator import run_data_iterator from test_data_iterator import test_single_batch as cpu_single_batch def test_gpu_single_batch() -> None: cpu_single_batch("gpu_hist") @pytest.mark.skipif(**no_cupy()) @given( strategies.integers(0, 1024), strategies.integers(1, 7), strategies.integers(0, 8), strategies.booleans(), strategies.booleans(), ) @settings(deadline=None, max_examples=10, print_blob=True) def test_gpu_data_iterator( n_samples_per_batch: int, n_features: int, n_batches: int, subsample: bool, use_cupy: bool, ) -> None: run_data_iterator( n_samples_per_batch, n_features, n_batches, "gpu_hist", subsample, use_cupy ) def test_cpu_data_iterator() -> None: """Make sure CPU algorithm can handle GPU inputs""" run_data_iterator(1024, 2, 3, "approx", False, True)
1,008
23.609756
83
py
xgboost
xgboost-master/tests/python-gpu/load_pickle.py
"""Loading a pickled model generated by test_pickling.py, only used by `test_gpu_with_dask.py`""" import json import os import numpy as np import pytest from test_gpu_pickling import build_dataset, load_pickle, model_path import xgboost as xgb from xgboost import testing as tm class TestLoadPickle: def test_load_pkl(self) -> None: """Test whether prediction is correct.""" assert os.environ["CUDA_VISIBLE_DEVICES"] == "-1" bst = load_pickle(model_path) x, y = build_dataset() if isinstance(bst, xgb.Booster): test_x = xgb.DMatrix(x) res = bst.predict(test_x) else: res = bst.predict(x) assert len(res) == 10 bst.set_params(n_jobs=1) # triggers a re-configuration res = bst.predict(x) assert len(res) == 10 def test_context_is_removed(self) -> None: """Under invalid CUDA_VISIBLE_DEVICES, context should reset""" assert os.environ["CUDA_VISIBLE_DEVICES"] == "-1" bst = load_pickle(model_path) config = bst.save_config() config = json.loads(config) assert config["learner"]["generic_param"]["device"] == "cpu" def test_context_is_preserved(self) -> None: """Test the device context is preserved after pickling.""" assert "CUDA_VISIBLE_DEVICES" not in os.environ.keys() bst = load_pickle(model_path) config = bst.save_config() config = json.loads(config) assert config["learner"]["generic_param"]["device"] == "cuda:0" def test_wrap_gpu_id(self) -> None: assert os.environ["CUDA_VISIBLE_DEVICES"] == "0" bst = load_pickle(model_path) config = bst.save_config() config = json.loads(config) assert config["learner"]["generic_param"]["device"] == "cuda:0" x, y = build_dataset() test_x = xgb.DMatrix(x) res = bst.predict(test_x) assert len(res) == 10 def test_training_on_cpu_only_env(self) -> None: assert os.environ["CUDA_VISIBLE_DEVICES"] == "-1" rng = np.random.RandomState(1994) X = rng.randn(10, 10) y = rng.randn(10) with pytest.warns(UserWarning, match="No visible GPU is found"): # Test no thrust exception is thrown with pytest.raises(xgb.core.XGBoostError, match="have at least one device"): 
xgb.train({"tree_method": "gpu_hist"}, xgb.DMatrix(X, y))
2,464
35.25
88
py
xgboost
xgboost-master/tests/python-gpu/test_monotonic_constraints.py
import sys import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm sys.path.append("tests/python") import test_monotone_constraints as tmc rng = np.random.RandomState(1994) def non_decreasing(L): return all((x - y) < 0.001 for x, y in zip(L, L[1:])) def non_increasing(L): return all((y - x) < 0.001 for x, y in zip(L, L[1:])) def assert_constraint(constraint, tree_method): from sklearn.datasets import make_regression n = 1000 X, y = make_regression(n, random_state=rng, n_features=1, n_informative=1) dtrain = xgb.DMatrix(X, y) param = {} param['tree_method'] = tree_method param['monotone_constraints'] = "(" + str(constraint) + ")" bst = xgb.train(param, dtrain) dpredict = xgb.DMatrix(X[X[:, 0].argsort()]) pred = bst.predict(dpredict) if constraint > 0: assert non_decreasing(pred) elif constraint < 0: assert non_increasing(pred) @pytest.mark.skipif(**tm.no_sklearn()) def test_gpu_hist_basic(): assert_constraint(1, 'gpu_hist') assert_constraint(-1, 'gpu_hist') def test_gpu_hist_depthwise(): params = { 'tree_method': 'gpu_hist', 'grow_policy': 'depthwise', 'monotone_constraints': '(1, -1)' } model = xgb.train(params, tmc.training_dset) tmc.is_correctly_constrained(model) def test_gpu_hist_lossguide(): params = { 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'monotone_constraints': '(1, -1)' } model = xgb.train(params, tmc.training_dset) tmc.is_correctly_constrained(model)
1,609
23.769231
78
py
xgboost
xgboost-master/tests/python-gpu/test_from_cudf.py
import json import sys import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm sys.path.append("tests/python") from test_dmatrix import set_base_margin_info def dmatrix_from_cudf(input_type, DMatrixT, missing=np.NAN): '''Test constructing DMatrix from cudf''' import cudf import pandas as pd kRows = 80 kCols = 3 na = np.random.randn(kRows, kCols) na[:, 0:2] = na[:, 0:2].astype(input_type) na[5, 0] = missing na[3, 1] = missing pa = pd.DataFrame({'0': na[:, 0], '1': na[:, 1], '2': na[:, 2].astype(np.int32)}) np_label = np.random.randn(kRows).astype(input_type) pa_label = pd.DataFrame(np_label) cd = cudf.from_pandas(pa) cd_label = cudf.from_pandas(pa_label).iloc[:, 0] dtrain = DMatrixT(cd, missing=missing, label=cd_label) assert dtrain.num_col() == kCols assert dtrain.num_row() == kRows def _test_from_cudf(DMatrixT): '''Test constructing DMatrix from cudf''' import cudf dmatrix_from_cudf(np.float32, DMatrixT, np.NAN) dmatrix_from_cudf(np.float64, DMatrixT, np.NAN) dmatrix_from_cudf(np.int8, DMatrixT, 2) dmatrix_from_cudf(np.int32, DMatrixT, -2) dmatrix_from_cudf(np.int64, DMatrixT, -3) cd = cudf.DataFrame({'x': [1, 2, 3], 'y': [0.1, 0.2, 0.3]}) dtrain = DMatrixT(cd) assert dtrain.feature_names == ['x', 'y'] assert dtrain.feature_types == ['int', 'float'] series = cudf.DataFrame({'x': [1, 2, 3]}).iloc[:, 0] assert isinstance(series, cudf.Series) dtrain = DMatrixT(series) assert dtrain.feature_names == ['x'] assert dtrain.feature_types == ['int'] with pytest.raises(ValueError, match=r".*multi.*"): dtrain = DMatrixT(cd, label=cd) xgb.train({"tree_method": "gpu_hist", "objective": "multi:softprob"}, dtrain) # Test when number of elements is less than 8 X = cudf.DataFrame({'x': cudf.Series([0, 1, 2, np.NAN, 4], dtype=np.int32)}) dtrain = DMatrixT(X) assert dtrain.num_col() == 1 assert dtrain.num_row() == 5 # Boolean is not supported. 
X_boolean = cudf.DataFrame({'x': cudf.Series([True, False])}) with pytest.raises(Exception): dtrain = DMatrixT(X_boolean) y_boolean = cudf.DataFrame({ 'x': cudf.Series([True, False, True, True, True])}) with pytest.raises(Exception): dtrain = DMatrixT(X_boolean, label=y_boolean) def _test_cudf_training(DMatrixT): import pandas as pd from cudf import DataFrame as df np.random.seed(1) X = pd.DataFrame(np.random.randn(50, 10)) y = pd.DataFrame(np.random.randn(50)) weights = np.random.random(50) + 1.0 cudf_weights = df.from_pandas(pd.DataFrame(weights)) base_margin = np.random.random(50) cudf_base_margin = df.from_pandas(pd.DataFrame(base_margin)) evals_result_cudf = {} dtrain_cudf = DMatrixT(df.from_pandas(X), df.from_pandas(y), weight=cudf_weights, base_margin=cudf_base_margin) params = {'gpu_id': 0, 'tree_method': 'gpu_hist'} xgb.train(params, dtrain_cudf, evals=[(dtrain_cudf, "train")], evals_result=evals_result_cudf) evals_result_np = {} dtrain_np = xgb.DMatrix(X, y, weight=weights, base_margin=base_margin) xgb.train(params, dtrain_np, evals=[(dtrain_np, "train")], evals_result=evals_result_np) assert np.array_equal(evals_result_cudf["train"]["rmse"], evals_result_np["train"]["rmse"]) def _test_cudf_metainfo(DMatrixT): import pandas as pd from cudf import DataFrame as df n = 100 X = np.random.random((n, 2)) dmat_cudf = DMatrixT(df.from_pandas(pd.DataFrame(X))) dmat = xgb.DMatrix(X) floats = np.random.random(n) uints = np.array([4, 2, 8]).astype("uint32") cudf_floats = df.from_pandas(pd.DataFrame(floats)) cudf_uints = df.from_pandas(pd.DataFrame(uints)) dmat.set_float_info('weight', floats) dmat.set_float_info('label', floats) dmat.set_float_info('base_margin', floats) dmat.set_uint_info('group', uints) dmat_cudf.set_info(weight=cudf_floats) dmat_cudf.set_info(label=cudf_floats) dmat_cudf.set_info(base_margin=cudf_floats) dmat_cudf.set_info(group=cudf_uints) # Test setting info with cudf DataFrame assert np.array_equal(dmat.get_float_info('weight'), 
dmat_cudf.get_float_info('weight')) assert np.array_equal(dmat.get_float_info('label'), dmat_cudf.get_float_info('label')) assert np.array_equal(dmat.get_float_info('base_margin'), dmat_cudf.get_float_info('base_margin')) assert np.array_equal(dmat.get_uint_info('group_ptr'), dmat_cudf.get_uint_info('group_ptr')) # Test setting info with cudf Series dmat_cudf.set_info(weight=cudf_floats[cudf_floats.columns[0]]) dmat_cudf.set_info(label=cudf_floats[cudf_floats.columns[0]]) dmat_cudf.set_info(base_margin=cudf_floats[cudf_floats.columns[0]]) dmat_cudf.set_info(group=cudf_uints[cudf_uints.columns[0]]) assert np.array_equal(dmat.get_float_info('weight'), dmat_cudf.get_float_info('weight')) assert np.array_equal(dmat.get_float_info('label'), dmat_cudf.get_float_info('label')) assert np.array_equal(dmat.get_float_info('base_margin'), dmat_cudf.get_float_info('base_margin')) assert np.array_equal(dmat.get_uint_info('group_ptr'), dmat_cudf.get_uint_info('group_ptr')) set_base_margin_info(df, DMatrixT, "gpu_hist") class TestFromColumnar: '''Tests for constructing DMatrix from data structure conforming Apache Arrow specification.''' @pytest.mark.skipif(**tm.no_cudf()) def test_simple_dmatrix_from_cudf(self): _test_from_cudf(xgb.DMatrix) @pytest.mark.skipif(**tm.no_cudf()) def test_device_dmatrix_from_cudf(self): _test_from_cudf(xgb.QuantileDMatrix) @pytest.mark.skipif(**tm.no_cudf()) def test_cudf_training_simple_dmatrix(self): _test_cudf_training(xgb.DMatrix) @pytest.mark.skipif(**tm.no_cudf()) def test_cudf_training_device_dmatrix(self): _test_cudf_training(xgb.QuantileDMatrix) @pytest.mark.skipif(**tm.no_cudf()) def test_cudf_metainfo_simple_dmatrix(self): _test_cudf_metainfo(xgb.DMatrix) @pytest.mark.skipif(**tm.no_cudf()) def test_cudf_metainfo_device_dmatrix(self): _test_cudf_metainfo(xgb.QuantileDMatrix) @pytest.mark.skipif(**tm.no_cudf()) def test_cudf_categorical(self) -> None: import cudf n_features = 30 _X, _y = tm.make_categorical(100, n_features, 17, False) X = 
cudf.from_pandas(_X) y = cudf.from_pandas(_y) Xy = xgb.DMatrix(X, y, enable_categorical=True) assert Xy.feature_types is not None assert len(Xy.feature_types) == X.shape[1] assert all(t == "c" for t in Xy.feature_types) Xy = xgb.QuantileDMatrix(X, y, enable_categorical=True) assert Xy.feature_types is not None assert len(Xy.feature_types) == X.shape[1] assert all(t == "c" for t in Xy.feature_types) # mixed dtypes X["1"] = X["1"].astype(np.int64) X["3"] = X["3"].astype(np.int64) df, cat_codes, _, _ = xgb.data._transform_cudf_df( X, None, None, enable_categorical=True ) assert X.shape[1] == n_features assert len(cat_codes) == X.shape[1] assert not cat_codes[0] assert not cat_codes[2] interfaces_str = xgb.data._cudf_array_interfaces(df, cat_codes) interfaces = json.loads(interfaces_str) assert len(interfaces) == X.shape[1] # test missing value X = cudf.DataFrame({"f0": ["a", "b", np.NaN]}) X["f0"] = X["f0"].astype("category") df, cat_codes, _, _ = xgb.data._transform_cudf_df( X, None, None, enable_categorical=True ) for col in cat_codes: assert col.has_nulls y = [0, 1, 2] with pytest.raises(ValueError): xgb.DMatrix(X, y) Xy = xgb.DMatrix(X, y, enable_categorical=True) assert Xy.num_row() == 3 assert Xy.num_col() == 1 with pytest.raises(ValueError, match="enable_categorical"): xgb.QuantileDMatrix(X, y) Xy = xgb.QuantileDMatrix(X, y, enable_categorical=True) assert Xy.num_row() == 3 assert Xy.num_col() == 1 X = X["f0"] with pytest.raises(ValueError): xgb.DMatrix(X, y) Xy = xgb.DMatrix(X, y, enable_categorical=True) assert Xy.num_row() == 3 assert Xy.num_col() == 1 @pytest.mark.skipif(**tm.no_cudf()) @pytest.mark.skipif(**tm.no_cupy()) @pytest.mark.skipif(**tm.no_sklearn()) @pytest.mark.skipif(**tm.no_pandas()) def test_cudf_training_with_sklearn(): import pandas as pd from cudf import DataFrame as df from cudf import Series as ss np.random.seed(1) X = pd.DataFrame(np.random.randn(50, 10)) y = pd.DataFrame((np.random.randn(50) > 0).astype(np.int8)) weights = 
np.random.random(50) + 1.0 cudf_weights = df.from_pandas(pd.DataFrame(weights)) base_margin = np.random.random(50) cudf_base_margin = df.from_pandas(pd.DataFrame(base_margin)) X_cudf = df.from_pandas(X) y_cudf = df.from_pandas(y) y_cudf_series = ss(data=y.iloc[:, 0]) for y_obj in [y_cudf, y_cudf_series]: clf = xgb.XGBClassifier(gpu_id=0, tree_method='gpu_hist') clf.fit(X_cudf, y_obj, sample_weight=cudf_weights, base_margin=cudf_base_margin, eval_set=[(X_cudf, y_obj)]) pred = clf.predict(X_cudf) assert np.array_equal(np.unique(pred), np.array([0, 1])) class IterForDMatrixTest(xgb.core.DataIter): '''A data iterator for XGBoost DMatrix. `reset` and `next` are required for any data iterator, other functions here are utilites for demonstration's purpose. ''' ROWS_PER_BATCH = 100 # data is splited by rows BATCHES = 16 def __init__(self, categorical): '''Generate some random data for demostration. Actual data can be anything that is currently supported by XGBoost. ''' import cudf self.rows = self.ROWS_PER_BATCH if categorical: self._data = [] self._labels = [] for i in range(self.BATCHES): X, y = tm.make_categorical(self.ROWS_PER_BATCH, 4, 13, False) self._data.append(cudf.from_pandas(X)) self._labels.append(y) else: rng = np.random.RandomState(1994) self._data = [ cudf.DataFrame( {'a': rng.randn(self.ROWS_PER_BATCH), 'b': rng.randn(self.ROWS_PER_BATCH)})] * self.BATCHES self._labels = [rng.randn(self.rows)] * self.BATCHES self.it = 0 # set iterator to 0 super().__init__(cache_prefix=None) def as_array(self): import cudf return cudf.concat(self._data) def as_array_labels(self): return np.concatenate(self._labels) def data(self): '''Utility function for obtaining current batch of data.''' return self._data[self.it] def labels(self): '''Utility function for obtaining current batch of label.''' return self._labels[self.it] def reset(self): '''Reset the iterator''' self.it = 0 def next(self, input_data): '''Yield next batch of data''' if self.it == len(self._data): # Return 0 
when there's no more batch. return 0 input_data(data=self.data(), label=self.labels()) self.it += 1 return 1 @pytest.mark.skipif(**tm.no_cudf()) @pytest.mark.parametrize("enable_categorical", [True, False]) def test_from_cudf_iter(enable_categorical): rounds = 100 it = IterForDMatrixTest(enable_categorical) params = {"tree_method": "gpu_hist"} # Use iterator m_it = xgb.QuantileDMatrix(it, enable_categorical=enable_categorical) reg_with_it = xgb.train(params, m_it, num_boost_round=rounds) X = it.as_array() y = it.as_array_labels() m = xgb.DMatrix(X, y, enable_categorical=enable_categorical) assert m_it.num_col() == m.num_col() assert m_it.num_row() == m.num_row() reg = xgb.train(params, m, num_boost_round=rounds) predict = reg.predict(m) predict_with_it = reg_with_it.predict(m_it) np.testing.assert_allclose(predict_with_it, predict)
12,553
33.584022
96
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_updaters.py
import sys from typing import Any, Dict import numpy as np import pytest from hypothesis import assume, given, note, settings, strategies import xgboost as xgb from xgboost import testing as tm from xgboost.testing.params import cat_parameter_strategy, hist_parameter_strategy from xgboost.testing.updater import ( check_get_quantile_cut, check_init_estimation, check_quantile_loss, ) sys.path.append("tests/python") import test_updaters as test_up pytestmark = tm.timeout(30) def train_result(param, dmat: xgb.DMatrix, num_rounds: int) -> dict: result: xgb.callback.TrainingCallback.EvalsLog = {} booster = xgb.train( param, dmat, num_rounds, [(dmat, "train")], verbose_eval=False, evals_result=result, ) assert booster.num_features() == dmat.num_col() assert booster.num_boosted_rounds() == num_rounds return result class TestGPUUpdatersMulti: @given( hist_parameter_strategy, strategies.integers(1, 20), tm.multi_dataset_strategy ) @settings(deadline=None, max_examples=50, print_blob=True) def test_hist(self, param, num_rounds, dataset): param["tree_method"] = "gpu_hist" param = dataset.set_params(param) result = train_result(param, dataset.get_dmat(), num_rounds) note(result) assert tm.non_increasing(result["train"][dataset.metric]) class TestGPUUpdaters: cputest = test_up.TestTreeMethod() @given( hist_parameter_strategy, strategies.integers(1, 20), tm.make_dataset_strategy() ) @settings(deadline=None, max_examples=50, print_blob=True) def test_gpu_hist(self, param, num_rounds, dataset): param["tree_method"] = "gpu_hist" param = dataset.set_params(param) result = train_result(param, dataset.get_dmat(), num_rounds) note(result) assert tm.non_increasing(result["train"][dataset.metric]) @given(tm.sparse_datasets_strategy) @settings(deadline=None, print_blob=True) def test_sparse(self, dataset): param = {"tree_method": "hist", "max_bin": 64} hist_result = train_result(param, dataset.get_dmat(), 16) note(hist_result) assert tm.non_increasing(hist_result['train'][dataset.metric]) 
param = {"tree_method": "gpu_hist", "max_bin": 64} gpu_hist_result = train_result(param, dataset.get_dmat(), 16) note(gpu_hist_result) assert tm.non_increasing(gpu_hist_result['train'][dataset.metric]) np.testing.assert_allclose( hist_result["train"]["rmse"], gpu_hist_result["train"]["rmse"], rtol=1e-2 ) @given(strategies.integers(10, 400), strategies.integers(3, 8), strategies.integers(1, 2), strategies.integers(4, 7)) @settings(deadline=None, max_examples=20, print_blob=True) @pytest.mark.skipif(**tm.no_pandas()) def test_categorical_ohe(self, rows, cols, rounds, cats): self.cputest.run_categorical_ohe(rows, cols, rounds, cats, "gpu_hist") @given( tm.categorical_dataset_strategy, hist_parameter_strategy, cat_parameter_strategy, strategies.integers(4, 32), ) @settings(deadline=None, max_examples=20, print_blob=True) @pytest.mark.skipif(**tm.no_pandas()) def test_categorical( self, dataset: tm.TestDataset, hist_parameters: Dict[str, Any], cat_parameters: Dict[str, Any], n_rounds: int, ) -> None: cat_parameters.update(hist_parameters) cat_parameters["tree_method"] = "gpu_hist" results = train_result(cat_parameters, dataset.get_dmat(), n_rounds) tm.non_increasing(results["train"]["rmse"]) @given( hist_parameter_strategy, cat_parameter_strategy, ) @settings(deadline=None, max_examples=10, print_blob=True) def test_categorical_ames_housing( self, hist_parameters: Dict[str, Any], cat_parameters: Dict[str, Any], ) -> None: cat_parameters.update(hist_parameters) dataset = tm.TestDataset( "ames_housing", tm.data.get_ames_housing, "reg:squarederror", "rmse" ) cat_parameters["tree_method"] = "gpu_hist" results = train_result(cat_parameters, dataset.get_dmat(), 16) tm.non_increasing(results["train"]["rmse"]) @given( strategies.integers(10, 400), strategies.integers(3, 8), strategies.integers(4, 7) ) @settings(deadline=None, max_examples=20, print_blob=True) @pytest.mark.skipif(**tm.no_pandas()) def test_categorical_missing(self, rows, cols, cats): 
self.cputest.run_categorical_missing(rows, cols, cats, "gpu_hist") @pytest.mark.skipif(**tm.no_pandas()) def test_max_cat(self) -> None: self.cputest.run_max_cat("gpu_hist") def test_categorical_32_cat(self): '''32 hits the bound of integer bitset, so special test''' rows = 1000 cols = 10 cats = 32 rounds = 4 self.cputest.run_categorical_ohe(rows, cols, rounds, cats, "gpu_hist") @pytest.mark.skipif(**tm.no_cupy()) def test_invalid_category(self): self.cputest.run_invalid_category("gpu_hist") @pytest.mark.skipif(**tm.no_cupy()) @given( hist_parameter_strategy, strategies.integers(1, 20), tm.make_dataset_strategy(), ) @settings(deadline=None, max_examples=20, print_blob=True) def test_gpu_hist_device_dmatrix( self, param: dict, num_rounds: int, dataset: tm.TestDataset ) -> None: # We cannot handle empty dataset yet assume(len(dataset.y) > 0) param['tree_method'] = 'gpu_hist' param = dataset.set_params(param) result = train_result( param, dataset.get_device_dmat(max_bin=param.get("max_bin", None)), num_rounds ) note(result) assert tm.non_increasing(result['train'][dataset.metric], tolerance=1e-3) @given( hist_parameter_strategy, strategies.integers(1, 3), tm.make_dataset_strategy(), ) @settings(deadline=None, max_examples=10, print_blob=True) def test_external_memory(self, param, num_rounds, dataset): if dataset.name.endswith("-l1"): return # We cannot handle empty dataset yet assume(len(dataset.y) > 0) param['tree_method'] = 'gpu_hist' param = dataset.set_params(param) m = dataset.get_external_dmat() external_result = train_result(param, m, num_rounds) del m assert tm.non_increasing(external_result['train'][dataset.metric]) def test_empty_dmatrix_prediction(self): # FIXME(trivialfis): This should be done with all updaters kRows = 0 kCols = 100 X = np.empty((kRows, kCols)) y = np.empty((kRows,)) dtrain = xgb.DMatrix(X, y) bst = xgb.train( {"verbosity": 2, "tree_method": "gpu_hist", "gpu_id": 0}, dtrain, verbose_eval=True, num_boost_round=6, evals=[(dtrain, 'Train')] ) 
kRows = 100 X = np.random.randn(kRows, kCols) dtest = xgb.DMatrix(X) predictions = bst.predict(dtest) # non-distributed, 0.0 is returned due to base_score estimation with 0 gradient. np.testing.assert_allclose(predictions, 0.0, 1e-6) @pytest.mark.mgpu @given(tm.make_dataset_strategy(), strategies.integers(0, 10)) @settings(deadline=None, max_examples=10, print_blob=True) def test_specified_gpu_id_gpu_update(self, dataset, gpu_id): param = {'tree_method': 'gpu_hist', 'gpu_id': gpu_id} param = dataset.set_params(param) result = train_result(param, dataset.get_dmat(), 10) assert tm.non_increasing(result['train'][dataset.metric]) @pytest.mark.skipif(**tm.no_sklearn()) @pytest.mark.parametrize("weighted", [True, False]) def test_adaptive(self, weighted) -> None: self.cputest.run_adaptive("gpu_hist", weighted) def test_init_estimation(self) -> None: check_init_estimation("gpu_hist") @pytest.mark.parametrize("weighted", [True, False]) def test_quantile_loss(self, weighted: bool) -> None: check_quantile_loss("gpu_hist", weighted) @pytest.mark.skipif(**tm.no_pandas()) def test_issue8824(self): # column sampling by node crashes because shared pointers go out of scope import pandas as pd data = pd.DataFrame(np.random.rand(1024, 8)) data.columns = "x" + data.columns.astype(str) features = data.columns data["y"] = data.sum(axis=1) < 4 dtrain = xgb.DMatrix(data[features], label=data["y"]) model = xgb.train( dtrain=dtrain, params={ "max_depth": 5, "learning_rate": 0.05, "objective": "binary:logistic", "tree_method": "gpu_hist", "colsample_bytree": 0.5, "colsample_bylevel": 0.5, "colsample_bynode": 0.5, # Causes issues "reg_alpha": 0.05, "reg_lambda": 0.005, "seed": 66, "subsample": 0.5, "gamma": 0.2, "eval_metric": "auc", }, num_boost_round=150, ) @pytest.mark.skipif(**tm.no_cudf()) def test_get_quantile_cut(self) -> None: check_get_quantile_cut("gpu_hist")
9,527
33.647273
88
py
xgboost
xgboost-master/tests/python-gpu/test_from_cupy.py
import json import sys import numpy as np import pytest import xgboost as xgb sys.path.append("tests/python") from test_dmatrix import set_base_margin_info from xgboost import testing as tm cupy = pytest.importorskip("cupy") def test_array_interface() -> None: arr = cupy.array([[1, 2, 3, 4], [1, 2, 3, 4]]) i_arr = arr.__cuda_array_interface__ i_arr = json.loads(json.dumps(i_arr)) ret = xgb.core.from_array_interface(i_arr) np.testing.assert_equal(cupy.asnumpy(arr), cupy.asnumpy(ret)) def dmatrix_from_cupy(input_type, DMatrixT, missing=np.NAN): '''Test constructing DMatrix from cupy''' import cupy as cp kRows = 80 kCols = 3 np_X = np.random.randn(kRows, kCols).astype(dtype=input_type) X = cp.array(np_X) X[5, 0] = missing X[3, 1] = missing y = cp.random.randn(kRows).astype(dtype=input_type) dtrain = DMatrixT(X, missing=missing, label=y) assert dtrain.num_col() == kCols assert dtrain.num_row() == kRows if DMatrixT is xgb.QuantileDMatrix: # Slice is not supported by QuantileDMatrix with pytest.raises(xgb.core.XGBoostError): dtrain.slice(rindex=[0, 1, 2]) dtrain.slice(rindex=[0, 1, 2]) else: dtrain.slice(rindex=[0, 1, 2]) dtrain.slice(rindex=[0, 1, 2]) return dtrain def _test_from_cupy(DMatrixT): '''Test constructing DMatrix from cupy''' import cupy as cp dmatrix_from_cupy(np.float16, DMatrixT, np.NAN) dmatrix_from_cupy(np.float32, DMatrixT, np.NAN) dmatrix_from_cupy(np.float64, DMatrixT, np.NAN) dmatrix_from_cupy(np.uint8, DMatrixT, 2) dmatrix_from_cupy(np.uint32, DMatrixT, 3) dmatrix_from_cupy(np.uint64, DMatrixT, 4) dmatrix_from_cupy(np.int8, DMatrixT, 2) dmatrix_from_cupy(np.int32, DMatrixT, -2) dmatrix_from_cupy(np.int64, DMatrixT, -3) with pytest.raises(ValueError): X = cp.random.randn(2, 2, dtype="float32") y = cp.random.randn(2, 2, 3, dtype="float32") DMatrixT(X, label=y) def _test_cupy_training(DMatrixT): import cupy as cp np.random.seed(1) cp.random.seed(1) X = cp.random.randn(50, 10, dtype="float32") y = cp.random.randn(50, dtype="float32") weights = 
np.random.random(50) + 1 cupy_weights = cp.array(weights) base_margin = np.random.random(50) cupy_base_margin = cp.array(base_margin) evals_result_cupy = {} dtrain_cp = DMatrixT(X, y, weight=cupy_weights, base_margin=cupy_base_margin) params = {'gpu_id': 0, 'nthread': 1, 'tree_method': 'gpu_hist'} xgb.train(params, dtrain_cp, evals=[(dtrain_cp, "train")], evals_result=evals_result_cupy) evals_result_np = {} dtrain_np = xgb.DMatrix(cp.asnumpy(X), cp.asnumpy(y), weight=weights, base_margin=base_margin) xgb.train(params, dtrain_np, evals=[(dtrain_np, "train")], evals_result=evals_result_np) assert np.array_equal(evals_result_cupy["train"]["rmse"], evals_result_np["train"]["rmse"]) def _test_cupy_metainfo(DMatrixT): import cupy as cp n = 100 X = np.random.random((n, 2)) dmat_cupy = DMatrixT(cp.array(X)) dmat = xgb.DMatrix(X) floats = np.random.random(n) uints = np.array([4, 2, 8]).astype("uint32") cupy_floats = cp.array(floats) cupy_uints = cp.array(uints) dmat.set_float_info('weight', floats) dmat.set_float_info('label', floats) dmat.set_float_info('base_margin', floats) dmat.set_uint_info('group', uints) dmat_cupy.set_info(weight=cupy_floats) dmat_cupy.set_info(label=cupy_floats) dmat_cupy.set_info(base_margin=cupy_floats) dmat_cupy.set_info(group=cupy_uints) # Test setting info with cupy assert np.array_equal(dmat.get_float_info('weight'), dmat_cupy.get_float_info('weight')) assert np.array_equal(dmat.get_float_info('label'), dmat_cupy.get_float_info('label')) assert np.array_equal(dmat.get_float_info('base_margin'), dmat_cupy.get_float_info('base_margin')) assert np.array_equal(dmat.get_uint_info('group_ptr'), dmat_cupy.get_uint_info('group_ptr')) set_base_margin_info(cp.asarray, DMatrixT, "gpu_hist") @pytest.mark.skipif(**tm.no_cupy()) @pytest.mark.skipif(**tm.no_sklearn()) def test_cupy_training_with_sklearn(): import cupy as cp np.random.seed(1) cp.random.seed(1) X = cp.random.randn(50, 10, dtype="float32") y = (cp.random.randn(50, dtype="float32") > 
0).astype("int8") weights = np.random.random(50) + 1 cupy_weights = cp.array(weights) base_margin = np.random.random(50) cupy_base_margin = cp.array(base_margin) clf = xgb.XGBClassifier(gpu_id=0, tree_method="gpu_hist") clf.fit( X, y, sample_weight=cupy_weights, base_margin=cupy_base_margin, eval_set=[(X, y)], ) pred = clf.predict(X) assert np.array_equal(np.unique(pred), np.array([0, 1])) class TestFromCupy: '''Tests for constructing DMatrix from data structure conforming Apache Arrow specification.''' @pytest.mark.skipif(**tm.no_cupy()) def test_simple_dmat_from_cupy(self): _test_from_cupy(xgb.DMatrix) @pytest.mark.skipif(**tm.no_cupy()) def test_device_dmat_from_cupy(self): _test_from_cupy(xgb.QuantileDMatrix) @pytest.mark.skipif(**tm.no_cupy()) def test_cupy_training_device_dmat(self): _test_cupy_training(xgb.QuantileDMatrix) @pytest.mark.skipif(**tm.no_cupy()) def test_cupy_training_simple_dmat(self): _test_cupy_training(xgb.DMatrix) @pytest.mark.skipif(**tm.no_cupy()) def test_cupy_metainfo_simple_dmat(self): _test_cupy_metainfo(xgb.DMatrix) @pytest.mark.skipif(**tm.no_cupy()) def test_cupy_metainfo_device_dmat(self): _test_cupy_metainfo(xgb.QuantileDMatrix) @pytest.mark.skipif(**tm.no_cupy()) def test_dlpack_simple_dmat(self): import cupy as cp n = 100 X = cp.random.random((n, 2)) xgb.DMatrix(X.toDlpack()) @pytest.mark.skipif(**tm.no_cupy()) def test_cupy_categorical(self): import cupy as cp n_features = 10 X, y = tm.make_categorical(10, n_features, n_categories=4, onehot=False) X = cp.asarray(X.values.astype(cp.float32)) y = cp.array(y) feature_types = ['c'] * n_features assert isinstance(X, cp.ndarray) Xy = xgb.DMatrix(X, y, feature_types=feature_types) np.testing.assert_equal(np.array(Xy.feature_types), np.array(feature_types)) @pytest.mark.skipif(**tm.no_cupy()) def test_dlpack_device_dmat(self): import cupy as cp n = 100 X = cp.random.random((n, 2)) m = xgb.QuantileDMatrix(X.toDlpack()) with pytest.raises(xgb.core.XGBoostError): m.slice(rindex=[0, 1, 
2]) @pytest.mark.skipif(**tm.no_cupy()) def test_qid(self): import cupy as cp rng = cp.random.RandomState(1994) rows = 100 cols = 10 X, y = rng.randn(rows, cols), rng.randn(rows) qid = rng.randint(low=0, high=10, size=rows, dtype=np.uint32) qid = cp.sort(qid) Xy = xgb.DMatrix(X, y) Xy.set_info(qid=qid) group_ptr = Xy.get_uint_info('group_ptr') assert group_ptr[0] == 0 assert group_ptr[-1] == rows @pytest.mark.skipif(**tm.no_cupy()) @pytest.mark.mgpu def test_specified_device(self): import cupy as cp cp.cuda.runtime.setDevice(0) dtrain = dmatrix_from_cupy(np.float32, xgb.QuantileDMatrix, np.nan) with pytest.raises( xgb.core.XGBoostError, match="Data is resided on a different device" ): xgb.train( {'tree_method': 'gpu_hist', 'gpu_id': 1}, dtrain, num_boost_round=10 )
7,908
31.681818
95
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_with_sklearn.py
import json import os import sys import tempfile import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm from xgboost.testing.ranking import run_ranking_qid_df sys.path.append("tests/python") import test_with_sklearn as twskl # noqa pytestmark = pytest.mark.skipif(**tm.no_sklearn()) rng = np.random.RandomState(1994) def test_gpu_binary_classification(): from sklearn.datasets import load_digits from sklearn.model_selection import KFold digits = load_digits(n_class=2) y = digits['target'] X = digits['data'] kf = KFold(n_splits=2, shuffle=True, random_state=rng) for cls in (xgb.XGBClassifier, xgb.XGBRFClassifier): for train_index, test_index in kf.split(X, y): xgb_model = cls( random_state=42, tree_method='gpu_hist', n_estimators=4, gpu_id='0').fit(X[train_index], y[train_index]) preds = xgb_model.predict(X[test_index]) labels = y[test_index] err = sum(1 for i in range(len(preds)) if int(preds[i] > 0.5) != labels[i]) / float(len(preds)) assert err < 0.1 @pytest.mark.skipif(**tm.no_cupy()) @pytest.mark.skipif(**tm.no_cudf()) def test_boost_from_prediction_gpu_hist(): import cudf import cupy as cp from sklearn.datasets import load_breast_cancer, load_digits tree_method = "gpu_hist" X, y = load_breast_cancer(return_X_y=True) X, y = cp.array(X), cp.array(y) twskl.run_boost_from_prediction_binary(tree_method, X, y, None) twskl.run_boost_from_prediction_binary(tree_method, X, y, cudf.DataFrame) X, y = load_digits(return_X_y=True) X, y = cp.array(X), cp.array(y) twskl.run_boost_from_prediction_multi_clasas( xgb.XGBClassifier, tree_method, X, y, None ) twskl.run_boost_from_prediction_multi_clasas( xgb.XGBClassifier, tree_method, X, y, cudf.DataFrame ) def test_num_parallel_tree(): twskl.run_housing_rf_regression("gpu_hist") @pytest.mark.skipif(**tm.no_pandas()) @pytest.mark.skipif(**tm.no_cudf()) @pytest.mark.skipif(**tm.no_sklearn()) def test_categorical(): import cudf import cupy as cp import pandas as pd from sklearn.datasets import 
load_svmlight_file data_dir = tm.data_dir(__file__) X, y = load_svmlight_file(os.path.join(data_dir, "agaricus.txt.train")) clf = xgb.XGBClassifier( tree_method="gpu_hist", enable_categorical=True, n_estimators=10, ) X = pd.DataFrame(X.todense()).astype("category") clf.fit(X, y) with tempfile.TemporaryDirectory() as tempdir: model = os.path.join(tempdir, "categorial.json") clf.save_model(model) with open(model) as fd: categorical = json.load(fd) categories_sizes = np.array( categorical["learner"]["gradient_booster"]["model"]["trees"][0][ "categories_sizes" ] ) assert categories_sizes.shape[0] != 0 np.testing.assert_allclose(categories_sizes, 1) def check_predt(X, y): reg = xgb.XGBRegressor( tree_method="gpu_hist", enable_categorical=True, n_estimators=64 ) reg.fit(X, y) predts = reg.predict(X) booster = reg.get_booster() assert "c" in booster.feature_types assert len(booster.feature_types) == 1 inp_predts = booster.inplace_predict(X) if isinstance(inp_predts, cp.ndarray): inp_predts = cp.asnumpy(inp_predts) np.testing.assert_allclose(predts, inp_predts) y = [1, 2, 3] X = pd.DataFrame({"f0": ["a", "b", "c"]}) X["f0"] = X["f0"].astype("category") check_predt(X, y) X = cudf.DataFrame(X) check_predt(X, y) @pytest.mark.skipif(**tm.no_cupy()) @pytest.mark.skipif(**tm.no_cudf()) def test_classififer(): import cudf import cupy as cp from sklearn.datasets import load_digits X, y = load_digits(return_X_y=True) y *= 10 clf = xgb.XGBClassifier(tree_method="gpu_hist", n_estimators=1) # numpy with pytest.raises(ValueError, match=r"Invalid classes.*"): clf.fit(X, y) # cupy X, y = cp.array(X), cp.array(y) with pytest.raises(ValueError, match=r"Invalid classes.*"): clf.fit(X, y) # cudf X, y = cudf.DataFrame(X), cudf.DataFrame(y) with pytest.raises(ValueError, match=r"Invalid classes.*"): clf.fit(X, y) # pandas X, y = load_digits(return_X_y=True, as_frame=True) y *= 10 with pytest.raises(ValueError, match=r"Invalid classes.*"): clf.fit(X, y) @pytest.mark.skipif(**tm.no_pandas()) def 
test_ranking_qid_df(): import cudf run_ranking_qid_df(cudf, "gpu_hist")
4,774
28.115854
80
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_basic_models.py
import os import sys import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm sys.path.append("tests/python") import test_basic_models as test_bm # Don't import the test class, otherwise they will run twice. import test_callback as test_cb # noqa rng = np.random.RandomState(1994) class TestGPUBasicModels: cpu_test_cb = test_cb.TestCallbacks() cpu_test_bm = test_bm.TestModels() def run_cls(self, X, y): cls = xgb.XGBClassifier(tree_method='gpu_hist') cls.fit(X, y) cls.get_booster().save_model('test_deterministic_gpu_hist-0.json') cls = xgb.XGBClassifier(tree_method='gpu_hist') cls.fit(X, y) cls.get_booster().save_model('test_deterministic_gpu_hist-1.json') with open('test_deterministic_gpu_hist-0.json', 'r') as fd: model_0 = fd.read() with open('test_deterministic_gpu_hist-1.json', 'r') as fd: model_1 = fd.read() os.remove('test_deterministic_gpu_hist-0.json') os.remove('test_deterministic_gpu_hist-1.json') return hash(model_0), hash(model_1) def test_custom_objective(self): self.cpu_test_bm.run_custom_objective("gpu_hist") def test_eta_decay(self): self.cpu_test_cb.run_eta_decay('gpu_hist') @pytest.mark.parametrize( "objective", ["binary:logistic", "reg:absoluteerror", "reg:quantileerror"] ) def test_eta_decay_leaf_output(self, objective) -> None: self.cpu_test_cb.run_eta_decay_leaf_output("gpu_hist", objective) def test_deterministic_gpu_hist(self): kRows = 1000 kCols = 64 kClasses = 4 # Create large values to force rounding. 
X = np.random.randn(kRows, kCols) * 1e4 y = np.random.randint(0, kClasses, size=kRows) model_0, model_1 = self.run_cls(X, y) assert model_0 == model_1 @pytest.mark.skipif(**tm.no_sklearn()) def test_invalid_gpu_id(self): from sklearn.datasets import load_digits X, y = load_digits(return_X_y=True) # should pass with invalid gpu id cls1 = xgb.XGBClassifier(tree_method="gpu_hist", gpu_id=9999) cls1.fit(X, y) # should throw error with fail_on_invalid_gpu_id enabled cls2 = xgb.XGBClassifier( tree_method="gpu_hist", gpu_id=9999, fail_on_invalid_gpu_id=True ) with pytest.raises(ValueError, match="ordinal 9999 is invalid"): cls2.fit(X, y) cls2 = xgb.XGBClassifier( tree_method="hist", device="cuda:9999", fail_on_invalid_gpu_id=True ) with pytest.raises(ValueError, match="ordinal 9999 is invalid"): cls2.fit(X, y)
2,714
30.941176
82
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_training_continuation.py
import json import numpy as np import xgboost as xgb rng = np.random.RandomState(1994) class TestGPUTrainingContinuation: def test_training_continuation(self): kRows = 64 kCols = 32 X = np.random.randn(kRows, kCols) y = np.random.randn(kRows) dtrain = xgb.DMatrix(X, y) params = {'tree_method': 'gpu_hist', 'max_depth': '2', 'gamma': '0.1', 'alpha': '0.01'} bst_0 = xgb.train(params, dtrain, num_boost_round=64) dump_0 = bst_0.get_dump(dump_format='json') bst_1 = xgb.train(params, dtrain, num_boost_round=32) bst_1 = xgb.train(params, dtrain, num_boost_round=32, xgb_model=bst_1) dump_1 = bst_1.get_dump(dump_format='json') def recursive_compare(obj_0, obj_1): if isinstance(obj_0, float): assert np.isclose(obj_0, obj_1, atol=1e-6) elif isinstance(obj_0, str): assert obj_0 == obj_1 elif isinstance(obj_0, int): assert obj_0 == obj_1 elif isinstance(obj_0, dict): keys_0 = list(obj_0.keys()) keys_1 = list(obj_1.keys()) values_0 = list(obj_0.values()) values_1 = list(obj_1.values()) for i in range(len(obj_0.items())): assert keys_0[i] == keys_1[i] if list(obj_0.keys())[i] != 'missing': recursive_compare(values_0[i], values_1[i]) else: for i in range(len(obj_0)): recursive_compare(obj_0[i], obj_1[i]) assert len(dump_0) == len(dump_1) for i in range(len(dump_0)): obj_0 = json.loads(dump_0[i]) obj_1 = json.loads(dump_1[i]) recursive_compare(obj_0, obj_1)
1,870
34.980769
78
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_eval_metrics.py
import json import sys import pytest import xgboost from xgboost import testing as tm from xgboost.testing.metrics import check_precision_score, check_quantile_error sys.path.append("tests/python") import test_eval_metrics as test_em # noqa class TestGPUEvalMetrics: cpu_test = test_em.TestEvalMetrics() @pytest.mark.parametrize("n_samples", [4, 100, 1000]) def test_roc_auc_binary(self, n_samples): self.cpu_test.run_roc_auc_binary("gpu_hist", n_samples) @pytest.mark.parametrize( "n_samples,weighted", [(4, False), (100, False), (1000, False), (1000, True)] ) def test_roc_auc_multi(self, n_samples, weighted): self.cpu_test.run_roc_auc_multi("gpu_hist", n_samples, weighted) @pytest.mark.parametrize("n_samples", [4, 100, 1000]) def test_roc_auc_ltr(self, n_samples): import numpy as np rng = np.random.RandomState(1994) n_samples = n_samples n_features = 10 X = rng.randn(n_samples, n_features) y = rng.randint(0, 16, size=n_samples) group = np.array([n_samples // 2, n_samples // 2]) Xy = xgboost.DMatrix(X, y, group=group) booster = xgboost.train( {"tree_method": "hist", "eval_metric": "auc", "objective": "rank:ndcg"}, Xy, num_boost_round=10, ) cpu_auc = float(booster.eval(Xy).split(":")[1]) booster.set_param({"device": "cuda:0"}) assert ( json.loads(booster.save_config())["learner"]["generic_param"]["device"] == "cuda:0" ) gpu_auc = float(booster.eval(Xy).split(":")[1]) assert ( json.loads(booster.save_config())["learner"]["generic_param"]["device"] == "cuda:0" ) np.testing.assert_allclose(cpu_auc, gpu_auc) def test_pr_auc_binary(self): self.cpu_test.run_pr_auc_binary("gpu_hist") def test_pr_auc_multi(self): self.cpu_test.run_pr_auc_multi("gpu_hist") def test_pr_auc_ltr(self): self.cpu_test.run_pr_auc_ltr("gpu_hist") def test_precision_score(self): check_precision_score("gpu_hist") @pytest.mark.skipif(**tm.no_sklearn()) def test_quantile_error(self) -> None: check_quantile_error("gpu_hist")
2,281
29.837838
85
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_demos.py
import os import subprocess import sys import pytest from xgboost import testing as tm sys.path.append("tests/python") import test_demos as td # noqa @pytest.mark.skipif(**tm.no_cupy()) def test_data_iterator(): script = os.path.join(td.PYTHON_DEMO_DIR, 'quantile_data_iterator.py') cmd = ['python', script] subprocess.check_call(cmd) def test_update_process_demo(): script = os.path.join(td.PYTHON_DEMO_DIR, 'update_process.py') cmd = ['python', script] subprocess.check_call(cmd) def test_categorical_demo(): script = os.path.join(td.PYTHON_DEMO_DIR, 'categorical.py') cmd = ['python', script] subprocess.check_call(cmd)
669
21.333333
74
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_plotting.py
import sys import pytest from xgboost import testing as tm sys.path.append("tests/python") import test_plotting as tp pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(), tm.no_graphviz())) class TestPlotting: cputest = tp.TestPlotting() @pytest.mark.skipif(**tm.no_pandas()) def test_categorical(self): self.cputest.run_categorical("gpu_hist")
388
19.473684
87
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_pickling.py
"""Test model IO with pickle.""" import os import pickle import subprocess import numpy as np import pytest import xgboost as xgb from xgboost import XGBClassifier from xgboost import testing as tm model_path = "./model.pkl" pytestmark = tm.timeout(30) def build_dataset(): N = 10 x = np.linspace(0, N * N, N * N) x = x.reshape((N, N)) y = np.linspace(0, N, N) return x, y def save_pickle(bst, path): with open(path, "wb") as fd: pickle.dump(bst, fd) def load_pickle(path): with open(path, "rb") as fd: bst = pickle.load(fd) return bst class TestPickling: args_template = ["pytest", "--verbose", "-s", "--fulltrace"] def run_pickling(self, bst) -> None: save_pickle(bst, model_path) args = [ "pytest", "--verbose", "-s", "--fulltrace", "./tests/python-gpu/load_pickle.py::TestLoadPickle::test_load_pkl", ] command = "" for arg in args: command += arg command += " " cuda_environment = {"CUDA_VISIBLE_DEVICES": "-1"} env = os.environ.copy() # Passing new_environment directly to `env' argument results # in failure on Windows: # Fatal Python error: _Py_HashRandomization_Init: failed to # get random numbers to initialize Python env.update(cuda_environment) # Load model in a CPU only environment. 
status = subprocess.call(command, env=env, shell=True) assert status == 0 os.remove(model_path) # TODO: This test is too slow @pytest.mark.skipif(**tm.no_sklearn()) def test_pickling(self): x, y = build_dataset() train_x = xgb.DMatrix(x, label=y) param = {"tree_method": "gpu_hist", "gpu_id": 0} bst = xgb.train(param, train_x) self.run_pickling(bst) bst = xgb.XGBRegressor(**param).fit(x, y) self.run_pickling(bst) param = {"booster": "gblinear", "updater": "gpu_coord_descent", "gpu_id": 0} bst = xgb.train(param, train_x) self.run_pickling(bst) bst = xgb.XGBRegressor(**param).fit(x, y) self.run_pickling(bst) @pytest.mark.mgpu def test_wrap_gpu_id(self): X, y = build_dataset() dtrain = xgb.DMatrix(X, y) bst = xgb.train( {"tree_method": "gpu_hist", "gpu_id": 1}, dtrain, num_boost_round=6 ) model_path = "model.pkl" save_pickle(bst, model_path) cuda_environment = {"CUDA_VISIBLE_DEVICES": "0"} env = os.environ.copy() env.update(cuda_environment) args = self.args_template.copy() args.append( "./tests/python-gpu/" "load_pickle.py::TestLoadPickle::test_wrap_gpu_id" ) status = subprocess.call(args, env=env) assert status == 0 os.remove(model_path) def test_pickled_context(self): x, y = tm.make_sparse_regression(10, 10, sparsity=0.8, as_dense=True) train_x = xgb.DMatrix(x, label=y) param = {"tree_method": "gpu_hist", "verbosity": 1} bst = xgb.train(param, train_x) save_pickle(bst, model_path) args = self.args_template.copy() root = tm.project_root(__file__) path = os.path.join(root, "tests", "python-gpu", "load_pickle.py") args.append(path + "::TestLoadPickle::test_context_is_removed") cuda_environment = {"CUDA_VISIBLE_DEVICES": "-1"} env = os.environ.copy() env.update(cuda_environment) # Load model in a CPU only environment. status = subprocess.call(args, env=env) assert status == 0 args = self.args_template.copy() args.append( "./tests/python-gpu/" "load_pickle.py::TestLoadPickle::test_context_is_preserved" ) # Load in environment that has GPU. 
env = os.environ.copy() assert "CUDA_VISIBLE_DEVICES" not in env.keys() status = subprocess.call(args, env=env) assert status == 0 os.remove(model_path) @pytest.mark.skipif(**tm.no_sklearn()) def test_predict_sklearn_pickle(self) -> None: from sklearn.datasets import load_digits x, y = load_digits(return_X_y=True) kwargs = { "tree_method": "gpu_hist", "objective": "binary:logistic", "gpu_id": 0, "n_estimators": 10, } model = XGBClassifier(**kwargs) model.fit(x, y) save_pickle(model, "model.pkl") del model # load model model = load_pickle("model.pkl") os.remove("model.pkl") gpu_pred = model.predict(x, output_margin=True) # Switch to CPU predictor bst = model.get_booster() bst.set_param({"device": "cpu"}) cpu_pred = model.predict(x, output_margin=True) np.testing.assert_allclose(cpu_pred, gpu_pred, rtol=1e-5) def test_training_on_cpu_only_env(self): cuda_environment = {"CUDA_VISIBLE_DEVICES": "-1"} env = os.environ.copy() env.update(cuda_environment) args = self.args_template.copy() args.append( "./tests/python-gpu/" "load_pickle.py::TestLoadPickle::test_training_on_cpu_only_env" ) status = subprocess.call(args, env=env) assert status == 0
5,400
27.882353
84
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_ranking.py
import os from typing import Dict import numpy as np import pytest import xgboost from xgboost import testing as tm pytestmark = tm.timeout(30) def comp_training_with_rank_objective( dtrain: xgboost.DMatrix, dtest: xgboost.DMatrix, rank_objective: str, metric_name: str, tolerance: float = 1e-02, ) -> None: """Internal method that trains the dataset using the rank objective on GPU and CPU, evaluates the metric and determines if the delta between the metric is within the tolerance level. """ # specify validations set to watch performance watchlist = [(dtest, "eval"), (dtrain, "train")] params = { "booster": "gbtree", "tree_method": "gpu_hist", "gpu_id": 0, } num_trees = 100 check_metric_improvement_rounds = 10 evals_result: Dict[str, Dict] = {} params["objective"] = rank_objective params["eval_metric"] = metric_name bst = xgboost.train( params, dtrain, num_boost_round=num_trees, early_stopping_rounds=check_metric_improvement_rounds, evals=watchlist, evals_result=evals_result, ) gpu_scores = evals_result["train"][metric_name][-1] evals_result = {} cpu_params = { "booster": "gbtree", "tree_method": "hist", "gpu_id": -1, } cpu_params["objective"] = rank_objective cpu_params["eval_metric"] = metric_name bstc = xgboost.train( cpu_params, dtrain, num_boost_round=num_trees, early_stopping_rounds=check_metric_improvement_rounds, evals=watchlist, evals_result=evals_result, ) cpu_scores = evals_result["train"][metric_name][-1] info = (rank_objective, metric_name) assert np.allclose(gpu_scores, cpu_scores, tolerance, tolerance), info assert np.allclose(bst.best_score, bstc.best_score, tolerance, tolerance), info evals_result_weighted: Dict[str, Dict] = {} dtest.set_weight(np.ones((dtest.get_group().size,))) dtrain.set_weight(np.ones((dtrain.get_group().size,))) watchlist = [(dtest, "eval"), (dtrain, "train")] bst_w = xgboost.train( params, dtrain, num_boost_round=num_trees, early_stopping_rounds=check_metric_improvement_rounds, evals=watchlist, 
evals_result=evals_result_weighted, ) weighted_metric = evals_result_weighted["train"][metric_name][-1] tolerance = 1e-5 assert np.allclose(bst_w.best_score, bst.best_score, tolerance, tolerance) assert np.allclose(weighted_metric, gpu_scores, tolerance, tolerance) @pytest.mark.parametrize( "objective,metric", [ ("rank:pairwise", "auc"), ("rank:pairwise", "ndcg"), ("rank:pairwise", "map"), ("rank:ndcg", "auc"), ("rank:ndcg", "ndcg"), ("rank:ndcg", "map"), ("rank:map", "auc"), ("rank:map", "ndcg"), ("rank:map", "map"), ], ) def test_with_mq2008(objective, metric) -> None: ( x_train, y_train, qid_train, x_test, y_test, qid_test, x_valid, y_valid, qid_valid, ) = tm.data.get_mq2008(os.path.join(os.path.join(tm.demo_dir(__file__), "rank"))) if metric.find("map") != -1 or objective.find("map") != -1: y_train[y_train <= 1] = 0.0 y_train[y_train > 1] = 1.0 y_test[y_test <= 1] = 0.0 y_test[y_test > 1] = 1.0 dtrain = xgboost.DMatrix(x_train, y_train, qid=qid_train) dtest = xgboost.DMatrix(x_test, y_test, qid=qid_test) comp_training_with_rank_objective(dtrain, dtest, objective, metric)
3,649
27.294574
87
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_prediction.py
import sys from copy import copy import numpy as np import pytest from hypothesis import assume, given, settings, strategies import xgboost as xgb from xgboost import testing as tm from xgboost.compat import PANDAS_INSTALLED if PANDAS_INSTALLED: from hypothesis.extra.pandas import column, data_frames, range_indexes else: def noop(*args, **kwargs): pass column, data_frames, range_indexes = noop, noop, noop sys.path.append("tests/python") from test_predict import run_predict_leaf # noqa from test_predict import run_threaded_predict # noqa rng = np.random.RandomState(1994) shap_parameter_strategy = strategies.fixed_dictionaries( { "max_depth": strategies.integers(1, 11), "max_leaves": strategies.integers(0, 256), "num_parallel_tree": strategies.sampled_from([1, 10]), } ).filter(lambda x: x["max_depth"] > 0 or x["max_leaves"] > 0) predict_parameter_strategy = strategies.fixed_dictionaries( { "max_depth": strategies.integers(1, 8), "num_parallel_tree": strategies.sampled_from([1, 4]), } ) # cupy nvrtc compilation can take a long time for the first run pytestmark = tm.timeout(30) class TestGPUPredict: def test_predict(self): iterations = 10 np.random.seed(1) test_num_rows = [10, 1000, 5000] test_num_cols = [10, 50, 500] # This test passes for tree_method=gpu_hist and tree_method=exact. but # for `hist` and `approx` the floating point error accumulates faster # and fails even tol is set to 1e-4. For `hist`, the mismatching rate # with 5000 rows is 0.04. 
for num_rows in test_num_rows: for num_cols in test_num_cols: dtrain = xgb.DMatrix( np.random.randn(num_rows, num_cols), label=[0, 1] * int(num_rows / 2), ) dval = xgb.DMatrix( np.random.randn(num_rows, num_cols), label=[0, 1] * int(num_rows / 2), ) dtest = xgb.DMatrix( np.random.randn(num_rows, num_cols), label=[0, 1] * int(num_rows / 2), ) watchlist = [(dtrain, "train"), (dval, "validation")] res = {} param = { "objective": "binary:logistic", "eval_metric": "logloss", "tree_method": "hist", "device": "gpu:0", "max_depth": 1, } bst = xgb.train( param, dtrain, iterations, evals=watchlist, evals_result=res ) assert tm.non_increasing(res["train"]["logloss"], tolerance=0.001) gpu_pred_train = bst.predict(dtrain, output_margin=True) gpu_pred_test = bst.predict(dtest, output_margin=True) gpu_pred_val = bst.predict(dval, output_margin=True) bst.set_param({"device": "cpu", "tree_method": "hist"}) bst_cpu = copy(bst) cpu_pred_train = bst_cpu.predict(dtrain, output_margin=True) cpu_pred_test = bst_cpu.predict(dtest, output_margin=True) cpu_pred_val = bst_cpu.predict(dval, output_margin=True) np.testing.assert_allclose(cpu_pred_train, gpu_pred_train, rtol=1e-6) np.testing.assert_allclose(cpu_pred_val, gpu_pred_val, rtol=1e-6) np.testing.assert_allclose(cpu_pred_test, gpu_pred_test, rtol=1e-6) # Test case for a bug where multiple batch predictions made on a # test set produce incorrect results @pytest.mark.skipif(**tm.no_sklearn()) def test_multi_predict(self): from sklearn.datasets import make_regression from sklearn.model_selection import train_test_split n = 1000 X, y = make_regression(n, random_state=rng) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123) dtrain = xgb.DMatrix(X_train, label=y_train) params = {} params["tree_method"] = "hist" params["device"] = "cuda:0" bst = xgb.train(params, dtrain) bst.set_param({"device": "cuda:0"}) # Don't reuse the DMatrix for prediction, otherwise the result is cached. 
predict_gpu_0 = bst.predict(xgb.DMatrix(X_test)) predict_gpu_1 = bst.predict(xgb.DMatrix(X_test)) bst.set_param({"device": "cpu"}) predict_cpu = bst.predict(xgb.DMatrix(X_test)) assert np.allclose(predict_gpu_0, predict_gpu_1) assert np.allclose(predict_gpu_0, predict_cpu) @pytest.mark.skipif(**tm.no_sklearn()) def test_sklearn(self): m, n = 15000, 14 tr_size = 2500 X = np.random.rand(m, n) y = 200 * np.matmul(X, np.arange(-3, -3 + n)) y = y.reshape(y.size) X_train, y_train = X[:tr_size, :], y[:tr_size] X_test, y_test = X[tr_size:, :], y[tr_size:] params = { "tree_method": "hist", "device": "cuda:0", "n_jobs": -1, "seed": 123, } m = xgb.XGBRegressor(**params).fit(X_train, y_train) gpu_train_score = m.score(X_train, y_train) gpu_test_score = m.score(X_test, y_test) # Now with cpu m.set_params(device="cpu") cpu_train_score = m.score(X_train, y_train) cpu_test_score = m.score(X_test, y_test) assert np.allclose(cpu_train_score, gpu_train_score) assert np.allclose(cpu_test_score, gpu_test_score) @pytest.mark.parametrize("device", ["cpu", "cuda"]) @pytest.mark.skipif(**tm.no_cupy()) def test_inplace_predict_device_type(self, device: str) -> None: """Test inplace predict with different device and data types. The sklearn interface uses inplace predict by default and gbtree fallbacks to DMatrix whenever device doesn't match. This test checks that XGBoost can handle different combinations of device and input data type. 
""" import cudf import cupy as cp import pandas as pd from scipy.sparse import csr_matrix reg = xgb.XGBRegressor(tree_method="hist", device=device) n_samples = 4096 n_features = 13 X, y, w = tm.make_regression(n_samples, n_features, use_cupy=True) X[X == 0.0] = 1.0 reg.fit(X, y, sample_weight=w) predt_0 = reg.predict(X) X = cp.asnumpy(X) predt_1 = reg.predict(X) df = pd.DataFrame(X) predt_2 = reg.predict(df) df = cudf.DataFrame(X) predt_3 = reg.predict(df) X_csr = csr_matrix(X) predt_4 = reg.predict(X_csr) np.testing.assert_allclose(predt_0, predt_1) np.testing.assert_allclose(predt_0, predt_2) np.testing.assert_allclose(predt_0, predt_3) np.testing.assert_allclose(predt_0, predt_4) def run_inplace_base_margin(self, booster, dtrain, X, base_margin): import cupy as cp dtrain.set_info(base_margin=base_margin) from_inplace = booster.inplace_predict(data=X, base_margin=base_margin) from_dmatrix = booster.predict(dtrain) cp.testing.assert_allclose(from_inplace, from_dmatrix) def run_inplace_predict_cupy(self, device: int) -> None: import cupy as cp cp.cuda.runtime.setDevice(device) rows = 1000 cols = 10 missing = 11 # set to integer for testing cp_rng = cp.random.RandomState(1994) cp.random.set_random_state(cp_rng) X = cp.random.randn(rows, cols) missing_idx = [i for i in range(0, cols, 4)] X[:, missing_idx] = missing # set to be missing y = cp.random.randn(rows) dtrain = xgb.DMatrix(X, y) booster = xgb.train( {"tree_method": "hist", "device": f"cuda:{device}"}, dtrain, num_boost_round=10, ) test = xgb.DMatrix(X[:10, ...], missing=missing) predt_from_array = booster.inplace_predict(X[:10, ...], missing=missing) predt_from_dmatrix = booster.predict(test) cp.testing.assert_allclose(predt_from_array, predt_from_dmatrix) def predict_dense(x): cp.cuda.runtime.setDevice(device) inplace_predt = booster.inplace_predict(x) d = xgb.DMatrix(x) copied_predt = cp.array(booster.predict(d)) return cp.all(copied_predt == inplace_predt) # Don't do this on Windows, see issue #5793 if 
sys.platform.startswith("win"): pytest.skip( "Multi-threaded in-place prediction with cuPy is not working on Windows" ) for i in range(10): run_threaded_predict(X, rows, predict_dense) base_margin = cp_rng.randn(rows) self.run_inplace_base_margin(booster, dtrain, X, base_margin) # Create a wide dataset X = cp_rng.randn(100, 10000) y = cp_rng.randn(100) missing_idx = [i for i in range(0, X.shape[1], 16)] X[:, missing_idx] = missing reg = xgb.XGBRegressor( tree_method="hist", n_estimators=8, missing=missing, device=f"cuda:{device}" ) reg.fit(X, y) reg.set_params(device=f"cuda:{device}") gpu_predt = reg.predict(X) reg = reg.set_params(device="cpu") cpu_predt = reg.predict(cp.asnumpy(X)) np.testing.assert_allclose(gpu_predt, cpu_predt, atol=1e-6) cp.cuda.runtime.setDevice(0) @pytest.mark.skipif(**tm.no_cupy()) def test_inplace_predict_cupy(self): self.run_inplace_predict_cupy(0) @pytest.mark.skipif(**tm.no_cupy()) @pytest.mark.mgpu def test_inplace_predict_cupy_specified_device(self): import cupy as cp n_devices = cp.cuda.runtime.getDeviceCount() for d in range(n_devices): self.run_inplace_predict_cupy(d) @pytest.mark.skipif(**tm.no_cupy()) @pytest.mark.skipif(**tm.no_cudf()) def test_inplace_predict_cudf(self): import cudf import cupy as cp import pandas as pd rows = 1000 cols = 10 rng = np.random.RandomState(1994) cp.cuda.runtime.setDevice(0) X = rng.randn(rows, cols) X = pd.DataFrame(X) y = rng.randn(rows) X = cudf.from_pandas(X) dtrain = xgb.DMatrix(X, y) booster = xgb.train( {"tree_method": "hist", "device": "cuda:0"}, dtrain, num_boost_round=10 ) test = xgb.DMatrix(X) predt_from_array = booster.inplace_predict(X) predt_from_dmatrix = booster.predict(test) cp.testing.assert_allclose(predt_from_array, predt_from_dmatrix) def predict_df(x): # column major array inplace_predt = booster.inplace_predict(x.values) d = xgb.DMatrix(x) copied_predt = cp.array(booster.predict(d)) assert cp.all(copied_predt == inplace_predt) inplace_predt = booster.inplace_predict(x) return 
cp.all(copied_predt == inplace_predt) for i in range(10): run_threaded_predict(X, rows, predict_df) base_margin = cudf.Series(rng.randn(rows)) self.run_inplace_base_margin(booster, dtrain, X, base_margin) @given( strategies.integers(1, 10), tm.make_dataset_strategy(), shap_parameter_strategy ) @settings(deadline=None, max_examples=20, print_blob=True) def test_shap(self, num_rounds, dataset, param): if dataset.name.endswith("-l1"): # not supported by the exact tree method return param.update({"tree_method": "hist", "device": "gpu:0"}) param = dataset.set_params(param) dmat = dataset.get_dmat() bst = xgb.train(param, dmat, num_rounds) test_dmat = xgb.DMatrix(dataset.X, dataset.y, dataset.w, dataset.margin) bst.set_param({"device": "gpu:0"}) shap = bst.predict(test_dmat, pred_contribs=True) margin = bst.predict(test_dmat, output_margin=True) assume(len(dataset.y) > 0) assert np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-3, 1e-3) @given( strategies.integers(1, 10), tm.make_dataset_strategy(), shap_parameter_strategy ) @settings(deadline=None, max_examples=10, print_blob=True) def test_shap_interactions(self, num_rounds, dataset, param): if dataset.name.endswith("-l1"): # not supported by the exact tree method return param.update({"tree_method": "hist", "device": "cuda:0"}) param = dataset.set_params(param) dmat = dataset.get_dmat() bst = xgb.train(param, dmat, num_rounds) test_dmat = xgb.DMatrix(dataset.X, dataset.y, dataset.w, dataset.margin) bst.set_param({"device": "cuda:0"}) shap = bst.predict(test_dmat, pred_interactions=True) margin = bst.predict(test_dmat, output_margin=True) assume(len(dataset.y) > 0) assert np.allclose( np.sum(shap, axis=(len(shap.shape) - 1, len(shap.shape) - 2)), margin, 1e-3, 1e-3, ) def test_shap_categorical(self): X, y = tm.make_categorical(100, 20, 7, False) Xy = xgb.DMatrix(X, y, enable_categorical=True) booster = xgb.train( {"tree_method": "hist", "device": "gpu:0"}, Xy, num_boost_round=10 ) 
booster.set_param({"device": "cuda:0"}) shap = booster.predict(Xy, pred_contribs=True) margin = booster.predict(Xy, output_margin=True) np.testing.assert_allclose( np.sum(shap, axis=len(shap.shape) - 1), margin, rtol=1e-3 ) booster.set_param({"device": "cpu"}) shap = booster.predict(Xy, pred_contribs=True) margin = booster.predict(Xy, output_margin=True) np.testing.assert_allclose( np.sum(shap, axis=len(shap.shape) - 1), margin, rtol=1e-3 ) def test_predict_leaf_basic(self): gpu_leaf = run_predict_leaf("gpu:0") cpu_leaf = run_predict_leaf("cpu") np.testing.assert_equal(gpu_leaf, cpu_leaf) def run_predict_leaf_booster(self, param, num_rounds, dataset): param = dataset.set_params(param) m = dataset.get_dmat() booster = xgb.train( param, dtrain=dataset.get_dmat(), num_boost_round=num_rounds ) booster.set_param({"device": "cpu"}) cpu_leaf = booster.predict(m, pred_leaf=True) booster.set_param({"device": "cuda:0"}) gpu_leaf = booster.predict(m, pred_leaf=True) np.testing.assert_equal(cpu_leaf, gpu_leaf) @given(predict_parameter_strategy, tm.make_dataset_strategy()) @settings(deadline=None, max_examples=20, print_blob=True) def test_predict_leaf_gbtree(self, param: dict, dataset: tm.TestDataset) -> None: # Unsupported for random forest if param.get("num_parallel_tree", 1) > 1 and dataset.name.endswith("-l1"): return param.update({"booster": "gbtree", "tree_method": "hist", "device": "cuda:0"}) self.run_predict_leaf_booster(param, 10, dataset) @given(predict_parameter_strategy, tm.make_dataset_strategy()) @settings(deadline=None, max_examples=20, print_blob=True) def test_predict_leaf_dart(self, param: dict, dataset: tm.TestDataset) -> None: # Unsupported for random forest if param.get("num_parallel_tree", 1) > 1 and dataset.name.endswith("-l1"): return param.update({"booster": "dart", "tree_method": "hist", "device": "cuda:0"}) self.run_predict_leaf_booster(param, 10, dataset) @pytest.mark.skipif(**tm.no_sklearn()) @pytest.mark.skipif(**tm.no_pandas()) @given( 
df=data_frames( [ column("x0", elements=strategies.integers(min_value=0, max_value=3)), column("x1", elements=strategies.integers(min_value=0, max_value=5)), ], index=range_indexes(min_size=20, max_size=50), ) ) @settings(deadline=None, max_examples=20, print_blob=True) def test_predict_categorical_split(self, df): from sklearn.metrics import mean_squared_error df = df.astype("category") x0, x1 = df["x0"].to_numpy(), df["x1"].to_numpy() y = (x0 * 10 - 20) + (x1 - 2) dtrain = xgb.DMatrix(df, label=y, enable_categorical=True) params = { "tree_method": "hist", "max_depth": 3, "learning_rate": 1.0, "base_score": 0.0, "eval_metric": "rmse", "device": "cuda:0", } eval_history = {} bst = xgb.train( params, dtrain, num_boost_round=5, evals=[(dtrain, "train")], verbose_eval=False, evals_result=eval_history, ) bst.set_param({"device": "cuda:0"}) pred = bst.predict(dtrain) rmse = mean_squared_error(y_true=y, y_pred=pred, squared=False) np.testing.assert_almost_equal( rmse, eval_history["train"]["rmse"][-1], decimal=5 ) @pytest.mark.skipif(**tm.no_cupy()) @pytest.mark.parametrize("n_classes", [2, 3]) def test_predict_dart(self, n_classes): import cupy as cp from sklearn.datasets import make_classification n_samples = 1000 X_, y_ = make_classification( n_samples=n_samples, n_informative=5, n_classes=n_classes ) X, y = cp.array(X_), cp.array(y_) Xy = xgb.DMatrix(X, y) if n_classes == 2: params = { "tree_method": "hist", "device": "cuda:0", "booster": "dart", "rate_drop": 0.5, "objective": "binary:logistic", } else: params = { "tree_method": "hist", "device": "cuda:0", "booster": "dart", "rate_drop": 0.5, "objective": "multi:softprob", "num_class": n_classes, } booster = xgb.train(params, Xy, num_boost_round=32) # auto (GPU) inplace = booster.inplace_predict(X) copied = booster.predict(Xy) # CPU booster.set_param({"device": "cpu"}) cpu_inplace = booster.inplace_predict(X_) cpu_copied = booster.predict(Xy) copied = cp.array(copied) cp.testing.assert_allclose(cpu_inplace, copied, 
atol=1e-6) cp.testing.assert_allclose(cpu_copied, copied, atol=1e-6) cp.testing.assert_allclose(inplace, copied, atol=1e-6) # GPU booster.set_param({"device": "cuda:0"}) inplace = booster.inplace_predict(X) copied = booster.predict(Xy) copied = cp.array(copied) cp.testing.assert_allclose(inplace, copied, atol=1e-6) @pytest.mark.skipif(**tm.no_cupy()) def test_dtypes(self): import cupy as cp rows = 1000 cols = 10 rng = cp.random.RandomState(1994) orig = rng.randint(low=0, high=127, size=rows * cols).reshape(rows, cols) y = rng.randint(low=0, high=127, size=rows) dtrain = xgb.DMatrix(orig, label=y) booster = xgb.train({"tree_method": "hist", "device": "cuda:0"}, dtrain) predt_orig = booster.inplace_predict(orig) # all primitive types in numpy for dtype in [ cp.byte, cp.short, cp.intc, cp.int_, cp.longlong, cp.ubyte, cp.ushort, cp.uintc, cp.uint, cp.ulonglong, cp.half, cp.single, cp.double, ]: X = cp.array(orig, dtype=dtype) predt = booster.inplace_predict(X) cp.testing.assert_allclose(predt, predt_orig) # boolean orig = cp.random.binomial(1, 0.5, size=rows * cols).reshape(rows, cols) predt_orig = booster.inplace_predict(orig) for dtype in [cp.bool8, cp.bool_]: X = cp.array(orig, dtype=dtype) predt = booster.inplace_predict(X) cp.testing.assert_allclose(predt, predt_orig) # unsupported types for dtype in [ cp.complex64, cp.complex128, ]: X = cp.array(orig, dtype=dtype) with pytest.raises(ValueError): booster.inplace_predict(X)
20,606
34.963351
88
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_parse_tree.py
import sys sys.path.append("tests/python") from test_parse_tree import TestTreesToDataFrame def test_tree_to_df_categorical(): cputest = TestTreesToDataFrame() cputest.run_tree_to_df_categorical("gpu_hist") def test_split_value_histograms(): cputest = TestTreesToDataFrame() cputest.run_split_value_histograms("gpu_hist")
343
21.933333
50
py
xgboost
xgboost-master/tests/python-gpu/test_gpu_interaction_constraints.py
import sys import numpy as np import pandas as pd import xgboost as xgb sys.path.append("tests/python") # Don't import the test class, otherwise they will run twice. import test_interaction_constraints as test_ic # noqa rng = np.random.RandomState(1994) class TestGPUInteractionConstraints: cputest = test_ic.TestInteractionConstraints() def test_interaction_constraints(self): self.cputest.run_interaction_constraints(tree_method="gpu_hist") def test_training_accuracy(self): self.cputest.training_accuracy(tree_method="gpu_hist") # case where different number of features can occur in the evaluator def test_issue_8730(self): X = pd.DataFrame( zip(range(0, 100), range(200, 300), range(300, 400), range(400, 500)), columns=["A", "B", "C", "D"], ) y = np.array([*([0] * 50), *([1] * 50)]) dm = xgb.DMatrix(X, label=y) params = { "eta": 0.16095019509249486, "min_child_weight": 1, "subsample": 0.688567929338029, "colsample_bynode": 0.7, "gamma": 5.666579817418348e-06, "lambda": 0.14943712232059794, "grow_policy": "depthwise", "max_depth": 3, "tree_method": "gpu_hist", "interaction_constraints": [["A", "B"], ["B", "D", "C"], ["C", "D"]], "objective": "count:poisson", "eval_metric": "poisson-nloglik", "verbosity": 0, } xgb.train(params, dm, num_boost_round=100)
1,551
30.04
82
py
xgboost
xgboost-master/tests/buildkite/enforce_daily_budget.py
import json import argparse if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--response", type=str, required=True) args = parser.parse_args() with open(args.response, "r") as f: payload = f.read() response = json.loads(payload) if response["approved"]: print(f"Testing approved. Reason: {response['reason']}") else: raise RuntimeError(f"Testing rejected. Reason: {response['reason']}")
473
30.6
77
py
xgboost
xgboost-master/tests/buildkite/infrastructure/service-user/create_service_user.py
import argparse import os import boto3 current_dir = os.path.dirname(__file__) def main(args): with open( os.path.join(current_dir, "service-user-template.yml"), encoding="utf-8" ) as f: service_user_template = f.read() stack_id = "buildkite-elastic-ci-stack-service-user" print("Create a new IAM user with suitable permissions...") client = boto3.client("cloudformation", region_name=args.aws_region) response = client.create_stack( StackName=stack_id, TemplateBody=service_user_template, Capabilities=[ "CAPABILITY_IAM", "CAPABILITY_NAMED_IAM", ], Parameters=[{"ParameterKey": "UserName", "ParameterValue": args.user_name}], ) waiter = client.get_waiter("stack_create_complete") waiter.wait(StackName=stack_id) user = boto3.resource("iam", region_name=args.aws_region).User(args.user_name) key_pair = user.create_access_key_pair() print("Finished creating an IAM users with suitable permissions.") print(f"Access Key ID: {key_pair.access_key_id}") print(f"Access Secret Access Key: {key_pair.secret_access_key}") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--aws-region", type=str, required=True) parser.add_argument( "--user-name", type=str, default="buildkite-elastic-ci-stack-user" ) args = parser.parse_args() main(args)
1,442
31.066667
84
py
xgboost
xgboost-master/tests/buildkite/infrastructure/aws-stack-creator/metadata.py
AMI_ID = { # Managed by XGBoost team "linux-amd64-gpu": { "us-west-2": "ami-094271bed4788ddb5", }, "linux-amd64-mgpu": { "us-west-2": "ami-094271bed4788ddb5", }, "windows-gpu": { "us-west-2": "ami-0839681594a1d7627", }, "windows-cpu": { "us-west-2": "ami-0839681594a1d7627", }, # Managed by BuildKite # from https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml "linux-amd64-cpu": { "us-west-2": "ami-00f2127550cf03658", }, "pipeline-loader": { "us-west-2": "ami-00f2127550cf03658", }, "linux-arm64-cpu": { "us-west-2": "ami-0c5789068f4a2d1b5", }, } STACK_PARAMS = { "linux-amd64-gpu": { "InstanceOperatingSystem": "linux", "InstanceType": "g4dn.xlarge", "AgentsPerInstance": "1", "MinSize": "0", "MaxSize": "8", "OnDemandPercentage": "100", "ScaleOutFactor": "1.0", "ScaleInIdlePeriod": "60", # in seconds }, "linux-amd64-mgpu": { "InstanceOperatingSystem": "linux", "InstanceType": "g4dn.12xlarge", "AgentsPerInstance": "1", "MinSize": "0", "MaxSize": "1", "OnDemandPercentage": "100", "ScaleOutFactor": "1.0", "ScaleInIdlePeriod": "60", # in seconds }, "windows-gpu": { "InstanceOperatingSystem": "windows", "InstanceType": "g4dn.2xlarge", "AgentsPerInstance": "1", "MinSize": "0", "MaxSize": "2", "OnDemandPercentage": "100", "ScaleOutFactor": "1.0", "ScaleInIdlePeriod": "60", # in seconds }, "windows-cpu": { "InstanceOperatingSystem": "windows", "InstanceType": "c5a.2xlarge", "AgentsPerInstance": "1", "MinSize": "0", "MaxSize": "2", "OnDemandPercentage": "100", "ScaleOutFactor": "1.0", "ScaleInIdlePeriod": "60", # in seconds }, "linux-amd64-cpu": { "InstanceOperatingSystem": "linux", "InstanceType": "c5a.4xlarge", "AgentsPerInstance": "1", "MinSize": "0", "MaxSize": "16", "OnDemandPercentage": "100", "ScaleOutFactor": "1.0", "ScaleInIdlePeriod": "60", # in seconds }, "pipeline-loader": { "InstanceOperatingSystem": "linux", "InstanceType": "t3a.micro", "AgentsPerInstance": "1", "MinSize": "2", "MaxSize": "2", "OnDemandPercentage": "100", "ScaleOutFactor": "1.0", 
"ScaleInIdlePeriod": "60", # in seconds }, "linux-arm64-cpu": { "InstanceOperatingSystem": "linux", "InstanceType": "c6g.4xlarge", "AgentsPerInstance": "1", "MinSize": "0", "MaxSize": "8", "OnDemandPercentage": "100", "ScaleOutFactor": "1.0", "ScaleInIdlePeriod": "60", # in seconds }, } COMMON_STACK_PARAMS = { "BuildkiteAgentTimestampLines": "false", "BuildkiteWindowsAdministrator": "true", "AssociatePublicIpAddress": "true", "ScaleOutForWaitingJobs": "false", "EnableCostAllocationTags": "true", "CostAllocationTagName": "CreatedBy", "ECRAccessPolicy": "full", "EnableSecretsPlugin": "false", "EnableECRPlugin": "false", "EnableDockerLoginPlugin": "false", "EnableDockerUserNamespaceRemap": "false", "BuildkiteAgentExperiments": "normalised-upload-paths,resolve-commit-after-checkout", }
3,471
29.191304
89
py
xgboost
xgboost-master/tests/buildkite/infrastructure/aws-stack-creator/create_stack.py
import argparse import copy import os import re import sys import boto3 import botocore from metadata import AMI_ID, COMMON_STACK_PARAMS, STACK_PARAMS current_dir = os.path.dirname(__file__) sys.path.append(os.path.join(current_dir, "..")) from common_blocks.utils import create_or_update_stack, wait TEMPLATE_URL = "https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml" def get_availability_zones(*, aws_region): client = boto3.client("ec2", region_name=aws_region) r = client.describe_availability_zones( Filters=[ {"Name": "region-name", "Values": [aws_region]}, {"Name": "zone-type", "Values": ["availability-zone"]}, ] ) return sorted([x["ZoneName"] for x in r["AvailabilityZones"]]) def get_default_vpc(*, aws_region): ec2 = boto3.resource("ec2", region_name=aws_region) default_vpc_id = None for x in ec2.vpcs.filter(Filters=[{"Name": "is-default", "Values": ["true"]}]): return x # Create default VPC if not exist client = boto3.client("ec2", region_name=aws_region) r = client.create_default_vpc() default_vpc_id = r["Vpc"]["VpcId"] return ec2.Vpc(default_vpc_id) def format_params(args, *, stack_id, agent_iam_policy): default_vpc = get_default_vpc(aws_region=args.aws_region) azs = get_availability_zones(aws_region=args.aws_region) # For each of the first two availability zones (AZs), choose the default subnet subnets = [ x.id for x in default_vpc.subnets.filter( Filters=[ {"Name": "default-for-az", "Values": ["true"]}, {"Name": "availability-zone", "Values": azs[:2]}, ] ) ] assert len(subnets) == 2 params = copy.deepcopy(STACK_PARAMS[stack_id]) params["ImageId"] = AMI_ID[stack_id][args.aws_region] params["BuildkiteQueue"] = stack_id params["CostAllocationTagValue"] = f"buildkite-{stack_id}" params["BuildkiteAgentToken"] = args.agent_token params["VpcId"] = default_vpc.id params["Subnets"] = ",".join(subnets) params["ManagedPolicyARN"] = agent_iam_policy params.update(COMMON_STACK_PARAMS) return [{"ParameterKey": k, "ParameterValue": v} for k, v in 
params.items()] def get_full_stack_id(stack_id): return f"buildkite-{stack_id}-autoscaling-group" def create_agent_iam_policy(args, *, client): policy_stack_name = "buildkite-agent-iam-policy" print(f"Creating stack {policy_stack_name} for agent IAM policy...") with open( os.path.join(current_dir, "agent-iam-policy-template.yml"), encoding="utf-8", ) as f: policy_template = f.read() promise = create_or_update_stack( args, client=client, stack_name=policy_stack_name, template_body=policy_template ) wait(promise, client=client) cf = boto3.resource("cloudformation", region_name=args.aws_region) policy = cf.StackResource(policy_stack_name, "BuildkiteAgentManagedPolicy") return policy.physical_resource_id def main(args): client = boto3.client("cloudformation", region_name=args.aws_region) agent_iam_policy = create_agent_iam_policy(args, client=client) promises = [] for stack_id in AMI_ID: stack_id_full = get_full_stack_id(stack_id) print(f"Creating elastic CI stack {stack_id_full}...") params = format_params( args, stack_id=stack_id, agent_iam_policy=agent_iam_policy ) promise = create_or_update_stack( args, client=client, stack_name=stack_id_full, template_url=TEMPLATE_URL, params=params, ) promises.append(promise) print(f"CI stack {stack_id_full} is in progress in the background") for promise in promises: wait(promise, client=client) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--aws-region", type=str, required=True) parser.add_argument("--agent-token", type=str, required=True) args = parser.parse_args() main(args)
4,092
30.976563
88
py
xgboost
xgboost-master/tests/buildkite/infrastructure/common_blocks/utils.py
import re import boto3 import botocore def stack_exists(args, *, stack_name): client = boto3.client("cloudformation", region_name=args.aws_region) waiter = client.get_waiter("stack_exists") try: waiter.wait(StackName=stack_name, WaiterConfig={"MaxAttempts": 1}) return True except botocore.exceptions.WaiterError as e: return False def create_or_update_stack( args, *, client, stack_name, template_url=None, template_body=None, params=None ): kwargs = { "StackName": stack_name, "Capabilities": [ "CAPABILITY_IAM", "CAPABILITY_NAMED_IAM", "CAPABILITY_AUTO_EXPAND", ], } if template_url: kwargs["TemplateURL"] = template_url if template_body: kwargs["TemplateBody"] = template_body if params: kwargs["Parameters"] = params if stack_exists(args, stack_name=stack_name): print(f"Stack {stack_name} already exists. Updating...") try: response = client.update_stack(**kwargs) return {"StackName": stack_name, "Action": "update"} except botocore.exceptions.ClientError as e: if e.response["Error"]["Code"] == "ValidationError" and re.search( "No updates are to be performed", e.response["Error"]["Message"] ): print(f"No update was made to {stack_name}") return {"StackName": stack_name, "Action": "noop"} else: raise e else: kwargs.update({"OnFailure": "ROLLBACK", "EnableTerminationProtection": False}) response = client.create_stack(**kwargs) return {"StackName": stack_name, "Action": "create"} def replace_stack( args, *, client, stack_name, template_url=None, template_body=None, params=None ): """Delete an existing stack and create a new stack with identical name""" if not stack_exists(args, stack_name=stack_name): raise ValueError(f"Stack {stack_name} does not exist") r = client.delete_stack(StackName=stack_name) delete_waiter = client.get_waiter("stack_delete_complete") delete_waiter.wait(StackName=stack_name) kwargs = { "StackName": stack_name, "Capabilities": [ "CAPABILITY_IAM", "CAPABILITY_NAMED_IAM", "CAPABILITY_AUTO_EXPAND", ], "OnFailure": "ROLLBACK", 
"EnableTerminationProtection": False, } if template_url: kwargs["TemplateURL"] = template_url if template_body: kwargs["TemplateBody"] = template_body if params: kwargs["Parameters"] = params response = client.create_stack(**kwargs) return {"StackName": stack_name, "Action": "create"} def wait(promise, *, client): stack_name = promise["StackName"] print(f"Waiting for {stack_name}...") if promise["Action"] == "create": waiter = client.get_waiter("stack_create_complete") waiter.wait(StackName=stack_name) print(f"Finished creating stack {stack_name}") elif promise["Action"] == "update": waiter = client.get_waiter("stack_update_complete") waiter.wait(StackName=stack_name) print(f"Finished updating stack {stack_name}") elif promise["Action"] != "noop": raise ValueError(f"Invalid promise {promise}")
3,351
33.204082
86
py
xgboost
xgboost-master/tests/buildkite/infrastructure/worker-image-pipeline/run_pipelines.py
import argparse import boto3 from create_worker_image_pipelines import get_full_stack_id from metadata import IMAGE_PARAMS def main(args): cf = boto3.resource("cloudformation", region_name=args.aws_region) builder_client = boto3.client("imagebuilder", region_name=args.aws_region) for stack_id in IMAGE_PARAMS: stack_id_full = get_full_stack_id(stack_id) pipeline_arn = cf.Stack(stack_id_full).Resource("Pipeline").physical_resource_id print(f"Running pipeline {pipeline_arn} to generate a new AMI...") r = builder_client.start_image_pipeline_execution(imagePipelineArn=pipeline_arn) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--aws-region", type=str, required=True) args = parser.parse_args() main(args)
810
34.26087
88
py
xgboost
xgboost-master/tests/buildkite/infrastructure/worker-image-pipeline/metadata.py
IMAGE_PARAMS = { "linux-amd64-gpu": { "BaseImageId": "linuxamd64", # AMI ID is looked up from Buildkite's CloudFormation template "BootstrapScript": "linux-amd64-gpu-bootstrap.yml", "InstanceType": "g4dn.xlarge", "InstanceOperatingSystem": "Linux", "VolumeSize": "40", # in GiBs }, "windows-gpu": { "BaseImageId": "windows", # AMI ID is looked up from Buildkite's CloudFormation template "BootstrapScript": "windows-gpu-bootstrap.yml", "InstanceType": "g4dn.2xlarge", "InstanceOperatingSystem": "Windows", "VolumeSize": "120", # in GiBs }, }
656
33.578947
70
py
xgboost
xgboost-master/tests/buildkite/infrastructure/worker-image-pipeline/create_worker_image_pipelines.py
import argparse
import copy
import json
import os
import sys
from urllib.request import urlopen

import boto3
import cfn_flip

from metadata import IMAGE_PARAMS

current_dir = os.path.dirname(__file__)
# The shared helpers live one directory up; this append must run before the
# common_blocks import below.
sys.path.append(os.path.join(current_dir, ".."))

from common_blocks.utils import replace_stack, wait

# Canonical upstream Buildkite stack template; its AMI mapping is reused here.
BUILDKITE_CF_TEMPLATE_URL = (
    "https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml"
)


def format_params(*, stack_id, aws_region, ami_mapping):
    """Build the CloudFormation Parameters list for one image stack.

    Resolves the symbolic ``BaseImageId`` to a concrete AMI ID via
    ``ami_mapping`` and inlines the bootstrap script's file contents.
    Returns a list of ``{"ParameterKey": ..., "ParameterValue": ...}`` dicts.
    """
    params = copy.deepcopy(IMAGE_PARAMS[stack_id])
    with open(
        os.path.join(current_dir, params["BootstrapScript"]),
        encoding="utf-8",
    ) as f:
        bootstrap_script = f.read()
    params["BaseImageId"] = ami_mapping[aws_region][params["BaseImageId"]]
    params["BootstrapScript"] = bootstrap_script
    return [{"ParameterKey": k, "ParameterValue": v} for k, v in params.items()]


def get_ami_mapping():
    """Fetch Buildkite's CloudFormation template and return its region->AMI map."""
    with urlopen(BUILDKITE_CF_TEMPLATE_URL) as response:
        buildkite_cf_template = response.read().decode("utf-8")
    # The upstream template is YAML; convert to JSON to parse with the stdlib.
    cfn_obj = json.loads(cfn_flip.to_json(buildkite_cf_template))
    return cfn_obj["Mappings"]["AWSRegion2AMI"]


def get_full_stack_id(stack_id):
    """Map a short stack ID to the full CloudFormation stack name."""
    return f"buildkite-{stack_id}-worker"


def main(args):
    """Create (or replace) one EC2 Image Builder stack per worker image.

    Stacks are launched concurrently via ``replace_stack`` and then awaited
    with ``wait`` once all have been submitted.
    """
    with open(
        os.path.join(current_dir, "ec2-image-builder-pipeline-template.yml"),
        encoding="utf-8",
    ) as f:
        ec2_image_pipeline_template = f.read()

    ami_mapping = get_ami_mapping()

    client = boto3.client("cloudformation", region_name=args.aws_region)
    promises = []
    for stack_id in IMAGE_PARAMS:
        stack_id_full = get_full_stack_id(stack_id)
        print(f"Creating EC2 image builder stack {stack_id_full}...")
        params = format_params(
            stack_id=stack_id, aws_region=args.aws_region, ami_mapping=ami_mapping
        )
        promise = replace_stack(
            args,
            client=client,
            stack_name=stack_id_full,
            template_body=ec2_image_pipeline_template,
            params=params,
        )
        promises.append(promise)
        print(
            f"EC2 image builder stack {stack_id_full} is in progress in the background"
        )
    # Only block on completion after every stack operation has been started.
    for promise in promises:
        wait(promise, client=client)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--aws-region", type=str, required=True)
    args = parser.parse_args()
    main(args)
2,420
27.151163
87
py
xgboost
xgboost-master/tests/test_distributed/__init__.py
0
0
0
py
xgboost
xgboost-master/tests/test_distributed/test_federated/test_federated.py
#!/usr/bin/python
"""Multi-process integration test for XGBoost federated learning.

Spawns one federated server plus ``world_size`` worker processes (world size
is read from ``sys.argv[1]``) and runs the four SSL/GPU combinations.
"""
import multiprocessing
import sys
import time

import xgboost as xgb
import xgboost.federated

SERVER_KEY = 'server-key.pem'
SERVER_CERT = 'server-cert.pem'
CLIENT_KEY = 'client-key.pem'
CLIENT_CERT = 'client-cert.pem'


def run_server(port: int, world_size: int, with_ssl: bool) -> None:
    """Run the federated learning server, optionally with SSL."""
    if with_ssl:
        xgboost.federated.run_federated_server(port, world_size, SERVER_KEY, SERVER_CERT,
                                               CLIENT_CERT)
    else:
        xgboost.federated.run_federated_server(port, world_size)


def run_worker(port: int, world_size: int, rank: int, with_ssl: bool, with_gpu: bool) -> None:
    """Train as federated participant ``rank`` against the local server.

    Rank 0 additionally saves the trained model to ``test.model.json``.
    """
    communicator_env = {
        'xgboost_communicator': 'federated',
        'federated_server_address': f'localhost:{port}',
        'federated_world_size': world_size,
        'federated_rank': rank
    }
    if with_ssl:
        communicator_env['federated_server_cert'] = SERVER_CERT
        communicator_env['federated_client_key'] = CLIENT_KEY
        communicator_env['federated_client_cert'] = CLIENT_CERT

    # Always call this before using distributed module
    with xgb.collective.CommunicatorContext(**communicator_env):
        # Load file, file will not be sharded in federated mode.
        dtrain = xgb.DMatrix('agaricus.txt.train-%02d' % rank)
        dtest = xgb.DMatrix('agaricus.txt.test-%02d' % rank)

        # Specify parameters via map, definition are same as c++ version
        param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'}
        if with_gpu:
            param['tree_method'] = 'gpu_hist'
            param['gpu_id'] = rank

        # Specify validations set to watch performance
        watchlist = [(dtest, 'eval'), (dtrain, 'train')]
        num_round = 20

        # Run training, all the features in training API is available.
        bst = xgb.train(param, dtrain, num_round, evals=watchlist,
                        early_stopping_rounds=2)

        # Save the model, only ask process 0 to save the model.
        if xgb.collective.get_rank() == 0:
            bst.save_model("test.model.json")
            xgb.collective.communicator_print("Finished training\n")


def run_federated(with_ssl: bool = True, with_gpu: bool = False) -> None:
    """Start a server and ``world_size`` workers, then wait for completion.

    Raises
    ------
    RuntimeError
        If the server process dies within its one-second startup grace period.
    """
    port = 9091
    world_size = int(sys.argv[1])

    server = multiprocessing.Process(target=run_server, args=(port, world_size, with_ssl))
    server.start()
    time.sleep(1)
    if not server.is_alive():
        # RuntimeError is more specific than a bare Exception while remaining
        # catchable by any existing `except Exception` handlers.
        raise RuntimeError("Error starting Federated Learning server")

    workers = []
    for rank in range(world_size):
        worker = multiprocessing.Process(target=run_worker,
                                         args=(port, world_size, rank, with_ssl, with_gpu))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
    server.terminate()
    # Join after terminate so the server process is reaped rather than left
    # as a zombie between the four test invocations below.
    server.join()


if __name__ == '__main__':
    run_federated(with_ssl=True, with_gpu=False)
    run_federated(with_ssl=False, with_gpu=False)
    run_federated(with_ssl=True, with_gpu=True)
    run_federated(with_ssl=False, with_gpu=True)
3,078
34.390805
94
py
xgboost
xgboost-master/tests/test_distributed/test_with_dask/test_with_dask.py
"""Copyright 2019-2022 XGBoost contributors""" import asyncio import json import os import pickle import socket import tempfile from concurrent.futures import ThreadPoolExecutor from functools import partial from itertools import starmap from math import ceil from operator import attrgetter, getitem from pathlib import Path from typing import Any, Dict, Generator, Optional, Tuple, Type, TypeVar, Union import hypothesis import numpy as np import pytest import scipy import sklearn from hypothesis import HealthCheck, given, note, settings from sklearn.datasets import make_classification, make_regression import xgboost as xgb from xgboost import testing as tm from xgboost.data import _is_cudf_df from xgboost.testing.params import hist_parameter_strategy from xgboost.testing.shared import ( get_feature_weights, validate_data_initialization, validate_leaf_output, ) pytestmark = [tm.timeout(60), pytest.mark.skipif(**tm.no_dask())] import dask import dask.array as da import dask.dataframe as dd from distributed import Client, LocalCluster from toolz import sliding_window # dependency of dask from xgboost.dask import DaskDMatrix from xgboost.testing.dask import check_init_estimation, check_uneven_nan dask.config.set({"distributed.scheduler.allowed-failures": False}) if hasattr(HealthCheck, "function_scoped_fixture"): suppress = [HealthCheck.function_scoped_fixture] else: suppress = hypothesis.utils.conventions.not_set # type:ignore @pytest.fixture(scope="module") def cluster() -> Generator: n_threads = os.cpu_count() assert n_threads is not None with LocalCluster( n_workers=2, threads_per_worker=n_threads // 2, dashboard_address=":0" ) as dask_cluster: yield dask_cluster @pytest.fixture def client(cluster: "LocalCluster") -> Generator: with Client(cluster) as dask_client: yield dask_client kRows = 1000 kCols = 10 kWorkers = 5 def make_categorical( client: Client, n_samples: int, n_features: int, n_categories: int, onehot: bool = False, ) -> Tuple[dd.DataFrame, dd.Series]: 
workers = tm.get_client_workers(client) n_workers = len(workers) dfs = [] def pack(**kwargs: Any) -> dd.DataFrame: X, y = tm.make_categorical(**kwargs) X["label"] = y return X meta = pack( n_samples=1, n_features=n_features, n_categories=n_categories, onehot=False ) for i, worker in enumerate(workers): l_n_samples = min( n_samples // n_workers, n_samples - i * (n_samples // n_workers) ) # make sure there's at least one sample for testing empty DMatrix if n_samples == 1 and i == 0: l_n_samples = 1 future = client.submit( pack, n_samples=l_n_samples, n_features=n_features, n_categories=n_categories, onehot=False, workers=[worker], ) dfs.append(future) df = dd.from_delayed(dfs, meta=meta) y = df["label"] X = df[df.columns.difference(["label"])] if onehot: return dd.get_dummies(X), y return X, y def generate_array( with_weights: bool = False, ) -> Tuple[da.Array, da.Array, Optional[da.Array]]: chunk_size = 20 rng = da.random.RandomState(1994) X = rng.random_sample((kRows, kCols), chunks=(chunk_size, -1)) y = rng.random_sample(kRows, chunks=chunk_size) if with_weights: w = rng.random_sample(kRows, chunks=chunk_size) return X, y, w return X, y, None def deterministic_persist_per_worker( df: dd.DataFrame, client: "Client" ) -> dd.DataFrame: # Got this script from https://github.com/dmlc/xgboost/issues/7927 # Query workers n_workers = len(client.cluster.workers) workers = map(attrgetter("worker_address"), client.cluster.workers.values()) # Slice data into roughly equal partitions subpartition_size = ceil(df.npartitions / n_workers) subpartition_divisions = range( 0, df.npartitions + subpartition_size, subpartition_size ) subpartition_slices = starmap(slice, sliding_window(2, subpartition_divisions)) subpartitions = map(partial(getitem, df.partitions), subpartition_slices) # Persist each subpartition on each worker # Rebuild dataframe from persisted subpartitions df2 = dd.concat( [ sp.persist(workers=w, allow_other_workers=False) for sp, w in zip(subpartitions, workers) ] ) 
return df2 Margin = TypeVar("Margin", dd.DataFrame, dd.Series, None) def deterministic_repartition( client: Client, X: dd.DataFrame, y: dd.Series, m: Margin, ) -> Tuple[dd.DataFrame, dd.Series, Margin]: # force repartition the data to avoid non-deterministic result if any(X.map_partitions(lambda x: _is_cudf_df(x)).compute()): # dask_cudf seems to be doing fine for now return X, y, m X["_y"] = y if m is not None: if isinstance(m, dd.DataFrame): m_columns = m.columns X = dd.concat([X, m], join="outer", axis=1) else: m_columns = ["_m"] X["_m"] = m X = deterministic_persist_per_worker(X, client) y = X["_y"] X = X[X.columns.difference(["_y"])] if m is not None: m = X[m_columns] X = X[X.columns.difference(m_columns)] return X, y, m @pytest.mark.parametrize("to_frame", [True, False]) def test_xgbclassifier_classes_type_and_value(to_frame: bool, client: "Client"): X, y = make_classification(n_samples=1000, n_features=4, random_state=123) if to_frame: import pandas as pd feats = [f"var_{i}" for i in range(4)] df = pd.DataFrame(X, columns=feats) df["target"] = y df = dd.from_pandas(df, npartitions=1) X, y = df[feats], df["target"] else: X = da.from_array(X) y = da.from_array(y) est = xgb.dask.DaskXGBClassifier(n_estimators=10).fit(X, y) assert isinstance(est.classes_, np.ndarray) np.testing.assert_array_equal(est.classes_, np.array([0, 1])) def test_from_dask_dataframe() -> None: with LocalCluster(n_workers=kWorkers, dashboard_address=":0") as cluster: with Client(cluster) as client: X, y, _ = generate_array() X = dd.from_dask_array(X) y = dd.from_dask_array(y) dtrain = DaskDMatrix(client, X, y) booster = xgb.dask.train(client, {}, dtrain, num_boost_round=2)["booster"] prediction = xgb.dask.predict(client, model=booster, data=dtrain) assert prediction.ndim == 1 assert isinstance(prediction, da.Array) assert prediction.shape[0] == kRows with pytest.raises(TypeError): # evals_result is not supported in dask interface. 
xgb.dask.train( # type:ignore client, {}, dtrain, num_boost_round=2, evals_result={} ) # force prediction to be computed from_dmatrix = prediction.compute() prediction = xgb.dask.predict(client, model=booster, data=X) from_df = prediction.compute() assert isinstance(prediction, dd.Series) assert np.all(prediction.compute().values == from_dmatrix) assert np.all(from_dmatrix == from_df.to_numpy()) series_predictions = xgb.dask.inplace_predict(client, booster, X) assert isinstance(series_predictions, dd.Series) np.testing.assert_allclose( series_predictions.compute().values, from_dmatrix ) # Make sure the output can be integrated back to original dataframe X["predict"] = prediction X["inplace_predict"] = series_predictions assert bool(X.isnull().values.any().compute()) is False def test_from_dask_array() -> None: with LocalCluster( n_workers=kWorkers, threads_per_worker=5, dashboard_address=":0" ) as cluster: with Client(cluster) as client: X, y, _ = generate_array() dtrain = DaskDMatrix(client, X, y) # results is {'booster': Booster, 'history': {...}} result = xgb.dask.train(client, {}, dtrain) prediction = xgb.dask.predict(client, result, dtrain) assert prediction.shape[0] == kRows assert isinstance(prediction, da.Array) # force prediction to be computed prediction = prediction.compute() booster: xgb.Booster = result["booster"] single_node_predt = booster.predict(xgb.DMatrix(X.compute())) np.testing.assert_allclose(prediction, single_node_predt) config = json.loads(booster.save_config()) assert int(config["learner"]["generic_param"]["nthread"]) == 5 from_arr = xgb.dask.predict(client, model=booster, data=X) assert isinstance(from_arr, da.Array) assert np.all(single_node_predt == from_arr.compute()) def test_dask_sparse(client: "Client") -> None: X_, y_ = make_classification(n_samples=1000, n_informative=5, n_classes=3) rng = np.random.default_rng(seed=0) idx = rng.integers(low=0, high=X_.shape[0], size=X_.shape[0] // 4) X_[idx, :] = np.nan # numpy X, y = 
da.from_array(X_), da.from_array(y_) clf = xgb.dask.DaskXGBClassifier(tree_method="hist", n_estimators=10) clf.client = client clf.fit(X, y, eval_set=[(X, y)]) dense_results = clf.evals_result() # scipy sparse X, y = da.from_array(X_).map_blocks(scipy.sparse.csr_matrix), da.from_array(y_) clf = xgb.dask.DaskXGBClassifier(tree_method="hist", n_estimators=10) clf.client = client clf.fit(X, y, eval_set=[(X, y)]) sparse_results = clf.evals_result() np.testing.assert_allclose( dense_results["validation_0"]["mlogloss"], sparse_results["validation_0"]["mlogloss"], ) def run_categorical(client: "Client", tree_method: str, X, X_onehot, y) -> None: parameters = {"tree_method": tree_method, "max_cat_to_onehot": 9999} # force onehot rounds = 10 m = xgb.dask.DaskDMatrix(client, X_onehot, y, enable_categorical=True) by_etl_results = xgb.dask.train( client, parameters, m, num_boost_round=rounds, evals=[(m, "Train")], )["history"] m = xgb.dask.DaskDMatrix(client, X, y, enable_categorical=True) output = xgb.dask.train( client, parameters, m, num_boost_round=rounds, evals=[(m, "Train")], ) by_builtin_results = output["history"] np.testing.assert_allclose( np.array(by_etl_results["Train"]["rmse"]), np.array(by_builtin_results["Train"]["rmse"]), rtol=1e-3, ) assert tm.non_increasing(by_builtin_results["Train"]["rmse"]) def check_model_output(model: xgb.dask.Booster) -> None: with tempfile.TemporaryDirectory() as tempdir: path = os.path.join(tempdir, "model.json") model.save_model(path) with open(path, "r") as fd: categorical = json.load(fd) categories_sizes = np.array( categorical["learner"]["gradient_booster"]["model"]["trees"][-1][ "categories_sizes" ] ) assert categories_sizes.shape[0] != 0 np.testing.assert_allclose(categories_sizes, 1) check_model_output(output["booster"]) reg = xgb.dask.DaskXGBRegressor( enable_categorical=True, n_estimators=10, tree_method=tree_method, # force onehot max_cat_to_onehot=9999, ) reg.fit(X, y) check_model_output(reg.get_booster()) reg = 
xgb.dask.DaskXGBRegressor( enable_categorical=True, n_estimators=10, tree_method="exact" ) with pytest.raises(ValueError, match="categorical data"): reg.fit(X, y) # check partition based reg = xgb.dask.DaskXGBRegressor( enable_categorical=True, n_estimators=10, tree_method=tree_method ) reg.fit(X, y, eval_set=[(X, y)]) assert tm.non_increasing(reg.evals_result()["validation_0"]["rmse"]) booster = reg.get_booster() predt = xgb.dask.predict(client, booster, X).compute().values inpredt = xgb.dask.inplace_predict(client, booster, X).compute().values if hasattr(predt, "get"): predt = predt.get() if hasattr(inpredt, "get"): inpredt = inpredt.get() np.testing.assert_allclose(predt, inpredt) def test_categorical(client: "Client") -> None: X, y = make_categorical(client, 10000, 30, 13) X_onehot, _ = make_categorical(client, 10000, 30, 13, True) run_categorical(client, "approx", X, X_onehot, y) run_categorical(client, "hist", X, X_onehot, y) ft = ["c"] * X.shape[1] reg = xgb.dask.DaskXGBRegressor( tree_method="hist", feature_types=ft, enable_categorical=True ) reg.fit(X, y) assert reg.get_booster().feature_types == ft def test_dask_predict_shape_infer(client: "Client") -> None: X, y = make_classification(n_samples=kRows, n_informative=5, n_classes=3) X_ = dd.from_array(X, chunksize=100) y_ = dd.from_array(y, chunksize=100) dtrain = xgb.dask.DaskDMatrix(client, data=X_, label=y_) model = xgb.dask.train( client, {"objective": "multi:softprob", "num_class": 3}, dtrain=dtrain ) preds = xgb.dask.predict(client, model, dtrain) assert preds.shape[0] == preds.compute().shape[0] assert preds.shape[1] == preds.compute().shape[1] prediction = xgb.dask.predict(client, model, X_, output_margin=True) assert isinstance(prediction, dd.DataFrame) prediction = prediction.compute() assert prediction.ndim == 2 assert prediction.shape[0] == kRows assert prediction.shape[1] == 3 prediction = xgb.dask.inplace_predict(client, model, X_, predict_type="margin") assert isinstance(prediction, 
dd.DataFrame) prediction = prediction.compute() assert prediction.ndim == 2 assert prediction.shape[0] == kRows assert prediction.shape[1] == 3 def run_boost_from_prediction_multi_class( X: dd.DataFrame, y: dd.Series, tree_method: str, device: str, client: "Client", ) -> None: model_0 = xgb.dask.DaskXGBClassifier( learning_rate=0.3, n_estimators=4, tree_method=tree_method, max_bin=768, device=device, ) X, y, _ = deterministic_repartition(client, X, y, None) model_0.fit(X=X, y=y) margin = xgb.dask.inplace_predict( client, model_0.get_booster(), X, predict_type="margin" ) margin.columns = [f"m_{i}" for i in range(margin.shape[1])] model_1 = xgb.dask.DaskXGBClassifier( learning_rate=0.3, n_estimators=4, tree_method=tree_method, max_bin=768, device=device, ) X, y, margin = deterministic_repartition(client, X, y, margin) model_1.fit(X=X, y=y, base_margin=margin) predictions_1 = xgb.dask.predict( client, model_1.get_booster(), xgb.dask.DaskDMatrix(client, X, base_margin=margin), output_margin=True, ) model_2 = xgb.dask.DaskXGBClassifier( learning_rate=0.3, n_estimators=8, tree_method=tree_method, max_bin=768, device=device, ) X, y, _ = deterministic_repartition(client, X, y, None) model_2.fit(X=X, y=y) predictions_2 = xgb.dask.inplace_predict( client, model_2.get_booster(), X, predict_type="margin" ) a = predictions_1.compute() b = predictions_2.compute() # cupy/cudf if hasattr(a, "get"): a = a.get() if hasattr(b, "values"): b = b.values if hasattr(b, "get"): b = b.get() np.testing.assert_allclose(a, b, atol=1e-5) def run_boost_from_prediction( X: dd.DataFrame, y: dd.Series, tree_method: str, device: str, client: "Client", ) -> None: X, y = client.persist([X, y]) model_0 = xgb.dask.DaskXGBClassifier( learning_rate=0.3, n_estimators=4, tree_method=tree_method, max_bin=512, device=device, ) X, y, _ = deterministic_repartition(client, X, y, None) model_0.fit(X=X, y=y) margin: dd.Series = model_0.predict(X, output_margin=True) model_1 = xgb.dask.DaskXGBClassifier( 
learning_rate=0.3, n_estimators=4, tree_method=tree_method, max_bin=512, device=device, ) X, y, margin = deterministic_repartition(client, X, y, margin) model_1.fit(X=X, y=y, base_margin=margin) X, y, margin = deterministic_repartition(client, X, y, margin) predictions_1: dd.Series = model_1.predict(X, base_margin=margin) model_2 = xgb.dask.DaskXGBClassifier( learning_rate=0.3, n_estimators=8, tree_method=tree_method, max_bin=512, device=device, ) X, y, _ = deterministic_repartition(client, X, y, None) model_2.fit(X=X, y=y) predictions_2: dd.Series = model_2.predict(X) predt_1 = predictions_1.compute() predt_2 = predictions_2.compute() if hasattr(predt_1, "to_numpy"): predt_1 = predt_1.to_numpy() if hasattr(predt_2, "to_numpy"): predt_2 = predt_2.to_numpy() np.testing.assert_allclose(predt_1, predt_2, atol=1e-5) margined = xgb.dask.DaskXGBClassifier(n_estimators=4) X, y, margin = deterministic_repartition(client, X, y, margin) margined.fit( X=X, y=y, base_margin=margin, eval_set=[(X, y)], base_margin_eval_set=[margin] ) unmargined = xgb.dask.DaskXGBClassifier(n_estimators=4) X, y, margin = deterministic_repartition(client, X, y, margin) unmargined.fit(X=X, y=y, eval_set=[(X, y)], base_margin=margin) margined_res = margined.evals_result()["validation_0"]["logloss"] unmargined_res = unmargined.evals_result()["validation_0"]["logloss"] assert len(margined_res) == len(unmargined_res) for i in range(len(margined_res)): # margined is correct one, so smaller error. 
assert margined_res[i] < unmargined_res[i] @pytest.mark.parametrize("tree_method", ["hist", "approx"]) def test_boost_from_prediction(tree_method: str, client: "Client") -> None: from sklearn.datasets import load_breast_cancer, load_digits X_, y_ = load_breast_cancer(return_X_y=True) X, y = dd.from_array(X_, chunksize=200), dd.from_array(y_, chunksize=200) run_boost_from_prediction(X, y, tree_method, "cpu", client) X_, y_ = load_digits(return_X_y=True) X, y = dd.from_array(X_, chunksize=100), dd.from_array(y_, chunksize=100) run_boost_from_prediction_multi_class(X, y, tree_method, "cpu", client) def test_inplace_predict(client: "Client") -> None: from sklearn.datasets import load_diabetes X_, y_ = load_diabetes(return_X_y=True) X, y = dd.from_array(X_, chunksize=32), dd.from_array(y_, chunksize=32) reg = xgb.dask.DaskXGBRegressor(n_estimators=4).fit(X, y) booster = reg.get_booster() base_margin = y inplace = xgb.dask.inplace_predict( client, booster, X, base_margin=base_margin ).compute() Xy = xgb.dask.DaskDMatrix(client, X, base_margin=base_margin) copied = xgb.dask.predict(client, booster, Xy).compute() np.testing.assert_allclose(inplace, copied) def test_dask_missing_value_reg(client: "Client") -> None: X_0 = np.ones((20 // 2, kCols)) X_1 = np.zeros((20 // 2, kCols)) X = np.concatenate([X_0, X_1], axis=0) np.random.shuffle(X) X = da.from_array(X) X = X.rechunk(20, 1) y = da.random.randint(0, 3, size=20) y.rechunk(20) regressor = xgb.dask.DaskXGBRegressor(verbosity=1, n_estimators=2, missing=0.0) regressor.client = client regressor.set_params(tree_method="hist") regressor.fit(X, y, eval_set=[(X, y)]) dd_predt = regressor.predict(X).compute() np_X = X.compute() np_predt = regressor.get_booster().predict(xgb.DMatrix(np_X, missing=0.0)) np.testing.assert_allclose(np_predt, dd_predt) def test_dask_missing_value_cls(client: "Client") -> None: X_0 = np.ones((kRows // 2, kCols)) X_1 = np.zeros((kRows // 2, kCols)) X = np.concatenate([X_0, X_1], axis=0) 
np.random.shuffle(X) X = da.from_array(X) X = X.rechunk(20, None) y = da.random.randint(0, 3, size=kRows) y = y.rechunk(20, 1) cls = xgb.dask.DaskXGBClassifier( verbosity=1, n_estimators=2, tree_method="hist", missing=0.0 ) cls.client = client cls.fit(X, y, eval_set=[(X, y)]) dd_pred_proba = cls.predict_proba(X).compute() np_X = X.compute() np_pred_proba = cls.get_booster().predict(xgb.DMatrix(np_X, missing=0.0)) np.testing.assert_allclose(np_pred_proba, dd_pred_proba) cls = xgb.dask.DaskXGBClassifier() assert hasattr(cls, "missing") @pytest.mark.parametrize("model", ["boosting", "rf"]) def test_dask_regressor(model: str, client: "Client") -> None: X, y, w = generate_array(with_weights=True) if model == "boosting": regressor = xgb.dask.DaskXGBRegressor(verbosity=1, n_estimators=2) else: regressor = xgb.dask.DaskXGBRFRegressor(verbosity=1, n_estimators=2) assert regressor._estimator_type == "regressor" assert sklearn.base.is_regressor(regressor) regressor.set_params(tree_method="hist") regressor.client = client regressor.fit(X, y, sample_weight=w, eval_set=[(X, y)]) prediction = regressor.predict(X) assert prediction.ndim == 1 assert prediction.shape[0] == kRows history = regressor.evals_result() assert isinstance(prediction, da.Array) assert isinstance(history, dict) assert list(history["validation_0"].keys())[0] == "rmse" forest = int( json.loads(regressor.get_booster().save_config())["learner"][ "gradient_booster" ]["gbtree_model_param"]["num_parallel_tree"] ) if model == "boosting": assert len(history["validation_0"]["rmse"]) == 2 assert forest == 1 else: assert len(history["validation_0"]["rmse"]) == 1 assert forest == 2 def run_dask_classifier( X: xgb.dask._DaskCollection, y: xgb.dask._DaskCollection, w: xgb.dask._DaskCollection, model: str, tree_method: Optional[str], client: "Client", n_classes, ) -> None: metric = "merror" if n_classes > 2 else "logloss" if model == "boosting": classifier = xgb.dask.DaskXGBClassifier( verbosity=1, n_estimators=2, 
eval_metric=metric, tree_method=tree_method ) else: classifier = xgb.dask.DaskXGBRFClassifier( verbosity=1, n_estimators=2, eval_metric=metric, tree_method=tree_method ) assert classifier._estimator_type == "classifier" assert sklearn.base.is_classifier(classifier) classifier.client = client classifier.fit(X, y, sample_weight=w, eval_set=[(X, y)]) prediction = classifier.predict(X).compute() assert prediction.ndim == 1 assert prediction.shape[0] == kRows history = classifier.evals_result() assert isinstance(history, dict) assert list(history.keys())[0] == "validation_0" assert list(history["validation_0"].keys())[0] == metric assert len(list(history["validation_0"])) == 1 config = json.loads(classifier.get_booster().save_config()) n_threads = int(config["learner"]["generic_param"]["nthread"]) assert n_threads != 0 and n_threads != os.cpu_count() forest = int( config["learner"]["gradient_booster"]["gbtree_model_param"]["num_parallel_tree"] ) if model == "boosting": assert len(history["validation_0"][metric]) == 2 assert forest == 1 else: assert len(history["validation_0"][metric]) == 1 assert forest == 2 # Test .predict_proba() probas = classifier.predict_proba(X).compute() assert classifier.n_classes_ == n_classes assert probas.ndim == 2 assert probas.shape[0] == kRows assert probas.shape[1] == n_classes if n_classes > 2: cls_booster = classifier.get_booster() single_node_proba = cls_booster.inplace_predict(X.compute()) # test shared by CPU and GPU if isinstance(single_node_proba, np.ndarray): np.testing.assert_allclose(single_node_proba, probas) else: import cupy cupy.testing.assert_allclose(single_node_proba, probas) # Test with dataframe, not shared with GPU as cupy doesn't work well with da.unique. 
if isinstance(X, da.Array) and n_classes > 2: X_d: dd.DataFrame = X.to_dask_dataframe() assert classifier.n_classes_ == n_classes prediction_df = classifier.predict(X_d).compute() assert prediction_df.ndim == 1 assert prediction_df.shape[0] == kRows np.testing.assert_allclose(prediction_df, prediction) probas = classifier.predict_proba(X).compute() np.testing.assert_allclose(single_node_proba, probas) @pytest.mark.parametrize("model", ["boosting", "rf"]) def test_dask_classifier(model: str, client: "Client") -> None: X, y, w = generate_array(with_weights=True) y = (y * 10).astype(np.int32) assert w is not None run_dask_classifier(X, y, w, model, None, client, 10) y_bin = y.copy() y_bin[y > 5] = 1.0 y_bin[y <= 5] = 0.0 run_dask_classifier(X, y_bin, w, model, None, client, 2) def test_empty_dmatrix_training_continuation(client: "Client") -> None: kRows, kCols = 1, 97 X = dd.from_array(np.random.randn(kRows, kCols)) y = dd.from_array(np.random.rand(kRows)) X.columns = ["X" + str(i) for i in range(0, kCols)] dtrain = xgb.dask.DaskDMatrix(client, X, y) kRows += 1000 X = dd.from_array(np.random.randn(kRows, kCols), chunksize=10) X.columns = ["X" + str(i) for i in range(0, kCols)] y = dd.from_array(np.random.rand(kRows), chunksize=10) valid = xgb.dask.DaskDMatrix(client, X, y) out = xgb.dask.train( client, {"tree_method": "hist"}, dtrain=dtrain, num_boost_round=2, evals=[(valid, "validation")], ) out = xgb.dask.train( client, {"tree_method": "hist"}, dtrain=dtrain, xgb_model=out["booster"], num_boost_round=2, evals=[(valid, "validation")], ) assert xgb.dask.predict(client, out, dtrain).compute().shape[0] == 1 def run_empty_dmatrix_reg(client: "Client", parameters: dict) -> None: def _check_outputs(out: xgb.dask.TrainReturnT, predictions: np.ndarray) -> None: assert isinstance(out["booster"], xgb.dask.Booster) for _, v in out["history"]["validation"].items(): assert len(v) == 2 assert isinstance(predictions, np.ndarray) assert predictions.shape[0] == 1 kRows, kCols = 1, 97 
X = dd.from_array(np.random.randn(kRows, kCols)) y = dd.from_array(np.random.rand(kRows)) dtrain = xgb.dask.DaskDMatrix(client, X, y) out = xgb.dask.train( client, parameters, dtrain=dtrain, evals=[(dtrain, "validation")], num_boost_round=2, ) predictions = xgb.dask.predict(client=client, model=out, data=dtrain).compute() _check_outputs(out, predictions) # valid has more rows than train kRows += 1 X = dd.from_array(np.random.randn(kRows, kCols)) y = dd.from_array(np.random.rand(kRows)) valid = xgb.dask.DaskDMatrix(client, X, y) out = xgb.dask.train( client, parameters, dtrain=dtrain, evals=[(valid, "validation")], num_boost_round=2, ) predictions = xgb.dask.predict(client=client, model=out, data=dtrain).compute() _check_outputs(out, predictions) # train has more rows than evals valid = dtrain kRows += 1 X = dd.from_array(np.random.randn(kRows, kCols)) y = dd.from_array(np.random.rand(kRows)) dtrain = xgb.dask.DaskDMatrix(client, X, y) out = xgb.dask.train( client, parameters, dtrain=dtrain, evals=[(valid, "validation")], num_boost_round=2, ) predictions = xgb.dask.predict(client=client, model=out, data=valid).compute() _check_outputs(out, predictions) def run_empty_dmatrix_cls(client: "Client", parameters: dict) -> None: n_classes = 4 def _check_outputs(out: xgb.dask.TrainReturnT, predictions: np.ndarray) -> None: assert isinstance(out["booster"], xgb.dask.Booster) assert len(out["history"]["validation"]["merror"]) == 2 assert isinstance(predictions, np.ndarray) assert predictions.shape[1] == n_classes, predictions.shape kRows, kCols = 1, 97 X = dd.from_array(np.random.randn(kRows, kCols)) y = dd.from_array(np.random.randint(low=0, high=n_classes, size=kRows)) dtrain = xgb.dask.DaskDMatrix(client, X, y) parameters["objective"] = "multi:softprob" parameters["eval_metric"] = "merror" parameters["num_class"] = n_classes out = xgb.dask.train( client, parameters, dtrain=dtrain, evals=[(dtrain, "validation")], num_boost_round=2, ) predictions = 
xgb.dask.predict(client=client, model=out, data=dtrain) assert predictions.shape[1] == n_classes predictions = predictions.compute() _check_outputs(out, predictions) # train has more rows than evals valid = dtrain kRows += 1 X = dd.from_array(np.random.randn(kRows, kCols)) y = dd.from_array(np.random.randint(low=0, high=n_classes, size=kRows)) dtrain = xgb.dask.DaskDMatrix(client, X, y) out = xgb.dask.train( client, parameters, dtrain=dtrain, evals=[(valid, "validation")], num_boost_round=2, ) predictions = xgb.dask.predict(client=client, model=out, data=valid).compute() _check_outputs(out, predictions) def run_empty_dmatrix_auc(client: "Client", device: str, n_workers: int) -> None: from sklearn import datasets n_samples = 100 n_features = 7 rng = np.random.RandomState(1994) make_classification = partial( datasets.make_classification, n_features=n_features, random_state=rng ) # binary X_, y_ = make_classification(n_samples=n_samples, random_state=rng) X = dd.from_array(X_, chunksize=10) y = dd.from_array(y_, chunksize=10) n_samples = n_workers - 1 valid_X_, valid_y_ = make_classification(n_samples=n_samples, random_state=rng) valid_X = dd.from_array(valid_X_, chunksize=n_samples) valid_y = dd.from_array(valid_y_, chunksize=n_samples) cls = xgb.dask.DaskXGBClassifier(device=device, n_estimators=2) cls.fit(X, y, eval_metric=["auc", "aucpr"], eval_set=[(valid_X, valid_y)]) # multiclass X_, y_ = make_classification( n_samples=n_samples, n_classes=n_workers, n_informative=n_features, n_redundant=0, n_repeated=0, ) for i in range(y_.shape[0]): y_[i] = i % n_workers X = dd.from_array(X_, chunksize=10) y = dd.from_array(y_, chunksize=10) n_samples = n_workers - 1 valid_X_, valid_y_ = make_classification( n_samples=n_samples, n_classes=n_workers, n_informative=n_features, n_redundant=0, n_repeated=0, ) for i in range(valid_y_.shape[0]): valid_y_[i] = i % n_workers valid_X = dd.from_array(valid_X_, chunksize=n_samples) valid_y = dd.from_array(valid_y_, chunksize=n_samples) 
cls = xgb.dask.DaskXGBClassifier(device=device, n_estimators=2) cls.fit(X, y, eval_metric=["auc", "aucpr"], eval_set=[(valid_X, valid_y)]) def test_empty_dmatrix_auc() -> None: with LocalCluster(n_workers=4, dashboard_address=":0") as cluster: with Client(cluster) as client: run_empty_dmatrix_auc(client, "cpu", 4) def run_auc(client: "Client", device: str) -> None: from sklearn import datasets n_samples = 100 n_features = 97 rng = np.random.RandomState(1994) X_, y_ = datasets.make_classification( n_samples=n_samples, n_features=n_features, random_state=rng ) X = dd.from_array(X_, chunksize=10) y = dd.from_array(y_, chunksize=10) valid_X_, valid_y_ = datasets.make_classification( n_samples=n_samples, n_features=n_features, random_state=rng ) valid_X = dd.from_array(valid_X_, chunksize=10) valid_y = dd.from_array(valid_y_, chunksize=10) cls = xgb.XGBClassifier(device=device, n_estimators=2) cls.fit(X_, y_, eval_metric="auc", eval_set=[(valid_X_, valid_y_)]) dcls = xgb.dask.DaskXGBClassifier(device=device, n_estimators=2) dcls.fit(X, y, eval_metric="auc", eval_set=[(valid_X, valid_y)]) approx = dcls.evals_result()["validation_0"]["auc"] exact = cls.evals_result()["validation_0"]["auc"] for i in range(2): # approximated test. assert np.abs(approx[i] - exact[i]) <= 0.06 def test_auc(client: "Client") -> None: run_auc(client, "cpu") # No test for Exact, as empty DMatrix handling are mostly for distributed # environment and Exact doesn't support it. 
@pytest.mark.parametrize("tree_method", ["hist", "approx"]) def test_empty_dmatrix(tree_method) -> None: with LocalCluster(n_workers=kWorkers, dashboard_address=":0") as cluster: with Client(cluster) as client: parameters = {"tree_method": tree_method} run_empty_dmatrix_reg(client, parameters) run_empty_dmatrix_cls(client, parameters) parameters = {"tree_method": tree_method, "objective": "reg:absoluteerror"} run_empty_dmatrix_reg(client, parameters) async def run_from_dask_array_asyncio(scheduler_address: str) -> xgb.dask.TrainReturnT: async with Client(scheduler_address, asynchronous=True) as client: X, y, _ = generate_array() m = await DaskDMatrix(client, X, y) output = await xgb.dask.train(client, {}, dtrain=m) with_m = await xgb.dask.predict(client, output, m) with_X = await xgb.dask.predict(client, output, X) inplace = await xgb.dask.inplace_predict(client, output, X) assert isinstance(with_m, da.Array) assert isinstance(with_X, da.Array) assert isinstance(inplace, da.Array) np.testing.assert_allclose( await client.compute(with_m), await client.compute(with_X) ) np.testing.assert_allclose( await client.compute(with_m), await client.compute(inplace) ) return output async def run_dask_regressor_asyncio(scheduler_address: str) -> None: async with Client(scheduler_address, asynchronous=True) as client: X, y, _ = generate_array() regressor = await xgb.dask.DaskXGBRegressor(verbosity=1, n_estimators=2) regressor.set_params(tree_method="hist") regressor.client = client await regressor.fit(X, y, eval_set=[(X, y)]) prediction = await regressor.predict(X) assert prediction.ndim == 1 assert prediction.shape[0] == kRows history = regressor.evals_result() assert isinstance(prediction, da.Array) assert isinstance(history, dict) assert list(history["validation_0"].keys())[0] == "rmse" assert len(history["validation_0"]["rmse"]) == 2 awaited = await client.compute(prediction) assert awaited.shape[0] == kRows async def run_dask_classifier_asyncio(scheduler_address: str) -> 
None: async with Client(scheduler_address, asynchronous=True) as client: X, y, _ = generate_array() y = (y * 10).astype(np.int32) classifier = await xgb.dask.DaskXGBClassifier( verbosity=1, n_estimators=2, eval_metric="merror" ) classifier.client = client await classifier.fit(X, y, eval_set=[(X, y)]) prediction = await classifier.predict(X) assert prediction.ndim == 1 assert prediction.shape[0] == kRows history = classifier.evals_result() assert isinstance(prediction, da.Array) assert isinstance(history, dict) assert list(history.keys())[0] == "validation_0" assert list(history["validation_0"].keys())[0] == "merror" assert len(list(history["validation_0"])) == 1 assert len(history["validation_0"]["merror"]) == 2 # Test .predict_proba() probas = await classifier.predict_proba(X) assert classifier.n_classes_ == 10 assert probas.ndim == 2 assert probas.shape[0] == kRows assert probas.shape[1] == 10 # Test with dataframe. X_d = dd.from_dask_array(X) y_d = dd.from_dask_array(y) await classifier.fit(X_d, y_d) assert classifier.n_classes_ == 10 prediction = await client.compute(await classifier.predict(X_d)) assert prediction.ndim == 1 assert prediction.shape[0] == kRows def test_with_asyncio() -> None: with LocalCluster(n_workers=2, dashboard_address=":0") as cluster: with Client(cluster) as client: address = client.scheduler.address output = asyncio.run(run_from_dask_array_asyncio(address)) assert isinstance(output["booster"], xgb.Booster) assert isinstance(output["history"], dict) asyncio.run(run_dask_regressor_asyncio(address)) asyncio.run(run_dask_classifier_asyncio(address)) async def generate_concurrent_trainings() -> None: async def train() -> None: async with LocalCluster( n_workers=2, threads_per_worker=1, asynchronous=True, dashboard_address=":0" ) as cluster: async with Client(cluster, asynchronous=True) as client: X, y, w = generate_array(with_weights=True) dtrain = await DaskDMatrix(client, X, y, weight=w) dvalid = await DaskDMatrix(client, X, y, weight=w) 
output = await xgb.dask.train(client, {}, dtrain=dtrain) await xgb.dask.predict(client, output, data=dvalid) await asyncio.gather(train(), train()) def test_concurrent_trainings() -> None: asyncio.run(generate_concurrent_trainings()) def test_predict(client: "Client") -> None: X, y, _ = generate_array() dtrain = DaskDMatrix(client, X, y) booster = xgb.dask.train(client, {}, dtrain, num_boost_round=2)["booster"] predt_0 = xgb.dask.predict(client, model=booster, data=dtrain) assert predt_0.ndim == 1 assert predt_0.shape[0] == kRows margin = xgb.dask.predict(client, model=booster, data=dtrain, output_margin=True) assert margin.ndim == 1 assert margin.shape[0] == kRows shap = xgb.dask.predict(client, model=booster, data=dtrain, pred_contribs=True) assert shap.ndim == 2 assert shap.shape[0] == kRows assert shap.shape[1] == kCols + 1 booster_f = client.scatter(booster, broadcast=True) predt_1 = xgb.dask.predict(client, booster_f, X).compute() predt_2 = xgb.dask.inplace_predict(client, booster_f, X).compute() np.testing.assert_allclose(predt_0, predt_1) np.testing.assert_allclose(predt_0, predt_2) def test_predict_with_meta(client: "Client") -> None: X, y, w = generate_array(with_weights=True) assert w is not None partition_size = 20 margin = da.random.random(kRows, partition_size) + 1e4 dtrain = DaskDMatrix(client, X, y, weight=w, base_margin=margin) booster: xgb.Booster = xgb.dask.train(client, {}, dtrain, num_boost_round=4)[ "booster" ] prediction = xgb.dask.predict(client, model=booster, data=dtrain) assert prediction.ndim == 1 assert prediction.shape[0] == kRows prediction = client.compute(prediction).result() assert np.all(prediction > 1e3) m = xgb.DMatrix(X.compute()) m.set_info(label=y.compute(), weight=w.compute(), base_margin=margin.compute()) single = booster.predict(m) # Make sure the ordering is correct. 
assert np.all(prediction == single) def run_aft_survival(client: "Client", dmatrix_t: Type) -> None: df = dd.read_csv(os.path.join(tm.data_dir(__file__), "veterans_lung_cancer.csv")) y_lower_bound = df["Survival_label_lower_bound"] y_upper_bound = df["Survival_label_upper_bound"] X = df.drop(["Survival_label_lower_bound", "Survival_label_upper_bound"], axis=1) m = dmatrix_t( client, X, label_lower_bound=y_lower_bound, label_upper_bound=y_upper_bound ) base_params = { "verbosity": 0, "objective": "survival:aft", "eval_metric": "aft-nloglik", "learning_rate": 0.05, "aft_loss_distribution_scale": 1.20, "max_depth": 6, "lambda": 0.01, "alpha": 0.02, } nloglik_rec = {} dists = ["normal", "logistic", "extreme"] for dist in dists: params = base_params params.update({"aft_loss_distribution": dist}) evals_result = {} out = xgb.dask.train( client, params, m, num_boost_round=100, evals=[(m, "train")] ) evals_result = out["history"] nloglik_rec[dist] = evals_result["train"]["aft-nloglik"] # AFT metric (negative log likelihood) improve monotonically assert all(p >= q for p, q in zip(nloglik_rec[dist], nloglik_rec[dist][:1])) # For this data, normal distribution works the best assert nloglik_rec["normal"][-1] < 4.9 assert nloglik_rec["logistic"][-1] > 4.9 assert nloglik_rec["extreme"][-1] > 4.9 def test_dask_aft_survival() -> None: with LocalCluster(n_workers=kWorkers, dashboard_address=":0") as cluster: with Client(cluster) as client: run_aft_survival(client, DaskDMatrix) def test_dask_ranking(client: "Client") -> None: dpath = "demo/rank/" mq2008 = tm.data.get_mq2008(dpath) data = [] for d in mq2008: if isinstance(d, scipy.sparse.csr_matrix): d[d == 0] = np.inf d = d.toarray() d[d == 0] = np.nan d[np.isinf(d)] = 0 data.append(dd.from_array(d, chunksize=32)) else: data.append(dd.from_array(d, chunksize=32)) ( x_train, y_train, qid_train, x_test, y_test, qid_test, x_valid, y_valid, qid_valid, ) = data qid_train = qid_train.astype(np.uint32) qid_valid = 
qid_valid.astype(np.uint32) qid_test = qid_test.astype(np.uint32) rank = xgb.dask.DaskXGBRanker(n_estimators=2500) rank.fit( x_train, y_train, qid=qid_train, eval_set=[(x_test, y_test), (x_train, y_train)], eval_qid=[qid_test, qid_train], eval_metric=["ndcg"], verbose=True, early_stopping_rounds=10, ) assert rank.n_features_in_ == 46 assert rank.best_score > 0.98 @pytest.mark.parametrize("booster", ["dart", "gbtree"]) def test_dask_predict_leaf(booster: str, client: "Client") -> None: from sklearn.datasets import load_digits X_, y_ = load_digits(return_X_y=True) num_parallel_tree = 4 X, y = dd.from_array(X_, chunksize=32), dd.from_array(y_, chunksize=32) rounds = 4 cls = xgb.dask.DaskXGBClassifier( n_estimators=rounds, num_parallel_tree=num_parallel_tree, booster=booster ) cls.client = client cls.fit(X, y) leaf = xgb.dask.predict( client, cls.get_booster(), X.to_dask_array(), # we can't map_blocks on dataframe when output is 4-dim. pred_leaf=True, strict_shape=True, validate_features=False, ).compute() assert leaf.shape[0] == X_.shape[0] assert leaf.shape[1] == rounds assert leaf.shape[2] == cls.n_classes_ assert leaf.shape[3] == num_parallel_tree leaf_from_apply = cls.apply(X).reshape(leaf.shape).compute() np.testing.assert_allclose(leaf_from_apply, leaf) validate_leaf_output(leaf, num_parallel_tree) def test_dask_iteration_range(client: "Client"): X, y, _ = generate_array() n_rounds = 10 Xy = xgb.DMatrix(X.compute(), y.compute()) dXy = xgb.dask.DaskDMatrix(client, X, y) booster = xgb.dask.train( client, {"tree_method": "hist"}, dXy, num_boost_round=n_rounds )["booster"] for i in range(0, n_rounds): iter_range = (0, i) native_predt = booster.predict(Xy, iteration_range=iter_range) with_dask_dmatrix = xgb.dask.predict( client, booster, dXy, iteration_range=iter_range ) with_dask_collection = xgb.dask.predict( client, booster, X, iteration_range=iter_range ) with_inplace = xgb.dask.inplace_predict( client, booster, X, iteration_range=iter_range ) 
np.testing.assert_allclose(native_predt, with_dask_dmatrix.compute()) np.testing.assert_allclose(native_predt, with_dask_collection.compute()) np.testing.assert_allclose(native_predt, with_inplace.compute()) full_predt = xgb.dask.predict(client, booster, X, iteration_range=(0, n_rounds)) default = xgb.dask.predict(client, booster, X) np.testing.assert_allclose(full_predt.compute(), default.compute()) class TestWithDask: def test_dmatrix_binary(self, client: "Client") -> None: def save_dmatrix(rabit_args: Dict[str, Union[int, str]], tmpdir: str) -> None: with xgb.dask.CommunicatorContext(**rabit_args): rank = xgb.collective.get_rank() X, y = tm.make_categorical(100, 4, 4, False) Xy = xgb.DMatrix(X, y, enable_categorical=True) path = os.path.join(tmpdir, f"{rank}.bin") Xy.save_binary(path) def load_dmatrix(rabit_args: Dict[str, Union[int, str]], tmpdir: str) -> None: with xgb.dask.CommunicatorContext(**rabit_args): rank = xgb.collective.get_rank() path = os.path.join(tmpdir, f"{rank}.bin") Xy = xgb.DMatrix(path) assert Xy.num_row() == 100 assert Xy.num_col() == 4 with tempfile.TemporaryDirectory() as tmpdir: workers = tm.get_client_workers(client) rabit_args = client.sync( xgb.dask._get_rabit_args, len(workers), None, client ) futures = [] for w in workers: # same argument for each worker, must set pure to False otherwise dask # will try to reuse the result from the first worker and hang waiting # for it. 
f = client.submit( save_dmatrix, rabit_args, tmpdir, workers=[w], pure=False ) futures.append(f) client.gather(futures) rabit_args = client.sync( xgb.dask._get_rabit_args, len(workers), None, client ) futures = [] for w in workers: f = client.submit( load_dmatrix, rabit_args, tmpdir, workers=[w], pure=False ) futures.append(f) client.gather(futures) @pytest.mark.parametrize( "config_key,config_value", [("verbosity", 0), ("use_rmm", True)] ) def test_global_config( self, client: "Client", config_key: str, config_value: Any ) -> None: X, y, _ = generate_array() xgb.config.set_config(**{config_key: config_value}) dtrain = DaskDMatrix(client, X, y) before_fname = "./before_training-test_global_config" after_fname = "./after_training-test_global_config" class TestCallback(xgb.callback.TrainingCallback): def write_file(self, fname: str) -> None: with open(fname, "w") as fd: fd.write(str(xgb.config.get_config()[config_key])) def before_training(self, model: xgb.Booster) -> xgb.Booster: self.write_file(before_fname) assert xgb.config.get_config()[config_key] == config_value return model def after_training(self, model: xgb.Booster) -> xgb.Booster: assert xgb.config.get_config()[config_key] == config_value return model def before_iteration( self, model: xgb.Booster, epoch: int, evals_log: Dict ) -> bool: assert xgb.config.get_config()[config_key] == config_value return False def after_iteration( self, model: xgb.Booster, epoch: int, evals_log: Dict ) -> bool: self.write_file(after_fname) assert xgb.config.get_config()[config_key] == config_value return False xgb.dask.train( client, {}, dtrain, num_boost_round=4, callbacks=[TestCallback()] )["booster"] with open(before_fname, "r") as before, open(after_fname, "r") as after: assert before.read() == str(config_value) assert after.read() == str(config_value) os.remove(before_fname) os.remove(after_fname) with dask.config.set({"xgboost.foo": "bar"}): with pytest.raises(ValueError, match=r"Unknown configuration.*"): 
xgb.dask.train(client, {}, dtrain, num_boost_round=4) with dask.config.set({"xgboost.scheduler_address": "127.0.0.1:foo"}): with pytest.raises(socket.gaierror, match=r".*not known.*"): xgb.dask.train(client, {}, dtrain, num_boost_round=1) def run_updater_test( self, client: "Client", params: Dict, num_rounds: int, dataset: tm.TestDataset, tree_method: str, ) -> None: params["tree_method"] = tree_method params = dataset.set_params(params) # It doesn't make sense to distribute a completely # empty dataset. if dataset.X.shape[0] == 0: return chunk = 128 y_chunk = chunk if len(dataset.y.shape) == 1 else (chunk, dataset.y.shape[1]) X = da.from_array(dataset.X, chunks=(chunk, dataset.X.shape[1])) y = da.from_array(dataset.y, chunks=y_chunk) if dataset.w is not None: w = da.from_array(dataset.w, chunks=(chunk,)) else: w = None m = xgb.dask.DaskDMatrix(client, data=X, label=y, weight=w) history = xgb.dask.train( client, params=params, dtrain=m, num_boost_round=num_rounds, evals=[(m, "train")], )["history"] note(history) history = history["train"][dataset.metric] def is_stump(): return ( params.get("max_depth", None) == 1 or params.get("max_leaves", None) == 1 ) def minimum_bin() -> bool: return "max_bin" in params and params["max_bin"] == 2 # See note on `ObjFunction::UpdateTreeLeaf`. update_leaf = dataset.name.endswith("-l1") if update_leaf and len(history) >= 2: assert history[0] >= history[-1] return elif minimum_bin() and is_stump(): assert tm.non_increasing(history, tolerance=1e-3) else: assert tm.non_increasing(history) # Make sure that it's decreasing if is_stump(): # we might have already got the best score with base_score. 
assert history[-1] <= history[0] else: assert history[-1] < history[0] @given(params=hist_parameter_strategy, dataset=tm.make_dataset_strategy()) @settings( deadline=None, max_examples=10, suppress_health_check=suppress, print_blob=True ) def test_hist( self, params: Dict, dataset: tm.TestDataset, client: "Client" ) -> None: num_rounds = 10 self.run_updater_test(client, params, num_rounds, dataset, "hist") def test_quantile_dmatrix(self, client: Client) -> None: X, y = make_categorical(client, 10000, 30, 13) Xy = xgb.dask.DaskDMatrix(client, X, y, enable_categorical=True) valid_Xy = xgb.dask.DaskDMatrix(client, X, y, enable_categorical=True) output = xgb.dask.train( client, {"tree_method": "hist"}, Xy, num_boost_round=10, evals=[(Xy, "Train"), (valid_Xy, "Valid")], ) dmatrix_hist = output["history"] Xy = xgb.dask.DaskQuantileDMatrix(client, X, y, enable_categorical=True) valid_Xy = xgb.dask.DaskQuantileDMatrix( client, X, y, enable_categorical=True, ref=Xy ) output = xgb.dask.train( client, {"tree_method": "hist"}, Xy, num_boost_round=10, evals=[(Xy, "Train"), (valid_Xy, "Valid")], ) quantile_hist = output["history"] np.testing.assert_allclose( quantile_hist["Train"]["rmse"], dmatrix_hist["Train"]["rmse"] ) np.testing.assert_allclose( quantile_hist["Valid"]["rmse"], dmatrix_hist["Valid"]["rmse"] ) def test_empty_quantile_dmatrix(self, client: Client) -> None: X, y = make_categorical(client, 2, 30, 13) X_valid, y_valid = make_categorical(client, 10000, 30, 13) X_valid, y_valid, _ = deterministic_repartition(client, X_valid, y_valid, None) Xy = xgb.dask.DaskQuantileDMatrix(client, X, y, enable_categorical=True) Xy_valid = xgb.dask.DaskQuantileDMatrix( client, X_valid, y_valid, ref=Xy, enable_categorical=True ) result = xgb.dask.train( client, {"tree_method": "hist"}, Xy, num_boost_round=10, evals=[(Xy_valid, "Valid")], ) predt = xgb.dask.inplace_predict(client, result["booster"], X).compute() np.testing.assert_allclose(y.compute(), predt) rmse = 
result["history"]["Valid"]["rmse"][-1] assert rmse < 32.0 @given(params=hist_parameter_strategy, dataset=tm.make_dataset_strategy()) @settings( deadline=None, max_examples=10, suppress_health_check=suppress, print_blob=True ) def test_approx( self, client: "Client", params: Dict, dataset: tm.TestDataset ) -> None: num_rounds = 10 self.run_updater_test(client, params, num_rounds, dataset, "approx") def test_adaptive(self) -> None: def get_score(config: Dict) -> float: return float(config["learner"]["learner_model_param"]["base_score"]) def local_test(rabit_args: Dict[str, Union[int, str]], worker_id: int) -> bool: with xgb.dask.CommunicatorContext(**rabit_args): if worker_id == 0: y = np.array([0.0, 0.0, 0.0]) x = np.array([[0.0]] * 3) else: y = np.array([1000.0]) x = np.array( [ [0.0], ] ) Xy = xgb.DMatrix(x, y) booster = xgb.train( {"tree_method": "hist", "objective": "reg:absoluteerror"}, Xy, num_boost_round=1, ) config = json.loads(booster.save_config()) base_score = get_score(config) assert base_score == 250.0 return True with LocalCluster(n_workers=2, dashboard_address=":0") as cluster: with Client(cluster) as client: workers = tm.get_client_workers(client) rabit_args = client.sync( xgb.dask._get_rabit_args, len(workers), None, client ) futures = [] for i, _ in enumerate(workers): f = client.submit(local_test, rabit_args, i) futures.append(f) results = client.gather(futures) assert all(results) def test_n_workers(self) -> None: with LocalCluster(n_workers=2, dashboard_address=":0") as cluster: with Client(cluster) as client: workers = tm.get_client_workers(client) from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) dX = client.submit(da.from_array, X, workers=[workers[0]]).result() dy = client.submit(da.from_array, y, workers=[workers[0]]).result() train = xgb.dask.DaskDMatrix(client, dX, dy) dX = dd.from_array(X) dX = client.persist(dX, workers=workers[1]) dy = dd.from_array(y) dy = client.persist(dy, workers=workers[1]) 
valid = xgb.dask.DaskDMatrix(client, dX, dy) merged = xgb.dask._get_workers_from_data( train, evals=[(valid, "Valid")] ) assert len(merged) == 2 @pytest.mark.skipif(**tm.no_dask()) def test_feature_weights(self, client: "Client") -> None: kRows = 1024 kCols = 64 rng = da.random.RandomState(1994) X = rng.random_sample((kRows, kCols), chunks=(32, -1)) y = rng.random_sample(kRows, chunks=32) fw = np.ones(shape=(kCols,)) for i in range(kCols): fw[i] *= float(i) fw = da.from_array(fw) parser = os.path.join(tm.demo_dir(__file__), "json-model", "json_parser.py") poly_increasing = get_feature_weights( X, y, fw, parser, "approx", model=xgb.dask.DaskXGBRegressor ) fw = np.ones(shape=(kCols,)) for i in range(kCols): fw[i] *= float(kCols - i) fw = da.from_array(fw) poly_decreasing = get_feature_weights( X, y, fw, parser, "approx", model=xgb.dask.DaskXGBRegressor ) # Approxmated test, this is dependent on the implementation of random # number generator in std library. assert poly_increasing[0] > 0.08 assert poly_decreasing[0] < -0.08 @pytest.mark.skipif(**tm.no_dask()) @pytest.mark.skipif(**tm.no_sklearn()) def test_custom_objective(self, client: "Client") -> None: from sklearn.datasets import fetch_california_housing X, y = fetch_california_housing(return_X_y=True) X, y = da.from_array(X), da.from_array(y) rounds = 20 with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, "log") def sqr( labels: np.ndarray, predts: np.ndarray ) -> Tuple[np.ndarray, np.ndarray]: with open(path, "a") as fd: print("Running sqr", file=fd) grad = predts - labels hess = np.ones(shape=labels.shape[0]) return grad, hess reg = xgb.dask.DaskXGBRegressor( n_estimators=rounds, objective=sqr, tree_method="hist" ) reg.fit(X, y, eval_set=[(X, y)]) # Check the obj is ran for rounds. 
with open(path, "r") as fd: out = fd.readlines() assert len(out) == rounds results_custom = reg.evals_result() reg = xgb.dask.DaskXGBRegressor( n_estimators=rounds, tree_method="hist", base_score=0.5 ) reg.fit(X, y, eval_set=[(X, y)]) results_native = reg.evals_result() np.testing.assert_allclose( results_custom["validation_0"]["rmse"], results_native["validation_0"]["rmse"], ) tm.non_increasing(results_native["validation_0"]["rmse"]) def test_no_duplicated_partition(self) -> None: """Assert each worker has the correct amount of data, and DMatrix initialization doesn't generate unnecessary copies of data. """ with LocalCluster(n_workers=2, dashboard_address=":0") as cluster: with Client(cluster) as client: X, y, _ = generate_array() n_partitions = X.npartitions m = xgb.dask.DaskDMatrix(client, X, y) workers = tm.get_client_workers(client) rabit_args = client.sync( xgb.dask._get_rabit_args, len(workers), None, client ) n_workers = len(workers) def worker_fn(worker_addr: str, data_ref: Dict) -> None: with xgb.dask.CommunicatorContext(**rabit_args): local_dtrain = xgb.dask._dmatrix_from_list_of_parts( **data_ref, nthread=7 ) total = np.array([local_dtrain.num_row()]) total = xgb.collective.allreduce(total, xgb.collective.Op.SUM) assert total[0] == kRows futures = [] for i in range(len(workers)): futures.append( client.submit( worker_fn, workers[i], m._create_fn_args(workers[i]), pure=False, workers=[workers[i]], ) ) client.gather(futures) has_what = client.has_what() cnt = 0 data = set() for k, v in has_what.items(): for d in v: cnt += 1 data.add(d) assert len(data) == cnt # Subtract the on disk resource from each worker assert cnt - n_workers == n_partitions def test_data_initialization(self, client: "Client") -> None: """assert that we don't create duplicated DMatrix""" from sklearn.datasets import load_digits X, y = load_digits(return_X_y=True) X, y = dd.from_array(X, chunksize=32), dd.from_array(y, chunksize=32) validate_data_initialization( 
xgb.dask.DaskQuantileDMatrix, xgb.dask.DaskXGBClassifier, X, y ) def run_shap( self, X: Any, y: Any, params: Dict[str, Any], client: "Client" ) -> None: rows = X.shape[0] cols = X.shape[1] def assert_shape(shape: Tuple[int, ...]) -> None: assert shape[0] == rows if "num_class" in params.keys(): assert shape[1] == params["num_class"] assert shape[2] == cols + 1 else: assert shape[1] == cols + 1 X, y = da.from_array(X, chunks=(32, -1)), da.from_array(y, chunks=32) Xy = xgb.dask.DaskDMatrix(client, X, y) booster = xgb.dask.train(client, params, Xy, num_boost_round=10)["booster"] test_Xy = xgb.dask.DaskDMatrix(client, X, y) shap = xgb.dask.predict(client, booster, test_Xy, pred_contribs=True).compute() margin = xgb.dask.predict( client, booster, test_Xy, output_margin=True ).compute() assert_shape(shap.shape) assert np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-5, 1e-5) shap = xgb.dask.predict(client, booster, X, pred_contribs=True).compute() margin = xgb.dask.predict(client, booster, X, output_margin=True).compute() assert_shape(shap.shape) assert np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-5, 1e-5) if "num_class" not in params.keys(): X = dd.from_dask_array(X).repartition(npartitions=32) y = dd.from_dask_array(y).repartition(npartitions=32) shap_df = xgb.dask.predict( client, booster, X, pred_contribs=True, validate_features=False ).compute() assert_shape(shap_df.shape) assert np.allclose( np.sum(shap_df, axis=len(shap_df.shape) - 1), margin, 1e-5, 1e-5 ) def run_shap_cls_sklearn(self, X: Any, y: Any, client: "Client") -> None: X, y = da.from_array(X, chunks=(32, -1)), da.from_array(y, chunks=32) cls = xgb.dask.DaskXGBClassifier(n_estimators=4) cls.client = client cls.fit(X, y) booster = cls.get_booster() test_Xy = xgb.dask.DaskDMatrix(client, X, y) shap = xgb.dask.predict(client, booster, test_Xy, pred_contribs=True).compute() margin = xgb.dask.predict( client, booster, test_Xy, output_margin=True ).compute() assert 
np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-5, 1e-5) shap = xgb.dask.predict(client, booster, X, pred_contribs=True).compute() margin = xgb.dask.predict(client, booster, X, output_margin=True).compute() assert np.allclose(np.sum(shap, axis=len(shap.shape) - 1), margin, 1e-5, 1e-5) def test_shap(self, client: "Client") -> None: from sklearn.datasets import load_diabetes, load_iris X, y = load_diabetes(return_X_y=True) params: Dict[str, Any] = {"objective": "reg:squarederror"} self.run_shap(X, y, params, client) X, y = load_iris(return_X_y=True) params = {"objective": "multi:softmax", "num_class": 3} self.run_shap(X, y, params, client) params = {"objective": "multi:softprob", "num_class": 3} self.run_shap(X, y, params, client) self.run_shap_cls_sklearn(X, y, client) def run_shap_interactions( self, X: Any, y: Any, params: Dict[str, Any], client: "Client" ) -> None: rows = X.shape[0] cols = X.shape[1] X, y = da.from_array(X, chunks=(32, -1)), da.from_array(y, chunks=32) Xy = xgb.dask.DaskDMatrix(client, X, y) booster = xgb.dask.train(client, params, Xy, num_boost_round=10)["booster"] test_Xy = xgb.dask.DaskDMatrix(client, X, y) shap = xgb.dask.predict( client, booster, test_Xy, pred_interactions=True ).compute() assert len(shap.shape) == 3 assert shap.shape[0] == rows assert shap.shape[1] == cols + 1 assert shap.shape[2] == cols + 1 margin = xgb.dask.predict( client, booster, test_Xy, output_margin=True ).compute() assert np.allclose( np.sum(shap, axis=(len(shap.shape) - 1, len(shap.shape) - 2)), margin, 1e-5, 1e-5, ) def test_shap_interactions(self, client: "Client") -> None: from sklearn.datasets import load_diabetes X, y = load_diabetes(return_X_y=True) params = {"objective": "reg:squarederror"} self.run_shap_interactions(X, y, params, client) @pytest.mark.skipif(**tm.no_sklearn()) def test_sklearn_io(self, client: "Client") -> None: from sklearn.datasets import load_digits X_, y_ = load_digits(return_X_y=True) X, y = da.from_array(X_), 
da.from_array(y_) cls = xgb.dask.DaskXGBClassifier(n_estimators=10) cls.client = client cls.fit(X, y) predt_0 = cls.predict(X) with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, "model.pkl") with open(path, "wb") as fd: pickle.dump(cls, fd) with open(path, "rb") as fd: cls = pickle.load(fd) predt_1 = cls.predict(X) np.testing.assert_allclose(predt_0.compute(), predt_1.compute()) path = os.path.join(tmpdir, "cls.json") cls.save_model(path) cls = xgb.dask.DaskXGBClassifier() cls.load_model(path) assert cls.n_classes_ == 10 predt_2 = cls.predict(X) np.testing.assert_allclose(predt_0.compute(), predt_2.compute()) # Use single node to load cls = xgb.XGBClassifier() cls.load_model(path) assert cls.n_classes_ == 10 predt_3 = cls.predict(X_) np.testing.assert_allclose(predt_0.compute(), predt_3) def test_dask_unsupported_features(client: "Client") -> None: X, y, _ = generate_array() # gblinear doesn't support distributed training. with pytest.raises(NotImplementedError, match="gblinear"): xgb.dask.train( client, {"booster": "gblinear"}, xgb.dask.DaskDMatrix(client, X, y) ) def test_parallel_submits(client: "Client") -> None: """Test for running multiple train simultaneously from single clients.""" try: from distributed import MultiLock # NOQA except ImportError: pytest.skip("`distributed.MultiLock' is not available") from sklearn.datasets import load_digits futures = [] workers = tm.get_client_workers(client) n_submits = len(workers) for i in range(n_submits): X_, y_ = load_digits(return_X_y=True) X = dd.from_array(X_, chunksize=32) y = dd.from_array(y_, chunksize=32) cls = xgb.dask.DaskXGBClassifier( verbosity=1, n_estimators=i + 1, eval_metric="merror", ) f = client.submit(cls.fit, X, y, pure=False) futures.append(f) classifiers = client.gather(futures) assert len(classifiers) == n_submits for i, cls in enumerate(classifiers): assert cls.get_booster().num_boosted_rounds() == i + 1 def run_tree_stats(client: Client, tree_method: str, device: str) -> 
str: """assert that different workers count dosn't affect summ statistic's on root""" def dask_train(X, y, num_obs, num_features): chunk_size = 100 X = da.from_array(X, chunks=(chunk_size, num_features)) y = da.from_array(y.reshape(num_obs, 1), chunks=(chunk_size, 1)) dtrain = xgb.dask.DaskDMatrix(client, X, y) output = xgb.dask.train( client, { "verbosity": 0, "tree_method": tree_method, "device": device, "objective": "reg:squarederror", "max_depth": 3, }, dtrain, num_boost_round=1, ) dump_model = output["booster"].get_dump(with_stats=True, dump_format="json")[0] return json.loads(dump_model) num_obs = 1000 num_features = 10 X, y = make_regression(num_obs, num_features, random_state=777) model = dask_train(X, y, num_obs, num_features) # asserts children have correct cover. stack = [model] while stack: node: dict = stack.pop() if "leaf" in node.keys(): continue cover = 0 for c in node["children"]: cover += c["cover"] stack.append(c) assert cover == node["cover"] return model["cover"] @pytest.mark.parametrize("tree_method", ["hist", "approx"]) def test_tree_stats(tree_method: str) -> None: with LocalCluster(n_workers=1, dashboard_address=":0") as cluster: with Client(cluster) as client: local = run_tree_stats(client, tree_method, "cpu") with LocalCluster(n_workers=2, dashboard_address=":0") as cluster: with Client(cluster) as client: distributed = run_tree_stats(client, tree_method, "cpu") assert local == distributed def test_parallel_submit_multi_clients() -> None: """Test for running multiple train simultaneously from multiple clients.""" try: from distributed import MultiLock # NOQA except ImportError: pytest.skip("`distributed.MultiLock' is not available") from sklearn.datasets import load_digits with LocalCluster(n_workers=4, dashboard_address=":0") as cluster: with Client(cluster) as client: workers = tm.get_client_workers(client) n_submits = len(workers) assert n_submits == 4 futures = [] for i in range(n_submits): client = Client(cluster) X_, y_ = 
load_digits(return_X_y=True) X_ += 1.0 X = dd.from_array(X_, chunksize=32) y = dd.from_array(y_, chunksize=32) cls = xgb.dask.DaskXGBClassifier( verbosity=1, n_estimators=i + 1, eval_metric="merror", ) f = client.submit(cls.fit, X, y, pure=False) futures.append((client, f)) t_futures = [] with ThreadPoolExecutor(max_workers=16) as e: for i in range(n_submits): def _() -> xgb.dask.DaskXGBClassifier: return futures[i][0].compute(futures[i][1]).result() f = e.submit(_) t_futures.append(f) for i, f in enumerate(t_futures): assert f.result().get_booster().num_boosted_rounds() == i + 1 def test_init_estimation(client: Client) -> None: check_init_estimation("hist", client) @pytest.mark.parametrize("tree_method", ["hist", "approx"]) def test_uneven_nan(tree_method) -> None: n_workers = 2 with LocalCluster(n_workers=n_workers) as cluster: with Client(cluster) as client: check_uneven_nan(client, tree_method, n_workers) class TestDaskCallbacks: @pytest.mark.skipif(**tm.no_sklearn()) def test_early_stopping(self, client: "Client") -> None: from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) X, y = da.from_array(X), da.from_array(y) m = xgb.dask.DaskDMatrix(client, X, y) valid = xgb.dask.DaskDMatrix(client, X, y) early_stopping_rounds = 5 booster = xgb.dask.train( client, { "objective": "binary:logistic", "eval_metric": "error", "tree_method": "hist", }, m, evals=[(valid, "Valid")], num_boost_round=1000, early_stopping_rounds=early_stopping_rounds, )["booster"] assert hasattr(booster, "best_score") dump = booster.get_dump(dump_format="json") assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 valid_X, valid_y = load_breast_cancer(return_X_y=True) valid_X, valid_y = da.from_array(valid_X), da.from_array(valid_y) cls = xgb.dask.DaskXGBClassifier( objective="binary:logistic", tree_method="hist", n_estimators=1000 ) cls.client = client cls.fit( X, y, early_stopping_rounds=early_stopping_rounds, eval_set=[(valid_X, valid_y)], 
) booster = cls.get_booster() dump = booster.get_dump(dump_format="json") assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 # Specify the metric cls = xgb.dask.DaskXGBClassifier( objective="binary:logistic", tree_method="hist", n_estimators=1000 ) cls.client = client cls.fit( X, y, early_stopping_rounds=early_stopping_rounds, eval_set=[(valid_X, valid_y)], eval_metric="error", ) assert tm.non_increasing(cls.evals_result()["validation_0"]["error"]) booster = cls.get_booster() dump = booster.get_dump(dump_format="json") assert len(cls.evals_result()["validation_0"]["error"]) < 20 assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 @pytest.mark.skipif(**tm.no_sklearn()) def test_early_stopping_custom_eval(self, client: "Client") -> None: from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) X, y = da.from_array(X), da.from_array(y) m = xgb.dask.DaskDMatrix(client, X, y) valid = xgb.dask.DaskDMatrix(client, X, y) early_stopping_rounds = 5 booster = xgb.dask.train( client, { "objective": "binary:logistic", "eval_metric": "error", "tree_method": "hist", }, m, evals=[(m, "Train"), (valid, "Valid")], feval=tm.eval_error_metric, num_boost_round=1000, early_stopping_rounds=early_stopping_rounds, )["booster"] assert hasattr(booster, "best_score") dump = booster.get_dump(dump_format="json") assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 valid_X, valid_y = load_breast_cancer(return_X_y=True) valid_X, valid_y = da.from_array(valid_X), da.from_array(valid_y) cls = xgb.dask.DaskXGBClassifier( objective="binary:logistic", tree_method="hist", n_estimators=1000, eval_metric=tm.eval_error_metric_skl, ) cls.client = client cls.fit( X, y, early_stopping_rounds=early_stopping_rounds, eval_set=[(valid_X, valid_y)], ) booster = cls.get_booster() dump = booster.get_dump(dump_format="json") assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 
@pytest.mark.skipif(**tm.no_sklearn()) def test_callback(self, client: "Client") -> None: from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) X, y = da.from_array(X), da.from_array(y) cls = xgb.dask.DaskXGBClassifier( objective="binary:logistic", tree_method="hist", n_estimators=10 ) cls.client = client with tempfile.TemporaryDirectory() as tmpdir: cls.fit( X, y, callbacks=[ xgb.callback.TrainingCheckPoint( directory=Path(tmpdir), iterations=1, name="model" ) ], ) for i in range(1, 10): assert os.path.exists(os.path.join(tmpdir, "model_" + str(i) + ".json"))
78,178
34.089318
96
py
xgboost
xgboost-master/tests/test_distributed/test_with_dask/__init__.py
1
0
0
py
xgboost
xgboost-master/tests/test_distributed/test_with_dask/test_demos.py
import os import subprocess import pytest from xgboost import testing as tm @pytest.mark.skipif(**tm.no_dask()) def test_dask_cpu_training_demo(): script = os.path.join(tm.demo_dir(__file__), "dask", "cpu_training.py") cmd = ["python", script] subprocess.check_call(cmd) @pytest.mark.skipif(**tm.no_dask()) def test_dask_cpu_survival_demo(): script = os.path.join(tm.demo_dir(__file__), "dask", "cpu_survival.py") cmd = ["python", script] subprocess.check_call(cmd) # Not actually run on CI due to missing dask_ml. @pytest.mark.skipif(**tm.no_dask()) @pytest.mark.skipif(**tm.no_dask_ml()) def test_dask_callbacks_demo(): script = os.path.join(tm.demo_dir(__file__), "dask", "dask_callbacks.py") cmd = ["python", script] subprocess.check_call(cmd) @pytest.mark.skipif(**tm.no_dask()) def test_dask_sklearn_demo(): script = os.path.join(tm.demo_dir(__file__), "dask", "sklearn_cpu_training.py") cmd = ["python", script] subprocess.check_call(cmd)
1,004
26.162162
83
py
xgboost
xgboost-master/tests/test_distributed/test_gpu_with_spark/test_data.py
import pytest from xgboost import testing as tm pytestmark = pytest.mark.skipif(**tm.no_spark()) from ..test_with_spark.test_data import run_dmatrix_ctor @pytest.mark.skipif(**tm.no_cudf()) @pytest.mark.parametrize( "is_feature_cols,is_qdm", [(True, True), (True, False), (False, True), (False, False)], ) def test_dmatrix_ctor(is_feature_cols: bool, is_qdm: bool) -> None: run_dmatrix_ctor(is_feature_cols, is_qdm, on_gpu=True)
446
25.294118
67
py
xgboost
xgboost-master/tests/test_distributed/test_gpu_with_spark/conftest.py
from typing import Sequence import pytest def pytest_collection_modifyitems(config: pytest.Config, items: Sequence) -> None: # mark dask tests as `mgpu`. mgpu_mark = pytest.mark.mgpu for item in items: item.add_marker(mgpu_mark)
252
22
82
py
xgboost
xgboost-master/tests/test_distributed/test_gpu_with_spark/__init__.py
0
0
0
py
xgboost
xgboost-master/tests/test_distributed/test_gpu_with_spark/test_gpu_spark.py
import json import logging import subprocess import pytest import sklearn from xgboost import testing as tm pytestmark = pytest.mark.skipif(**tm.no_spark()) from pyspark.ml.linalg import Vectors from pyspark.ml.tuning import CrossValidator, ParamGridBuilder from pyspark.sql import SparkSession from xgboost.spark import SparkXGBClassifier, SparkXGBRegressor gpu_discovery_script_path = "tests/test_distributed/test_gpu_with_spark/discover_gpu.sh" def get_devices(): """This works only if driver is the same machine of worker.""" completed = subprocess.run(gpu_discovery_script_path, stdout=subprocess.PIPE) assert completed.returncode == 0, "Failed to execute discovery script." msg = completed.stdout.decode("utf-8") result = json.loads(msg) addresses = result["addresses"] return addresses executor_gpu_amount = len(get_devices()) executor_cores = executor_gpu_amount num_workers = executor_gpu_amount @pytest.fixture(scope="module", autouse=True) def spark_session_with_gpu(): spark_config = { "spark.master": f"local-cluster[1, {executor_gpu_amount}, 1024]", "spark.python.worker.reuse": "false", "spark.driver.host": "127.0.0.1", "spark.task.maxFailures": "1", "spark.sql.execution.pyspark.udf.simplifiedTraceback.enabled": "false", "spark.sql.pyspark.jvmStacktrace.enabled": "true", "spark.cores.max": executor_cores, "spark.task.cpus": "1", "spark.executor.cores": executor_cores, "spark.worker.resource.gpu.amount": executor_gpu_amount, "spark.task.resource.gpu.amount": "1", "spark.executor.resource.gpu.amount": executor_gpu_amount, "spark.worker.resource.gpu.discoveryScript": gpu_discovery_script_path, } builder = SparkSession.builder.appName("xgboost spark python API Tests with GPU") for k, v in spark_config.items(): builder.config(k, v) spark = builder.getOrCreate() logging.getLogger("pyspark").setLevel(logging.INFO) # We run a dummy job so that we block until the workers have connected to the master spark.sparkContext.parallelize( range(num_workers), num_workers 
).barrier().mapPartitions(lambda _: []).collect() yield spark spark.stop() @pytest.fixture def spark_iris_dataset(spark_session_with_gpu): spark = spark_session_with_gpu data = sklearn.datasets.load_iris() train_rows = [ (Vectors.dense(features), float(label)) for features, label in zip(data.data[0::2], data.target[0::2]) ] train_df = spark.createDataFrame( spark.sparkContext.parallelize(train_rows, num_workers), ["features", "label"] ) test_rows = [ (Vectors.dense(features), float(label)) for features, label in zip(data.data[1::2], data.target[1::2]) ] test_df = spark.createDataFrame( spark.sparkContext.parallelize(test_rows, num_workers), ["features", "label"] ) return train_df, test_df @pytest.fixture def spark_iris_dataset_feature_cols(spark_session_with_gpu): spark = spark_session_with_gpu data = sklearn.datasets.load_iris() train_rows = [ (*features.tolist(), float(label)) for features, label in zip(data.data[0::2], data.target[0::2]) ] train_df = spark.createDataFrame( spark.sparkContext.parallelize(train_rows, num_workers), [*data.feature_names, "label"], ) test_rows = [ (*features.tolist(), float(label)) for features, label in zip(data.data[1::2], data.target[1::2]) ] test_df = spark.createDataFrame( spark.sparkContext.parallelize(test_rows, num_workers), [*data.feature_names, "label"], ) return train_df, test_df, data.feature_names @pytest.fixture def spark_diabetes_dataset(spark_session_with_gpu): spark = spark_session_with_gpu data = sklearn.datasets.load_diabetes() train_rows = [ (Vectors.dense(features), float(label)) for features, label in zip(data.data[0::2], data.target[0::2]) ] train_df = spark.createDataFrame( spark.sparkContext.parallelize(train_rows, num_workers), ["features", "label"] ) test_rows = [ (Vectors.dense(features), float(label)) for features, label in zip(data.data[1::2], data.target[1::2]) ] test_df = spark.createDataFrame( spark.sparkContext.parallelize(test_rows, num_workers), ["features", "label"] ) return train_df, test_df 
@pytest.fixture def spark_diabetes_dataset_feature_cols(spark_session_with_gpu): spark = spark_session_with_gpu data = sklearn.datasets.load_diabetes() train_rows = [ (*features.tolist(), float(label)) for features, label in zip(data.data[0::2], data.target[0::2]) ] train_df = spark.createDataFrame( spark.sparkContext.parallelize(train_rows, num_workers), [*data.feature_names, "label"], ) test_rows = [ (*features.tolist(), float(label)) for features, label in zip(data.data[1::2], data.target[1::2]) ] test_df = spark.createDataFrame( spark.sparkContext.parallelize(test_rows, num_workers), [*data.feature_names, "label"], ) return train_df, test_df, data.feature_names def test_sparkxgb_classifier_with_gpu(spark_iris_dataset): from pyspark.ml.evaluation import MulticlassClassificationEvaluator classifier = SparkXGBClassifier(device="cuda", num_workers=num_workers) train_df, test_df = spark_iris_dataset model = classifier.fit(train_df) pred_result_df = model.transform(test_df) evaluator = MulticlassClassificationEvaluator(metricName="f1") f1 = evaluator.evaluate(pred_result_df) assert f1 >= 0.97 def test_sparkxgb_classifier_feature_cols_with_gpu(spark_iris_dataset_feature_cols): from pyspark.ml.evaluation import MulticlassClassificationEvaluator train_df, test_df, feature_names = spark_iris_dataset_feature_cols classifier = SparkXGBClassifier( features_col=feature_names, device="cuda", num_workers=num_workers ) model = classifier.fit(train_df) pred_result_df = model.transform(test_df) evaluator = MulticlassClassificationEvaluator(metricName="f1") f1 = evaluator.evaluate(pred_result_df) assert f1 >= 0.97 def test_cv_sparkxgb_classifier_feature_cols_with_gpu(spark_iris_dataset_feature_cols): from pyspark.ml.evaluation import MulticlassClassificationEvaluator train_df, test_df, feature_names = spark_iris_dataset_feature_cols classifier = SparkXGBClassifier( features_col=feature_names, device="cuda", num_workers=num_workers ) grid = 
ParamGridBuilder().addGrid(classifier.max_depth, [6, 8]).build() evaluator = MulticlassClassificationEvaluator(metricName="f1") cv = CrossValidator( estimator=classifier, evaluator=evaluator, estimatorParamMaps=grid, numFolds=3 ) cvModel = cv.fit(train_df) pred_result_df = cvModel.transform(test_df) f1 = evaluator.evaluate(pred_result_df) assert f1 >= 0.97 clf = SparkXGBClassifier( features_col=feature_names, use_gpu=True, num_workers=num_workers ) grid = ParamGridBuilder().addGrid(clf.max_depth, [6, 8]).build() evaluator = MulticlassClassificationEvaluator(metricName="f1") cv = CrossValidator( estimator=clf, evaluator=evaluator, estimatorParamMaps=grid, numFolds=3 ) cvModel = cv.fit(train_df) pred_result_df = cvModel.transform(test_df) f1 = evaluator.evaluate(pred_result_df) assert f1 >= 0.97 def test_sparkxgb_regressor_with_gpu(spark_diabetes_dataset): from pyspark.ml.evaluation import RegressionEvaluator regressor = SparkXGBRegressor(device="cuda", num_workers=num_workers) train_df, test_df = spark_diabetes_dataset model = regressor.fit(train_df) pred_result_df = model.transform(test_df) evaluator = RegressionEvaluator(metricName="rmse") rmse = evaluator.evaluate(pred_result_df) assert rmse <= 65.0 def test_sparkxgb_regressor_feature_cols_with_gpu(spark_diabetes_dataset_feature_cols): from pyspark.ml.evaluation import RegressionEvaluator train_df, test_df, feature_names = spark_diabetes_dataset_feature_cols regressor = SparkXGBRegressor( features_col=feature_names, device="cuda", num_workers=num_workers ) model = regressor.fit(train_df) pred_result_df = model.transform(test_df) evaluator = RegressionEvaluator(metricName="rmse") rmse = evaluator.evaluate(pred_result_df) assert rmse <= 65.0
8,564
34.83682
88
py
xgboost
xgboost-master/tests/test_distributed/test_with_spark/test_data.py
from typing import List import numpy as np import pandas as pd import pytest from xgboost import testing as tm pytestmark = [pytest.mark.skipif(**tm.no_spark())] from xgboost import DMatrix, QuantileDMatrix from xgboost.spark.data import ( _read_csr_matrix_from_unwrapped_spark_vec, alias, create_dmatrix_from_partitions, stack_series, ) def test_stack() -> None: a = pd.DataFrame({"a": [[1, 2], [3, 4]]}) b = stack_series(a["a"]) assert b.shape == (2, 2) a = pd.DataFrame({"a": [[1], [3]]}) b = stack_series(a["a"]) assert b.shape == (2, 1) a = pd.DataFrame({"a": [np.array([1, 2]), np.array([3, 4])]}) b = stack_series(a["a"]) assert b.shape == (2, 2) a = pd.DataFrame({"a": [np.array([1]), np.array([3])]}) b = stack_series(a["a"]) assert b.shape == (2, 1) def run_dmatrix_ctor(is_feature_cols: bool, is_qdm: bool, on_gpu: bool) -> None: rng = np.random.default_rng(0) dfs: List[pd.DataFrame] = [] n_features = 16 n_samples_per_batch = 16 n_batches = 10 feature_types = ["float"] * n_features for i in range(n_batches): X = rng.normal(loc=0, size=256).reshape(n_samples_per_batch, n_features) y = rng.normal(loc=0, size=n_samples_per_batch) m = rng.normal(loc=0, size=n_samples_per_batch) w = rng.normal(loc=0.5, scale=0.5, size=n_samples_per_batch) w -= w.min() valid = rng.binomial(n=1, p=0.5, size=16).astype(np.bool_) df = pd.DataFrame( {alias.label: y, alias.margin: m, alias.weight: w, alias.valid: valid} ) if is_feature_cols: for j in range(X.shape[1]): df[f"feat-{j}"] = pd.Series(X[:, j]) else: df[alias.data] = pd.Series(list(X)) dfs.append(df) kwargs = {"feature_types": feature_types} device_id = 0 if on_gpu else None cols = [f"feat-{i}" for i in range(n_features)] feature_cols = cols if is_feature_cols else None train_Xy, valid_Xy = create_dmatrix_from_partitions( iter(dfs), feature_cols, dev_ordinal=device_id, use_qdm=is_qdm, kwargs=kwargs, enable_sparse_data_optim=False, has_validation_col=True, ) if is_qdm: assert isinstance(train_Xy, QuantileDMatrix) assert 
isinstance(valid_Xy, QuantileDMatrix) else: assert not isinstance(train_Xy, QuantileDMatrix) assert isinstance(train_Xy, DMatrix) assert not isinstance(valid_Xy, QuantileDMatrix) assert isinstance(valid_Xy, DMatrix) assert valid_Xy is not None assert valid_Xy.num_row() + train_Xy.num_row() == n_samples_per_batch * n_batches assert train_Xy.num_col() == n_features assert valid_Xy.num_col() == n_features df = pd.concat(dfs, axis=0) df_train = df.loc[~df[alias.valid], :] df_valid = df.loc[df[alias.valid], :] assert df_train.shape[0] == train_Xy.num_row() assert df_valid.shape[0] == valid_Xy.num_row() # margin np.testing.assert_allclose( df_train[alias.margin].to_numpy(), train_Xy.get_base_margin() ) np.testing.assert_allclose( df_valid[alias.margin].to_numpy(), valid_Xy.get_base_margin() ) # weight np.testing.assert_allclose(df_train[alias.weight].to_numpy(), train_Xy.get_weight()) np.testing.assert_allclose(df_valid[alias.weight].to_numpy(), valid_Xy.get_weight()) # label np.testing.assert_allclose(df_train[alias.label].to_numpy(), train_Xy.get_label()) np.testing.assert_allclose(df_valid[alias.label].to_numpy(), valid_Xy.get_label()) np.testing.assert_equal(train_Xy.feature_types, feature_types) np.testing.assert_equal(valid_Xy.feature_types, feature_types) @pytest.mark.parametrize( "is_feature_cols,is_qdm", [(True, True), (True, False), (False, True), (False, False)], ) def test_dmatrix_ctor(is_feature_cols: bool, is_qdm: bool) -> None: run_dmatrix_ctor(is_feature_cols, is_qdm, on_gpu=False) def test_read_csr_matrix_from_unwrapped_spark_vec() -> None: from scipy.sparse import csr_matrix pd1 = pd.DataFrame( { "featureVectorType": [0, 1, 1, 0], "featureVectorSize": [3, None, None, 3], "featureVectorIndices": [ np.array([0, 2], dtype=np.int32), None, None, np.array([1, 2], dtype=np.int32), ], "featureVectorValues": [ np.array([3.0, 0.0], dtype=np.float64), np.array([13.0, 14.0, 0.0], dtype=np.float64), np.array([0.0, 24.0, 25.0], dtype=np.float64), np.array([0.0, 
35.0], dtype=np.float64), ], } ) sm = _read_csr_matrix_from_unwrapped_spark_vec(pd1) assert isinstance(sm, csr_matrix) np.testing.assert_array_equal( sm.data, [3.0, 0.0, 13.0, 14.0, 0.0, 0.0, 24.0, 25.0, 0.0, 35.0] ) np.testing.assert_array_equal(sm.indptr, [0, 2, 5, 8, 10]) np.testing.assert_array_equal(sm.indices, [0, 2, 0, 1, 2, 0, 1, 2, 1, 2]) assert sm.shape == (4, 3)
5,132
31.903846
88
py
xgboost
xgboost-master/tests/test_distributed/test_with_spark/utils.py
import contextlib import logging import shutil import sys import tempfile import unittest from io import StringIO import pytest from xgboost import testing as tm pytestmark = [pytest.mark.skipif(**tm.no_spark())] from pyspark.sql import SparkSession from xgboost.spark.utils import _get_default_params_from_func class UtilsTest(unittest.TestCase): def test_get_default_params(self): class Foo: def func1(self, x, y, key1=None, key2="val2", key3=0, key4=None): pass unsupported_params = {"key2", "key4"} expected_default_params = { "key1": None, "key3": 0, } actual_default_params = _get_default_params_from_func( Foo.func1, unsupported_params ) self.assertEqual( len(expected_default_params.keys()), len(actual_default_params.keys()) ) for k, v in actual_default_params.items(): self.assertEqual(expected_default_params[k], v) @contextlib.contextmanager def patch_stdout(): """patch stdout and give an output""" sys_stdout = sys.stdout io_out = StringIO() sys.stdout = io_out try: yield io_out finally: sys.stdout = sys_stdout @contextlib.contextmanager def patch_logger(name): """patch logger and give an output""" io_out = StringIO() log = logging.getLogger(name) handler = logging.StreamHandler(io_out) log.addHandler(handler) try: yield io_out finally: log.removeHandler(handler) class TestTempDir(object): @classmethod def make_tempdir(cls): """ :param dir: Root directory in which to create the temp directory """ cls.tempdir = tempfile.mkdtemp(prefix="sparkdl_tests") @classmethod def remove_tempdir(cls): shutil.rmtree(cls.tempdir) class TestSparkContext(object): @classmethod def setup_env(cls, spark_config): builder = SparkSession.builder.appName("xgboost spark python API Tests") for k, v in spark_config.items(): builder.config(k, v) spark = builder.getOrCreate() logging.getLogger("pyspark").setLevel(logging.INFO) cls.sc = spark.sparkContext cls.session = spark @classmethod def tear_down_env(cls): cls.session.stop() cls.session = None cls.sc.stop() cls.sc = None class 
SparkTestCase(TestSparkContext, TestTempDir, unittest.TestCase): @classmethod def setUpClass(cls): cls.setup_env( { "spark.master": "local[4]", "spark.python.worker.reuse": "false", "spark.driver.host": "127.0.0.1", "spark.task.maxFailures": "1", "spark.sql.execution.pyspark.udf.simplifiedTraceback.enabled": "false", "spark.sql.pyspark.jvmStacktrace.enabled": "true", } ) cls.make_tempdir() @classmethod def tearDownClass(cls): cls.remove_tempdir() cls.tear_down_env() class SparkLocalClusterTestCase(TestSparkContext, TestTempDir, unittest.TestCase): @classmethod def setUpClass(cls): cls.setup_env( { "spark.master": "local-cluster[2, 2, 1024]", "spark.python.worker.reuse": "false", "spark.driver.host": "127.0.0.1", "spark.task.maxFailures": "1", "spark.sql.execution.pyspark.udf.simplifiedTraceback.enabled": "false", "spark.sql.pyspark.jvmStacktrace.enabled": "true", "spark.cores.max": "4", "spark.task.cpus": "1", "spark.executor.cores": "2", } ) cls.make_tempdir() # We run a dummy job so that we block until the workers have connected to the master cls.sc.parallelize(range(4), 4).barrier().mapPartitions(lambda _: []).collect() @classmethod def tearDownClass(cls): cls.remove_tempdir() cls.tear_down_env()
4,058
27.1875
92
py
xgboost
xgboost-master/tests/test_distributed/test_with_spark/test_spark_local.py
import glob import logging import random import tempfile import uuid from collections import namedtuple from typing import Generator, Sequence, Type import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm from xgboost.spark.data import pred_contribs pytestmark = [tm.timeout(60), pytest.mark.skipif(**tm.no_spark())] from pyspark.ml import Pipeline, PipelineModel from pyspark.ml.evaluation import BinaryClassificationEvaluator from pyspark.ml.feature import VectorAssembler from pyspark.ml.functions import vector_to_array from pyspark.ml.linalg import Vectors from pyspark.ml.tuning import CrossValidator, ParamGridBuilder from pyspark.sql import SparkSession from pyspark.sql import functions as spark_sql_func from xgboost import XGBClassifier, XGBModel, XGBRegressor from xgboost.spark import ( SparkXGBClassifier, SparkXGBClassifierModel, SparkXGBRanker, SparkXGBRegressor, SparkXGBRegressorModel, ) from xgboost.spark.core import _non_booster_params from .utils import SparkTestCase logging.getLogger("py4j").setLevel(logging.INFO) def no_sparse_unwrap() -> tm.PytestSkip: try: from pyspark.sql.functions import unwrap_udt except ImportError: return {"reason": "PySpark<3.4", "condition": True} return {"reason": "PySpark<3.4", "condition": False} @pytest.fixture def spark() -> Generator[SparkSession, None, None]: config = { "spark.master": "local[4]", "spark.python.worker.reuse": "false", "spark.driver.host": "127.0.0.1", "spark.task.maxFailures": "1", "spark.sql.execution.pyspark.udf.simplifiedTraceback.enabled": "false", "spark.sql.pyspark.jvmStacktrace.enabled": "true", } builder = SparkSession.builder.appName("XGBoost PySpark Python API Tests") for k, v in config.items(): builder.config(k, v) logging.getLogger("pyspark").setLevel(logging.INFO) sess = builder.getOrCreate() yield sess sess.stop() sess.sparkContext.stop() RegWithWeight = namedtuple( "RegWithWeight", ( "reg_params_with_eval", "reg_df_train_with_eval_weight", 
"reg_df_test_with_eval_weight", "reg_with_eval_best_score", "reg_with_eval_and_weight_best_score", ), ) @pytest.fixture def reg_with_weight( spark: SparkSession, ) -> Generator[RegWithWeight, SparkSession, None]: reg_params_with_eval = { "validation_indicator_col": "isVal", "early_stopping_rounds": 1, "eval_metric": "rmse", } X = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.5], [4.0, 5.0, 6.0], [0.0, 6.0, 7.5]]) w = np.array([1.0, 2.0, 1.0, 2.0]) y = np.array([0, 1, 2, 3]) reg1 = XGBRegressor() reg1.fit(X, y, sample_weight=w) predt1 = reg1.predict(X) X_train = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.5]]) X_val = np.array([[4.0, 5.0, 6.0], [0.0, 6.0, 7.5]]) y_train = np.array([0, 1]) y_val = np.array([2, 3]) w_train = np.array([1.0, 2.0]) w_val = np.array([1.0, 2.0]) reg2 = XGBRegressor(early_stopping_rounds=1, eval_metric="rmse") reg2.fit( X_train, y_train, eval_set=[(X_val, y_val)], ) predt2 = reg2.predict(X) best_score2 = reg2.best_score reg3 = XGBRegressor(early_stopping_rounds=1, eval_metric="rmse") reg3.fit( X_train, y_train, sample_weight=w_train, eval_set=[(X_val, y_val)], sample_weight_eval_set=[w_val], ) predt3 = reg3.predict(X) best_score3 = reg3.best_score reg_df_train_with_eval_weight = spark.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0, False, 1.0), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1, False, 2.0), (Vectors.dense(4.0, 5.0, 6.0), 2, True, 1.0), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 3, True, 2.0), ], ["features", "label", "isVal", "weight"], ) reg_df_test_with_eval_weight = spark.createDataFrame( [ ( Vectors.dense(1.0, 2.0, 3.0), float(predt1[0]), float(predt2[0]), float(predt3[0]), ), ( Vectors.sparse(3, {1: 1.0, 2: 5.5}), float(predt1[1]), float(predt2[1]), float(predt3[1]), ), ], [ "features", "expected_prediction_with_weight", "expected_prediction_with_eval", "expected_prediction_with_weight_and_eval", ], ) yield RegWithWeight( reg_params_with_eval, reg_df_train_with_eval_weight, reg_df_test_with_eval_weight, best_score2, best_score3, ) RegData 
= namedtuple("RegData", ("reg_df_train", "reg_df_test", "reg_params")) @pytest.fixture def reg_data(spark: SparkSession) -> Generator[RegData, None, None]: X = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.5]]) y = np.array([0, 1]) reg1 = xgb.XGBRegressor() reg1.fit(X, y) predt0 = reg1.predict(X) pred_contrib0: np.ndarray = pred_contribs(reg1, X, None, False) reg_params = { "max_depth": 5, "n_estimators": 10, "iteration_range": [0, 5], "max_bin": 9, } # convert np array to pyspark dataframe reg_df_train_data = [ (Vectors.dense(X[0, :]), int(y[0])), (Vectors.sparse(3, {1: float(X[1, 1]), 2: float(X[1, 2])}), int(y[1])), ] reg_df_train = spark.createDataFrame(reg_df_train_data, ["features", "label"]) reg2 = xgb.XGBRegressor(max_depth=5, n_estimators=10) reg2.fit(X, y) predt2 = reg2.predict(X, iteration_range=[0, 5]) # array([0.22185266, 0.77814734], dtype=float32) reg_df_test = spark.createDataFrame( [ ( Vectors.dense(X[0, :]), float(predt0[0]), pred_contrib0[0, :].tolist(), float(predt2[0]), ), ( Vectors.sparse(3, {1: 1.0, 2: 5.5}), float(predt0[1]), pred_contrib0[1, :].tolist(), float(predt2[1]), ), ], [ "features", "expected_prediction", "expected_pred_contribs", "expected_prediction_with_params", ], ) yield RegData(reg_df_train, reg_df_test, reg_params) MultiClfData = namedtuple("MultiClfData", ("multi_clf_df_train", "multi_clf_df_test")) @pytest.fixture def multi_clf_data(spark: SparkSession) -> Generator[MultiClfData, None, None]: X = np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 4.0], [0.0, 1.0, 5.5], [-1.0, -2.0, 1.0]]) y = np.array([0, 0, 1, 2]) cls1 = xgb.XGBClassifier() cls1.fit(X, y) predt0 = cls1.predict(X) proba0: np.ndarray = cls1.predict_proba(X) pred_contrib0: np.ndarray = pred_contribs(cls1, X, None, False) # convert np array to pyspark dataframe multi_cls_df_train_data = [ (Vectors.dense(X[0, :]), int(y[0])), (Vectors.dense(X[1, :]), int(y[1])), (Vectors.sparse(3, {1: float(X[2, 1]), 2: float(X[2, 2])}), int(y[2])), (Vectors.dense(X[3, :]), int(y[3])), ] 
multi_clf_df_train = spark.createDataFrame( multi_cls_df_train_data, ["features", "label"] ) multi_clf_df_test = spark.createDataFrame( [ ( Vectors.dense(X[0, :]), float(predt0[0]), proba0[0, :].tolist(), pred_contrib0[0, :].tolist(), ), ( Vectors.dense(X[1, :]), float(predt0[1]), proba0[1, :].tolist(), pred_contrib0[1, :].tolist(), ), ( Vectors.sparse(3, {1: 1.0, 2: 5.5}), float(predt0[2]), proba0[2, :].tolist(), pred_contrib0[2, :].tolist(), ), ], [ "features", "expected_prediction", "expected_probability", "expected_pred_contribs", ], ) yield MultiClfData(multi_clf_df_train, multi_clf_df_test) ClfWithWeight = namedtuple( "ClfWithWeight", ( "cls_params_with_eval", "cls_df_train_with_eval_weight", "cls_df_test_with_eval_weight", "cls_with_eval_best_score", "cls_with_eval_and_weight_best_score", ), ) @pytest.fixture def clf_with_weight( spark: SparkSession, ) -> Generator[ClfWithWeight, SparkSession, None]: """Test classifier with weight and eval set.""" X = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.5], [4.0, 5.0, 6.0], [0.0, 6.0, 7.5]]) w = np.array([1.0, 2.0, 1.0, 2.0]) y = np.array([0, 1, 0, 1]) cls1 = XGBClassifier() cls1.fit(X, y, sample_weight=w) X_train = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.5]]) X_val = np.array([[4.0, 5.0, 6.0], [0.0, 6.0, 7.5]]) y_train = np.array([0, 1]) y_val = np.array([0, 1]) w_train = np.array([1.0, 2.0]) w_val = np.array([1.0, 2.0]) cls2 = XGBClassifier() cls2.fit( X_train, y_train, eval_set=[(X_val, y_val)], early_stopping_rounds=1, eval_metric="logloss", ) cls3 = XGBClassifier() cls3.fit( X_train, y_train, sample_weight=w_train, eval_set=[(X_val, y_val)], sample_weight_eval_set=[w_val], early_stopping_rounds=1, eval_metric="logloss", ) cls_df_train_with_eval_weight = spark.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0, False, 1.0), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1, False, 2.0), (Vectors.dense(4.0, 5.0, 6.0), 0, True, 1.0), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, True, 2.0), ], ["features", "label", "isVal", 
"weight"], ) cls_params_with_eval = { "validation_indicator_col": "isVal", "early_stopping_rounds": 1, "eval_metric": "logloss", } cls_df_test_with_eval_weight = spark.createDataFrame( [ ( Vectors.dense(1.0, 2.0, 3.0), [float(p) for p in cls1.predict_proba(X)[0, :]], [float(p) for p in cls2.predict_proba(X)[0, :]], [float(p) for p in cls3.predict_proba(X)[0, :]], ), ], [ "features", "expected_prob_with_weight", "expected_prob_with_eval", "expected_prob_with_weight_and_eval", ], ) cls_with_eval_best_score = cls2.best_score cls_with_eval_and_weight_best_score = cls3.best_score yield ClfWithWeight( cls_params_with_eval, cls_df_train_with_eval_weight, cls_df_test_with_eval_weight, cls_with_eval_best_score, cls_with_eval_and_weight_best_score, ) ClfData = namedtuple( "ClfData", ("cls_params", "cls_df_train", "cls_df_train_large", "cls_df_test") ) @pytest.fixture def clf_data(spark: SparkSession) -> Generator[ClfData, None, None]: cls_params = {"max_depth": 5, "n_estimators": 10, "scale_pos_weight": 4} X = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.5]]) y = np.array([0, 1]) cl1 = xgb.XGBClassifier() cl1.fit(X, y) predt0 = cl1.predict(X) proba0: np.ndarray = cl1.predict_proba(X) pred_contrib0: np.ndarray = pred_contribs(cl1, X, None, True) cl2 = xgb.XGBClassifier(**cls_params) cl2.fit(X, y) predt1 = cl2.predict(X) proba1: np.ndarray = cl2.predict_proba(X) pred_contrib1: np.ndarray = pred_contribs(cl2, X, None, True) # convert np array to pyspark dataframe cls_df_train_data = [ (Vectors.dense(X[0, :]), int(y[0])), (Vectors.sparse(3, {1: float(X[1, 1]), 2: float(X[1, 2])}), int(y[1])), ] cls_df_train = spark.createDataFrame(cls_df_train_data, ["features", "label"]) cls_df_train_large = spark.createDataFrame( cls_df_train_data * 100, ["features", "label"] ) cls_df_test = spark.createDataFrame( [ ( Vectors.dense(X[0, :]), int(predt0[0]), proba0[0, :].tolist(), pred_contrib0[0, :].tolist(), int(predt1[0]), proba1[0, :].tolist(), pred_contrib1[0, :].tolist(), ), ( 
Vectors.sparse(3, {1: 1.0, 2: 5.5}), int(predt0[1]), proba0[1, :].tolist(), pred_contrib0[1, :].tolist(), int(predt1[1]), proba1[1, :].tolist(), pred_contrib1[1, :].tolist(), ), ], [ "features", "expected_prediction", "expected_probability", "expected_pred_contribs", "expected_prediction_with_params", "expected_probability_with_params", "expected_pred_contribs_with_params", ], ) yield ClfData(cls_params, cls_df_train, cls_df_train_large, cls_df_test) def assert_model_compatible(model: XGBModel, model_path: str) -> None: bst = xgb.Booster() path = glob.glob(f"{model_path}/**/model/part-00000", recursive=True)[0] bst.load_model(path) np.testing.assert_equal( np.array(model.get_booster().save_raw("json")), np.array(bst.save_raw("json")) ) def check_sub_dict_match( sub_dist: dict, whole_dict: dict, excluding_keys: Sequence[str] ) -> None: for k in sub_dist: if k not in excluding_keys: assert k in whole_dict, f"check on {k} failed" assert sub_dist[k] == whole_dict[k], f"check on {k} failed" def get_params_map(params_kv: dict, estimator: Type) -> dict: return {getattr(estimator, k): v for k, v in params_kv.items()} class TestPySparkLocal: def test_regressor_basic(self, reg_data: RegData) -> None: regressor = SparkXGBRegressor(pred_contrib_col="pred_contribs") model = regressor.fit(reg_data.reg_df_train) assert regressor.uid == model.uid pred_result = model.transform(reg_data.reg_df_test).collect() for row in pred_result: np.testing.assert_equal(row.prediction, row.expected_prediction) np.testing.assert_allclose( row.pred_contribs, row.expected_pred_contribs, atol=1e-3 ) def test_regressor_with_weight_eval(self, reg_with_weight: RegWithWeight) -> None: # with weight regressor_with_weight = SparkXGBRegressor(weight_col="weight") model_with_weight = regressor_with_weight.fit( reg_with_weight.reg_df_train_with_eval_weight ) pred_result_with_weight = model_with_weight.transform( reg_with_weight.reg_df_test_with_eval_weight ).collect() for row in pred_result_with_weight: 
assert np.isclose( row.prediction, row.expected_prediction_with_weight, atol=1e-3 ) # with eval regressor_with_eval = SparkXGBRegressor(**reg_with_weight.reg_params_with_eval) model_with_eval = regressor_with_eval.fit( reg_with_weight.reg_df_train_with_eval_weight ) assert np.isclose( model_with_eval._xgb_sklearn_model.best_score, reg_with_weight.reg_with_eval_best_score, atol=1e-3, ) pred_result_with_eval = model_with_eval.transform( reg_with_weight.reg_df_test_with_eval_weight ).collect() for row in pred_result_with_eval: np.testing.assert_allclose( row.prediction, row.expected_prediction_with_eval, atol=1e-3 ) # with weight and eval regressor_with_weight_eval = SparkXGBRegressor( weight_col="weight", **reg_with_weight.reg_params_with_eval ) model_with_weight_eval = regressor_with_weight_eval.fit( reg_with_weight.reg_df_train_with_eval_weight ) pred_result_with_weight_eval = model_with_weight_eval.transform( reg_with_weight.reg_df_test_with_eval_weight ).collect() np.testing.assert_allclose( model_with_weight_eval._xgb_sklearn_model.best_score, reg_with_weight.reg_with_eval_and_weight_best_score, atol=1e-3, ) for row in pred_result_with_weight_eval: np.testing.assert_allclose( row.prediction, row.expected_prediction_with_weight_and_eval, atol=1e-3, ) def test_multi_classifier_basic(self, multi_clf_data: MultiClfData) -> None: cls = SparkXGBClassifier(pred_contrib_col="pred_contribs") model = cls.fit(multi_clf_data.multi_clf_df_train) pred_result = model.transform(multi_clf_data.multi_clf_df_test).collect() for row in pred_result: np.testing.assert_equal(row.prediction, row.expected_prediction) np.testing.assert_allclose( row.probability, row.expected_probability, rtol=1e-3 ) np.testing.assert_allclose( row.pred_contribs, row.expected_pred_contribs, atol=1e-3 ) def test_classifier_with_weight_eval(self, clf_with_weight: ClfWithWeight) -> None: # with weight classifier_with_weight = SparkXGBClassifier(weight_col="weight") model_with_weight = 
classifier_with_weight.fit( clf_with_weight.cls_df_train_with_eval_weight ) pred_result_with_weight = model_with_weight.transform( clf_with_weight.cls_df_test_with_eval_weight ).collect() for row in pred_result_with_weight: assert np.allclose( row.probability, row.expected_prob_with_weight, atol=1e-3 ) # with eval classifier_with_eval = SparkXGBClassifier( **clf_with_weight.cls_params_with_eval ) model_with_eval = classifier_with_eval.fit( clf_with_weight.cls_df_train_with_eval_weight ) assert np.isclose( model_with_eval._xgb_sklearn_model.best_score, clf_with_weight.cls_with_eval_best_score, atol=1e-3, ) pred_result_with_eval = model_with_eval.transform( clf_with_weight.cls_df_test_with_eval_weight ).collect() for row in pred_result_with_eval: assert np.allclose(row.probability, row.expected_prob_with_eval, atol=1e-3) # with weight and eval classifier_with_weight_eval = SparkXGBClassifier( weight_col="weight", **clf_with_weight.cls_params_with_eval ) model_with_weight_eval = classifier_with_weight_eval.fit( clf_with_weight.cls_df_train_with_eval_weight ) pred_result_with_weight_eval = model_with_weight_eval.transform( clf_with_weight.cls_df_test_with_eval_weight ).collect() np.testing.assert_allclose( model_with_weight_eval._xgb_sklearn_model.best_score, clf_with_weight.cls_with_eval_and_weight_best_score, atol=1e-3, ) for row in pred_result_with_weight_eval: np.testing.assert_allclose( row.probability, row.expected_prob_with_weight_and_eval, atol=1e-3 ) def test_classifier_model_save_load(self, clf_data: ClfData) -> None: with tempfile.TemporaryDirectory() as tmpdir: path = "file:" + tmpdir clf = SparkXGBClassifier(**clf_data.cls_params) model = clf.fit(clf_data.cls_df_train) model.save(path) loaded_model = SparkXGBClassifierModel.load(path) assert model.uid == loaded_model.uid for k, v in clf_data.cls_params.items(): assert loaded_model.getOrDefault(k) == v pred_result = loaded_model.transform(clf_data.cls_df_test).collect() for row in pred_result: 
np.testing.assert_allclose( row.probability, row.expected_probability_with_params, atol=1e-3 ) with pytest.raises(AssertionError, match="Expected class name"): SparkXGBRegressorModel.load(path) assert_model_compatible(model, tmpdir) def test_classifier_basic(self, clf_data: ClfData) -> None: classifier = SparkXGBClassifier( **clf_data.cls_params, pred_contrib_col="pred_contrib" ) model = classifier.fit(clf_data.cls_df_train) pred_result = model.transform(clf_data.cls_df_test).collect() for row in pred_result: np.testing.assert_equal(row.prediction, row.expected_prediction_with_params) np.testing.assert_allclose( row.probability, row.expected_probability_with_params, rtol=1e-3 ) np.testing.assert_equal( row.pred_contrib, row.expected_pred_contribs_with_params ) def test_classifier_with_params(self, clf_data: ClfData) -> None: classifier = SparkXGBClassifier(**clf_data.cls_params) all_params = dict( **(classifier._gen_xgb_params_dict()), **(classifier._gen_fit_params_dict()), **(classifier._gen_predict_params_dict()), ) check_sub_dict_match( clf_data.cls_params, all_params, excluding_keys=_non_booster_params ) model = classifier.fit(clf_data.cls_df_train) all_params = dict( **(model._gen_xgb_params_dict()), **(model._gen_fit_params_dict()), **(model._gen_predict_params_dict()), ) check_sub_dict_match( clf_data.cls_params, all_params, excluding_keys=_non_booster_params ) pred_result = model.transform(clf_data.cls_df_test).collect() for row in pred_result: np.testing.assert_equal(row.prediction, row.expected_prediction_with_params) np.testing.assert_allclose( row.probability, row.expected_probability_with_params, rtol=1e-3 ) def test_classifier_model_pipeline_save_load(self, clf_data: ClfData) -> None: with tempfile.TemporaryDirectory() as tmpdir: path = "file:" + tmpdir classifier = SparkXGBClassifier() pipeline = Pipeline(stages=[classifier]) pipeline = pipeline.copy( extra=get_params_map(clf_data.cls_params, classifier) ) model = pipeline.fit(clf_data.cls_df_train) 
model.save(path) loaded_model = PipelineModel.load(path) for k, v in clf_data.cls_params.items(): assert loaded_model.stages[0].getOrDefault(k) == v pred_result = loaded_model.transform(clf_data.cls_df_test).collect() for row in pred_result: np.testing.assert_allclose( row.probability, row.expected_probability_with_params, atol=1e-3 ) assert_model_compatible(model.stages[0], tmpdir) def test_classifier_with_cross_validator(self, clf_data: ClfData) -> None: xgb_classifer = SparkXGBClassifier(n_estimators=1) paramMaps = ParamGridBuilder().addGrid(xgb_classifer.max_depth, [1, 2]).build() cvBin = CrossValidator( estimator=xgb_classifer, estimatorParamMaps=paramMaps, evaluator=BinaryClassificationEvaluator(), seed=1, parallelism=4, numFolds=2, ) cvBinModel = cvBin.fit(clf_data.cls_df_train_large) cvBinModel.transform(clf_data.cls_df_test) def test_convert_to_sklearn_model_clf(self, clf_data: ClfData) -> None: classifier = SparkXGBClassifier( n_estimators=200, missing=2.0, max_depth=3, sketch_eps=0.5 ) clf_model = classifier.fit(clf_data.cls_df_train) # Check that regardless of what booster, _convert_to_model converts to the # correct class type sklearn_classifier = classifier._convert_to_sklearn_model( clf_model.get_booster().save_raw("json"), clf_model.get_booster().save_config(), ) assert isinstance(sklearn_classifier, XGBClassifier) assert sklearn_classifier.n_estimators == 200 assert sklearn_classifier.missing == 2.0 assert sklearn_classifier.max_depth == 3 assert sklearn_classifier.get_params()["sketch_eps"] == 0.5 def test_classifier_array_col_as_feature(self, clf_data: ClfData) -> None: train_dataset = clf_data.cls_df_train.withColumn( "features", vector_to_array(spark_sql_func.col("features")) ) test_dataset = clf_data.cls_df_test.withColumn( "features", vector_to_array(spark_sql_func.col("features")) ) classifier = SparkXGBClassifier() model = classifier.fit(train_dataset) pred_result = model.transform(test_dataset).collect() for row in pred_result: 
np.testing.assert_equal(row.prediction, row.expected_prediction) np.testing.assert_allclose( row.probability, row.expected_probability, rtol=1e-3 ) def test_classifier_with_feature_names_types_weights( self, clf_data: ClfData ) -> None: classifier = SparkXGBClassifier( feature_names=["a1", "a2", "a3"], feature_types=["i", "int", "float"], feature_weights=[2.0, 5.0, 3.0], ) model = classifier.fit(clf_data.cls_df_train) model.transform(clf_data.cls_df_test).collect() def test_early_stop_param_validation(self, clf_data: ClfData) -> None: classifier = SparkXGBClassifier(early_stopping_rounds=1) with pytest.raises(ValueError, match="early_stopping_rounds"): classifier.fit(clf_data.cls_df_train) def test_classifier_with_list_eval_metric(self, clf_data: ClfData) -> None: classifier = SparkXGBClassifier(eval_metric=["auc", "rmse"]) model = classifier.fit(clf_data.cls_df_train) model.transform(clf_data.cls_df_test).collect() def test_classifier_with_string_eval_metric(self, clf_data: ClfData) -> None: classifier = SparkXGBClassifier(eval_metric="auc") model = classifier.fit(clf_data.cls_df_train) model.transform(clf_data.cls_df_test).collect() def test_regressor_params_basic(self) -> None: py_reg = SparkXGBRegressor() assert hasattr(py_reg, "n_estimators") assert py_reg.n_estimators.parent == py_reg.uid assert not hasattr(py_reg, "gpu_id") assert hasattr(py_reg, "device") assert py_reg.getOrDefault(py_reg.n_estimators) == 100 assert py_reg.getOrDefault(getattr(py_reg, "objective")), "reg:squarederror" py_reg2 = SparkXGBRegressor(n_estimators=200) assert py_reg2.getOrDefault(getattr(py_reg2, "n_estimators")), 200 py_reg3 = py_reg2.copy({getattr(py_reg2, "max_depth"): 10}) assert py_reg3.getOrDefault(getattr(py_reg3, "n_estimators")), 200 assert py_reg3.getOrDefault(getattr(py_reg3, "max_depth")), 10 def test_classifier_params_basic(self) -> None: py_clf = SparkXGBClassifier() assert hasattr(py_clf, "n_estimators") assert py_clf.n_estimators.parent == py_clf.uid assert not 
hasattr(py_clf, "gpu_id") assert hasattr(py_clf, "device") assert py_clf.getOrDefault(py_clf.n_estimators) == 100 assert py_clf.getOrDefault(getattr(py_clf, "objective")) is None py_clf2 = SparkXGBClassifier(n_estimators=200) assert py_clf2.getOrDefault(getattr(py_clf2, "n_estimators")) == 200 py_clf3 = py_clf2.copy({getattr(py_clf2, "max_depth"): 10}) assert py_clf3.getOrDefault(getattr(py_clf3, "n_estimators")) == 200 assert py_clf3.getOrDefault(getattr(py_clf3, "max_depth")), 10 def test_classifier_kwargs_basic(self, clf_data: ClfData) -> None: py_clf = SparkXGBClassifier(**clf_data.cls_params) assert hasattr(py_clf, "n_estimators") assert py_clf.n_estimators.parent == py_clf.uid assert not hasattr(py_clf, "gpu_id") assert hasattr(py_clf, "device") assert hasattr(py_clf, "arbitrary_params_dict") assert py_clf.getOrDefault(py_clf.arbitrary_params_dict) == {} # Testing overwritten params py_clf = SparkXGBClassifier() py_clf.setParams(x=1, y=2) py_clf.setParams(y=3, z=4) xgb_params = py_clf._gen_xgb_params_dict() assert xgb_params["x"] == 1 assert xgb_params["y"] == 3 assert xgb_params["z"] == 4 def test_regressor_model_save_load(self, reg_data: RegData) -> None: with tempfile.TemporaryDirectory() as tmpdir: path = "file:" + tmpdir regressor = SparkXGBRegressor(**reg_data.reg_params) model = regressor.fit(reg_data.reg_df_train) model.save(path) loaded_model = SparkXGBRegressorModel.load(path) assert model.uid == loaded_model.uid for k, v in reg_data.reg_params.items(): assert loaded_model.getOrDefault(k) == v pred_result = loaded_model.transform(reg_data.reg_df_test).collect() for row in pred_result: assert np.isclose( row.prediction, row.expected_prediction_with_params, atol=1e-3 ) with pytest.raises(AssertionError, match="Expected class name"): SparkXGBClassifierModel.load(path) assert_model_compatible(model, tmpdir) def test_regressor_with_params(self, reg_data: RegData) -> None: regressor = SparkXGBRegressor(**reg_data.reg_params) all_params = dict( 
**(regressor._gen_xgb_params_dict()), **(regressor._gen_fit_params_dict()), **(regressor._gen_predict_params_dict()), ) check_sub_dict_match( reg_data.reg_params, all_params, excluding_keys=_non_booster_params ) model = regressor.fit(reg_data.reg_df_train) all_params = dict( **(model._gen_xgb_params_dict()), **(model._gen_fit_params_dict()), **(model._gen_predict_params_dict()), ) check_sub_dict_match( reg_data.reg_params, all_params, excluding_keys=_non_booster_params ) pred_result = model.transform(reg_data.reg_df_test).collect() for row in pred_result: assert np.isclose( row.prediction, row.expected_prediction_with_params, atol=1e-3 ) def test_regressor_model_pipeline_save_load(self, reg_data: RegData) -> None: with tempfile.TemporaryDirectory() as tmpdir: path = "file:" + tmpdir regressor = SparkXGBRegressor() pipeline = Pipeline(stages=[regressor]) pipeline = pipeline.copy( extra=get_params_map(reg_data.reg_params, regressor) ) model = pipeline.fit(reg_data.reg_df_train) model.save(path) loaded_model = PipelineModel.load(path) for k, v in reg_data.reg_params.items(): assert loaded_model.stages[0].getOrDefault(k) == v pred_result = loaded_model.transform(reg_data.reg_df_test).collect() for row in pred_result: assert np.isclose( row.prediction, row.expected_prediction_with_params, atol=1e-3 ) assert_model_compatible(model.stages[0], tmpdir) def test_device_param(self, reg_data: RegData, clf_data: ClfData) -> None: clf = SparkXGBClassifier(device="cuda", tree_method="exact") with pytest.raises(ValueError, match="not supported on GPU"): clf.fit(clf_data.cls_df_train) regressor = SparkXGBRegressor(device="cuda", tree_method="exact") with pytest.raises(ValueError, match="not supported on GPU"): regressor.fit(reg_data.reg_df_train) reg = SparkXGBRegressor(device="cuda", tree_method="gpu_hist") reg._validate_params() reg = SparkXGBRegressor(device="cuda") reg._validate_params() clf = SparkXGBClassifier(device="cuda", tree_method="gpu_hist") clf._validate_params() clf 
= SparkXGBClassifier(device="cuda") clf._validate_params() class XgboostLocalTest(SparkTestCase): def setUp(self): logging.getLogger().setLevel("INFO") random.seed(2020) # The following code use xgboost python library to train xgb model and predict. # # >>> import numpy as np # >>> import xgboost # >>> X = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.5]]) # >>> y = np.array([0, 1]) # >>> reg1 = xgboost.XGBRegressor() # >>> reg1.fit(X, y) # >>> reg1.predict(X) # array([8.8375784e-04, 9.9911624e-01], dtype=float32) # >>> def custom_lr(boosting_round): # ... return 1.0 / (boosting_round + 1) # ... # >>> reg1.fit(X, y, callbacks=[xgboost.callback.LearningRateScheduler(custom_lr)]) # >>> reg1.predict(X) # array([0.02406844, 0.9759315 ], dtype=float32) # >>> reg2 = xgboost.XGBRegressor(max_depth=5, n_estimators=10) # >>> reg2.fit(X, y) # >>> reg2.predict(X, ntree_limit=5) # array([0.22185266, 0.77814734], dtype=float32) self.reg_params = { "max_depth": 5, "n_estimators": 10, "ntree_limit": 5, "max_bin": 9, } self.reg_df_train = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1), ], ["features", "label"], ) self.reg_df_test = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0.0, 0.2219, 0.02406), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1.0, 0.7781, 0.9759), ], [ "features", "expected_prediction", "expected_prediction_with_params", "expected_prediction_with_callbacks", ], ) # kwargs test (using the above data, train, we get the same results) self.cls_params_kwargs = {"tree_method": "approx", "sketch_eps": 0.03} # >>> X = np.array([[1.0, 2.0, 3.0], [1.0, 2.0, 4.0], [0.0, 1.0, 5.5], [-1.0, -2.0, 1.0]]) # >>> y = np.array([0, 0, 1, 2]) # >>> cl = xgboost.XGBClassifier() # >>> cl.fit(X, y) # >>> cl.predict_proba(np.array([[1.0, 2.0, 3.0]])) # array([[0.5374299 , 0.23128504, 0.23128504]], dtype=float32) # Test classifier with both base margin and without # >>> import numpy as np # >>> import xgboost # >>> X = 
np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.5], [4.0, 5.0, 6.0], [0.0, 6.0, 7.5]]) # >>> w = np.array([1.0, 2.0, 1.0, 2.0]) # >>> y = np.array([0, 1, 0, 1]) # >>> base_margin = np.array([1,0,0,1]) # # This is without the base margin # >>> cls1 = xgboost.XGBClassifier() # >>> cls1.fit(X, y, sample_weight=w) # >>> cls1.predict_proba(np.array([[1.0, 2.0, 3.0]])) # array([[0.3333333, 0.6666667]], dtype=float32) # >>> cls1.predict(np.array([[1.0, 2.0, 3.0]])) # array([1]) # # This is with the same base margin for predict # >>> cls2 = xgboost.XGBClassifier() # >>> cls2.fit(X, y, sample_weight=w, base_margin=base_margin) # >>> cls2.predict_proba(np.array([[1.0, 2.0, 3.0]]), base_margin=[0]) # array([[0.44142532, 0.5585747 ]], dtype=float32) # >>> cls2.predict(np.array([[1.0, 2.0, 3.0]]), base_margin=[0]) # array([1]) # # This is with a different base margin for predict # # >>> cls2 = xgboost.XGBClassifier() # >>> cls2.fit(X, y, sample_weight=w, base_margin=base_margin) # >>> cls2.predict_proba(np.array([[1.0, 2.0, 3.0]]), base_margin=[1]) # array([[0.2252, 0.7747 ]], dtype=float32) # >>> cls2.predict(np.array([[1.0, 2.0, 3.0]]), base_margin=[0]) # array([1]) self.cls_df_train_without_base_margin = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0, 1.0), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1, 2.0), (Vectors.dense(4.0, 5.0, 6.0), 0, 1.0), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, 2.0), ], ["features", "label", "weight"], ) self.cls_df_test_without_base_margin = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), [0.3333, 0.6666], 1), ], [ "features", "expected_prob_without_base_margin", "expected_prediction_without_base_margin", ], ) self.cls_df_train_with_same_base_margin = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0, 1.0, 1), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1, 2.0, 0), (Vectors.dense(4.0, 5.0, 6.0), 0, 1.0, 0), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, 2.0, 1), ], ["features", "label", "weight", "base_margin"], ) 
self.cls_df_test_with_same_base_margin = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0, [0.4415, 0.5585], 1), ], [ "features", "base_margin", "expected_prob_with_base_margin", "expected_prediction_with_base_margin", ], ) self.cls_df_train_with_different_base_margin = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0, 1.0, 1), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1, 2.0, 0), (Vectors.dense(4.0, 5.0, 6.0), 0, 1.0, 0), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, 2.0, 1), ], ["features", "label", "weight", "base_margin"], ) self.cls_df_test_with_different_base_margin = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 1, [0.2252, 0.7747], 1), ], [ "features", "base_margin", "expected_prob_with_base_margin", "expected_prediction_with_base_margin", ], ) self.reg_df_sparse_train = self.session.createDataFrame( [ (Vectors.dense(1.0, 0.0, 3.0, 0.0, 0.0), 0), (Vectors.sparse(5, {1: 1.0, 3: 5.5}), 1), (Vectors.sparse(5, {4: -3.0}), 2), ] * 10, ["features", "label"], ) self.cls_df_sparse_train = self.session.createDataFrame( [ (Vectors.dense(1.0, 0.0, 3.0, 0.0, 0.0), 0), (Vectors.sparse(5, {1: 1.0, 3: 5.5}), 1), (Vectors.sparse(5, {4: -3.0}), 0), ] * 10, ["features", "label"], ) def get_local_tmp_dir(self): return self.tempdir + str(uuid.uuid4()) def test_convert_to_sklearn_model_reg(self) -> None: regressor = SparkXGBRegressor( n_estimators=200, missing=2.0, max_depth=3, sketch_eps=0.5 ) reg_model = regressor.fit(self.reg_df_train) sklearn_regressor = regressor._convert_to_sklearn_model( reg_model.get_booster().save_raw("json"), reg_model.get_booster().save_config(), ) assert isinstance(sklearn_regressor, XGBRegressor) assert sklearn_regressor.n_estimators == 200 assert sklearn_regressor.missing == 2.0 assert sklearn_regressor.max_depth == 3 assert sklearn_regressor.get_params()["sketch_eps"] == 0.5 def test_param_alias(self): py_cls = SparkXGBClassifier(features_col="f1", label_col="l1") 
self.assertEqual(py_cls.getOrDefault(py_cls.featuresCol), "f1") self.assertEqual(py_cls.getOrDefault(py_cls.labelCol), "l1") with pytest.raises( ValueError, match="Please use param name features_col instead" ): SparkXGBClassifier(featuresCol="f1") @staticmethod def test_param_value_converter(): py_cls = SparkXGBClassifier(missing=np.float64(1.0), sketch_eps=np.float64(0.3)) # don't check by isintance(v, float) because for numpy scalar it will also return True assert py_cls.getOrDefault(py_cls.missing).__class__.__name__ == "float" assert ( py_cls.getOrDefault(py_cls.arbitrary_params_dict)[ "sketch_eps" ].__class__.__name__ == "float64" ) def test_callbacks(self): from xgboost.callback import LearningRateScheduler path = self.get_local_tmp_dir() def custom_learning_rate(boosting_round): return 1.0 / (boosting_round + 1) cb = [LearningRateScheduler(custom_learning_rate)] regressor = SparkXGBRegressor(callbacks=cb) # Test the save/load of the estimator instead of the model, since # the callbacks param only exists in the estimator but not in the model regressor.save(path) regressor = SparkXGBRegressor.load(path) model = regressor.fit(self.reg_df_train) pred_result = model.transform(self.reg_df_test).collect() for row in pred_result: self.assertTrue( np.isclose( row.prediction, row.expected_prediction_with_callbacks, atol=1e-3 ) ) def test_train_with_initial_model(self): path = self.get_local_tmp_dir() reg1 = SparkXGBRegressor(**self.reg_params) model = reg1.fit(self.reg_df_train) init_booster = model.get_booster() reg2 = SparkXGBRegressor( max_depth=2, n_estimators=2, xgb_model=init_booster, max_bin=21 ) model21 = reg2.fit(self.reg_df_train) pred_res21 = model21.transform(self.reg_df_test).collect() reg2.save(path) reg2 = SparkXGBRegressor.load(path) self.assertTrue(reg2.getOrDefault(reg2.xgb_model) is not None) model22 = reg2.fit(self.reg_df_train) pred_res22 = model22.transform(self.reg_df_test).collect() # Test the transform result is the same for original and 
loaded model for row1, row2 in zip(pred_res21, pred_res22): self.assertTrue(np.isclose(row1.prediction, row2.prediction, atol=1e-3)) def test_classifier_with_base_margin(self): cls_without_base_margin = SparkXGBClassifier(weight_col="weight") model_without_base_margin = cls_without_base_margin.fit( self.cls_df_train_without_base_margin ) pred_result_without_base_margin = model_without_base_margin.transform( self.cls_df_test_without_base_margin ).collect() for row in pred_result_without_base_margin: self.assertTrue( np.isclose( row.prediction, row.expected_prediction_without_base_margin, atol=1e-3, ) ) np.testing.assert_allclose( row.probability, row.expected_prob_without_base_margin, atol=1e-3 ) cls_with_same_base_margin = SparkXGBClassifier( weight_col="weight", base_margin_col="base_margin" ) model_with_same_base_margin = cls_with_same_base_margin.fit( self.cls_df_train_with_same_base_margin ) pred_result_with_same_base_margin = model_with_same_base_margin.transform( self.cls_df_test_with_same_base_margin ).collect() for row in pred_result_with_same_base_margin: self.assertTrue( np.isclose( row.prediction, row.expected_prediction_with_base_margin, atol=1e-3 ) ) np.testing.assert_allclose( row.probability, row.expected_prob_with_base_margin, atol=1e-3 ) cls_with_different_base_margin = SparkXGBClassifier( weight_col="weight", base_margin_col="base_margin" ) model_with_different_base_margin = cls_with_different_base_margin.fit( self.cls_df_train_with_different_base_margin ) pred_result_with_different_base_margin = ( model_with_different_base_margin.transform( self.cls_df_test_with_different_base_margin ).collect() ) for row in pred_result_with_different_base_margin: self.assertTrue( np.isclose( row.prediction, row.expected_prediction_with_base_margin, atol=1e-3 ) ) np.testing.assert_allclose( row.probability, row.expected_prob_with_base_margin, atol=1e-3 ) def test_num_workers_param(self): regressor = SparkXGBRegressor(num_workers=-1) self.assertRaises(ValueError, 
regressor._validate_params) classifier = SparkXGBClassifier(num_workers=0) self.assertRaises(ValueError, classifier._validate_params) def test_feature_importances(self): reg1 = SparkXGBRegressor(**self.reg_params) model = reg1.fit(self.reg_df_train) booster = model.get_booster() self.assertEqual(model.get_feature_importances(), booster.get_score()) self.assertEqual( model.get_feature_importances(importance_type="gain"), booster.get_score(importance_type="gain"), ) def test_regressor_array_col_as_feature(self): train_dataset = self.reg_df_train.withColumn( "features", vector_to_array(spark_sql_func.col("features")) ) test_dataset = self.reg_df_test.withColumn( "features", vector_to_array(spark_sql_func.col("features")) ) regressor = SparkXGBRegressor() model = regressor.fit(train_dataset) pred_result = model.transform(test_dataset).collect() for row in pred_result: self.assertTrue( np.isclose(row.prediction, row.expected_prediction, atol=1e-3) ) @pytest.mark.skipif(**no_sparse_unwrap()) def test_regressor_with_sparse_optim(self): regressor = SparkXGBRegressor(missing=0.0) model = regressor.fit(self.reg_df_sparse_train) assert model._xgb_sklearn_model.missing == 0.0 pred_result = model.transform(self.reg_df_sparse_train).collect() # enable sparse optimiaztion regressor2 = SparkXGBRegressor(missing=0.0, enable_sparse_data_optim=True) model2 = regressor2.fit(self.reg_df_sparse_train) assert model2.getOrDefault(model2.enable_sparse_data_optim) assert model2._xgb_sklearn_model.missing == 0.0 pred_result2 = model2.transform(self.reg_df_sparse_train).collect() for row1, row2 in zip(pred_result, pred_result2): self.assertTrue(np.isclose(row1.prediction, row2.prediction, atol=1e-3)) @pytest.mark.skipif(**no_sparse_unwrap()) def test_classifier_with_sparse_optim(self): cls = SparkXGBClassifier(missing=0.0) model = cls.fit(self.cls_df_sparse_train) assert model._xgb_sklearn_model.missing == 0.0 pred_result = model.transform(self.cls_df_sparse_train).collect() # enable sparse 
optimiaztion cls2 = SparkXGBClassifier(missing=0.0, enable_sparse_data_optim=True) model2 = cls2.fit(self.cls_df_sparse_train) assert model2.getOrDefault(model2.enable_sparse_data_optim) assert model2._xgb_sklearn_model.missing == 0.0 pred_result2 = model2.transform(self.cls_df_sparse_train).collect() for row1, row2 in zip(pred_result, pred_result2): self.assertTrue(np.allclose(row1.probability, row2.probability, rtol=1e-3)) def test_empty_validation_data(self) -> None: for tree_method in [ "hist", "approx", ]: # pytest.mark conflict with python unittest df_train = self.session.createDataFrame( [ (Vectors.dense(10.1, 11.2, 11.3), 0, False), (Vectors.dense(1, 1.2, 1.3), 1, False), (Vectors.dense(14.0, 15.0, 16.0), 0, False), (Vectors.dense(1.1, 1.2, 1.3), 1, True), ], ["features", "label", "val_col"], ) classifier = SparkXGBClassifier( num_workers=2, tree_method=tree_method, min_child_weight=0.0, reg_alpha=0, reg_lambda=0, validation_indicator_col="val_col", ) model = classifier.fit(df_train) pred_result = model.transform(df_train).collect() for row in pred_result: self.assertEqual(row.prediction, row.label) def test_empty_train_data(self) -> None: for tree_method in [ "hist", "approx", ]: # pytest.mark conflict with python unittest df_train = self.session.createDataFrame( [ (Vectors.dense(10.1, 11.2, 11.3), 0, True), (Vectors.dense(1, 1.2, 1.3), 1, True), (Vectors.dense(14.0, 15.0, 16.0), 0, True), (Vectors.dense(1.1, 1.2, 1.3), 1, False), ], ["features", "label", "val_col"], ) classifier = SparkXGBClassifier( num_workers=2, min_child_weight=0.0, reg_alpha=0, reg_lambda=0, tree_method=tree_method, validation_indicator_col="val_col", ) model = classifier.fit(df_train) pred_result = model.transform(df_train).collect() for row in pred_result: assert row.prediction == 1.0 def test_empty_partition(self): # raw_df.repartition(4) will result int severe data skew, actually, # there is no any data in reducer partition 1, reducer partition 2 # see 
https://github.com/dmlc/xgboost/issues/8221 for tree_method in [ "hist", "approx", ]: # pytest.mark conflict with python unittest raw_df = self.session.range(0, 100, 1, 50).withColumn( "label", spark_sql_func.when(spark_sql_func.rand(1) > 0.5, 1).otherwise(0), ) vector_assembler = ( VectorAssembler().setInputCols(["id"]).setOutputCol("features") ) data_trans = vector_assembler.setHandleInvalid("keep").transform(raw_df) classifier = SparkXGBClassifier(num_workers=4, tree_method=tree_method) classifier.fit(data_trans) def test_unsupported_params(self): with pytest.raises(ValueError, match="evals_result"): SparkXGBClassifier(evals_result={}) LTRData = namedtuple("LTRData", ("df_train", "df_test", "df_train_1")) @pytest.fixture def ltr_data(spark: SparkSession) -> Generator[LTRData, None, None]: spark.conf.set("spark.sql.execution.arrow.maxRecordsPerBatch", "8") ranker_df_train = spark.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0, 0), (Vectors.dense(4.0, 5.0, 6.0), 1, 0), (Vectors.dense(9.0, 4.0, 8.0), 2, 0), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 0, 1), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, 1), (Vectors.sparse(3, {1: 8.0, 2: 9.5}), 2, 1), ], ["features", "label", "qid"], ) X_train = np.array( [ [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [9.0, 4.0, 8.0], [np.NaN, 1.0, 5.5], [np.NaN, 6.0, 7.5], [np.NaN, 8.0, 9.5], ] ) qid_train = np.array([0, 0, 0, 1, 1, 1]) y_train = np.array([0, 1, 2, 0, 1, 2]) X_test = np.array( [ [1.5, 2.0, 3.0], [4.5, 5.0, 6.0], [9.0, 4.5, 8.0], [np.NaN, 1.0, 6.0], [np.NaN, 6.0, 7.0], [np.NaN, 8.0, 10.5], ] ) ltr = xgb.XGBRanker(tree_method="approx", objective="rank:pairwise") ltr.fit(X_train, y_train, qid=qid_train) predt = ltr.predict(X_test) ranker_df_test = spark.createDataFrame( [ (Vectors.dense(1.5, 2.0, 3.0), 0, float(predt[0])), (Vectors.dense(4.5, 5.0, 6.0), 0, float(predt[1])), (Vectors.dense(9.0, 4.5, 8.0), 0, float(predt[2])), (Vectors.sparse(3, {1: 1.0, 2: 6.0}), 1, float(predt[3])), (Vectors.sparse(3, {1: 6.0, 2: 7.0}), 1, 
float(predt[4])), (Vectors.sparse(3, {1: 8.0, 2: 10.5}), 1, float(predt[5])), ], ["features", "qid", "expected_prediction"], ) ranker_df_train_1 = spark.createDataFrame( [ (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 0, 9), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, 9), (Vectors.sparse(3, {1: 8.0, 2: 9.5}), 2, 9), (Vectors.dense(1.0, 2.0, 3.0), 0, 8), (Vectors.dense(4.0, 5.0, 6.0), 1, 8), (Vectors.dense(9.0, 4.0, 8.0), 2, 8), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 0, 7), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, 7), (Vectors.sparse(3, {1: 8.0, 2: 9.5}), 2, 7), (Vectors.dense(1.0, 2.0, 3.0), 0, 6), (Vectors.dense(4.0, 5.0, 6.0), 1, 6), (Vectors.dense(9.0, 4.0, 8.0), 2, 6), ] * 4, ["features", "label", "qid"], ) yield LTRData(ranker_df_train, ranker_df_test, ranker_df_train_1) class TestPySparkLocalLETOR: def test_ranker(self, ltr_data: LTRData) -> None: ranker = SparkXGBRanker(qid_col="qid", objective="rank:pairwise") assert ranker.getOrDefault(ranker.objective) == "rank:pairwise" model = ranker.fit(ltr_data.df_train) pred_result = model.transform(ltr_data.df_test).collect() for row in pred_result: assert np.isclose(row.prediction, row.expected_prediction, rtol=1e-3) def test_ranker_qid_sorted(self, ltr_data: LTRData) -> None: ranker = SparkXGBRanker(qid_col="qid", num_workers=4, objective="rank:ndcg") assert ranker.getOrDefault(ranker.objective) == "rank:ndcg" model = ranker.fit(ltr_data.df_train_1) model.transform(ltr_data.df_test).collect()
55,161
37.227304
98
py
xgboost
xgboost-master/tests/test_distributed/test_with_spark/__init__.py
0
0
0
py
xgboost
xgboost-master/tests/test_distributed/test_with_spark/test_spark_local_cluster.py
import json import logging import os import random import tempfile import uuid from collections import namedtuple import numpy as np import pytest import xgboost as xgb from xgboost import testing as tm from xgboost.callback import LearningRateScheduler pytestmark = pytest.mark.skipif(**tm.no_spark()) from typing import Generator from pyspark.ml.linalg import Vectors from pyspark.sql import SparkSession from xgboost.spark import SparkXGBClassifier, SparkXGBRegressor from xgboost.spark.utils import _get_max_num_concurrent_tasks from .utils import SparkLocalClusterTestCase @pytest.fixture def spark() -> Generator[SparkSession, None, None]: config = { "spark.master": "local-cluster[2, 2, 1024]", "spark.python.worker.reuse": "false", "spark.driver.host": "127.0.0.1", "spark.task.maxFailures": "1", "spark.sql.execution.pyspark.udf.simplifiedTraceback.enabled": "false", "spark.sql.pyspark.jvmStacktrace.enabled": "true", "spark.cores.max": "4", "spark.task.cpus": "1", "spark.executor.cores": "2", } builder = SparkSession.builder.appName("XGBoost PySpark Python API Tests") for k, v in config.items(): builder.config(k, v) logging.getLogger("pyspark").setLevel(logging.INFO) sess = builder.getOrCreate() yield sess sess.stop() sess.sparkContext.stop() RegData = namedtuple("RegData", ("reg_df_train", "reg_df_test", "reg_params")) @pytest.fixture def reg_data(spark: SparkSession) -> Generator[RegData, None, None]: reg_params = {"max_depth": 5, "n_estimators": 10, "iteration_range": (0, 5)} X = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 5.5]]) y = np.array([0, 1]) def custom_lr(boosting_round): return 1.0 / (boosting_round + 1) reg1 = xgb.XGBRegressor(callbacks=[LearningRateScheduler(custom_lr)]) reg1.fit(X, y) predt1 = reg1.predict(X) # array([0.02406833, 0.97593164], dtype=float32) reg2 = xgb.XGBRegressor(max_depth=5, n_estimators=10) reg2.fit(X, y) predt2 = reg2.predict(X, iteration_range=(0, 5)) # array([0.22185263, 0.77814734], dtype=float32) reg_df_train = spark.createDataFrame( 
[ (Vectors.dense(1.0, 2.0, 3.0), 0), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1), ], ["features", "label"], ) reg_df_test = spark.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0.0, float(predt2[0]), float(predt1[0])), ( Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1.0, float(predt2[1]), float(predt1[1]), ), ], [ "features", "expected_prediction", "expected_prediction_with_params", "expected_prediction_with_callbacks", ], ) yield RegData(reg_df_train, reg_df_test, reg_params) class TestPySparkLocalCluster: def test_regressor_basic_with_params(self, reg_data: RegData) -> None: regressor = SparkXGBRegressor(**reg_data.reg_params) model = regressor.fit(reg_data.reg_df_train) pred_result = model.transform(reg_data.reg_df_test).collect() for row in pred_result: assert np.isclose( row.prediction, row.expected_prediction_with_params, atol=1e-3 ) def test_callbacks(self, reg_data: RegData) -> None: with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, str(uuid.uuid4())) def custom_lr(boosting_round): return 1.0 / (boosting_round + 1) cb = [LearningRateScheduler(custom_lr)] regressor = SparkXGBRegressor(callbacks=cb) # Test the save/load of the estimator instead of the model, since # the callbacks param only exists in the estimator but not in the model regressor.save(path) regressor = SparkXGBRegressor.load(path) model = regressor.fit(reg_data.reg_df_train) pred_result = model.transform(reg_data.reg_df_test).collect() for row in pred_result: assert np.isclose( row.prediction, row.expected_prediction_with_callbacks, atol=1e-3 ) class XgboostLocalClusterTestCase(SparkLocalClusterTestCase): def setUp(self): random.seed(2020) self.n_workers = _get_max_num_concurrent_tasks(self.session.sparkContext) # Distributed section # Binary classification self.cls_df_train_distributed = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1), (Vectors.dense(4.0, 5.0, 6.0), 0), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1), ] * 100, 
["features", "label"], ) self.cls_df_test_distributed = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0, [0.9949826, 0.0050174]), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1, [0.0050174, 0.9949826]), (Vectors.dense(4.0, 5.0, 6.0), 0, [0.9949826, 0.0050174]), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, [0.0050174, 0.9949826]), ], ["features", "expected_label", "expected_probability"], ) # Binary classification with different num_estimators self.cls_df_test_distributed_lower_estimators = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0, [0.9735, 0.0265]), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1, [0.0265, 0.9735]), (Vectors.dense(4.0, 5.0, 6.0), 0, [0.9735, 0.0265]), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, [0.0265, 0.9735]), ], ["features", "expected_label", "expected_probability"], ) # Multiclass classification self.cls_df_train_distributed_multiclass = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1), (Vectors.dense(4.0, 5.0, 6.0), 0), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 2), ] * 100, ["features", "label"], ) self.cls_df_test_distributed_multiclass = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0, [4.294563, -2.449409, -2.449409]), ( Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1, [-2.3796105, 3.669014, -2.449409], ), (Vectors.dense(4.0, 5.0, 6.0), 0, [4.294563, -2.449409, -2.449409]), ( Vectors.sparse(3, {1: 6.0, 2: 7.5}), 2, [-2.3796105, -2.449409, 3.669014], ), ], ["features", "expected_label", "expected_margins"], ) # Regression self.reg_df_train_distributed = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1), (Vectors.dense(4.0, 5.0, 6.0), 0), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 2), ] * 100, ["features", "label"], ) self.reg_df_test_distributed = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 1.533e-04), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 9.999e-01), (Vectors.dense(4.0, 5.0, 6.0), 
1.533e-04), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1.999e00), ], ["features", "expected_label"], ) # Adding weight and validation self.clf_params_with_eval_dist = { "validation_indicator_col": "isVal", "early_stopping_rounds": 1, "eval_metric": "logloss", } self.clf_params_with_weight_dist = {"weight_col": "weight"} self.cls_df_train_distributed_with_eval_weight = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0, False, 1.0), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1, False, 2.0), (Vectors.dense(4.0, 5.0, 6.0), 0, True, 1.0), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, True, 2.0), ] * 100, ["features", "label", "isVal", "weight"], ) self.cls_df_test_distributed_with_eval_weight = self.session.createDataFrame( [ ( Vectors.dense(1.0, 2.0, 3.0), [0.9955, 0.0044], [0.9904, 0.0096], [0.9903, 0.0097], ), ], [ "features", "expected_prob_with_weight", "expected_prob_with_eval", "expected_prob_with_weight_and_eval", ], ) self.clf_best_score_eval = 0.009677 self.clf_best_score_weight_and_eval = 0.006626 self.reg_params_with_eval_dist = { "validation_indicator_col": "isVal", "early_stopping_rounds": 1, "eval_metric": "rmse", } self.reg_params_with_weight_dist = {"weight_col": "weight"} self.reg_df_train_distributed_with_eval_weight = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 0, False, 1.0), (Vectors.sparse(3, {1: 1.0, 2: 5.5}), 1, False, 2.0), (Vectors.dense(4.0, 5.0, 6.0), 0, True, 1.0), (Vectors.sparse(3, {1: 6.0, 2: 7.5}), 1, True, 2.0), ] * 100, ["features", "label", "isVal", "weight"], ) self.reg_df_test_distributed_with_eval_weight = self.session.createDataFrame( [ (Vectors.dense(1.0, 2.0, 3.0), 4.583e-05, 5.239e-05, 6.03e-05), ( Vectors.sparse(3, {1: 1.0, 2: 5.5}), 9.9997e-01, 9.99947e-01, 9.9995e-01, ), ], [ "features", "expected_prediction_with_weight", "expected_prediction_with_eval", "expected_prediction_with_weight_and_eval", ], ) self.reg_best_score_eval = 5.239e-05 self.reg_best_score_weight_and_eval = 4.850e-05 def 
test_classifier_distributed_basic(self): classifier = SparkXGBClassifier(num_workers=self.n_workers, n_estimators=100) model = classifier.fit(self.cls_df_train_distributed) pred_result = model.transform(self.cls_df_test_distributed).collect() for row in pred_result: self.assertTrue(np.isclose(row.expected_label, row.prediction, atol=1e-3)) self.assertTrue( np.allclose(row.expected_probability, row.probability, atol=1e-3) ) def test_classifier_distributed_multiclass(self): # There is no built-in multiclass option for external storage classifier = SparkXGBClassifier(num_workers=self.n_workers, n_estimators=100) model = classifier.fit(self.cls_df_train_distributed_multiclass) pred_result = model.transform(self.cls_df_test_distributed_multiclass).collect() for row in pred_result: self.assertTrue(np.isclose(row.expected_label, row.prediction, atol=1e-3)) self.assertTrue( np.allclose(row.expected_margins, row.rawPrediction, atol=1e-3) ) def test_regressor_distributed_basic(self): regressor = SparkXGBRegressor(num_workers=self.n_workers, n_estimators=100) model = regressor.fit(self.reg_df_train_distributed) pred_result = model.transform(self.reg_df_test_distributed).collect() for row in pred_result: self.assertTrue(np.isclose(row.expected_label, row.prediction, atol=1e-3)) def test_classifier_distributed_weight_eval(self): # with weight classifier = SparkXGBClassifier( num_workers=self.n_workers, n_estimators=100, **self.clf_params_with_weight_dist ) model = classifier.fit(self.cls_df_train_distributed_with_eval_weight) pred_result = model.transform( self.cls_df_test_distributed_with_eval_weight ).collect() for row in pred_result: self.assertTrue( np.allclose(row.probability, row.expected_prob_with_weight, atol=1e-3) ) # with eval only classifier = SparkXGBClassifier( num_workers=self.n_workers, n_estimators=100, **self.clf_params_with_eval_dist ) model = classifier.fit(self.cls_df_train_distributed_with_eval_weight) pred_result = model.transform( 
self.cls_df_test_distributed_with_eval_weight ).collect() for row in pred_result: self.assertTrue( np.allclose(row.probability, row.expected_prob_with_eval, atol=1e-3) ) assert np.isclose( float(model.get_booster().attributes()["best_score"]), self.clf_best_score_eval, rtol=1e-3, ) # with both weight and eval classifier = SparkXGBClassifier( num_workers=self.n_workers, n_estimators=100, **self.clf_params_with_eval_dist, **self.clf_params_with_weight_dist ) model = classifier.fit(self.cls_df_train_distributed_with_eval_weight) pred_result = model.transform( self.cls_df_test_distributed_with_eval_weight ).collect() for row in pred_result: self.assertTrue( np.allclose( row.probability, row.expected_prob_with_weight_and_eval, atol=1e-3 ) ) np.isclose( float(model.get_booster().attributes()["best_score"]), self.clf_best_score_weight_and_eval, rtol=1e-3, ) def test_regressor_distributed_weight_eval(self): # with weight regressor = SparkXGBRegressor( num_workers=self.n_workers, n_estimators=100, **self.reg_params_with_weight_dist ) model = regressor.fit(self.reg_df_train_distributed_with_eval_weight) pred_result = model.transform( self.reg_df_test_distributed_with_eval_weight ).collect() for row in pred_result: self.assertTrue( np.isclose( row.prediction, row.expected_prediction_with_weight, atol=1e-3 ) ) # with eval only regressor = SparkXGBRegressor( num_workers=self.n_workers, n_estimators=100, **self.reg_params_with_eval_dist ) model = regressor.fit(self.reg_df_train_distributed_with_eval_weight) pred_result = model.transform( self.reg_df_test_distributed_with_eval_weight ).collect() for row in pred_result: self.assertTrue( np.isclose(row.prediction, row.expected_prediction_with_eval, atol=1e-3) ) assert np.isclose( float(model.get_booster().attributes()["best_score"]), self.reg_best_score_eval, rtol=1e-3, ) # with both weight and eval regressor = SparkXGBRegressor( num_workers=self.n_workers, n_estimators=100, use_external_storage=False, 
**self.reg_params_with_eval_dist, **self.reg_params_with_weight_dist ) model = regressor.fit(self.reg_df_train_distributed_with_eval_weight) pred_result = model.transform( self.reg_df_test_distributed_with_eval_weight ).collect() for row in pred_result: self.assertTrue( np.isclose( row.prediction, row.expected_prediction_with_weight_and_eval, atol=1e-3, ) ) assert np.isclose( float(model.get_booster().attributes()["best_score"]), self.reg_best_score_weight_and_eval, rtol=1e-3, ) def test_num_estimators(self): classifier = SparkXGBClassifier(num_workers=self.n_workers, n_estimators=10) model = classifier.fit(self.cls_df_train_distributed) pred_result = model.transform( self.cls_df_test_distributed_lower_estimators ).collect() for row in pred_result: self.assertTrue(np.isclose(row.expected_label, row.prediction, atol=1e-3)) self.assertTrue( np.allclose(row.expected_probability, row.probability, atol=1e-3) ) def test_distributed_params(self): classifier = SparkXGBClassifier(num_workers=self.n_workers, max_depth=7) model = classifier.fit(self.cls_df_train_distributed) self.assertTrue(hasattr(classifier, "max_depth")) self.assertEqual(classifier.getOrDefault(classifier.max_depth), 7) booster_config = json.loads(model.get_booster().save_config()) max_depth = booster_config["learner"]["gradient_booster"]["tree_train_param"][ "max_depth" ] assert int(max_depth) == 7 def test_repartition(self): # The following test case has a few partitioned datasets that are either # well partitioned relative to the number of workers that the user wants # or poorly partitioned. We only want to repartition when the dataset # is poorly partitioned so _repartition_needed is true in those instances. 
classifier = SparkXGBClassifier(num_workers=self.n_workers) basic = self.cls_df_train_distributed self.assertTrue(classifier._repartition_needed(basic)) bad_repartitioned = basic.repartition(self.n_workers + 1) self.assertTrue(classifier._repartition_needed(bad_repartitioned)) good_repartitioned = basic.repartition(self.n_workers) self.assertFalse(classifier._repartition_needed(good_repartitioned)) # Now testing if force_repartition returns True regardless of whether the data is well partitioned classifier = SparkXGBClassifier( num_workers=self.n_workers, force_repartition=True ) good_repartitioned = basic.repartition(self.n_workers) self.assertTrue(classifier._repartition_needed(good_repartitioned))
18,886
37.623722
106
py
xgboost
xgboost-master/tests/test_distributed/test_gpu_with_dask/conftest.py
from typing import Generator, Sequence import pytest from xgboost import testing as tm @pytest.fixture(scope="session", autouse=True) def setup_rmm_pool(request, pytestconfig: pytest.Config) -> None: tm.setup_rmm_pool(request, pytestconfig) @pytest.fixture(scope="class") def local_cuda_client(request, pytestconfig: pytest.Config) -> Generator: kwargs = {} if hasattr(request, "param"): kwargs.update(request.param) if pytestconfig.getoption("--use-rmm-pool"): if tm.no_rmm()["condition"]: raise ImportError("The --use-rmm-pool option requires the RMM package") import rmm kwargs["rmm_pool_size"] = "2GB" if tm.no_dask_cuda()["condition"]: raise ImportError("The local_cuda_cluster fixture requires dask_cuda package") from dask.distributed import Client from dask_cuda import LocalCUDACluster yield Client(LocalCUDACluster(**kwargs)) def pytest_addoption(parser: pytest.Parser) -> None: parser.addoption( "--use-rmm-pool", action="store_true", default=False, help="Use RMM pool" ) def pytest_collection_modifyitems(config: pytest.Config, items: Sequence) -> None: # mark dask tests as `mgpu`. mgpu_mark = pytest.mark.mgpu for item in items: item.add_marker(mgpu_mark)
1,302
29.302326
86
py
xgboost
xgboost-master/tests/test_distributed/test_gpu_with_dask/test_gpu_demos.py
import os import subprocess import pytest from xgboost import testing as tm @pytest.mark.skipif(**tm.no_dask()) @pytest.mark.skipif(**tm.no_dask_cuda()) @pytest.mark.skipif(**tm.no_cupy()) @pytest.mark.mgpu def test_dask_training(): script = os.path.join(tm.demo_dir(__file__), "dask", "gpu_training.py") cmd = ["python", script] subprocess.check_call(cmd) @pytest.mark.skipif(**tm.no_dask_cuda()) @pytest.mark.skipif(**tm.no_dask()) @pytest.mark.mgpu def test_dask_sklearn_demo(): script = os.path.join(tm.demo_dir(__file__), "dask", "sklearn_gpu_training.py") cmd = ["python", script] subprocess.check_call(cmd)
644
23.807692
83
py
xgboost
xgboost-master/tests/test_distributed/test_gpu_with_dask/__init__.py
1
0
0
py
xgboost
xgboost-master/tests/test_distributed/test_gpu_with_dask/test_gpu_with_dask.py
"""Copyright 2019-2022 XGBoost contributors""" import asyncio import json from collections import OrderedDict from inspect import signature from typing import Any, Dict, Type, TypeVar import numpy as np import pytest from hypothesis import given, note, settings, strategies from hypothesis._settings import duration import xgboost as xgb from xgboost import testing as tm from xgboost.testing.params import hist_parameter_strategy pytestmark = [ pytest.mark.skipif(**tm.no_dask()), pytest.mark.skipif(**tm.no_dask_cuda()), ] from ..test_with_dask.test_with_dask import generate_array from ..test_with_dask.test_with_dask import kCols as random_cols from ..test_with_dask.test_with_dask import ( make_categorical, run_auc, run_boost_from_prediction, run_boost_from_prediction_multi_class, run_categorical, run_dask_classifier, run_empty_dmatrix_auc, run_empty_dmatrix_cls, run_empty_dmatrix_reg, run_tree_stats, suppress, ) try: import cudf import dask.dataframe as dd from dask import array as da from dask.distributed import Client from dask_cuda import LocalCUDACluster from xgboost import dask as dxgb from xgboost.testing.dask import check_init_estimation, check_uneven_nan except ImportError: pass def run_with_dask_dataframe(DMatrixT: Type, client: Client) -> None: import cupy as cp cp.cuda.runtime.setDevice(0) _X, _y, _ = generate_array() X = dd.from_dask_array(_X) y = dd.from_dask_array(_y) X = X.map_partitions(cudf.from_pandas) y = y.map_partitions(cudf.from_pandas) dtrain = DMatrixT(client, X, y) out = dxgb.train( client, {"tree_method": "hist", "debug_synchronize": True, "device": "cuda"}, dtrain=dtrain, evals=[(dtrain, "X")], num_boost_round=4, ) assert isinstance(out["booster"], dxgb.Booster) assert len(out["history"]["X"]["rmse"]) == 4 predictions = dxgb.predict(client, out, dtrain) assert isinstance(predictions.compute(), np.ndarray) series_predictions = dxgb.inplace_predict(client, out, X) assert isinstance(series_predictions, dd.Series) single_node = 
out["booster"].predict(xgb.DMatrix(X.compute())) cp.testing.assert_allclose(single_node, predictions.compute()) np.testing.assert_allclose(single_node, series_predictions.compute().to_numpy()) predt = dxgb.predict(client, out, X) assert isinstance(predt, dd.Series) T = TypeVar("T") def is_df(part: T) -> T: assert isinstance(part, cudf.DataFrame), part return part predt.map_partitions(is_df, meta=dd.utils.make_meta({"prediction": "f4"})) cp.testing.assert_allclose(predt.values.compute(), single_node) # Make sure the output can be integrated back to original dataframe X["predict"] = predictions X["inplace_predict"] = series_predictions has_null = X.isnull().values.any().compute() assert bool(has_null) is False def run_with_dask_array(DMatrixT: Type, client: Client) -> None: import cupy as cp cp.cuda.runtime.setDevice(0) X, y, _ = generate_array() X = X.map_blocks(cp.asarray) y = y.map_blocks(cp.asarray) dtrain = DMatrixT(client, X, y) out = dxgb.train( client, {"tree_method": "hist", "debug_synchronize": True, "device": "cuda"}, dtrain=dtrain, evals=[(dtrain, "X")], num_boost_round=2, ) from_dmatrix = dxgb.predict(client, out, dtrain).compute() assert ( json.loads(out["booster"].save_config())["learner"]["gradient_booster"][ "updater" ][0]["name"] == "grow_gpu_hist" ) inplace_predictions = dxgb.inplace_predict(client, out, X).compute() single_node = out["booster"].predict(xgb.DMatrix(X.compute())) np.testing.assert_allclose(single_node, from_dmatrix) device = cp.cuda.runtime.getDevice() assert device == inplace_predictions.device.id single_node = cp.array(single_node) assert device == single_node.device.id cp.testing.assert_allclose(single_node, inplace_predictions) def to_cp(x: Any, DMatrixT: Type) -> Any: import cupy if isinstance(x, np.ndarray) and DMatrixT is dxgb.DaskQuantileDMatrix: X = cupy.array(x) else: X = x return X def run_gpu_hist( params: Dict, num_rounds: int, dataset: tm.TestDataset, DMatrixT: Type, client: Client, ) -> None: params["tree_method"] = 
"hist" params["device"] = "cuda" params = dataset.set_params(params) # It doesn't make sense to distribute a completely # empty dataset. if dataset.X.shape[0] == 0: return chunk = 128 X = to_cp(dataset.X, DMatrixT) X = da.from_array(X, chunks=(chunk, dataset.X.shape[1])) y = to_cp(dataset.y, DMatrixT) y_chunk = chunk if len(dataset.y.shape) == 1 else (chunk, dataset.y.shape[1]) y = da.from_array(y, chunks=y_chunk) if dataset.w is not None: w = to_cp(dataset.w, DMatrixT) w = da.from_array(w, chunks=(chunk,)) else: w = None if DMatrixT is dxgb.DaskQuantileDMatrix: m = DMatrixT( client, data=X, label=y, weight=w, max_bin=params.get("max_bin", 256) ) else: m = DMatrixT(client, data=X, label=y, weight=w) history = dxgb.train( client, params=params, dtrain=m, num_boost_round=num_rounds, evals=[(m, "train")], )["history"]["train"][dataset.metric] note(history) # See note on `ObjFunction::UpdateTreeLeaf`. update_leaf = dataset.name.endswith("-l1") if update_leaf: assert history[0] + 1e-2 >= history[-1] return else: assert tm.non_increasing(history) def test_tree_stats() -> None: with LocalCUDACluster(n_workers=1) as cluster: with Client(cluster) as client: local = run_tree_stats(client, "hist", "cuda") with LocalCUDACluster(n_workers=2) as cluster: with Client(cluster) as client: distributed = run_tree_stats(client, "hist", "cuda") assert local == distributed class TestDistributedGPU: @pytest.mark.skipif(**tm.no_cudf()) def test_boost_from_prediction(self, local_cuda_client: Client) -> None: import cudf from sklearn.datasets import load_breast_cancer, load_iris X_, y_ = load_breast_cancer(return_X_y=True) X = dd.from_array(X_, chunksize=100).map_partitions(cudf.from_pandas) y = dd.from_array(y_, chunksize=100).map_partitions(cudf.from_pandas) run_boost_from_prediction(X, y, "hist", "cuda", local_cuda_client) X_, y_ = load_iris(return_X_y=True) X = dd.from_array(X_, chunksize=50).map_partitions(cudf.from_pandas) y = dd.from_array(y_, 
chunksize=50).map_partitions(cudf.from_pandas) run_boost_from_prediction_multi_class(X, y, "hist", "cuda", local_cuda_client) def test_init_estimation(self, local_cuda_client: Client) -> None: check_init_estimation("gpu_hist", local_cuda_client) def test_uneven_nan(self) -> None: n_workers = 2 with LocalCUDACluster(n_workers=n_workers) as cluster: with Client(cluster) as client: check_uneven_nan(client, "gpu_hist", n_workers) @pytest.mark.skipif(**tm.no_dask_cudf()) def test_dask_dataframe(self, local_cuda_client: Client) -> None: run_with_dask_dataframe(dxgb.DaskDMatrix, local_cuda_client) run_with_dask_dataframe(dxgb.DaskQuantileDMatrix, local_cuda_client) @pytest.mark.skipif(**tm.no_dask_cudf()) def test_categorical(self, local_cuda_client: Client) -> None: import dask_cudf X, y = make_categorical(local_cuda_client, 10000, 30, 13) X = dask_cudf.from_dask_dataframe(X) X_onehot, _ = make_categorical(local_cuda_client, 10000, 30, 13, True) X_onehot = dask_cudf.from_dask_dataframe(X_onehot) run_categorical(local_cuda_client, "gpu_hist", X, X_onehot, y) @given( params=hist_parameter_strategy, num_rounds=strategies.integers(1, 20), dataset=tm.make_dataset_strategy(), dmatrix_type=strategies.sampled_from( [dxgb.DaskDMatrix, dxgb.DaskQuantileDMatrix] ), ) @settings( deadline=duration(seconds=120), max_examples=20, suppress_health_check=suppress, print_blob=True, ) @pytest.mark.skipif(**tm.no_cupy()) def test_gpu_hist( self, params: Dict, num_rounds: int, dataset: tm.TestDataset, dmatrix_type: type, local_cuda_client: Client, ) -> None: run_gpu_hist(params, num_rounds, dataset, dmatrix_type, local_cuda_client) def test_empty_quantile_dmatrix(self, local_cuda_client: Client) -> None: client = local_cuda_client X, y = make_categorical(client, 1, 30, 13) X_valid, y_valid = make_categorical(client, 10000, 30, 13) Xy = xgb.dask.DaskQuantileDMatrix(client, X, y, enable_categorical=True) Xy_valid = xgb.dask.DaskQuantileDMatrix( client, X_valid, y_valid, ref=Xy, 
enable_categorical=True ) result = xgb.dask.train( client, {"tree_method": "hist", "device": "cuda", "debug_synchronize": True}, Xy, num_boost_round=10, evals=[(Xy_valid, "Valid")], ) predt = xgb.dask.inplace_predict(client, result["booster"], X).compute() np.testing.assert_allclose(y.compute(), predt) rmse = result["history"]["Valid"]["rmse"][-1] assert rmse < 32.0 @pytest.mark.skipif(**tm.no_cupy()) def test_dask_array(self, local_cuda_client: Client) -> None: run_with_dask_array(dxgb.DaskDMatrix, local_cuda_client) run_with_dask_array(dxgb.DaskQuantileDMatrix, local_cuda_client) @pytest.mark.skipif(**tm.no_cupy()) def test_early_stopping(self, local_cuda_client: Client) -> None: from sklearn.datasets import load_breast_cancer X, y = load_breast_cancer(return_X_y=True) X, y = da.from_array(X), da.from_array(y) m = dxgb.DaskDMatrix(local_cuda_client, X, y) valid = dxgb.DaskDMatrix(local_cuda_client, X, y) early_stopping_rounds = 5 booster = dxgb.train( local_cuda_client, { "objective": "binary:logistic", "eval_metric": "error", "tree_method": "hist", "device": "cuda", }, m, evals=[(valid, "Valid")], num_boost_round=1000, early_stopping_rounds=early_stopping_rounds, )["booster"] assert hasattr(booster, "best_score") dump = booster.get_dump(dump_format="json") assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 valid_X = X valid_y = y cls = dxgb.DaskXGBClassifier( objective="binary:logistic", tree_method="hist", device="cuda", eval_metric="error", n_estimators=100, ) cls.client = local_cuda_client cls.fit( X, y, early_stopping_rounds=early_stopping_rounds, eval_set=[(valid_X, valid_y)], ) booster = cls.get_booster() dump = booster.get_dump(dump_format="json") assert len(dump) - booster.best_iteration == early_stopping_rounds + 1 @pytest.mark.skipif(**tm.no_cudf()) @pytest.mark.parametrize("model", ["boosting"]) def test_dask_classifier(self, model: str, local_cuda_client: Client) -> None: import dask_cudf X_, y_, w_ = 
generate_array(with_weights=True) y_ = (y_ * 10).astype(np.int32) X = dask_cudf.from_dask_dataframe(dd.from_dask_array(X_)) y = dask_cudf.from_dask_dataframe(dd.from_dask_array(y_)) w = dask_cudf.from_dask_dataframe(dd.from_dask_array(w_)) run_dask_classifier(X, y, w, model, "gpu_hist", local_cuda_client, 10) def test_empty_dmatrix(self, local_cuda_client: Client) -> None: parameters = { "tree_method": "hist", "debug_synchronize": True, "device": "cuda", } run_empty_dmatrix_reg(local_cuda_client, parameters) run_empty_dmatrix_cls(local_cuda_client, parameters) @pytest.mark.skipif(**tm.no_dask_cudf()) def test_empty_partition(self, local_cuda_client: Client) -> None: import cudf import cupy import dask_cudf mult = 100 df = cudf.DataFrame( { "a": [1, 2, 3, 4, 5.1] * mult, "b": [10, 15, 29.3, 30, 31] * mult, "y": [10, 20, 30, 40.0, 50] * mult, } ) parameters = { "tree_method": "hist", "debug_synchronize": True, "device": "cuda", } empty = df.iloc[:0] ddf = dask_cudf.concat( [dask_cudf.from_cudf(empty, npartitions=1)] + [dask_cudf.from_cudf(df, npartitions=3)] + [dask_cudf.from_cudf(df, npartitions=3)] ) X = ddf[ddf.columns.difference(["y"])] y = ddf[["y"]] dtrain = dxgb.DaskQuantileDMatrix(local_cuda_client, X, y) bst_empty = xgb.dask.train( local_cuda_client, parameters, dtrain, evals=[(dtrain, "train")] ) predt_empty = dxgb.predict(local_cuda_client, bst_empty, X).compute().values ddf = dask_cudf.concat( [dask_cudf.from_cudf(df, npartitions=3)] + [dask_cudf.from_cudf(df, npartitions=3)] ) X = ddf[ddf.columns.difference(["y"])] y = ddf[["y"]] dtrain = dxgb.DaskQuantileDMatrix(local_cuda_client, X, y) bst = xgb.dask.train( local_cuda_client, parameters, dtrain, evals=[(dtrain, "train")] ) predt = dxgb.predict(local_cuda_client, bst, X).compute().values cupy.testing.assert_allclose(predt, predt_empty) predt = dxgb.predict(local_cuda_client, bst, dtrain).compute() cupy.testing.assert_allclose(predt, predt_empty) predt = dxgb.inplace_predict(local_cuda_client, bst, 
X).compute().values cupy.testing.assert_allclose(predt, predt_empty) df = df.to_pandas() empty = df.iloc[:0] ddf = dd.concat( [dd.from_pandas(empty, npartitions=1)] + [dd.from_pandas(df, npartitions=3)] + [dd.from_pandas(df, npartitions=3)] ) X = ddf[ddf.columns.difference(["y"])] y = ddf[["y"]] predt_empty = cupy.asnumpy(predt_empty) predt = dxgb.predict(local_cuda_client, bst_empty, X).compute().values np.testing.assert_allclose(predt, predt_empty) in_predt = ( dxgb.inplace_predict(local_cuda_client, bst_empty, X).compute().values ) np.testing.assert_allclose(predt, in_predt) def test_empty_dmatrix_auc(self, local_cuda_client: Client) -> None: n_workers = len(tm.get_client_workers(local_cuda_client)) run_empty_dmatrix_auc(local_cuda_client, "cuda", n_workers) def test_auc(self, local_cuda_client: Client) -> None: run_auc(local_cuda_client, "cuda") def test_invalid_ordinal(self, local_cuda_client: Client) -> None: """One should not specify the device ordinal with dask.""" with pytest.raises(ValueError, match="device=cuda"): X, y, _ = generate_array() m = dxgb.DaskDMatrix(local_cuda_client, X, y) dxgb.train(local_cuda_client, {"device": "cuda:0"}, m) booster = dxgb.train(local_cuda_client, {"device": "cuda"}, m)["booster"] assert ( json.loads(booster.save_config())["learner"]["generic_param"]["device"] == "cuda:0" ) def test_data_initialization(self, local_cuda_client: Client) -> None: X, y, _ = generate_array() fw = da.random.random((random_cols,)) fw = fw - fw.min() m = dxgb.DaskDMatrix(local_cuda_client, X, y, feature_weights=fw) workers = tm.get_client_workers(local_cuda_client) rabit_args = local_cuda_client.sync( dxgb._get_rabit_args, len(workers), None, local_cuda_client ) def worker_fn(worker_addr: str, data_ref: Dict) -> None: with dxgb.CommunicatorContext(**rabit_args): local_dtrain = dxgb._dmatrix_from_list_of_parts(**data_ref, nthread=7) fw_rows = local_dtrain.get_float_info("feature_weights").shape[0] assert fw_rows == local_dtrain.num_col() futures = 
[] for i in range(len(workers)): futures.append( local_cuda_client.submit( worker_fn, workers[i], m._create_fn_args(workers[i]), pure=False, workers=[workers[i]], ) ) local_cuda_client.gather(futures) def test_interface_consistency(self) -> None: sig = OrderedDict(signature(dxgb.DaskDMatrix).parameters) del sig["client"] ddm_names = list(sig.keys()) sig = OrderedDict(signature(dxgb.DaskQuantileDMatrix).parameters) del sig["client"] del sig["max_bin"] del sig["ref"] ddqdm_names = list(sig.keys()) assert len(ddm_names) == len(ddqdm_names) # between dask for i in range(len(ddm_names)): assert ddm_names[i] == ddqdm_names[i] sig = OrderedDict(signature(xgb.DMatrix).parameters) del sig["nthread"] # no nthread in dask dm_names = list(sig.keys()) sig = OrderedDict(signature(xgb.QuantileDMatrix).parameters) del sig["nthread"] del sig["max_bin"] del sig["ref"] dqdm_names = list(sig.keys()) # between single node assert len(dm_names) == len(dqdm_names) for i in range(len(dm_names)): assert dm_names[i] == dqdm_names[i] # ddm <-> dm for i in range(len(ddm_names)): assert ddm_names[i] == dm_names[i] # dqdm <-> ddqdm for i in range(len(ddqdm_names)): assert ddqdm_names[i] == dqdm_names[i] sig = OrderedDict(signature(xgb.XGBRanker.fit).parameters) ranker_names = list(sig.keys()) sig = OrderedDict(signature(xgb.dask.DaskXGBRanker.fit).parameters) dranker_names = list(sig.keys()) for rn, drn in zip(ranker_names, dranker_names): assert rn == drn @pytest.mark.skipif(**tm.no_cupy()) def test_with_asyncio(local_cuda_client: Client) -> None: address = local_cuda_client.scheduler.address output = asyncio.run(run_from_dask_array_asyncio(address)) assert isinstance(output["booster"], xgb.Booster) assert isinstance(output["history"], dict) async def run_from_dask_array_asyncio(scheduler_address: str) -> dxgb.TrainReturnT: async with Client(scheduler_address, asynchronous=True) as client: import cupy as cp X, y, _ = generate_array() X = X.map_blocks(cp.array) y = y.map_blocks(cp.array) m = 
await xgb.dask.DaskQuantileDMatrix(client, X, y) output = await xgb.dask.train( client, {"tree_method": "hist", "device": "cuda"}, dtrain=m ) with_m = await xgb.dask.predict(client, output, m) with_X = await xgb.dask.predict(client, output, X) inplace = await xgb.dask.inplace_predict(client, output, X) assert isinstance(with_m, da.Array) assert isinstance(with_X, da.Array) assert isinstance(inplace, da.Array) cp.testing.assert_allclose( await client.compute(with_m), await client.compute(with_X) ) cp.testing.assert_allclose( await client.compute(with_m), await client.compute(inplace) ) client.shutdown() return output
20,069
33.484536
86
py
xgboost
xgboost-master/tests/benchmark/benchmark_linear.py
#pylint: skip-file import argparse import xgboost as xgb import numpy as np from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split import time import ast rng = np.random.RandomState(1994) def run_benchmark(args): try: dtest = xgb.DMatrix('dtest.dm') dtrain = xgb.DMatrix('dtrain.dm') if not (dtest.num_col() == args.columns \ and dtrain.num_col() == args.columns): raise ValueError("Wrong cols") if not (dtest.num_row() == args.rows * args.test_size \ and dtrain.num_row() == args.rows * (1-args.test_size)): raise ValueError("Wrong rows") except: print("Generating dataset: {} rows * {} columns".format(args.rows, args.columns)) print("{}/{} test/train split".format(args.test_size, 1.0 - args.test_size)) tmp = time.time() X, y = make_classification(args.rows, n_features=args.columns, n_redundant=0, n_informative=args.columns, n_repeated=0, random_state=7) if args.sparsity < 1.0: X = np.array([[np.nan if rng.uniform(0, 1) < args.sparsity else x for x in x_row] for x_row in X]) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=args.test_size, random_state=7) print ("Generate Time: %s seconds" % (str(time.time() - tmp))) tmp = time.time() print ("DMatrix Start") dtrain = xgb.DMatrix(X_train, y_train) dtest = xgb.DMatrix(X_test, y_test, nthread=-1) print ("DMatrix Time: %s seconds" % (str(time.time() - tmp))) dtest.save_binary('dtest.dm') dtrain.save_binary('dtrain.dm') param = {'objective': 'binary:logistic','booster':'gblinear'} if args.params != '': param.update(ast.literal_eval(args.params)) param['updater'] = args.updater print("Training with '%s'" % param['updater']) tmp = time.time() xgb.train(param, dtrain, args.iterations, evals=[(dtrain,"train")], early_stopping_rounds = args.columns) print ("Train Time: %s seconds" % (str(time.time() - tmp))) parser = argparse.ArgumentParser() parser.add_argument('--updater', default='coord_descent') parser.add_argument('--sparsity', type=float, default=0.0) 
parser.add_argument('--lambda', type=float, default=1.0) parser.add_argument('--tol', type=float, default=1e-5) parser.add_argument('--alpha', type=float, default=1.0) parser.add_argument('--rows', type=int, default=1000000) parser.add_argument('--iterations', type=int, default=10000) parser.add_argument('--columns', type=int, default=50) parser.add_argument('--test_size', type=float, default=0.25) parser.add_argument('--standardise', type=bool, default=False) parser.add_argument('--params', default='', help='Provide additional parameters as a Python dict string, e.g. --params \"{\'max_depth\':2}\"') args = parser.parse_args() run_benchmark(args)
2,912
40.614286
143
py
xgboost
xgboost-master/tests/benchmark/generate_libsvm.py
"""Generate synthetic data in LIBSVM format.""" import argparse import io import time import numpy as np from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split RNG = np.random.RandomState(2019) def generate_data(args): """Generates the data.""" print("Generating dataset: {} rows * {} columns".format(args.rows, args.columns)) print("Sparsity {}".format(args.sparsity)) print("{}/{} train/test split".format(1.0 - args.test_size, args.test_size)) tmp = time.time() n_informative = args.columns * 7 // 10 n_redundant = args.columns // 10 n_repeated = args.columns // 10 print("n_informative: {}, n_redundant: {}, n_repeated: {}".format(n_informative, n_redundant, n_repeated)) x, y = make_classification(n_samples=args.rows, n_features=args.columns, n_informative=n_informative, n_redundant=n_redundant, n_repeated=n_repeated, shuffle=False, random_state=RNG) print("Generate Time: {} seconds".format(time.time() - tmp)) tmp = time.time() x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=args.test_size, random_state=RNG, shuffle=False) print("Train/Test Split Time: {} seconds".format(time.time() - tmp)) tmp = time.time() write_file('train.libsvm', x_train, y_train, args.sparsity) print("Write Train Time: {} seconds".format(time.time() - tmp)) tmp = time.time() write_file('test.libsvm', x_test, y_test, args.sparsity) print("Write Test Time: {} seconds".format(time.time() - tmp)) def write_file(filename, x_data, y_data, sparsity): with open(filename, 'w') as f: for x, y in zip(x_data, y_data): write_line(f, x, y, sparsity) def write_line(f, x, y, sparsity): with io.StringIO() as line: line.write(str(y)) for i, col in enumerate(x): if 0.0 < sparsity < 1.0: if RNG.uniform(0, 1) > sparsity: write_feature(line, i, col) else: write_feature(line, i, col) line.write('\n') f.write(line.getvalue()) def write_feature(line, index, feature): line.write(' ') line.write(str(index)) line.write(':') line.write(str(feature)) def 
main(): """The main function. Defines and parses command line arguments and calls the generator. """ parser = argparse.ArgumentParser() parser.add_argument('--rows', type=int, default=1000000) parser.add_argument('--columns', type=int, default=50) parser.add_argument('--sparsity', type=float, default=0.0) parser.add_argument('--test_size', type=float, default=0.01) args = parser.parse_args() generate_data(args) if __name__ == '__main__': main()
2,928
32.284091
97
py
xgboost
xgboost-master/tests/benchmark/benchmark_tree.py
"""Run benchmark on the tree booster.""" import argparse import ast import time import numpy as np import xgboost as xgb RNG = np.random.RandomState(1994) def run_benchmark(args): """Runs the benchmark.""" try: dtest = xgb.DMatrix('dtest.dm') dtrain = xgb.DMatrix('dtrain.dm') if not (dtest.num_col() == args.columns and dtrain.num_col() == args.columns): raise ValueError("Wrong cols") if not (dtest.num_row() == args.rows * args.test_size and dtrain.num_row() == args.rows * (1 - args.test_size)): raise ValueError("Wrong rows") except: print("Generating dataset: {} rows * {} columns".format(args.rows, args.columns)) print("{}/{} test/train split".format(args.test_size, 1.0 - args.test_size)) tmp = time.time() X = RNG.rand(args.rows, args.columns) y = RNG.randint(0, 2, args.rows) if 0.0 < args.sparsity < 1.0: X = np.array([[np.nan if RNG.uniform(0, 1) < args.sparsity else x for x in x_row] for x_row in X]) train_rows = int(args.rows * (1.0 - args.test_size)) test_rows = int(args.rows * args.test_size) X_train = X[:train_rows, :] X_test = X[-test_rows:, :] y_train = y[:train_rows] y_test = y[-test_rows:] print("Generate Time: %s seconds" % (str(time.time() - tmp))) del X, y tmp = time.time() print("DMatrix Start") dtrain = xgb.DMatrix(X_train, y_train, nthread=-1) dtest = xgb.DMatrix(X_test, y_test, nthread=-1) print("DMatrix Time: %s seconds" % (str(time.time() - tmp))) del X_train, y_train, X_test, y_test dtest.save_binary('dtest.dm') dtrain.save_binary('dtrain.dm') param = {'objective': 'binary:logistic'} if args.params != '': param.update(ast.literal_eval(args.params)) param['tree_method'] = args.tree_method print("Training with '%s'" % param['tree_method']) tmp = time.time() xgb.train(param, dtrain, args.iterations, evals=[(dtest, "test")]) print("Train Time: %s seconds" % (str(time.time() - tmp))) def main(): """The main function. Defines and parses command line arguments and calls the benchmark. 
""" parser = argparse.ArgumentParser() parser.add_argument('--tree_method', default='gpu_hist') parser.add_argument('--sparsity', type=float, default=0.0) parser.add_argument('--rows', type=int, default=1000000) parser.add_argument('--columns', type=int, default=50) parser.add_argument('--iterations', type=int, default=500) parser.add_argument('--test_size', type=float, default=0.25) parser.add_argument('--params', default='', help='Provide additional parameters as a Python dict string, e.g. --params ' '\"{\'max_depth\':2}\"') args = parser.parse_args() run_benchmark(args) if __name__ == '__main__': main()
3,021
33.735632
100
py
xgboost
xgboost-master/demo/nvflare/horizontal/custom/controller.py
""" Example of training controller with NVFlare =========================================== """ import multiprocessing from nvflare.apis.client import Client from nvflare.apis.fl_context import FLContext from nvflare.apis.impl.controller import Controller, Task from nvflare.apis.shareable import Shareable from nvflare.apis.signal import Signal from trainer import SupportedTasks import xgboost.federated class XGBoostController(Controller): def __init__(self, port: int, world_size: int, server_key_path: str, server_cert_path: str, client_cert_path: str): """Controller for federated XGBoost. Args: port: the port for the gRPC server to listen on. world_size: the number of sites. server_key_path: the path to the server key file. server_cert_path: the path to the server certificate file. client_cert_path: the path to the client certificate file. """ super().__init__() self._port = port self._world_size = world_size self._server_key_path = server_key_path self._server_cert_path = server_cert_path self._client_cert_path = client_cert_path self._server = None def start_controller(self, fl_ctx: FLContext): self._server = multiprocessing.Process( target=xgboost.federated.run_federated_server, args=(self._port, self._world_size, self._server_key_path, self._server_cert_path, self._client_cert_path)) self._server.start() def stop_controller(self, fl_ctx: FLContext): if self._server: self._server.terminate() def process_result_of_unknown_task(self, client: Client, task_name: str, client_task_id: str, result: Shareable, fl_ctx: FLContext): self.log_warning(fl_ctx, f"Unknown task: {task_name} from client {client.name}.") def control_flow(self, abort_signal: Signal, fl_ctx: FLContext): self.log_info(fl_ctx, "XGBoost training control flow started.") if abort_signal.triggered: return task = Task(name=SupportedTasks.TRAIN, data=Shareable()) self.broadcast_and_wait( task=task, min_responses=self._world_size, fl_ctx=fl_ctx, wait_time_after_min_received=1, abort_signal=abort_signal, ) if 
abort_signal.triggered: return self.log_info(fl_ctx, "XGBoost training control flow finished.")
2,587
36.507246
89
py
xgboost
xgboost-master/demo/nvflare/horizontal/custom/trainer.py
import os from nvflare.apis.executor import Executor from nvflare.apis.fl_constant import FLContextKey, ReturnCode from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable, make_reply from nvflare.apis.signal import Signal import xgboost as xgb from xgboost import callback class SupportedTasks(object): TRAIN = "train" class XGBoostTrainer(Executor): def __init__(self, server_address: str, world_size: int, server_cert_path: str, client_key_path: str, client_cert_path: str, use_gpus: bool): """Trainer for federated XGBoost. Args: server_address: address for the gRPC server to connect to. world_size: the number of sites. server_cert_path: the path to the server certificate file. client_key_path: the path to the client key file. client_cert_path: the path to the client certificate file. """ super().__init__() self._server_address = server_address self._world_size = world_size self._server_cert_path = server_cert_path self._client_key_path = client_key_path self._client_cert_path = client_cert_path self._use_gpus = use_gpus def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable: self.log_info(fl_ctx, f"Executing {task_name}") try: if task_name == SupportedTasks.TRAIN: self._do_training(fl_ctx) return make_reply(ReturnCode.OK) else: self.log_error(fl_ctx, f"{task_name} is not a supported task.") return make_reply(ReturnCode.TASK_UNKNOWN) except BaseException as e: self.log_exception(fl_ctx, f"Task {task_name} failed. 
Exception: {e.__str__()}") return make_reply(ReturnCode.EXECUTION_EXCEPTION) def _do_training(self, fl_ctx: FLContext): client_name = fl_ctx.get_prop(FLContextKey.CLIENT_NAME) rank = int(client_name.split('-')[1]) - 1 communicator_env = { 'xgboost_communicator': 'federated', 'federated_server_address': self._server_address, 'federated_world_size': self._world_size, 'federated_rank': rank, 'federated_server_cert': self._server_cert_path, 'federated_client_key': self._client_key_path, 'federated_client_cert': self._client_cert_path } with xgb.collective.CommunicatorContext(**communicator_env): # Load file, file will not be sharded in federated mode. dtrain = xgb.DMatrix('agaricus.txt.train?format=libsvm') dtest = xgb.DMatrix('agaricus.txt.test?format=libsvm') # Specify parameters via map, definition are same as c++ version param = {'max_depth': 2, 'eta': 1, 'objective': 'binary:logistic'} if self._use_gpus: self.log_info(fl_ctx, f'Training with GPU {rank}') param['tree_method'] = 'gpu_hist' param['gpu_id'] = rank # Specify validations set to watch performance watchlist = [(dtest, 'eval'), (dtrain, 'train')] num_round = 20 # Run training, all the features in training API is available. bst = xgb.train(param, dtrain, num_round, evals=watchlist, early_stopping_rounds=2, verbose_eval=False, callbacks=[callback.EvaluationMonitor(rank=rank)]) # Save the model. workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT) run_number = fl_ctx.get_prop(FLContextKey.CURRENT_RUN) run_dir = workspace.get_run_dir(run_number) bst.save_model(os.path.join(run_dir, "test.model.json")) xgb.collective.communicator_print("Finished training\n")
3,971
42.648352
84
py
xgboost
xgboost-master/demo/nvflare/vertical/custom/controller.py
""" Example of training controller with NVFlare =========================================== """ import multiprocessing from nvflare.apis.client import Client from nvflare.apis.fl_context import FLContext from nvflare.apis.impl.controller import Controller, Task from nvflare.apis.shareable import Shareable from nvflare.apis.signal import Signal from trainer import SupportedTasks import xgboost.federated class XGBoostController(Controller): def __init__(self, port: int, world_size: int, server_key_path: str, server_cert_path: str, client_cert_path: str): """Controller for federated XGBoost. Args: port: the port for the gRPC server to listen on. world_size: the number of sites. server_key_path: the path to the server key file. server_cert_path: the path to the server certificate file. client_cert_path: the path to the client certificate file. """ super().__init__() self._port = port self._world_size = world_size self._server_key_path = server_key_path self._server_cert_path = server_cert_path self._client_cert_path = client_cert_path self._server = None def start_controller(self, fl_ctx: FLContext): self._server = multiprocessing.Process( target=xgboost.federated.run_federated_server, args=(self._port, self._world_size, self._server_key_path, self._server_cert_path, self._client_cert_path)) self._server.start() def stop_controller(self, fl_ctx: FLContext): if self._server: self._server.terminate() def process_result_of_unknown_task(self, client: Client, task_name: str, client_task_id: str, result: Shareable, fl_ctx: FLContext): self.log_warning(fl_ctx, f"Unknown task: {task_name} from client {client.name}.") def control_flow(self, abort_signal: Signal, fl_ctx: FLContext): self.log_info(fl_ctx, "XGBoost training control flow started.") if abort_signal.triggered: return task = Task(name=SupportedTasks.TRAIN, data=Shareable()) self.broadcast_and_wait( task=task, min_responses=self._world_size, fl_ctx=fl_ctx, wait_time_after_min_received=1, abort_signal=abort_signal, ) if 
abort_signal.triggered: return self.log_info(fl_ctx, "XGBoost training control flow finished.")
2,587
36.507246
89
py
xgboost
xgboost-master/demo/nvflare/vertical/custom/trainer.py
import os from nvflare.apis.executor import Executor from nvflare.apis.fl_constant import FLContextKey, ReturnCode from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable, make_reply from nvflare.apis.signal import Signal import xgboost as xgb from xgboost import callback class SupportedTasks(object): TRAIN = "train" class XGBoostTrainer(Executor): def __init__(self, server_address: str, world_size: int, server_cert_path: str, client_key_path: str, client_cert_path: str): """Trainer for federated XGBoost. Args: server_address: address for the gRPC server to connect to. world_size: the number of sites. server_cert_path: the path to the server certificate file. client_key_path: the path to the client key file. client_cert_path: the path to the client certificate file. """ super().__init__() self._server_address = server_address self._world_size = world_size self._server_cert_path = server_cert_path self._client_key_path = client_key_path self._client_cert_path = client_cert_path def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable: self.log_info(fl_ctx, f"Executing {task_name}") try: if task_name == SupportedTasks.TRAIN: self._do_training(fl_ctx) return make_reply(ReturnCode.OK) else: self.log_error(fl_ctx, f"{task_name} is not a supported task.") return make_reply(ReturnCode.TASK_UNKNOWN) except BaseException as e: self.log_exception(fl_ctx, f"Task {task_name} failed. 
Exception: {e.__str__()}") return make_reply(ReturnCode.EXECUTION_EXCEPTION) def _do_training(self, fl_ctx: FLContext): client_name = fl_ctx.get_prop(FLContextKey.CLIENT_NAME) rank = int(client_name.split('-')[1]) - 1 communicator_env = { 'xgboost_communicator': 'federated', 'federated_server_address': self._server_address, 'federated_world_size': self._world_size, 'federated_rank': rank, 'federated_server_cert': self._server_cert_path, 'federated_client_key': self._client_key_path, 'federated_client_cert': self._client_cert_path } with xgb.collective.CommunicatorContext(**communicator_env): # Load file, file will not be sharded in federated mode. if rank == 0: label = '&label_column=0' else: label = '' dtrain = xgb.DMatrix(f'higgs.train.csv?format=csv{label}', data_split_mode=1) dtest = xgb.DMatrix(f'higgs.test.csv?format=csv{label}', data_split_mode=1) # specify parameters via map param = { 'validate_parameters': True, 'eta': 0.1, 'gamma': 1.0, 'max_depth': 8, 'min_child_weight': 100, 'tree_method': 'approx', 'grow_policy': 'depthwise', 'objective': 'binary:logistic', 'eval_metric': 'auc', } # specify validations set to watch performance watchlist = [(dtest, "eval"), (dtrain, "train")] # number of boosting rounds num_round = 10 bst = xgb.train(param, dtrain, num_round, evals=watchlist, early_stopping_rounds=2) # Save the model. workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT) run_number = fl_ctx.get_prop(FLContextKey.CURRENT_RUN) run_dir = workspace.get_run_dir(run_number) bst.save_model(os.path.join(run_dir, "higgs.model.federated.vertical.json")) xgb.collective.communicator_print("Finished training\n")
4,015
39.979592
95
py
xgboost
xgboost-master/demo/aft_survival/aft_survival_demo_with_optuna.py
""" Demo for survival analysis (regression) with Optuna. ==================================================== Demo for survival analysis (regression) using Accelerated Failure Time (AFT) model, using Optuna to tune hyperparameters """ import numpy as np import optuna import pandas as pd from sklearn.model_selection import ShuffleSplit import xgboost as xgb # The Veterans' Administration Lung Cancer Trial # The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980) df = pd.read_csv('../data/veterans_lung_cancer.csv') print('Training data:') print(df) # Split features and labels y_lower_bound = df['Survival_label_lower_bound'] y_upper_bound = df['Survival_label_upper_bound'] X = df.drop(['Survival_label_lower_bound', 'Survival_label_upper_bound'], axis=1) # Split data into training and validation sets rs = ShuffleSplit(n_splits=2, test_size=.7, random_state=0) train_index, valid_index = next(rs.split(X)) dtrain = xgb.DMatrix(X.values[train_index, :]) dtrain.set_float_info('label_lower_bound', y_lower_bound[train_index]) dtrain.set_float_info('label_upper_bound', y_upper_bound[train_index]) dvalid = xgb.DMatrix(X.values[valid_index, :]) dvalid.set_float_info('label_lower_bound', y_lower_bound[valid_index]) dvalid.set_float_info('label_upper_bound', y_upper_bound[valid_index]) # Define hyperparameter search space base_params = {'verbosity': 0, 'objective': 'survival:aft', 'eval_metric': 'aft-nloglik', 'tree_method': 'hist'} # Hyperparameters common to all trials def objective(trial): params = {'learning_rate': trial.suggest_loguniform('learning_rate', 0.01, 1.0), 'aft_loss_distribution': trial.suggest_categorical('aft_loss_distribution', ['normal', 'logistic', 'extreme']), 'aft_loss_distribution_scale': trial.suggest_loguniform('aft_loss_distribution_scale', 0.1, 10.0), 'max_depth': trial.suggest_int('max_depth', 3, 8), 'lambda': trial.suggest_loguniform('lambda', 1e-8, 1.0), 'alpha': trial.suggest_loguniform('alpha', 1e-8, 1.0)} # Search 
space params.update(base_params) pruning_callback = optuna.integration.XGBoostPruningCallback(trial, 'valid-aft-nloglik') bst = xgb.train(params, dtrain, num_boost_round=10000, evals=[(dtrain, 'train'), (dvalid, 'valid')], early_stopping_rounds=50, verbose_eval=False, callbacks=[pruning_callback]) if bst.best_iteration >= 25: return bst.best_score else: return np.inf # Reject models with < 25 trees # Run hyperparameter search study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=200) print('Completed hyperparameter tuning with best aft-nloglik = {}.'.format(study.best_trial.value)) params = {} params.update(base_params) params.update(study.best_trial.params) # Re-run training with the best hyperparameter combination print('Re-running the best trial... params = {}'.format(params)) bst = xgb.train(params, dtrain, num_boost_round=10000, evals=[(dtrain, 'train'), (dvalid, 'valid')], early_stopping_rounds=50) # Run prediction on the validation set df = pd.DataFrame({'Label (lower bound)': y_lower_bound[valid_index], 'Label (upper bound)': y_upper_bound[valid_index], 'Predicted label': bst.predict(dvalid)}) print(df) # Show only data points with right-censored labels print(df[np.isinf(df['Label (upper bound)'])]) # Save trained model bst.save_model('aft_best_model.json')
3,655
42.52381
112
py
xgboost
xgboost-master/demo/aft_survival/aft_survival_viz_demo.py
""" Visual demo for survival analysis (regression) with Accelerated Failure Time (AFT) model. ========================================================================================= This demo uses 1D toy data and visualizes how XGBoost fits a tree ensemble. The ensemble model starts out as a flat line and evolves into a step function in order to account for all ranged labels. """ import matplotlib.pyplot as plt import numpy as np import xgboost as xgb plt.rcParams.update({"font.size": 13}) # Function to visualize censored labels def plot_censored_labels( X: np.ndarray, y_lower: np.ndarray, y_upper: np.ndarray ) -> None: def replace_inf(x: np.ndarray, target_value: float) -> np.ndarray: x[np.isinf(x)] = target_value return x plt.plot(X, y_lower, "o", label="y_lower", color="blue") plt.plot(X, y_upper, "o", label="y_upper", color="fuchsia") plt.vlines( X, ymin=replace_inf(y_lower, 0.01), ymax=replace_inf(y_upper, 1000.0), label="Range for y", color="gray", ) # Toy data X = np.array([1, 2, 3, 4, 5]).reshape((-1, 1)) INF = np.inf y_lower = np.array([10, 15, -INF, 30, 100]) y_upper = np.array([INF, INF, 20, 50, INF]) # Visualize toy data plt.figure(figsize=(5, 4)) plot_censored_labels(X, y_lower, y_upper) plt.ylim((6, 200)) plt.legend(loc="lower right") plt.title("Toy data") plt.xlabel("Input feature") plt.ylabel("Label") plt.yscale("log") plt.tight_layout() plt.show(block=True) # Will be used to visualize XGBoost model grid_pts = np.linspace(0.8, 5.2, 1000).reshape((-1, 1)) # Train AFT model using XGBoost dmat = xgb.DMatrix(X) dmat.set_float_info("label_lower_bound", y_lower) dmat.set_float_info("label_upper_bound", y_upper) params = {"max_depth": 3, "objective": "survival:aft", "min_child_weight": 0} accuracy_history = [] class PlotIntermediateModel(xgb.callback.TrainingCallback): """Custom callback to plot intermediate models.""" def __init__(self) -> None: super().__init__() def after_iteration( self, model: xgb.Booster, epoch: int, evals_log: 
xgb.callback.TrainingCallback.EvalsLog, ) -> bool: """Run after training is finished.""" # Compute y_pred = prediction using the intermediate model, at current boosting # iteration y_pred = model.predict(dmat) # "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) # includes the corresponding predicted label (y_pred) acc = np.sum( np.logical_and(y_pred >= y_lower, y_pred <= y_upper) / len(X) * 100 ) accuracy_history.append(acc) # Plot ranged labels as well as predictions by the model plt.subplot(5, 3, epoch + 1) plot_censored_labels(X, y_lower, y_upper) y_pred_grid_pts = model.predict(xgb.DMatrix(grid_pts)) plt.plot( grid_pts, y_pred_grid_pts, "r-", label="XGBoost AFT model", linewidth=4 ) plt.title("Iteration {}".format(epoch), x=0.5, y=0.8) plt.xlim((0.8, 5.2)) plt.ylim((1 if np.min(y_pred) < 6 else 6, 200)) plt.yscale("log") return False res: xgb.callback.TrainingCallback.EvalsLog = {} plt.figure(figsize=(12, 13)) bst = xgb.train( params, dmat, 15, [(dmat, "train")], evals_result=res, callbacks=[PlotIntermediateModel()], ) plt.tight_layout() plt.legend( loc="lower center", ncol=4, bbox_to_anchor=(0.5, 0), bbox_transform=plt.gcf().transFigure, ) plt.tight_layout() # Plot negative log likelihood over boosting iterations plt.figure(figsize=(8, 3)) plt.subplot(1, 2, 1) plt.plot(res["train"]["aft-nloglik"], "b-o", label="aft-nloglik") plt.xlabel("# Boosting Iterations") plt.legend(loc="best") # Plot "accuracy" over boosting iterations # "Accuracy" = the number of data points whose ranged label (y_lower, y_upper) includes # the corresponding predicted label (y_pred) plt.subplot(1, 2, 2) plt.plot(accuracy_history, "r-o", label="Accuracy (%)") plt.xlabel("# Boosting Iterations") plt.legend(loc="best") plt.tight_layout() plt.show()
4,167
28.985612
89
py
xgboost
xgboost-master/demo/aft_survival/aft_survival_demo.py
""" Demo for survival analysis (regression). ======================================== Demo for survival analysis (regression). using Accelerated Failure Time (AFT) model. """ import os import numpy as np import pandas as pd from sklearn.model_selection import ShuffleSplit import xgboost as xgb # The Veterans' Administration Lung Cancer Trial # The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980) CURRENT_DIR = os.path.dirname(__file__) df = pd.read_csv(os.path.join(CURRENT_DIR, '../data/veterans_lung_cancer.csv')) print('Training data:') print(df) # Split features and labels y_lower_bound = df['Survival_label_lower_bound'] y_upper_bound = df['Survival_label_upper_bound'] X = df.drop(['Survival_label_lower_bound', 'Survival_label_upper_bound'], axis=1) # Split data into training and validation sets rs = ShuffleSplit(n_splits=2, test_size=.7, random_state=0) train_index, valid_index = next(rs.split(X)) dtrain = xgb.DMatrix(X.values[train_index, :]) dtrain.set_float_info('label_lower_bound', y_lower_bound[train_index]) dtrain.set_float_info('label_upper_bound', y_upper_bound[train_index]) dvalid = xgb.DMatrix(X.values[valid_index, :]) dvalid.set_float_info('label_lower_bound', y_lower_bound[valid_index]) dvalid.set_float_info('label_upper_bound', y_upper_bound[valid_index]) # Train gradient boosted trees using AFT loss and metric params = {'verbosity': 0, 'objective': 'survival:aft', 'eval_metric': 'aft-nloglik', 'tree_method': 'hist', 'learning_rate': 0.05, 'aft_loss_distribution': 'normal', 'aft_loss_distribution_scale': 1.20, 'max_depth': 6, 'lambda': 0.01, 'alpha': 0.02} bst = xgb.train(params, dtrain, num_boost_round=10000, evals=[(dtrain, 'train'), (dvalid, 'valid')], early_stopping_rounds=50) # Run prediction on the validation set df = pd.DataFrame({'Label (lower bound)': y_lower_bound[valid_index], 'Label (upper bound)': y_upper_bound[valid_index], 'Predicted label': bst.predict(dvalid)}) print(df) # Show only data points with 
right-censored labels print(df[np.isinf(df['Label (upper bound)'])]) # Save trained model bst.save_model('aft_model.json')
2,291
35.380952
87
py
xgboost
xgboost-master/demo/multiclass_classification/train.py
#!/usr/bin/python from __future__ import division import numpy as np import xgboost as xgb # label need to be 0 to num_class -1 data = np.loadtxt('./dermatology.data', delimiter=',', converters={33: lambda x:int(x == '?'), 34: lambda x:int(x) - 1}) sz = data.shape train = data[:int(sz[0] * 0.7), :] test = data[int(sz[0] * 0.7):, :] train_X = train[:, :33] train_Y = train[:, 34] test_X = test[:, :33] test_Y = test[:, 34] xg_train = xgb.DMatrix(train_X, label=train_Y) xg_test = xgb.DMatrix(test_X, label=test_Y) # setup parameters for xgboost param = {} # use softmax multi-class classification param['objective'] = 'multi:softmax' # scale weight of positive examples param['eta'] = 0.1 param['max_depth'] = 6 param['nthread'] = 4 param['num_class'] = 6 watchlist = [(xg_train, 'train'), (xg_test, 'test')] num_round = 5 bst = xgb.train(param, xg_train, num_round, watchlist) # get prediction pred = bst.predict(xg_test) error_rate = np.sum(pred != test_Y) / test_Y.shape[0] print('Test error using softmax = {}'.format(error_rate)) # do the same thing again, but output probabilities param['objective'] = 'multi:softprob' bst = xgb.train(param, xg_train, num_round, watchlist) # Note: this convention has been changed since xgboost-unity # get prediction, this is in 1D array, need reshape to (ndata, nclass) pred_prob = bst.predict(xg_test).reshape(test_Y.shape[0], 6) pred_label = np.argmax(pred_prob, axis=1) error_rate = np.sum(pred_label != test_Y) / test_Y.shape[0] print('Test error using softprob = {}'.format(error_rate))
1,553
28.884615
73
py
xgboost
xgboost-master/demo/gpu_acceleration/cover_type.py
import time from sklearn.datasets import fetch_covtype from sklearn.model_selection import train_test_split import xgboost as xgb # Fetch dataset using sklearn cov = fetch_covtype() X = cov.data y = cov.target # Create 0.75/0.25 train/test split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, train_size=0.75, random_state=42) # Specify sufficient boosting iterations to reach a minimum num_round = 3000 # Leave most parameters as default param = {'objective': 'multi:softmax', # Specify multiclass classification 'num_class': 8, # Number of possible output classes 'tree_method': 'gpu_hist' # Use GPU accelerated algorithm } # Convert input data from numpy to XGBoost format dtrain = xgb.DMatrix(X_train, label=y_train) dtest = xgb.DMatrix(X_test, label=y_test) gpu_res = {} # Store accuracy result tmp = time.time() # Train model xgb.train(param, dtrain, num_round, evals=[(dtest, 'test')], evals_result=gpu_res) print("GPU Training Time: %s seconds" % (str(time.time() - tmp))) # Repeat for CPU algorithm tmp = time.time() param['tree_method'] = 'hist' cpu_res = {} xgb.train(param, dtrain, num_round, evals=[(dtest, 'test')], evals_result=cpu_res) print("CPU Training Time: %s seconds" % (str(time.time() - tmp)))
1,333
30.761905
90
py
xgboost
xgboost-master/demo/dask/cpu_survival.py
""" Example of training survival model with Dask on CPU =================================================== """ import os import dask.dataframe as dd from dask.distributed import Client, LocalCluster import xgboost as xgb from xgboost.dask import DaskDMatrix def main(client): # Load an example survival data from CSV into a Dask data frame. # The Veterans' Administration Lung Cancer Trial # The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980) CURRENT_DIR = os.path.dirname(__file__) df = dd.read_csv( os.path.join(CURRENT_DIR, os.pardir, "data", "veterans_lung_cancer.csv") ) # DaskDMatrix acts like normal DMatrix, works as a proxy for local # DMatrix scatter around workers. # For AFT survival, you'd need to extract the lower and upper bounds for the label # and pass them as arguments to DaskDMatrix. y_lower_bound = df["Survival_label_lower_bound"] y_upper_bound = df["Survival_label_upper_bound"] X = df.drop(["Survival_label_lower_bound", "Survival_label_upper_bound"], axis=1) dtrain = DaskDMatrix( client, X, label_lower_bound=y_lower_bound, label_upper_bound=y_upper_bound ) # Use train method from xgboost.dask instead of xgboost. This # distributed version of train returns a dictionary containing the # resulting booster and evaluation history obtained from # evaluation metrics. params = { "verbosity": 1, "objective": "survival:aft", "eval_metric": "aft-nloglik", "learning_rate": 0.05, "aft_loss_distribution_scale": 1.20, "aft_loss_distribution": "normal", "max_depth": 6, "lambda": 0.01, "alpha": 0.02, } output = xgb.dask.train( client, params, dtrain, num_boost_round=100, evals=[(dtrain, "train")] ) bst = output["booster"] history = output["history"] # you can pass output directly into `predict` too. 
prediction = xgb.dask.predict(client, bst, dtrain) print("Evaluation history: ", history) # Uncomment the following line to save the model to the disk # bst.save_model('survival_model.json') return prediction if __name__ == "__main__": # or use other clusters for scaling with LocalCluster(n_workers=7, threads_per_worker=4) as cluster: with Client(cluster) as client: main(client)
2,404
32.402778
91
py
xgboost
xgboost-master/demo/dask/sklearn_cpu_training.py
""" Use scikit-learn regressor interface with CPU histogram tree method =================================================================== """ from dask import array as da from dask.distributed import Client, LocalCluster import xgboost def main(client): # generate some random data for demonstration n = 100 m = 10000 partition_size = 100 X = da.random.random((m, n), partition_size) y = da.random.random(m, partition_size) regressor = xgboost.dask.DaskXGBRegressor(verbosity=1, n_estimators=2) regressor.set_params(tree_method="hist") # assigning client here is optional regressor.client = client regressor.fit(X, y, eval_set=[(X, y)]) prediction = regressor.predict(X) bst = regressor.get_booster() history = regressor.evals_result() print("Evaluation history:", history) # returned prediction is always a dask array. assert isinstance(prediction, da.Array) return bst # returning the trained model if __name__ == "__main__": # or use other clusters for scaling with LocalCluster(n_workers=4, threads_per_worker=1) as cluster: with Client(cluster) as client: main(client)
1,186
27.95122
74
py
xgboost
xgboost-master/demo/dask/sklearn_gpu_training.py
""" Use scikit-learn regressor interface with GPU histogram tree method =================================================================== """ from dask import array as da from dask.distributed import Client # It's recommended to use dask_cuda for GPU assignment from dask_cuda import LocalCUDACluster import xgboost def main(client): # generate some random data for demonstration n = 100 m = 1000000 partition_size = 10000 X = da.random.random((m, n), partition_size) y = da.random.random(m, partition_size) regressor = xgboost.dask.DaskXGBRegressor(verbosity=1) # set the device to CUDA regressor.set_params(tree_method="hist", device="cuda") # assigning client here is optional regressor.client = client regressor.fit(X, y, eval_set=[(X, y)]) prediction = regressor.predict(X) bst = regressor.get_booster() history = regressor.evals_result() print("Evaluation history:", history) # returned prediction is always a dask array. assert isinstance(prediction, da.Array) return bst # returning the trained model if __name__ == "__main__": # With dask cuda, one can scale up XGBoost to arbitrary GPU clusters. # `LocalCUDACluster` used here is only for demonstration purpose. with LocalCUDACluster() as cluster: with Client(cluster) as client: main(client)
1,375
28.276596
73
py
xgboost
xgboost-master/demo/dask/gpu_training.py
""" Example of training with Dask on GPU ==================================== """ import dask_cudf from dask import array as da from dask import dataframe as dd from dask.distributed import Client from dask_cuda import LocalCUDACluster import xgboost as xgb from xgboost import dask as dxgb from xgboost.dask import DaskDMatrix def using_dask_matrix(client: Client, X: da.Array, y: da.Array) -> da.Array: # DaskDMatrix acts like normal DMatrix, works as a proxy for local DMatrix scatter # around workers. dtrain = DaskDMatrix(client, X, y) # Use train method from xgboost.dask instead of xgboost. This distributed version # of train returns a dictionary containing the resulting booster and evaluation # history obtained from evaluation metrics. output = xgb.dask.train( client, { "verbosity": 2, "tree_method": "hist", # Golden line for GPU training "device": "cuda", }, dtrain, num_boost_round=4, evals=[(dtrain, "train")], ) bst = output["booster"] history = output["history"] # you can pass output directly into `predict` too. prediction = xgb.dask.predict(client, bst, dtrain) print("Evaluation history:", history) return prediction def using_quantile_device_dmatrix(client: Client, X: da.Array, y: da.Array) -> da.Array: """`DaskQuantileDMatrix` is a data type specialized for `hist` tree methods for reducing memory usage. .. versionadded:: 1.2.0 """ X = dask_cudf.from_dask_dataframe(dd.from_dask_array(X)) y = dask_cudf.from_dask_dataframe(dd.from_dask_array(y)) # `DaskQuantileDMatrix` is used instead of `DaskDMatrix`, be careful that it can not # be used for anything else other than training unless a reference is specified. See # the `ref` argument of `DaskQuantileDMatrix`. 
dtrain = dxgb.DaskQuantileDMatrix(client, X, y) output = xgb.dask.train( client, {"verbosity": 2, "tree_method": "hist", "device": "cuda"}, dtrain, num_boost_round=4, ) prediction = xgb.dask.predict(client, output, X) return prediction if __name__ == "__main__": # `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here # `n_workers` represents the number of GPUs since we use one GPU per worker process. with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster: with Client(cluster) as client: # generate some random data for demonstration m = 100000 n = 100 X = da.random.random(size=(m, n), chunks=10000) y = da.random.random(size=(m,), chunks=10000) print("Using DaskQuantileDMatrix") from_ddqdm = using_quantile_device_dmatrix(client, X, y) print("Using DMatrix") from_dmatrix = using_dask_matrix(client, X, y)
2,919
33.352941
88
py
xgboost
xgboost-master/demo/dask/cpu_training.py
""" Example of training with Dask on CPU ==================================== """ from dask import array as da from dask.distributed import Client, LocalCluster import xgboost as xgb from xgboost.dask import DaskDMatrix def main(client): # generate some random data for demonstration m = 100000 n = 100 X = da.random.random(size=(m, n), chunks=100) y = da.random.random(size=(m,), chunks=100) # DaskDMatrix acts like normal DMatrix, works as a proxy for local # DMatrix scatter around workers. dtrain = DaskDMatrix(client, X, y) # Use train method from xgboost.dask instead of xgboost. This # distributed version of train returns a dictionary containing the # resulting booster and evaluation history obtained from # evaluation metrics. output = xgb.dask.train( client, {"verbosity": 1, "tree_method": "hist"}, dtrain, num_boost_round=4, evals=[(dtrain, "train")], ) bst = output["booster"] history = output["history"] # you can pass output directly into `predict` too. prediction = xgb.dask.predict(client, bst, dtrain) print("Evaluation history:", history) return prediction if __name__ == "__main__": # or use other clusters for scaling with LocalCluster(n_workers=7, threads_per_worker=4) as cluster: with Client(cluster) as client: main(client)
1,408
27.755102
70
py
xgboost
xgboost-master/demo/dask/dask_callbacks.py
""" Example of using callbacks with Dask ==================================== """ import numpy as np from dask.distributed import Client, LocalCluster from dask_ml.datasets import make_regression from dask_ml.model_selection import train_test_split import xgboost as xgb from xgboost.dask import DaskDMatrix def probability_for_going_backward(epoch): return 0.999 / (1.0 + 0.05 * np.log(1.0 + epoch)) # All callback functions must inherit from TrainingCallback class CustomEarlyStopping(xgb.callback.TrainingCallback): """A custom early stopping class where early stopping is determined stochastically. In the beginning, allow the metric to become worse with a probability of 0.999. As boosting progresses, the probability should be adjusted downward""" def __init__(self, *, validation_set, target_metric, maximize, seed): self.validation_set = validation_set self.target_metric = target_metric self.maximize = maximize self.seed = seed self.rng = np.random.default_rng(seed=seed) if maximize: self.better = lambda x, y: x > y else: self.better = lambda x, y: x < y def after_iteration(self, model, epoch, evals_log): metric_history = evals_log[self.validation_set][self.target_metric] if len(metric_history) < 2 or self.better( metric_history[-1], metric_history[-2] ): return False # continue training p = probability_for_going_backward(epoch) go_backward = self.rng.choice(2, size=(1,), replace=True, p=[1 - p, p]).astype( np.bool )[0] print( "The validation metric went into the wrong direction. " + f"Stopping training with probability {1 - p}..." 
) if go_backward: return False # continue training else: return True # stop training def main(client): m = 100000 n = 100 X, y = make_regression(n_samples=m, n_features=n, chunks=200, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) dtrain = DaskDMatrix(client, X_train, y_train) dtest = DaskDMatrix(client, X_test, y_test) output = xgb.dask.train( client, { "verbosity": 1, "tree_method": "hist", "objective": "reg:squarederror", "eval_metric": "rmse", "max_depth": 6, "learning_rate": 1.0, }, dtrain, num_boost_round=1000, evals=[(dtrain, "train"), (dtest, "test")], callbacks=[ CustomEarlyStopping( validation_set="test", target_metric="rmse", maximize=False, seed=0 ) ], ) if __name__ == "__main__": # or use other clusters for scaling with LocalCluster(n_workers=4, threads_per_worker=1) as cluster: with Client(cluster) as client: main(client)
2,965
31.955556
87
py
xgboost
xgboost-master/demo/CLI/binary_classification/mapfeat.py
#!/usr/bin/env python3 def loadfmap( fname ): fmap = {} nmap = {} for l in open( fname ): arr = l.split() if arr[0].find('.') != -1: idx = int( arr[0].strip('.') ) assert idx not in fmap fmap[ idx ] = {} ftype = arr[1].strip(':') content = arr[2] else: content = arr[0] for it in content.split(','): if it.strip() == '': continue k , v = it.split('=') fmap[ idx ][ v ] = len(nmap) nmap[ len(nmap) ] = ftype+'='+k return fmap, nmap def write_nmap( fo, nmap ): for i in range( len(nmap) ): fo.write('%d\t%s\ti\n' % (i, nmap[i]) ) # start here fmap, nmap = loadfmap( 'agaricus-lepiota.fmap' ) fo = open( 'featmap.txt', 'w' ) write_nmap( fo, nmap ) fo.close() fo = open( 'agaricus.txt', 'w' ) for l in open( 'agaricus-lepiota.data' ): arr = l.split(',') if arr[0] == 'p': fo.write('1') else: assert arr[0] == 'e' fo.write('0') for i in range( 1,len(arr) ): fo.write( ' %d:1' % fmap[i][arr[i].strip()] ) fo.write('\n') fo.close()
1,179
23.583333
53
py
xgboost
xgboost-master/demo/CLI/binary_classification/mknfold.py
#!/usr/bin/env python3 import random import sys if len(sys.argv) < 2: print ('Usage:<filename> <k> [nfold = 5]') exit(0) random.seed( 10 ) k = int( sys.argv[2] ) if len(sys.argv) > 3: nfold = int( sys.argv[3] ) else: nfold = 5 fi = open( sys.argv[1], 'r' ) ftr = open( sys.argv[1]+'.train', 'w' ) fte = open( sys.argv[1]+'.test', 'w' ) for l in fi: if random.randint( 1 , nfold ) == k: fte.write( l ) else: ftr.write( l ) fi.close() ftr.close() fte.close()
503
15.8
46
py
xgboost
xgboost-master/demo/CLI/yearpredMSD/csv2libsvm.py
#!/usr/bin/env python3 import sys fo = open(sys.argv[2], 'w') for l in open(sys.argv[1]): arr = l.split(',') fo.write('%s' % arr[0]) for i in range(len(arr) - 1): fo.write(' %d:%s' % (i, arr[i+1])) fo.close()
232
16.923077
42
py