def cint8_array_to_numpy(cptr, length):
    """Convert a ctypes int pointer array to a numpy array."""
    if isinstance(cptr, ctypes.POINTER(ctypes.c_int8)):
        return np.fromiter(cptr, dtype=np.int8, count=length)
    else:
        raise RuntimeError('Expected int8 pointer')
def param_dict_to_str(data):
    """Convert Python dictionary to string, which is passed to C API."""
    if data is None or not data:
        return ""
    pairs = []
    for key, val in data.items():
        if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
            pairs.append(str(key) + '=' + ','.join(map(str, val)))
        elif isinstance(val, string_type) or isinstance(val, numeric_types) or is_numeric(val):
            pairs.append(str(key) + '=' + str(val))
        elif val is not None:
            raise TypeError('Unknown type of parameter: %s, got: %s'
                            % (key, type(val).__name__))
    return ' '.join(pairs)
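# A minimal usage sketch for param_dict_to_str; the params dict below is hypothetical
# and only illustrates the list-vs-scalar handling (insertion order assumes Python 3.7+).
params = {'objective': 'binary', 'metric': ['auc', 'binary_logloss'], 'num_leaves': 31}
param_dict_to_str(params)
# -> 'objective=binary metric=auc,binary_logloss num_leaves=31'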
def convert_from_sliced_object(data):
    """Fix the memory of multi-dimensional sliced object."""
    if data.base is not None and isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray):
        if not data.flags.c_contiguous:
            warnings.warn("Usage of np.ndarray subset (sliced data) is not recommended "
                          "because it will double the peak memory cost in LightGBM.")
            return np.copy(data)
    return data
def c_float_array(data):
    """Get pointer of float numpy array / list."""
    if is_1d_list(data):
        data = np.array(data, copy=False)
    if is_numpy_1d_array(data):
        data = convert_from_sliced_object(data)
        assert data.flags.c_contiguous
        if data.dtype == np.float32:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
            type_data = C_API_DTYPE_FLOAT32
        elif data.dtype == np.float64:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
            type_data = C_API_DTYPE_FLOAT64
        else:
            raise TypeError("Expected np.float32 or np.float64, got type({})"
                            .format(data.dtype))
    else:
        raise TypeError("Unknown type({})".format(type(data).__name__))
    return (ptr_data, type_data, data)
def c_int_array(data):
    """Get pointer of int numpy array / list."""
    if is_1d_list(data):
        data = np.array(data, copy=False)
    if is_numpy_1d_array(data):
        data = convert_from_sliced_object(data)
        assert data.flags.c_contiguous
        if data.dtype == np.int32:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
            type_data = C_API_DTYPE_INT32
        elif data.dtype == np.int64:
            ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
            type_data = C_API_DTYPE_INT64
        else:
            raise TypeError("Expected np.int32 or np.int64, got type({})"
                            .format(data.dtype))
    else:
        raise TypeError("Unknown type({})".format(type(data).__name__))
    return (ptr_data, type_data, data)
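# Sketch of how the two pointer helpers above are used; the third returned value must stay
# referenced for as long as the pointer is in use, since it may own a copy of the data.
arr = np.array([1.0, 2.0, 3.0], dtype=np.float32)
ptr_data, type_data, holder = c_float_array(arr)
assert type_data == C_API_DTYPE_FLOAT32  # module-level constant defined elsewhere in this file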
def predict(self, data, num_iteration=-1,
            raw_score=False, pred_leaf=False, pred_contrib=False,
            data_has_header=False, is_reshape=True):
    """Predict logic.

    Parameters
    ----------
    data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
        Data source for prediction.
        When data type is string, it represents the path of txt file.
    num_iteration : int, optional (default=-1)
        Iteration used for prediction.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.
    data_has_header : bool, optional (default=False)
        Whether data has header.
        Used only for txt data.
    is_reshape : bool, optional (default=True)
        Whether to reshape to (nrow, ncol).

    Returns
    -------
    result : numpy array
        Prediction result.
    """
    if isinstance(data, Dataset):
        raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
    data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
    predict_type = C_API_PREDICT_NORMAL
    if raw_score:
        predict_type = C_API_PREDICT_RAW_SCORE
    if pred_leaf:
        predict_type = C_API_PREDICT_LEAF_INDEX
    if pred_contrib:
        predict_type = C_API_PREDICT_CONTRIB
    int_data_has_header = 1 if data_has_header else 0
    if num_iteration > self.num_total_iteration:
        num_iteration = self.num_total_iteration
    if isinstance(data, string_type):
        with _TempFile() as f:
            _safe_call(_LIB.LGBM_BoosterPredictForFile(
                self.handle,
                c_str(data),
                ctypes.c_int(int_data_has_header),
                ctypes.c_int(predict_type),
                ctypes.c_int(num_iteration),
                c_str(self.pred_parameter),
                c_str(f.name)))
            lines = f.readlines()
            nrow = len(lines)
            preds = [float(token) for line in lines for token in line.split('\t')]
            preds = np.array(preds, dtype=np.float64, copy=False)
    elif isinstance(data, scipy.sparse.csr_matrix):
        preds, nrow = self.__pred_for_csr(data, num_iteration, predict_type)
    elif isinstance(data, scipy.sparse.csc_matrix):
        preds, nrow = self.__pred_for_csc(data, num_iteration, predict_type)
    elif isinstance(data, np.ndarray):
        preds, nrow = self.__pred_for_np2d(data, num_iteration, predict_type)
    elif isinstance(data, list):
        try:
            data = np.array(data)
        except BaseException:
            raise ValueError('Cannot convert data list to numpy array.')
        preds, nrow = self.__pred_for_np2d(data, num_iteration, predict_type)
    elif isinstance(data, DataTable):
        preds, nrow = self.__pred_for_np2d(data.to_numpy(), num_iteration, predict_type)
    else:
        try:
            warnings.warn('Converting data to scipy sparse matrix.')
            csr = scipy.sparse.csr_matrix(data)
        except BaseException:
            raise TypeError('Cannot predict data for type {}'.format(type(data).__name__))
        preds, nrow = self.__pred_for_csr(csr, num_iteration, predict_type)
    if pred_leaf:
        preds = preds.astype(np.int32)
    if is_reshape and preds.size != nrow:
        if preds.size % nrow == 0:
            preds = preds.reshape(nrow, -1)
        else:
            raise ValueError('Length of predict result (%d) cannot be divided by nrow (%d)'
                             % (preds.size, nrow))
    return preds
def __get_num_preds(self, num_iteration, nrow, predict_type):
    """Get size of prediction result."""
    if nrow > MAX_INT32:
        raise LightGBMError('LightGBM cannot perform prediction for data '
                            'with number of rows greater than MAX_INT32 (%d).\n'
                            'You can split your data into chunks '
                            'and then concatenate predictions for them.' % MAX_INT32)
    n_preds = ctypes.c_int64(0)
    _safe_call(_LIB.LGBM_BoosterCalcNumPredict(
        self.handle,
        ctypes.c_int(nrow),
        ctypes.c_int(predict_type),
        ctypes.c_int(num_iteration),
        ctypes.byref(n_preds)))
    return n_preds.value
def __pred_for_np2d(self, mat, num_iteration, predict_type):
    """Predict for a 2-D numpy matrix."""
    if len(mat.shape) != 2:
        raise ValueError('Input numpy.ndarray or list must be 2 dimensional')

    def inner_predict(mat, num_iteration, predict_type, preds=None):
        if mat.dtype == np.float32 or mat.dtype == np.float64:
            data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
        else:  # change non-float data to float data, need to copy
            data = np.array(mat.reshape(mat.size), dtype=np.float32)
        ptr_data, type_ptr_data, _ = c_float_array(data)
        n_preds = self.__get_num_preds(num_iteration, mat.shape[0], predict_type)
        if preds is None:
            preds = np.zeros(n_preds, dtype=np.float64)
        elif len(preds.shape) != 1 or len(preds) != n_preds:
            raise ValueError("Wrong length of pre-allocated predict array")
        out_num_preds = ctypes.c_int64(0)
        _safe_call(_LIB.LGBM_BoosterPredictForMat(
            self.handle,
            ptr_data,
            ctypes.c_int(type_ptr_data),
            ctypes.c_int(mat.shape[0]),
            ctypes.c_int(mat.shape[1]),
            ctypes.c_int(C_API_IS_ROW_MAJOR),
            ctypes.c_int(predict_type),
            ctypes.c_int(num_iteration),
            c_str(self.pred_parameter),
            ctypes.byref(out_num_preds),
            preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
        if n_preds != out_num_preds.value:
            raise ValueError("Wrong length for predict results")
        return preds, mat.shape[0]

    nrow = mat.shape[0]
    if nrow > MAX_INT32:
        sections = np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)
        # __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
        n_preds = [self.__get_num_preds(num_iteration, i, predict_type)
                   for i in np.diff([0] + list(sections) + [nrow])]
        n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
        preds = np.zeros(sum(n_preds), dtype=np.float64)
        for chunk, (start_idx_pred, end_idx_pred) in zip_(np.array_split(mat, sections),
                                                          zip_(n_preds_sections, n_preds_sections[1:])):
            # avoid memory consumption by arrays concatenation operations
            inner_predict(chunk, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
        return preds, nrow
    else:
        return inner_predict(mat, num_iteration, predict_type)
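# The row-chunking arithmetic above is easier to see in isolation; a toy sketch using a
# stand-in chunk size of 4 instead of MAX_INT32:
nrow, chunk_size = 10, 4
sections = np.arange(start=chunk_size, stop=nrow, step=chunk_size)  # array([4, 8])
rows_per_chunk = np.diff([0] + list(sections) + [nrow])             # array([4, 4, 2])
# np.array_split(mat, sections) then yields chunks with exactly these row counts.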
def __pred_for_csr(self, csr, num_iteration, predict_type):
    """Predict for a CSR data."""
    def inner_predict(csr, num_iteration, predict_type, preds=None):
        nrow = len(csr.indptr) - 1
        n_preds = self.__get_num_preds(num_iteration, nrow, predict_type)
        if preds is None:
            preds = np.zeros(n_preds, dtype=np.float64)
        elif len(preds.shape) != 1 or len(preds) != n_preds:
            raise ValueError("Wrong length of pre-allocated predict array")
        out_num_preds = ctypes.c_int64(0)

        ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
        ptr_data, type_ptr_data, _ = c_float_array(csr.data)

        assert csr.shape[1] <= MAX_INT32
        csr.indices = csr.indices.astype(np.int32, copy=False)

        _safe_call(_LIB.LGBM_BoosterPredictForCSR(
            self.handle,
            ptr_indptr,
            ctypes.c_int32(type_ptr_indptr),
            csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
            ptr_data,
            ctypes.c_int(type_ptr_data),
            ctypes.c_int64(len(csr.indptr)),
            ctypes.c_int64(len(csr.data)),
            ctypes.c_int64(csr.shape[1]),
            ctypes.c_int(predict_type),
            ctypes.c_int(num_iteration),
            c_str(self.pred_parameter),
            ctypes.byref(out_num_preds),
            preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
        if n_preds != out_num_preds.value:
            raise ValueError("Wrong length for predict results")
        return preds, nrow

    nrow = len(csr.indptr) - 1
    if nrow > MAX_INT32:
        sections = [0] + list(np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)) + [nrow]
        # __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
        n_preds = [self.__get_num_preds(num_iteration, i, predict_type) for i in np.diff(sections)]
        n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
        preds = np.zeros(sum(n_preds), dtype=np.float64)
        for (start_idx, end_idx), (start_idx_pred, end_idx_pred) in zip_(zip_(sections, sections[1:]),
                                                                         zip_(n_preds_sections, n_preds_sections[1:])):
            # avoid memory consumption by arrays concatenation operations
            inner_predict(csr[start_idx:end_idx], num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
        return preds, nrow
    else:
        return inner_predict(csr, num_iteration, predict_type)
def __pred_for_csc(self, csc, num_iteration, predict_type):
    """Predict for a CSC data."""
    nrow = csc.shape[0]
    if nrow > MAX_INT32:
        return self.__pred_for_csr(csc.tocsr(), num_iteration, predict_type)
    n_preds = self.__get_num_preds(num_iteration, nrow, predict_type)
    preds = np.zeros(n_preds, dtype=np.float64)
    out_num_preds = ctypes.c_int64(0)

    ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
    ptr_data, type_ptr_data, _ = c_float_array(csc.data)

    assert csc.shape[0] <= MAX_INT32
    csc.indices = csc.indices.astype(np.int32, copy=False)

    _safe_call(_LIB.LGBM_BoosterPredictForCSC(
        self.handle,
        ptr_indptr,
        ctypes.c_int32(type_ptr_indptr),
        csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
        ptr_data,
        ctypes.c_int(type_ptr_data),
        ctypes.c_int64(len(csc.indptr)),
        ctypes.c_int64(len(csc.data)),
        ctypes.c_int64(csc.shape[0]),
        ctypes.c_int(predict_type),
        ctypes.c_int(num_iteration),
        c_str(self.pred_parameter),
        ctypes.byref(out_num_preds),
        preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
    if n_preds != out_num_preds.value:
        raise ValueError("Wrong length for predict results")
    return preds, nrow
def __init_from_np2d(self, mat, params_str, ref_dataset):
    """Initialize data from a 2-D numpy matrix."""
    if len(mat.shape) != 2:
        raise ValueError('Input numpy.ndarray must be 2 dimensional')

    self.handle = ctypes.c_void_p()
    if mat.dtype == np.float32 or mat.dtype == np.float64:
        data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
    else:  # change non-float data to float data, need to copy
        data = np.array(mat.reshape(mat.size), dtype=np.float32)

    ptr_data, type_ptr_data, _ = c_float_array(data)
    _safe_call(_LIB.LGBM_DatasetCreateFromMat(
        ptr_data,
        ctypes.c_int(type_ptr_data),
        ctypes.c_int(mat.shape[0]),
        ctypes.c_int(mat.shape[1]),
        ctypes.c_int(C_API_IS_ROW_MAJOR),
        c_str(params_str),
        ref_dataset,
        ctypes.byref(self.handle)))
    return self
def __init_from_list_np2d(self, mats, params_str, ref_dataset):
    """Initialize data from a list of 2-D numpy matrices."""
    ncol = mats[0].shape[1]
    nrow = np.zeros((len(mats),), np.int32)
    if mats[0].dtype == np.float64:
        ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))()
    else:
        ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()

    holders = []
    type_ptr_data = None

    for i, mat in enumerate(mats):
        if len(mat.shape) != 2:
            raise ValueError('Input numpy.ndarray must be 2 dimensional')
        if mat.shape[1] != ncol:
            raise ValueError('Input arrays must have same number of columns')

        nrow[i] = mat.shape[0]

        if mat.dtype == np.float32 or mat.dtype == np.float64:
            mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
        else:  # change non-float data to float data, need to copy
            mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32)

        chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i])
        if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data:
            raise ValueError('Input chunks must have same type')
        ptr_data[i] = chunk_ptr_data
        type_ptr_data = chunk_type_ptr_data
        holders.append(holder)

    self.handle = ctypes.c_void_p()
    _safe_call(_LIB.LGBM_DatasetCreateFromMats(
        ctypes.c_int(len(mats)),
        ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
        ctypes.c_int(type_ptr_data),
        nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
        ctypes.c_int(ncol),
        ctypes.c_int(C_API_IS_ROW_MAJOR),
        c_str(params_str),
        ref_dataset,
        ctypes.byref(self.handle)))
    return self
def __init_from_csr(self, csr, params_str, ref_dataset):
    """Initialize data from a CSR matrix."""
    if len(csr.indices) != len(csr.data):
        raise ValueError('Length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data)))
    self.handle = ctypes.c_void_p()

    ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
    ptr_data, type_ptr_data, _ = c_float_array(csr.data)

    assert csr.shape[1] <= MAX_INT32
    csr.indices = csr.indices.astype(np.int32, copy=False)

    _safe_call(_LIB.LGBM_DatasetCreateFromCSR(
        ptr_indptr,
        ctypes.c_int(type_ptr_indptr),
        csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
        ptr_data,
        ctypes.c_int(type_ptr_data),
        ctypes.c_int64(len(csr.indptr)),
        ctypes.c_int64(len(csr.data)),
        ctypes.c_int64(csr.shape[1]),
        c_str(params_str),
        ref_dataset,
        ctypes.byref(self.handle)))
    return self
def __init_from_csc(self, csc, params_str, ref_dataset):
    """Initialize data from a CSC matrix."""
    if len(csc.indices) != len(csc.data):
        raise ValueError('Length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
    self.handle = ctypes.c_void_p()

    ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
    ptr_data, type_ptr_data, _ = c_float_array(csc.data)

    assert csc.shape[0] <= MAX_INT32
    csc.indices = csc.indices.astype(np.int32, copy=False)

    _safe_call(_LIB.LGBM_DatasetCreateFromCSC(
        ptr_indptr,
        ctypes.c_int(type_ptr_indptr),
        csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
        ptr_data,
        ctypes.c_int(type_ptr_data),
        ctypes.c_int64(len(csc.indptr)),
        ctypes.c_int64(len(csc.data)),
        ctypes.c_int64(csc.shape[0]),
        c_str(params_str),
        ref_dataset,
        ctypes.byref(self.handle)))
    return self
def construct(self):
    """Lazy init.

    Returns
    -------
    self : Dataset
        Constructed Dataset object.
    """
    if self.handle is None:
        if self.reference is not None:
            if self.used_indices is None:
                # create valid
                self._lazy_init(self.data, label=self.label, reference=self.reference,
                                weight=self.weight, group=self.group,
                                init_score=self.init_score, predictor=self._predictor,
                                silent=self.silent, feature_name=self.feature_name, params=self.params)
            else:
                # construct subset
                used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
                assert used_indices.flags.c_contiguous
                if self.reference.group is not None:
                    group_info = np.array(self.reference.group).astype(int)
                    _, self.group = np.unique(np.repeat(range_(len(group_info)), repeats=group_info)[self.used_indices],
                                              return_counts=True)
                self.handle = ctypes.c_void_p()
                params_str = param_dict_to_str(self.params)
                _safe_call(_LIB.LGBM_DatasetGetSubset(
                    self.reference.construct().handle,
                    used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
                    ctypes.c_int(used_indices.shape[0]),
                    c_str(params_str),
                    ctypes.byref(self.handle)))
                self.data = self.reference.data
                self.get_data()
                if self.group is not None:
                    self.set_group(self.group)
                if self.get_label() is None:
                    raise ValueError("Label should not be None.")
        else:
            # create train
            self._lazy_init(self.data, label=self.label,
                            weight=self.weight, group=self.group,
                            init_score=self.init_score, predictor=self._predictor,
                            silent=self.silent, feature_name=self.feature_name,
                            categorical_feature=self.categorical_feature, params=self.params)
    if self.free_raw_data:
        self.data = None
    return self
def create_valid(self, data, label=None, weight=None, group=None,
                 init_score=None, silent=False, params=None):
    """Create validation data aligned with the current Dataset.

    Parameters
    ----------
    data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
        Data source of Dataset.
        If string, it represents the path to txt file.
    label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
        Label of the data.
    weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
        Weight for each instance.
    group : list, numpy 1-D array, pandas Series or None, optional (default=None)
        Group/query size for Dataset.
    init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
        Init score for Dataset.
    silent : bool, optional (default=False)
        Whether to print messages during construction.
    params : dict or None, optional (default=None)
        Other parameters for validation Dataset.

    Returns
    -------
    valid : Dataset
        Validation Dataset with reference to self.
    """
    ret = Dataset(data, label=label, reference=self,
                  weight=weight, group=group, init_score=init_score,
                  silent=silent, params=params, free_raw_data=self.free_raw_data)
    ret._predictor = self._predictor
    ret.pandas_categorical = self.pandas_categorical
    return ret
def subset(self, used_indices, params=None):
    """Get subset of current Dataset.

    Parameters
    ----------
    used_indices : list of int
        Indices used to create the subset.
    params : dict or None, optional (default=None)
        These parameters will be passed to Dataset constructor.

    Returns
    -------
    subset : Dataset
        Subset of the current Dataset.
    """
    if params is None:
        params = self.params
    ret = Dataset(None, reference=self, feature_name=self.feature_name,
                  categorical_feature=self.categorical_feature, params=params,
                  free_raw_data=self.free_raw_data)
    ret._predictor = self._predictor
    ret.pandas_categorical = self.pandas_categorical
    ret.used_indices = used_indices
    return ret
def save_binary(self, filename):
    """Save Dataset to a binary file.

    Parameters
    ----------
    filename : string
        Name of the output file.

    Returns
    -------
    self : Dataset
        Returns self.
    """
    _safe_call(_LIB.LGBM_DatasetSaveBinary(
        self.construct().handle,
        c_str(filename)))
    return self
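# A hypothetical round trip; X and y are assumed to be an existing feature matrix and
# label array. Passing the binary file path back to Dataset reloads it much faster
# than re-binning raw data.
Dataset(X, y).save_binary('train.bin')
train_reloaded = Dataset('train.bin')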
def set_field(self, field_name, data):
    """Set property into the Dataset.

    Parameters
    ----------
    field_name : string
        The field name of the information.
    data : list, numpy 1-D array, pandas Series or None
        The array of data to be set.

    Returns
    -------
    self : Dataset
        Dataset with set property.
    """
    if self.handle is None:
        raise Exception("Cannot set %s before constructing Dataset" % field_name)
    if data is None:
        # set to None
        _safe_call(_LIB.LGBM_DatasetSetField(
            self.handle,
            c_str(field_name),
            None,
            ctypes.c_int(0),
            ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
        return self
    dtype = np.float32
    if field_name == 'group':
        dtype = np.int32
    elif field_name == 'init_score':
        dtype = np.float64
    data = list_to_1d_numpy(data, dtype, name=field_name)
    if data.dtype == np.float32 or data.dtype == np.float64:
        ptr_data, type_data, _ = c_float_array(data)
    elif data.dtype == np.int32:
        ptr_data, type_data, _ = c_int_array(data)
    else:
        raise TypeError("Expected np.float32/64 or np.int32, got type({})".format(data.dtype))
    if type_data != FIELD_TYPE_MAPPER[field_name]:
        raise TypeError("Input type error for set_field")
    _safe_call(_LIB.LGBM_DatasetSetField(
        self.handle,
        c_str(field_name),
        ptr_data,
        ctypes.c_int(len(data)),
        ctypes.c_int(type_data)))
    return self
def get_field(self, field_name):
    """Get property from the Dataset.

    Parameters
    ----------
    field_name : string
        The field name of the information.

    Returns
    -------
    info : numpy array
        A numpy array with information from the Dataset.
    """
    if self.handle is None:
        raise Exception("Cannot get %s before constructing Dataset" % field_name)
    tmp_out_len = ctypes.c_int()
    out_type = ctypes.c_int()
    ret = ctypes.POINTER(ctypes.c_void_p)()
    _safe_call(_LIB.LGBM_DatasetGetField(
        self.handle,
        c_str(field_name),
        ctypes.byref(tmp_out_len),
        ctypes.byref(ret),
        ctypes.byref(out_type)))
    if out_type.value != FIELD_TYPE_MAPPER[field_name]:
        raise TypeError("Return type error for get_field")
    if tmp_out_len.value == 0:
        return None
    if out_type.value == C_API_DTYPE_INT32:
        return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
    elif out_type.value == C_API_DTYPE_FLOAT32:
        return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
    elif out_type.value == C_API_DTYPE_FLOAT64:
        return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value)
    elif out_type.value == C_API_DTYPE_INT8:
        return cint8_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int8)), tmp_out_len.value)
    else:
        raise TypeError("Unknown type")
def set_categorical_feature(self, categorical_feature):
    """Set categorical features.

    Parameters
    ----------
    categorical_feature : list of int or strings
        Names or indices of categorical features.

    Returns
    -------
    self : Dataset
        Dataset with set categorical features.
    """
    if self.categorical_feature == categorical_feature:
        return self
    if self.data is not None:
        if self.categorical_feature is None:
            self.categorical_feature = categorical_feature
            return self._free_handle()
        elif categorical_feature == 'auto':
            warnings.warn('Using categorical_feature in Dataset.')
            return self
        else:
            warnings.warn('categorical_feature in Dataset is overridden.\n'
                          'New categorical_feature is {}'.format(sorted(list(categorical_feature))))
            self.categorical_feature = categorical_feature
            return self._free_handle()
    else:
        raise LightGBMError("Cannot set categorical feature after freed raw data, "
                            "set free_raw_data=False when constructing Dataset to avoid this.")
def _set_predictor(self, predictor):
    """Set predictor for continued training.

    It is not recommended for users to call this function.
    Please use init_model argument in engine.train() or engine.cv() instead.
    """
    if predictor is self._predictor:
        return self
    if self.data is not None:
        self._predictor = predictor
        return self._free_handle()
    else:
        raise LightGBMError("Cannot set predictor after freed raw data, "
                            "set free_raw_data=False when constructing Dataset to avoid this.")
def set_reference(self, reference):
    """Set reference Dataset.

    Parameters
    ----------
    reference : Dataset
        Reference that is used as a template to construct the current Dataset.

    Returns
    -------
    self : Dataset
        Dataset with set reference.
    """
    self.set_categorical_feature(reference.categorical_feature) \
        .set_feature_name(reference.feature_name) \
        ._set_predictor(reference._predictor)
    # we're done if self and reference share a common upstream reference
    if self.get_ref_chain().intersection(reference.get_ref_chain()):
        return self
    if self.data is not None:
        self.reference = reference
        return self._free_handle()
    else:
        raise LightGBMError("Cannot set reference after freed raw data, "
                            "set free_raw_data=False when constructing Dataset to avoid this.")
def set_feature_name(self, feature_name):
    """Set feature name.

    Parameters
    ----------
    feature_name : list of strings
        Feature names.

    Returns
    -------
    self : Dataset
        Dataset with set feature name.
    """
    if feature_name != 'auto':
        self.feature_name = feature_name
    if self.handle is not None and feature_name is not None and feature_name != 'auto':
        if len(feature_name) != self.num_feature():
            raise ValueError("Length of feature_name({}) and num_feature({}) don't match"
                             .format(len(feature_name), self.num_feature()))
        c_feature_name = [c_str(name) for name in feature_name]
        _safe_call(_LIB.LGBM_DatasetSetFeatureNames(
            self.handle,
            c_array(ctypes.c_char_p, c_feature_name),
            ctypes.c_int(len(feature_name))))
    return self
def set_label(self, label):
    """Set label of Dataset.

    Parameters
    ----------
    label : list, numpy 1-D array, pandas Series / one-column DataFrame or None
        The label information to be set into Dataset.

    Returns
    -------
    self : Dataset
        Dataset with set label.
    """
    self.label = label
    if self.handle is not None:
        label = list_to_1d_numpy(_label_from_pandas(label), name='label')
        self.set_field('label', label)
    return self
def set_weight(self, weight):
    """Set weight of each instance.

    Parameters
    ----------
    weight : list, numpy 1-D array, pandas Series or None
        Weight to be set for each data point.

    Returns
    -------
    self : Dataset
        Dataset with set weight.
    """
    if weight is not None and np.all(weight == 1):
        weight = None
    self.weight = weight
    if self.handle is not None and weight is not None:
        weight = list_to_1d_numpy(weight, name='weight')
        self.set_field('weight', weight)
    return self
def set_init_score(self, init_score):
    """Set init score of Booster to start from.

    Parameters
    ----------
    init_score : list, numpy 1-D array, pandas Series or None
        Init score for Booster.

    Returns
    -------
    self : Dataset
        Dataset with set init score.
    """
    self.init_score = init_score
    if self.handle is not None and init_score is not None:
        init_score = list_to_1d_numpy(init_score, np.float64, name='init_score')
        self.set_field('init_score', init_score)
    return self
def set_group(self, group):
    """Set group size of Dataset (used for ranking).

    Parameters
    ----------
    group : list, numpy 1-D array, pandas Series or None
        Group size of each group.

    Returns
    -------
    self : Dataset
        Dataset with set group.
    """
    self.group = group
    if self.handle is not None and group is not None:
        group = list_to_1d_numpy(group, np.int32, name='group')
        self.set_field('group', group)
    return self
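# Toy ranking illustration (hypothetical data): three queries with 2, 3, and 1 documents;
# the group sizes must sum to the total number of rows in the Dataset.
train = Dataset(X, y)  # X, y assumed: 6 rows total
train.set_group([2, 3, 1])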
def get_label(self):
    """Get the label of the Dataset.

    Returns
    -------
    label : numpy array or None
        The label information from the Dataset.
    """
    if self.label is None:
        self.label = self.get_field('label')
    return self.label
def get_weight(self):
    """Get the weight of the Dataset.

    Returns
    -------
    weight : numpy array or None
        Weight for each data point from the Dataset.
    """
    if self.weight is None:
        self.weight = self.get_field('weight')
    return self.weight
def get_feature_penalty(self):
    """Get the feature penalty of the Dataset.

    Returns
    -------
    feature_penalty : numpy array or None
        Feature penalty for each feature in the Dataset.
    """
    if self.feature_penalty is None:
        self.feature_penalty = self.get_field('feature_penalty')
    return self.feature_penalty
def get_monotone_constraints(self):
    """Get the monotone constraints of the Dataset.

    Returns
    -------
    monotone_constraints : numpy array or None
        Monotone constraints: -1, 0 or 1, for each feature in the Dataset.
    """
    if self.monotone_constraints is None:
        self.monotone_constraints = self.get_field('monotone_constraints')
    return self.monotone_constraints
def get_init_score(self):
    """Get the initial score of the Dataset.

    Returns
    -------
    init_score : numpy array or None
        Init score of Booster.
    """
    if self.init_score is None:
        self.init_score = self.get_field('init_score')
    return self.init_score
def get_data(self):
    """Get the raw data of the Dataset.

    Returns
    -------
    data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of numpy arrays or None
        Raw data used in the Dataset construction.
    """
    if self.handle is None:
        raise Exception("Cannot get data before constructing Dataset")
    if self.data is not None and self.used_indices is not None and self.need_slice:
        if isinstance(self.data, np.ndarray) or scipy.sparse.issparse(self.data):
            self.data = self.data[self.used_indices, :]
        elif isinstance(self.data, DataFrame):
            self.data = self.data.iloc[self.used_indices].copy()
        elif isinstance(self.data, DataTable):
            self.data = self.data[self.used_indices, :]
        else:
            warnings.warn("Cannot subset {} type of raw data.\n"
                          "Returning original raw data".format(type(self.data).__name__))
        self.need_slice = False
    return self.data
def get_group(self):
    """Get the group of the Dataset.

    Returns
    -------
    group : numpy array or None
        Group size of each group.
    """
    if self.group is None:
        self.group = self.get_field('group')
        if self.group is not None:
            # group data from LightGBM is boundaries data, need to convert to group size
            self.group = np.diff(self.group)
    return self.group
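# The boundaries-to-sizes conversion performed above, in isolation:
boundaries = np.array([0, 2, 5, 6])  # as stored by the C side
np.diff(boundaries)                  # -> array([2, 3, 1]), i.e. the per-group sizes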
def num_data(self):
    """Get the number of rows in the Dataset.

    Returns
    -------
    number_of_rows : int
        The number of rows in the Dataset.
    """
    if self.handle is not None:
        ret = ctypes.c_int()
        _safe_call(_LIB.LGBM_DatasetGetNumData(self.handle,
                                               ctypes.byref(ret)))
        return ret.value
    else:
        raise LightGBMError("Cannot get num_data before constructing Dataset")
def num_feature(self):
    """Get the number of columns (features) in the Dataset.

    Returns
    -------
    number_of_columns : int
        The number of columns (features) in the Dataset.
    """
    if self.handle is not None:
        ret = ctypes.c_int()
        _safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle,
                                                  ctypes.byref(ret)))
        return ret.value
    else:
        raise LightGBMError("Cannot get num_feature before constructing Dataset")
def get_ref_chain(self, ref_limit=100):
    """Get a chain of Dataset objects.

    Starts with r, then goes to r.reference (if exists),
    then to r.reference.reference, etc.
    until we hit ``ref_limit`` or a reference loop.

    Parameters
    ----------
    ref_limit : int, optional (default=100)
        The limit number of references.

    Returns
    -------
    ref_chain : set of Dataset
        Chain of references of the Datasets.
    """
    head = self
    ref_chain = set()
    while len(ref_chain) < ref_limit:
        if isinstance(head, Dataset):
            ref_chain.add(head)
            if (head.reference is not None) and (head.reference not in ref_chain):
                head = head.reference
            else:
                break
        else:
            break
    return ref_chain
def add_features_from(self, other):
    """Add features from other Dataset to the current Dataset.

    Both Datasets must be constructed before calling this method.

    Parameters
    ----------
    other : Dataset
        The Dataset to take features from.

    Returns
    -------
    self : Dataset
        Dataset with the new features added.
    """
    if self.handle is None or other.handle is None:
        raise ValueError('Both source and target Datasets must be constructed before adding features')
    _safe_call(_LIB.LGBM_DatasetAddFeaturesFrom(self.handle, other.handle))
    return self
def dump_text(self, filename):
    """Save Dataset to a text file.

    This format cannot be loaded back in by LightGBM, but is useful for debugging purposes.

    Parameters
    ----------
    filename : string
        Name of the output file.

    Returns
    -------
    self : Dataset
        Returns self.
    """
    _safe_call(_LIB.LGBM_DatasetDumpText(
        self.construct().handle,
        c_str(filename)))
    return self
def free_dataset(self):
    """Free Booster's Datasets.

    Returns
    -------
    self : Booster
        Booster without Datasets.
    """
    self.__dict__.pop('train_set', None)
    self.__dict__.pop('valid_sets', None)
    self.__num_dataset = 0
    return self
def set_network(self, machines, local_listen_port=12400,
                listen_time_out=120, num_machines=1):
    """Set the network configuration.

    Parameters
    ----------
    machines : list, set or string
        Names of machines.
    local_listen_port : int, optional (default=12400)
        TCP listen port for local machines.
    listen_time_out : int, optional (default=120)
        Socket time-out in minutes.
    num_machines : int, optional (default=1)
        The number of machines for parallel learning application.

    Returns
    -------
    self : Booster
        Booster with set network.
    """
    _safe_call(_LIB.LGBM_NetworkInit(c_str(machines),
                                     ctypes.c_int(local_listen_port),
                                     ctypes.c_int(listen_time_out),
                                     ctypes.c_int(num_machines)))
    self.network = True
    return self
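# A hypothetical two-machine configuration; host addresses and ports are placeholders,
# and the comma-separated "ip:port" string format is assumed here.
bst.set_network('192.168.0.1:12400,192.168.0.2:12400',
                local_listen_port=12400, num_machines=2)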
def add_valid(self, data, name):
    """Add validation data.

    Parameters
    ----------
    data : Dataset
        Validation data.
    name : string
        Name of validation data.

    Returns
    -------
    self : Booster
        Booster with set validation data.
    """
    if not isinstance(data, Dataset):
        raise TypeError('Validation data should be Dataset instance, got {}'
                        .format(type(data).__name__))
    if data._predictor is not self.__init_predictor:
        raise LightGBMError("Add validation data failed, "
                            "you should use same predictor for these data")
    _safe_call(_LIB.LGBM_BoosterAddValidData(
        self.handle,
        data.construct().handle))
    self.valid_sets.append(data)
    self.name_valid_sets.append(name)
    self.__num_dataset += 1
    self.__inner_predict_buffer.append(None)
    self.__is_predicted_cur_iter.append(False)
    return self
def reset_parameter(self, params):
    """Reset parameters of Booster.

    Parameters
    ----------
    params : dict
        New parameters for Booster.

    Returns
    -------
    self : Booster
        Booster with new parameters.
    """
    if any(metric_alias in params for metric_alias in ('metric', 'metrics', 'metric_types')):
        self.__need_reload_eval_info = True
    params_str = param_dict_to_str(params)
    if params_str:
        _safe_call(_LIB.LGBM_BoosterResetParameter(
            self.handle,
            c_str(params_str)))
    self.params.update(params)
    return self
def update(self, train_set=None, fobj=None):
    """Update Booster for one iteration.

    Parameters
    ----------
    train_set : Dataset or None, optional (default=None)
        Training data.
        If None, last training data is used.
    fobj : callable or None, optional (default=None)
        Customized objective function.

        For multi-class task, the score is grouped by class_id first, then grouped by row_id.
        If you want to get i-th row score in j-th class, the access way is score[j * num_data + i]
        and you should group grad and hess in this way as well.

    Returns
    -------
    is_finished : bool
        Whether the update was successfully finished.
    """
    # need reset training data
    if train_set is not None and train_set is not self.train_set:
        if not isinstance(train_set, Dataset):
            raise TypeError('Training data should be Dataset instance, got {}'
                            .format(type(train_set).__name__))
        if train_set._predictor is not self.__init_predictor:
            raise LightGBMError("Replace training data failed, "
                                "you should use same predictor for these data")
        self.train_set = train_set
        _safe_call(_LIB.LGBM_BoosterResetTrainingData(
            self.handle,
            self.train_set.construct().handle))
        self.__inner_predict_buffer[0] = None
    is_finished = ctypes.c_int(0)
    if fobj is None:
        if self.__set_objective_to_none:
            raise LightGBMError('Cannot update due to null objective function.')
        _safe_call(_LIB.LGBM_BoosterUpdateOneIter(
            self.handle,
            ctypes.byref(is_finished)))
        self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
        return is_finished.value == 1
    else:
        if not self.__set_objective_to_none:
            self.reset_parameter({"objective": "none"}).__set_objective_to_none = True
        grad, hess = fobj(self.__inner_predict(0), self.train_set)
        return self.__boost(grad, hess)
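# A minimal custom-objective sketch for binary classification (logistic loss); bst is
# assumed to be an existing Booster. preds are raw scores, so we apply the sigmoid here.
def logistic_obj(preds, train_data):
    y = train_data.get_label()
    p = 1.0 / (1.0 + np.exp(-preds))
    return p - y, p * (1.0 - p)  # gradient, hessian

bst.update(fobj=logistic_obj)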
def __boost(self, grad, hess):
    """Boost Booster for one iteration with customized gradient statistics.

    Note
    ----
    For multi-class task, the score is grouped by class_id first, then grouped by row_id.
    If you want to get i-th row score in j-th class, the access way is score[j * num_data + i]
    and you should group grad and hess in this way as well.

    Parameters
    ----------
    grad : 1-D numpy array or 1-D list
        The first order derivative (gradient).
    hess : 1-D numpy array or 1-D list
        The second order derivative (Hessian).

    Returns
    -------
    is_finished : bool
        Whether the boost was successfully finished.
    """
    grad = list_to_1d_numpy(grad, name='gradient')
    hess = list_to_1d_numpy(hess, name='hessian')
    assert grad.flags.c_contiguous
    assert hess.flags.c_contiguous
    if len(grad) != len(hess):
        raise ValueError("Lengths of gradient({}) and hessian({}) don't match"
                         .format(len(grad), len(hess)))
    is_finished = ctypes.c_int(0)
    _safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
        self.handle,
        grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
        hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
        ctypes.byref(is_finished)))
    self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
    return is_finished.value == 1
def rollback_one_iter(self):
    """Rollback one iteration.

    Returns
    -------
    self : Booster
        Booster with rolled back one iteration.
    """
    _safe_call(_LIB.LGBM_BoosterRollbackOneIter(
        self.handle))
    self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
    return self
def current_iteration(self):
    """Get the index of the current iteration.

    Returns
    -------
    cur_iter : int
        The index of the current iteration.
    """
    out_cur_iter = ctypes.c_int(0)
    _safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
        self.handle,
        ctypes.byref(out_cur_iter)))
    return out_cur_iter.value
def num_model_per_iteration(self):
    """Get number of models per iteration.

    Returns
    -------
    model_per_iter : int
        The number of models per iteration.
    """
    model_per_iter = ctypes.c_int(0)
    _safe_call(_LIB.LGBM_BoosterNumModelPerIteration(
        self.handle,
        ctypes.byref(model_per_iter)))
    return model_per_iter.value
def num_trees(self):
    """Get number of weak sub-models.

    Returns
    -------
    num_trees : int
        The number of weak sub-models.
    """
    num_trees = ctypes.c_int(0)
    _safe_call(_LIB.LGBM_BoosterNumberOfTotalModel(
        self.handle,
        ctypes.byref(num_trees)))
    return num_trees.value
def eval(self, data, name, feval=None):
    """Evaluate for data.

    Parameters
    ----------
    data : Dataset
        Data to evaluate.
    name : string
        Name of the data.
    feval : callable or None, optional (default=None)
        Customized evaluation function.
        Should accept two parameters: preds, train_data,
        and return (eval_name, eval_result, is_higher_better) or list of such tuples.
        For multi-class task, the preds is grouped by class_id first, then grouped by row_id.
        If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].

    Returns
    -------
    result : list
        List with evaluation results.
    """
    if not isinstance(data, Dataset):
        raise TypeError("Can only eval for Dataset instance")
    data_idx = -1
    if data is self.train_set:
        data_idx = 0
    else:
        for i in range_(len(self.valid_sets)):
            if data is self.valid_sets[i]:
                data_idx = i + 1
                break
    # need to push new valid data
    if data_idx == -1:
        self.add_valid(data, name)
        data_idx = self.__num_dataset - 1
    return self.__inner_eval(name, data_idx, feval)
def eval_valid(self, feval=None):
    """Evaluate for validation data.

    Parameters
    ----------
    feval : callable or None, optional (default=None)
        Customized evaluation function.
        Should accept two parameters: preds, train_data,
        and return (eval_name, eval_result, is_higher_better) or list of such tuples.
        For multi-class task, the preds is grouped by class_id first, then grouped by row_id.
        If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].

    Returns
    -------
    result : list
        List with evaluation results.
    """
    return [item for i in range_(1, self.__num_dataset)
            for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]
def save_model(self, filename, num_iteration=None, start_iteration=0):
    """Save Booster to file.

    Parameters
    ----------
    filename : string
        Filename to save Booster.
    num_iteration : int or None, optional (default=None)
        Index of the iteration that should be saved.
        If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
        If <= 0, all iterations are saved.
    start_iteration : int, optional (default=0)
        Start index of the iteration that should be saved.

    Returns
    -------
    self : Booster
        Returns self.
    """
    if num_iteration is None:
        num_iteration = self.best_iteration
    _safe_call(_LIB.LGBM_BoosterSaveModel(
        self.handle,
        ctypes.c_int(start_iteration),
        ctypes.c_int(num_iteration),
        c_str(filename)))
    _dump_pandas_categorical(self.pandas_categorical, filename)
    return self
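# Hypothetical save/load round trip; bst is assumed to be an existing trained Booster.
bst.save_model('model.txt')
bst_loaded = Booster(model_file='model.txt')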
def shuffle_models(self, start_iteration=0, end_iteration=-1):
    """Shuffle models.

    Parameters
    ----------
    start_iteration : int, optional (default=0)
        The first iteration that will be shuffled.
    end_iteration : int, optional (default=-1)
        The last iteration that will be shuffled.
        If <= 0, means the last available iteration.

    Returns
    -------
    self : Booster
        Booster with shuffled models.
    """
    _safe_call(_LIB.LGBM_BoosterShuffleModels(
        self.handle,
        ctypes.c_int(start_iteration),
        ctypes.c_int(end_iteration)))
    return self
def model_from_string(self, model_str, verbose=True):
    """Load Booster from a string.

    Parameters
    ----------
    model_str : string
        Model will be loaded from this string.
    verbose : bool, optional (default=True)
        Whether to print messages while loading model.

    Returns
    -------
    self : Booster
        Loaded Booster object.
    """
    if self.handle is not None:
        _safe_call(_LIB.LGBM_BoosterFree(self.handle))
    self._free_buffer()
    self.handle = ctypes.c_void_p()
    out_num_iterations = ctypes.c_int(0)
    _safe_call(_LIB.LGBM_BoosterLoadModelFromString(
        c_str(model_str),
        ctypes.byref(out_num_iterations),
        ctypes.byref(self.handle)))
    out_num_class = ctypes.c_int(0)
    _safe_call(_LIB.LGBM_BoosterGetNumClasses(
        self.handle,
        ctypes.byref(out_num_class)))
    if verbose:
        print('Finished loading model, total used %d iterations' % int(out_num_iterations.value))
    self.__num_class = out_num_class.value
    self.pandas_categorical = _load_pandas_categorical(model_str=model_str)
    return self
def model_to_string(self, num_iteration=None, start_iteration=0):
    """Save Booster to string.

    Parameters
    ----------
    num_iteration : int or None, optional (default=None)
        Index of the iteration that should be saved.
        If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
        If <= 0, all iterations are saved.
    start_iteration : int, optional (default=0)
        Start index of the iteration that should be saved.

    Returns
    -------
    str_repr : string
        String representation of Booster.
    """
    if num_iteration is None:
        num_iteration = self.best_iteration
    buffer_len = 1 << 20
    tmp_out_len = ctypes.c_int64(0)
    string_buffer = ctypes.create_string_buffer(buffer_len)
    ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
    _safe_call(_LIB.LGBM_BoosterSaveModelToString(
        self.handle,
        ctypes.c_int(start_iteration),
        ctypes.c_int(num_iteration),
        ctypes.c_int64(buffer_len),
        ctypes.byref(tmp_out_len),
        ptr_string_buffer))
    actual_len = tmp_out_len.value
    # if buffer length is not long enough, re-allocate a buffer
    if actual_len > buffer_len:
        string_buffer = ctypes.create_string_buffer(actual_len)
        ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
        _safe_call(_LIB.LGBM_BoosterSaveModelToString(
            self.handle,
            ctypes.c_int(start_iteration),
            ctypes.c_int(num_iteration),
            ctypes.c_int64(actual_len),
            ctypes.byref(tmp_out_len),
            ptr_string_buffer))
    ret = string_buffer.value.decode()
    ret += _dump_pandas_categorical(self.pandas_categorical)
    return ret
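# The string form round-trips through the Booster constructor; bst assumed to exist.
model_str = bst.model_to_string()
bst_copy = Booster(model_str=model_str)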
def dump_model(self, num_iteration=None, start_iteration=0):
    """Dump Booster to JSON format.

    Parameters
    ----------
    num_iteration : int or None, optional (default=None)
        Index of the iteration that should be dumped.
        If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped.
        If <= 0, all iterations are dumped.
    start_iteration : int, optional (default=0)
        Start index of the iteration that should be dumped.

    Returns
    -------
    json_repr : dict
        JSON format of Booster.
    """
    if num_iteration is None:
        num_iteration = self.best_iteration
    buffer_len = 1 << 20
    tmp_out_len = ctypes.c_int64(0)
    string_buffer = ctypes.create_string_buffer(buffer_len)
    ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
    _safe_call(_LIB.LGBM_BoosterDumpModel(
        self.handle,
        ctypes.c_int(start_iteration),
        ctypes.c_int(num_iteration),
        ctypes.c_int64(buffer_len),
        ctypes.byref(tmp_out_len),
        ptr_string_buffer))
    actual_len = tmp_out_len.value
    # if buffer length is not long enough, reallocate a buffer
    if actual_len > buffer_len:
        string_buffer = ctypes.create_string_buffer(actual_len)
        ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
        _safe_call(_LIB.LGBM_BoosterDumpModel(
            self.handle,
            ctypes.c_int(start_iteration),
            ctypes.c_int(num_iteration),
            ctypes.c_int64(actual_len),
            ctypes.byref(tmp_out_len),
            ptr_string_buffer))
    ret = json.loads(string_buffer.value.decode())
    ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical,
                                                      default=json_default_with_numpy))
    return ret
def predict(self, data, num_iteration=None,
            raw_score=False, pred_leaf=False, pred_contrib=False,
            data_has_header=False, is_reshape=True, **kwargs):
    """Make a prediction.

    Parameters
    ----------
    data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
        Data source for prediction.
        If string, it represents the path to txt file.
    num_iteration : int or None, optional (default=None)
        Limit number of iterations in the prediction.
        If None, if the best iteration exists, it is used; otherwise, all iterations are used.
        If <= 0, all iterations are used (no limits).
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.

        Note
        ----
        If you want to get more explanations for your model's predictions using SHAP values,
        like SHAP interaction values,
        you can install the shap package (https://github.com/slundberg/shap).
        Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
        column, where the last column is the expected value.

    data_has_header : bool, optional (default=False)
        Whether the data has header.
        Used only if data is string.
    is_reshape : bool, optional (default=True)
        If True, result is reshaped to [nrow, ncol].
    **kwargs
        Other parameters for the prediction.

    Returns
    -------
    result : numpy array
        Prediction result.
    """
    predictor = self._to_predictor(copy.deepcopy(kwargs))
    if num_iteration is None:
        num_iteration = self.best_iteration
    return predictor.predict(data, num_iteration,
                             raw_score, pred_leaf, pred_contrib,
                             data_has_header, is_reshape)
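# Typical prediction calls; bst and X are assumed to exist.
y_pred = bst.predict(X)                       # scores, shape (nrow,) or (nrow, num_class)
leaves = bst.predict(X, pred_leaf=True)       # int32 leaf indices, shape (nrow, num_trees)
contribs = bst.predict(X, pred_contrib=True)  # shape (nrow, num_features + 1); last column is the expected value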
Refit the existing Booster by new data. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for refit. If string, it represents the path to txt file. label : list, numpy 1-D array or pandas Series / one-column DataFrame Label for refit. decay_rate : float, optional (default=0.9) Decay rate of refit, will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees. **kwargs Other parameters for refit. These parameters will be passed to ``predict`` method. Returns ------- result : Booster Refitted Booster. def refit(self, data, label, decay_rate=0.9, **kwargs): """Refit the existing Booster by new data. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for refit. If string, it represents the path to txt file. label : list, numpy 1-D array or pandas Series / one-column DataFrame Label for refit. decay_rate : float, optional (default=0.9) Decay rate of refit, will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees. **kwargs Other parameters for refit. These parameters will be passed to ``predict`` method. Returns ------- result : Booster Refitted Booster. """ if self.__set_objective_to_none: raise LightGBMError('Cannot refit due to null objective function.') predictor = self._to_predictor(copy.deepcopy(kwargs)) leaf_preds = predictor.predict(data, -1, pred_leaf=True) nrow, ncol = leaf_preds.shape train_set = Dataset(data, label, silent=True) new_booster = Booster(self.params, train_set, silent=True) # Copy models _safe_call(_LIB.LGBM_BoosterMerge( new_booster.handle, predictor.handle)) leaf_preds = leaf_preds.reshape(-1) ptr_data, type_ptr_data, _ = c_int_array(leaf_preds) _safe_call(_LIB.LGBM_BoosterRefit( new_booster.handle, ptr_data, ctypes.c_int(nrow), ctypes.c_int(ncol))) new_booster.network = self.network new_booster.__attr = self.__attr.copy() return new_booster
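A short sketch of ``refit()``, assuming ``bst`` is a trained Booster and ``X_new``/``y_new`` are fresh data with the same feature layout as the training data. Tree structures are kept; only leaf outputs are blended according to ``decay_rate``.
# decay_rate=1.0 would keep the old leaf outputs unchanged; 0.0 would replace
# them entirely with the outputs fitted on the new data.
refitted = bst.refit(X_new, y_new, decay_rate=0.9)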
Get the output of a leaf. Parameters ---------- tree_id : int The index of the tree. leaf_id : int The index of the leaf in the tree. Returns ------- result : float The output of the leaf. def get_leaf_output(self, tree_id, leaf_id): """Get the output of a leaf. Parameters ---------- tree_id : int The index of the tree. leaf_id : int The index of the leaf in the tree. Returns ------- result : float The output of the leaf. """ ret = ctypes.c_double(0) _safe_call(_LIB.LGBM_BoosterGetLeafValue( self.handle, ctypes.c_int(tree_id), ctypes.c_int(leaf_id), ctypes.byref(ret))) return ret.value
Convert to predictor. def _to_predictor(self, pred_parameter=None): """Convert to predictor.""" predictor = _InnerPredictor(booster_handle=self.handle, pred_parameter=pred_parameter) predictor.pandas_categorical = self.pandas_categorical return predictor
Get number of features. Returns ------- num_feature : int The number of features. def num_feature(self): """Get number of features. Returns ------- num_feature : int The number of features. """ out_num_feature = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetNumFeature( self.handle, ctypes.byref(out_num_feature))) return out_num_feature.value
Get names of features. Returns ------- result : list List with names of features. def feature_name(self): """Get names of features. Returns ------- result : list List with names of features. """ num_feature = self.num_feature() # Get names of features tmp_out_len = ctypes.c_int(0) string_buffers = [ctypes.create_string_buffer(255) for i in range_(num_feature)] ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_BoosterGetFeatureNames( self.handle, ctypes.byref(tmp_out_len), ptr_string_buffers)) if num_feature != tmp_out_len.value: raise ValueError("Length of feature names doesn't equal num_feature") return [string_buffers[i].value.decode() for i in range_(num_feature)]
Get feature importances. Parameters ---------- importance_type : string, optional (default="split") How the importance is calculated. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. iteration : int or None, optional (default=None) Limit number of iterations in the feature importance calculation. If None, if the best iteration exists, it is used; otherwise, all trees are used. If <= 0, all trees are used (no limits). Returns ------- result : numpy array Array with feature importances. def feature_importance(self, importance_type='split', iteration=None): """Get feature importances. Parameters ---------- importance_type : string, optional (default="split") How the importance is calculated. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. iteration : int or None, optional (default=None) Limit number of iterations in the feature importance calculation. If None, if the best iteration exists, it is used; otherwise, all trees are used. If <= 0, all trees are used (no limits). Returns ------- result : numpy array Array with feature importances. """ if iteration is None: iteration = self.best_iteration if importance_type == "split": importance_type_int = 0 elif importance_type == "gain": importance_type_int = 1 else: importance_type_int = -1 result = np.zeros(self.num_feature(), dtype=np.float64) _safe_call(_LIB.LGBM_BoosterFeatureImportance( self.handle, ctypes.c_int(iteration), ctypes.c_int(importance_type_int), result.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))) if importance_type_int == 0: return result.astype(int) else: return result
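A usage sketch pairing importances with feature names (``bst`` assumed trained); as documented above, 'split' importances come back as integer counts while 'gain' importances are floats.
gain = bst.feature_importance(importance_type='gain')
for name, score in sorted(zip(bst.feature_name(), gain), key=lambda t: -t[1]):
    print('%s: %.2f' % (name, score))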
Get split value histogram for the specified feature. Parameters ---------- feature : int or string The feature name or index the histogram is calculated for. If int, interpreted as index. If string, interpreted as name. Note ---- Categorical features are not supported. bins : int, string or None, optional (default=None) The maximum number of bins. If None, or int and > number of unique split values and ``xgboost_style=True``, the number of bins equals number of unique split values. If string, it should be one from the list of the supported values by ``numpy.histogram()`` function. xgboost_style : bool, optional (default=False) Whether the returned result should be in the same form as it is in XGBoost. If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function. If True, the returned value is matrix, in which the first column is the right edges of non-empty bins and the second one is the histogram values. Returns ------- result_tuple : tuple of 2 numpy arrays If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature and the bin edges. result_array_like : numpy array or pandas DataFrame (if pandas is installed) If ``xgboost_style=True``, the histogram of used splitting values for the specified feature. def get_split_value_histogram(self, feature, bins=None, xgboost_style=False): """Get split value histogram for the specified feature. Parameters ---------- feature : int or string The feature name or index the histogram is calculated for. If int, interpreted as index. If string, interpreted as name. Note ---- Categorical features are not supported. bins : int, string or None, optional (default=None) The maximum number of bins. If None, or int and > number of unique split values and ``xgboost_style=True``, the number of bins equals number of unique split values. If string, it should be one from the list of the supported values by ``numpy.histogram()`` function. xgboost_style : bool, optional (default=False) Whether the returned result should be in the same form as it is in XGBoost. If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function. If True, the returned value is matrix, in which the first column is the right edges of non-empty bins and the second one is the histogram values. Returns ------- result_tuple : tuple of 2 numpy arrays If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature and the bin edges. result_array_like : numpy array or pandas DataFrame (if pandas is installed) If ``xgboost_style=True``, the histogram of used splitting values for the specified feature. 
""" def add(root): """Recursively add thresholds.""" if 'split_index' in root: # non-leaf if feature_names is not None and isinstance(feature, string_type): split_feature = feature_names[root['split_feature']] else: split_feature = root['split_feature'] if split_feature == feature: if isinstance(root['threshold'], string_type): raise LightGBMError('Cannot compute split value histogram for the categorical feature') else: values.append(root['threshold']) add(root['left_child']) add(root['right_child']) model = self.dump_model() feature_names = model.get('feature_names') tree_infos = model['tree_info'] values = [] for tree_info in tree_infos: add(tree_info['tree_structure']) if bins is None or isinstance(bins, integer_types) and xgboost_style: n_unique = len(np.unique(values)) bins = max(min(n_unique, bins) if bins is not None else n_unique, 1) hist, bin_edges = np.histogram(values, bins=bins) if xgboost_style: ret = np.column_stack((bin_edges[1:], hist)) ret = ret[ret[:, 1] > 0] if PANDAS_INSTALLED: return DataFrame(ret, columns=['SplitValue', 'Count']) else: return ret else: return hist, bin_edges
Evaluate training or validation data. def __inner_eval(self, data_name, data_idx, feval=None): """Evaluate training or validation data.""" if data_idx >= self.__num_dataset: raise ValueError("Data_idx should be smaller than number of datasets") self.__get_eval_info() ret = [] if self.__num_inner_eval > 0: result = np.zeros(self.__num_inner_eval, dtype=np.float64) tmp_out_len = ctypes.c_int(0) _safe_call(_LIB.LGBM_BoosterGetEval( self.handle, ctypes.c_int(data_idx), ctypes.byref(tmp_out_len), result.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))) if tmp_out_len.value != self.__num_inner_eval: raise ValueError("Wrong length of eval results") for i in range_(self.__num_inner_eval): ret.append((data_name, self.__name_inner_eval[i], result[i], self.__higher_better_inner_eval[i])) if feval is not None: if data_idx == 0: cur_data = self.train_set else: cur_data = self.valid_sets[data_idx - 1] feval_ret = feval(self.__inner_predict(data_idx), cur_data) if isinstance(feval_ret, list): for eval_name, val, is_higher_better in feval_ret: ret.append((data_name, eval_name, val, is_higher_better)) else: eval_name, val, is_higher_better = feval_ret ret.append((data_name, eval_name, val, is_higher_better)) return ret
Predict for training and validation dataset. def __inner_predict(self, data_idx): """Predict for training and validation dataset.""" if data_idx >= self.__num_dataset: raise ValueError("Data_idx should be smaller than number of datasets") if self.__inner_predict_buffer[data_idx] is None: if data_idx == 0: n_preds = self.train_set.num_data() * self.__num_class else: n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class self.__inner_predict_buffer[data_idx] = np.zeros(n_preds, dtype=np.float64) # avoid predicting multiple times in one iteration if not self.__is_predicted_cur_iter[data_idx]: tmp_out_len = ctypes.c_int64(0) data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double)) _safe_call(_LIB.LGBM_BoosterGetPredict( self.handle, ctypes.c_int(data_idx), ctypes.byref(tmp_out_len), data_ptr)) if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]): raise ValueError("Wrong length of predict results for data %d" % (data_idx)) self.__is_predicted_cur_iter[data_idx] = True return self.__inner_predict_buffer[data_idx]
Get inner evaluation count and names. def __get_eval_info(self): """Get inner evaluation count and names.""" if self.__need_reload_eval_info: self.__need_reload_eval_info = False out_num_eval = ctypes.c_int(0) # Get number of inner evals _safe_call(_LIB.LGBM_BoosterGetEvalCounts( self.handle, ctypes.byref(out_num_eval))) self.__num_inner_eval = out_num_eval.value if self.__num_inner_eval > 0: # Get names of evals tmp_out_len = ctypes.c_int(0) string_buffers = [ctypes.create_string_buffer(255) for i in range_(self.__num_inner_eval)] ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers)) _safe_call(_LIB.LGBM_BoosterGetEvalNames( self.handle, ctypes.byref(tmp_out_len), ptr_string_buffers)) if self.__num_inner_eval != tmp_out_len.value: raise ValueError("Length of eval names doesn't equal num_evals") self.__name_inner_eval = \ [string_buffers[i].value.decode() for i in range_(self.__num_inner_eval)] self.__higher_better_inner_eval = \ [name.startswith(('auc', 'ndcg@', 'map@')) for name in self.__name_inner_eval]
Set attributes to the Booster. Parameters ---------- **kwargs The attributes to set. Setting a value to None deletes an attribute. Returns ------- self : Booster Booster with set attributes. def set_attr(self, **kwargs): """Set attributes to the Booster. Parameters ---------- **kwargs The attributes to set. Setting a value to None deletes an attribute. Returns ------- self : Booster Booster with set attributes. """ for key, value in kwargs.items(): if value is not None: if not isinstance(value, string_type): raise ValueError("Only string values are accepted") self.__attr[key] = value else: self.__attr.pop(key, None) return self
Find the path to LightGBM library files. Returns ------- lib_path: list of strings List of all found library paths to LightGBM. def find_lib_path(): """Find the path to LightGBM library files. Returns ------- lib_path: list of strings List of all found library paths to LightGBM. """ if os.environ.get('LIGHTGBM_BUILD_DOC', False): # we don't need lib_lightgbm while building docs return [] curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) dll_path = [curr_path, os.path.join(curr_path, '../../'), os.path.join(curr_path, 'compile'), os.path.join(curr_path, '../compile'), os.path.join(curr_path, '../../lib/')] if system() in ('Windows', 'Microsoft'): dll_path.append(os.path.join(curr_path, '../compile/Release/')) dll_path.append(os.path.join(curr_path, '../compile/windows/x64/DLL/')) dll_path.append(os.path.join(curr_path, '../../Release/')) dll_path.append(os.path.join(curr_path, '../../windows/x64/DLL/')) dll_path = [os.path.join(p, 'lib_lightgbm.dll') for p in dll_path] else: dll_path = [os.path.join(p, 'lib_lightgbm.so') for p in dll_path] lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)] if not lib_path: dll_path = [os.path.realpath(p) for p in dll_path] raise Exception('Cannot find lightgbm library file in following paths:\n' + '\n'.join(dll_path)) return lib_path
Convert numpy classes to JSON serializable objects. def json_default_with_numpy(obj): """Convert numpy classes to JSON serializable objects.""" if isinstance(obj, (np.integer, np.floating, np.bool_)): return obj.item() elif isinstance(obj, np.ndarray): return obj.tolist() else: return obj
Format metric string. def _format_eval_result(value, show_stdv=True): """Format metric string.""" if len(value) == 4: return '%s\'s %s: %g' % (value[0], value[1], value[2]) elif len(value) == 5: if show_stdv: return '%s\'s %s: %g + %g' % (value[0], value[1], value[2], value[4]) else: return '%s\'s %s: %g' % (value[0], value[1], value[2]) else: raise ValueError("Wrong metric value")
Create a callback that prints the evaluation results. Parameters ---------- period : int, optional (default=1) The period to print the evaluation results. show_stdv : bool, optional (default=True) Whether to show stdv (if provided). Returns ------- callback : function The callback that prints the evaluation results every ``period`` iteration(s). def print_evaluation(period=1, show_stdv=True): """Create a callback that prints the evaluation results. Parameters ---------- period : int, optional (default=1) The period to print the evaluation results. show_stdv : bool, optional (default=True) Whether to show stdv (if provided). Returns ------- callback : function The callback that prints the evaluation results every ``period`` iteration(s). """ def _callback(env): if period > 0 and env.evaluation_result_list and (env.iteration + 1) % period == 0: result = '\t'.join([_format_eval_result(x, show_stdv) for x in env.evaluation_result_list]) print('[%d]\t%s' % (env.iteration + 1, result)) _callback.order = 10 return _callback
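A sketch of attaching this callback explicitly; ``params``, ``train_set`` and ``valid_set`` are assumed to exist. Passing an int ``verbose_eval`` to ``train()`` builds the same callback internally.
import lightgbm as lgb

bst = lgb.train(params, train_set, num_boost_round=100,
                valid_sets=[valid_set],
                callbacks=[lgb.print_evaluation(period=10)])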
Create a callback that records the evaluation history into ``eval_result``. Parameters ---------- eval_result : dict A dictionary to store the evaluation results. Returns ------- callback : function The callback that records the evaluation history into the passed dictionary. def record_evaluation(eval_result): """Create a callback that records the evaluation history into ``eval_result``. Parameters ---------- eval_result : dict A dictionary to store the evaluation results. Returns ------- callback : function The callback that records the evaluation history into the passed dictionary. """ if not isinstance(eval_result, dict): raise TypeError('Eval_result should be a dictionary') eval_result.clear() def _init(env): for data_name, _, _, _ in env.evaluation_result_list: eval_result.setdefault(data_name, collections.defaultdict(list)) def _callback(env): if not eval_result: _init(env) for data_name, eval_name, result, _ in env.evaluation_result_list: eval_result[data_name][eval_name].append(result) _callback.order = 20 return _callback
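A sketch of capturing the metric history (same assumed ``params``/``train_set``/``valid_set`` as above).
import lightgbm as lgb

evals = {}
bst = lgb.train(params, train_set, num_boost_round=50,
                valid_sets=[valid_set], valid_names=['valid'],
                callbacks=[lgb.record_evaluation(evals)])
# evals now maps dataset name -> metric name -> list of per-iteration values,
# e.g. {'valid': {'binary_logloss': [...]}} for a binary task.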
Create a callback that resets the parameter after the first iteration. Note ---- The initial parameter will still take in-effect on first iteration. Parameters ---------- **kwargs : value should be list or function List of parameters for each boosting round or a customized function that calculates the parameter in terms of current number of round (e.g. yields learning rate decay). If list lst, parameter = lst[current_round]. If function func, parameter = func(current_round). Returns ------- callback : function The callback that resets the parameter after the first iteration. def reset_parameter(**kwargs): """Create a callback that resets the parameter after the first iteration. Note ---- The initial parameter will still take in-effect on first iteration. Parameters ---------- **kwargs : value should be list or function List of parameters for each boosting round or a customized function that calculates the parameter in terms of current number of round (e.g. yields learning rate decay). If list lst, parameter = lst[current_round]. If function func, parameter = func(current_round). Returns ------- callback : function The callback that resets the parameter after the first iteration. """ def _callback(env): new_parameters = {} for key, value in kwargs.items(): if key in ['num_class', 'num_classes', 'boosting', 'boost', 'boosting_type', 'metric', 'metrics', 'metric_types']: raise RuntimeError("cannot reset {} during training".format(repr(key))) if isinstance(value, list): if len(value) != env.end_iteration - env.begin_iteration: raise ValueError("Length of list {} has to equal to 'num_boost_round'." .format(repr(key))) new_param = value[env.iteration - env.begin_iteration] else: new_param = value(env.iteration - env.begin_iteration) if new_param != env.params.get(key, None): new_parameters[key] = new_param if new_parameters: env.model.reset_parameter(new_parameters) env.params.update(new_parameters) _callback.before_iteration = True _callback.order = 10 return _callback
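A sketch of learning-rate decay via this callback (assumed ``params``/``train_set``); the ``learning_rates`` argument of ``train()`` wires up the same callback for you.
import lightgbm as lgb

# Exponential decay: the learning rate at boosting round i is 0.1 * 0.99**i.
bst = lgb.train(params, train_set, num_boost_round=100,
                callbacks=[lgb.reset_parameter(
                    learning_rate=lambda current_round: 0.1 * (0.99 ** current_round))])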
Create a callback that activates early stopping. Note ---- Activates early stopping. The model will train until the validation score stops improving. Validation score needs to improve at least every ``early_stopping_rounds`` round(s) to continue training. Requires at least one validation dataset and one metric. If there's more than one, will check all of them. But the training data is ignored anyway. To check only the first metric set ``first_metric_only`` to True. Parameters ---------- stopping_rounds : int The number of rounds without improvement after which training will be stopped. first_metric_only : bool, optional (default=False) Whether to use only the first metric for early stopping. verbose : bool, optional (default=True) Whether to print message with early stopping information. Returns ------- callback : function The callback that activates early stopping. def early_stopping(stopping_rounds, first_metric_only=False, verbose=True): """Create a callback that activates early stopping. Note ---- Activates early stopping. The model will train until the validation score stops improving. Validation score needs to improve at least every ``early_stopping_rounds`` round(s) to continue training. Requires at least one validation dataset and one metric. If there's more than one, will check all of them. But the training data is ignored anyway. To check only the first metric set ``first_metric_only`` to True. Parameters ---------- stopping_rounds : int The number of rounds without improvement after which training will be stopped. first_metric_only : bool, optional (default=False) Whether to use only the first metric for early stopping. verbose : bool, optional (default=True) Whether to print message with early stopping information. Returns ------- callback : function The callback that activates early stopping. """ best_score = [] best_iter = [] best_score_list = [] cmp_op = [] enabled = [True] def _init(env): enabled[0] = not any((boost_alias in env.params and env.params[boost_alias] == 'dart') for boost_alias in ('boosting', 'boosting_type', 'boost')) if not enabled[0]: warnings.warn('Early stopping is not available in dart mode') return if not env.evaluation_result_list: raise ValueError('For early stopping, ' 'at least one dataset and eval metric is required for evaluation') if verbose: msg = "Training until validation scores don't improve for {} rounds." print(msg.format(stopping_rounds)) for eval_ret in env.evaluation_result_list: best_iter.append(0) best_score_list.append(None) if eval_ret[3]: best_score.append(float('-inf')) cmp_op.append(gt) else: best_score.append(float('inf')) cmp_op.append(lt) def _callback(env): if not cmp_op: _init(env) if not enabled[0]: return for i in range_(len(env.evaluation_result_list)): score = env.evaluation_result_list[i][2] if best_score_list[i] is None or cmp_op[i](score, best_score[i]): best_score[i] = score best_iter[i] = env.iteration best_score_list[i] = env.evaluation_result_list elif env.iteration - best_iter[i] >= stopping_rounds: if verbose: print('Early stopping, best iteration is:\n[%d]\t%s' % ( best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]]))) raise EarlyStopException(best_iter[i], best_score_list[i]) if env.iteration == env.end_iteration - 1: if verbose: print('Did not meet early stopping. Best iteration is:\n[%d]\t%s' % ( best_iter[i] + 1, '\t'.join([_format_eval_result(x) for x in best_score_list[i]]))) raise EarlyStopException(best_iter[i], best_score_list[i]) if first_metric_only: # only the first metric is used for early stopping break _callback.order = 30 return _callback
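A sketch of explicit use (assumed ``params``/``train_set``/``valid_set``); ``train()``'s ``early_stopping_rounds`` argument adds the same callback internally.
import lightgbm as lgb

bst = lgb.train(params, train_set, num_boost_round=1000,
                valid_sets=[valid_set],
                callbacks=[lgb.early_stopping(stopping_rounds=20,
                                              first_metric_only=True)])
print('best iteration:', bst.best_iteration)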
Perform the training with given parameters. Parameters ---------- params : dict Parameters for training. train_set : Dataset Data to be trained on. num_boost_round : int, optional (default=100) Number of boosting iterations. valid_sets : list of Datasets or None, optional (default=None) List of data to be evaluated on during training. valid_names : list of strings or None, optional (default=None) Names of ``valid_sets``. fobj : callable or None, optional (default=None) Customized objective function. feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. For multi-class task, the preds are grouped by class_id first, then by row_id. To get the i-th row's prediction for the j-th class, access preds[j * num_data + i]. To ignore the default metric corresponding to the used objective, set the ``metric`` parameter to the string ``"None"`` in ``params``. init_model : string, Booster or None, optional (default=None) Filename of LightGBM model or Booster instance used to continue training. feature_name : list of strings or 'auto', optional (default="auto") Feature names. If 'auto' and data is pandas DataFrame, data column names are used. categorical_feature : list of strings or int, or 'auto', optional (default="auto") Categorical features. If list of int, interpreted as indices. If list of strings, interpreted as feature names (need to specify ``feature_name`` as well). If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used. All values in categorical features should be less than int32 max value (2147483647). Large values could be memory consuming. Consider using consecutive integers starting from zero. All negative values in categorical features will be treated as missing values. early_stopping_rounds : int or None, optional (default=None) Activates early stopping. The model will train until the validation score stops improving. Validation score needs to improve at least every ``early_stopping_rounds`` round(s) to continue training. Requires at least one validation dataset and one metric. If there's more than one, will check all of them. But the training data is ignored anyway. To check only the first metric you can pass in ``callbacks`` the ``early_stopping`` callback with ``first_metric_only=True``. The index of iteration that has the best performance will be saved in the ``best_iteration`` field if early stopping logic is enabled by setting ``early_stopping_rounds``. evals_result : dict or None, optional (default=None) This dictionary is used to store all evaluation results of all the items in ``valid_sets``. Example ------- With ``valid_sets`` = [valid_set, train_set], ``valid_names`` = ['eval', 'train'] and ``params`` = {'metric': 'logloss'} returns {'train': {'logloss': ['0.48253', '0.35953', ...]}, 'eval': {'logloss': ['0.480385', '0.357756', ...]}}. verbose_eval : bool or int, optional (default=True) Requires at least one validation dataset. If True, the eval metric on the valid set is printed at each boosting stage. If int, the eval metric on the valid set is printed at every ``verbose_eval`` boosting stage. The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed. Example ------- With ``verbose_eval`` = 4 and at least one item in ``valid_sets``, an evaluation metric is printed every 4 (instead of 1) boosting stages.
learning_rates : list, callable or None, optional (default=None) List of learning rates for each boosting round or a customized function that calculates ``learning_rate`` in terms of current number of round (e.g. yields learning rate decay). keep_training_booster : bool, optional (default=False) Whether the returned Booster will be used to keep training. If False, the returned value will be converted into _InnerPredictor before returning. You can still use _InnerPredictor as ``init_model`` to continue training in the future. callbacks : list of callables or None, optional (default=None) List of callback functions that are applied at each iteration. See Callbacks in Python API for more information. Returns ------- booster : Booster The trained Booster model. def train(params, train_set, num_boost_round=100, valid_sets=None, valid_names=None, fobj=None, feval=None, init_model=None, feature_name='auto', categorical_feature='auto', early_stopping_rounds=None, evals_result=None, verbose_eval=True, learning_rates=None, keep_training_booster=False, callbacks=None): """Perform the training with given parameters. Parameters ---------- params : dict Parameters for training. train_set : Dataset Data to be trained on. num_boost_round : int, optional (default=100) Number of boosting iterations. valid_sets : list of Datasets or None, optional (default=None) List of data to be evaluated on during training. valid_names : list of strings or None, optional (default=None) Names of ``valid_sets``. fobj : callable or None, optional (default=None) Customized objective function. feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. For multi-class task, the preds are grouped by class_id first, then by row_id. To get the i-th row's prediction for the j-th class, access preds[j * num_data + i]. To ignore the default metric corresponding to the used objective, set the ``metric`` parameter to the string ``"None"`` in ``params``. init_model : string, Booster or None, optional (default=None) Filename of LightGBM model or Booster instance used to continue training. feature_name : list of strings or 'auto', optional (default="auto") Feature names. If 'auto' and data is pandas DataFrame, data column names are used. categorical_feature : list of strings or int, or 'auto', optional (default="auto") Categorical features. If list of int, interpreted as indices. If list of strings, interpreted as feature names (need to specify ``feature_name`` as well). If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used. All values in categorical features should be less than int32 max value (2147483647). Large values could be memory consuming. Consider using consecutive integers starting from zero. All negative values in categorical features will be treated as missing values. early_stopping_rounds : int or None, optional (default=None) Activates early stopping. The model will train until the validation score stops improving. Validation score needs to improve at least every ``early_stopping_rounds`` round(s) to continue training. Requires at least one validation dataset and one metric. If there's more than one, will check all of them. But the training data is ignored anyway. To check only the first metric you can pass in ``callbacks`` the ``early_stopping`` callback with ``first_metric_only=True``.
The index of iteration that has the best performance will be saved in the ``best_iteration`` field if early stopping logic is enabled by setting ``early_stopping_rounds``. evals_result : dict or None, optional (default=None) This dictionary is used to store all evaluation results of all the items in ``valid_sets``. Example ------- With ``valid_sets`` = [valid_set, train_set], ``valid_names`` = ['eval', 'train'] and ``params`` = {'metric': 'logloss'} returns {'train': {'logloss': ['0.48253', '0.35953', ...]}, 'eval': {'logloss': ['0.480385', '0.357756', ...]}}. verbose_eval : bool or int, optional (default=True) Requires at least one validation dataset. If True, the eval metric on the valid set is printed at each boosting stage. If int, the eval metric on the valid set is printed at every ``verbose_eval`` boosting stage. The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed. Example ------- With ``verbose_eval`` = 4 and at least one item in ``valid_sets``, an evaluation metric is printed every 4 (instead of 1) boosting stages. learning_rates : list, callable or None, optional (default=None) List of learning rates for each boosting round or a customized function that calculates ``learning_rate`` in terms of current number of round (e.g. yields learning rate decay). keep_training_booster : bool, optional (default=False) Whether the returned Booster will be used to keep training. If False, the returned value will be converted into _InnerPredictor before returning. You can still use _InnerPredictor as ``init_model`` to continue training in the future. callbacks : list of callables or None, optional (default=None) List of callback functions that are applied at each iteration. See Callbacks in Python API for more information. Returns ------- booster : Booster The trained Booster model. """ # create predictor first params = copy.deepcopy(params) if fobj is not None: params['objective'] = 'none' for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees", "num_round", "num_rounds", "num_boost_round", "n_estimators"]: if alias in params: num_boost_round = int(params.pop(alias)) warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias)) break for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]: if alias in params and params[alias] is not None: early_stopping_rounds = int(params.pop(alias)) warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias)) break if num_boost_round <= 0: raise ValueError("num_boost_round should be greater than zero.") if isinstance(init_model, string_type): predictor = _InnerPredictor(model_file=init_model, pred_parameter=params) elif isinstance(init_model, Booster): predictor = init_model._to_predictor(dict(init_model.params, **params)) else: predictor = None init_iteration = predictor.num_total_iteration if predictor is not None else 0 # check dataset if not isinstance(train_set, Dataset): raise TypeError("Training only accepts Dataset object") train_set._update_params(params) \ ._set_predictor(predictor) \ .set_feature_name(feature_name) \ .set_categorical_feature(categorical_feature) is_valid_contain_train = False train_data_name = "training" reduced_valid_sets = [] name_valid_sets = [] if valid_sets is not None: if isinstance(valid_sets, Dataset): valid_sets = [valid_sets] if isinstance(valid_names, string_type): valid_names = [valid_names] for i, valid_data in enumerate(valid_sets): # reduce cost for prediction on training data if valid_data is train_set: is_valid_contain_train = True if valid_names is not None: train_data_name = valid_names[i] continue if not isinstance(valid_data, Dataset): raise TypeError("Training only accepts Dataset object") reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set)) if valid_names is not None and len(valid_names) > i: name_valid_sets.append(valid_names[i]) else: name_valid_sets.append('valid_' + str(i)) # process callbacks if callbacks is None: callbacks = set() else: for i, cb in enumerate(callbacks): cb.__dict__.setdefault('order', i - len(callbacks)) callbacks = set(callbacks) # Most legacy advanced options become callbacks if verbose_eval is True: callbacks.add(callback.print_evaluation()) elif isinstance(verbose_eval, integer_types): callbacks.add(callback.print_evaluation(verbose_eval)) if early_stopping_rounds is not None: callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=bool(verbose_eval))) if learning_rates is not None: callbacks.add(callback.reset_parameter(learning_rate=learning_rates)) if evals_result is not None: callbacks.add(callback.record_evaluation(evals_result)) callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)} callbacks_after_iter = callbacks - callbacks_before_iter callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order')) callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order')) # construct booster try: booster = Booster(params=params, train_set=train_set) if is_valid_contain_train: booster.set_train_data_name(train_data_name) for valid_set, name_valid_set in zip_(reduced_valid_sets, name_valid_sets): booster.add_valid(valid_set, name_valid_set) finally: train_set._reverse_update_params() for valid_set in reduced_valid_sets: valid_set._reverse_update_params() booster.best_iteration = 0 # start training for i in range_(init_iteration, init_iteration + num_boost_round): for cb in callbacks_before_iter: cb(callback.CallbackEnv(model=booster, params=params, iteration=i, begin_iteration=init_iteration, end_iteration=init_iteration + num_boost_round, evaluation_result_list=None)) booster.update(fobj=fobj) evaluation_result_list = [] # check evaluation result.
if valid_sets is not None: if is_valid_contain_train: evaluation_result_list.extend(booster.eval_train(feval)) evaluation_result_list.extend(booster.eval_valid(feval)) try: for cb in callbacks_after_iter: cb(callback.CallbackEnv(model=booster, params=params, iteration=i, begin_iteration=init_iteration, end_iteration=init_iteration + num_boost_round, evaluation_result_list=evaluation_result_list)) except callback.EarlyStopException as earlyStopException: booster.best_iteration = earlyStopException.best_iteration + 1 evaluation_result_list = earlyStopException.best_score break booster.best_score = collections.defaultdict(dict) for dataset_name, eval_name, score, _ in evaluation_result_list: booster.best_score[dataset_name][eval_name] = score if not keep_training_booster: booster.model_from_string(booster.model_to_string(), False).free_dataset() return booster
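An end-to-end sketch of ``train()`` on a toy binary task; the scikit-learn dataset is used purely for illustration.
import lightgbm as lgb
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_tr, X_va, y_tr, y_va = train_test_split(X, y, test_size=0.2, random_state=0)
dtrain = lgb.Dataset(X_tr, label=y_tr)
dvalid = lgb.Dataset(X_va, label=y_va, reference=dtrain)

params = {'objective': 'binary', 'metric': 'binary_logloss', 'verbose': -1}
evals_result = {}
bst = lgb.train(params, dtrain, num_boost_round=200,
                valid_sets=[dtrain, dvalid], valid_names=['train', 'valid'],
                early_stopping_rounds=20, evals_result=evals_result,
                verbose_eval=50)
preds = bst.predict(X_va, num_iteration=bst.best_iteration)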
Make a n-fold list of Booster from random indices. def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratified=True, shuffle=True, eval_train_metric=False): """Make a n-fold list of Booster from random indices.""" full_data = full_data.construct() num_data = full_data.num_data() if folds is not None: if not hasattr(folds, '__iter__') and not hasattr(folds, 'split'): raise AttributeError("folds should be a generator or iterator of (train_idx, test_idx) tuples " "or scikit-learn splitter object with split method") if hasattr(folds, 'split'): group_info = full_data.get_group() if group_info is not None: group_info = group_info.astype(int) flatted_group = np.repeat(range_(len(group_info)), repeats=group_info) else: flatted_group = np.zeros(num_data, dtype=int) folds = folds.split(X=np.zeros(num_data), y=full_data.get_label(), groups=flatted_group) else: if 'objective' in params and params['objective'] == 'lambdarank': if not SKLEARN_INSTALLED: raise LightGBMError('Scikit-learn is required for lambdarank cv.') # lambdarank task, split according to groups group_info = full_data.get_group().astype(int) flatted_group = np.repeat(range_(len(group_info)), repeats=group_info) group_kfold = _LGBMGroupKFold(n_splits=nfold) folds = group_kfold.split(X=np.zeros(num_data), groups=flatted_group) elif stratified: if not SKLEARN_INSTALLED: raise LightGBMError('Scikit-learn is required for stratified cv.') skf = _LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed) folds = skf.split(X=np.zeros(num_data), y=full_data.get_label()) else: if shuffle: randidx = np.random.RandomState(seed).permutation(num_data) else: randidx = np.arange(num_data) kstep = int(num_data / nfold) test_id = [randidx[i: i + kstep] for i in range_(0, num_data, kstep)] train_id = [np.concatenate([test_id[i] for i in range_(nfold) if k != i]) for k in range_(nfold)] folds = zip_(train_id, test_id) ret = _CVBooster() for train_idx, test_idx in folds: train_set = full_data.subset(train_idx) valid_set = full_data.subset(test_idx) # run preprocessing on the data set if needed if fpreproc is not None: train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy()) else: tparam = params cvbooster = Booster(tparam, train_set) if eval_train_metric: cvbooster.add_valid(train_set, 'train') cvbooster.add_valid(valid_set, 'valid') ret.append(cvbooster) return ret
Aggregate cross-validation results. def _agg_cv_result(raw_results, eval_train_metric=False): """Aggregate cross-validation results.""" cvmap = collections.defaultdict(list) metric_type = {} for one_result in raw_results: for one_line in one_result: if eval_train_metric: key = "{} {}".format(one_line[0], one_line[1]) else: key = one_line[1] metric_type[key] = one_line[3] cvmap[key].append(one_line[2]) return [('cv_agg', k, np.mean(v), metric_type[k], np.std(v)) for k, v in cvmap.items()]
Perform the cross-validation with given parameters. Parameters ---------- params : dict Parameters for Booster. train_set : Dataset Data to be trained on. num_boost_round : int, optional (default=100) Number of boosting iterations. folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None) If generator or iterator, it should yield the train and test indices for each fold. If object, it should be one of the scikit-learn splitter classes (https://scikit-learn.org/stable/modules/classes.html#splitter-classes) and have a ``split`` method. This argument has the highest priority over other data split arguments. nfold : int, optional (default=5) Number of folds in CV. stratified : bool, optional (default=True) Whether to perform stratified sampling. shuffle : bool, optional (default=True) Whether to shuffle before splitting data. metrics : string, list of strings or None, optional (default=None) Evaluation metrics to be monitored during CV. If not None, the metric in ``params`` will be overridden. fobj : callable or None, optional (default=None) Custom objective function. feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. For multi-class task, the preds are grouped by class_id first, then by row_id. To get the i-th row's prediction for the j-th class, access preds[j * num_data + i]. To ignore the default metric corresponding to the used objective, set ``metrics`` to the string ``"None"``. init_model : string, Booster or None, optional (default=None) Filename of LightGBM model or Booster instance used to continue training. feature_name : list of strings or 'auto', optional (default="auto") Feature names. If 'auto' and data is pandas DataFrame, data column names are used. categorical_feature : list of strings or int, or 'auto', optional (default="auto") Categorical features. If list of int, interpreted as indices. If list of strings, interpreted as feature names (need to specify ``feature_name`` as well). If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used. All values in categorical features should be less than int32 max value (2147483647). Large values could be memory consuming. Consider using consecutive integers starting from zero. All negative values in categorical features will be treated as missing values. early_stopping_rounds : int or None, optional (default=None) Activates early stopping. CV score needs to improve at least every ``early_stopping_rounds`` round(s) to continue. Requires at least one metric. If there's more than one, will check all of them. To check only the first metric you can pass in ``callbacks`` the ``early_stopping`` callback with ``first_metric_only=True``. Last entry in evaluation history is the one from the best iteration. fpreproc : callable or None, optional (default=None) Preprocessing function that takes (dtrain, dtest, params) and returns transformed versions of those. verbose_eval : bool, int, or None, optional (default=None) Whether to display the progress. If None, progress will be displayed when np.ndarray is returned. If True, progress will be displayed at every boosting stage. If int, progress will be displayed at every given ``verbose_eval`` boosting stage. show_stdv : bool, optional (default=True) Whether to display the standard deviation in progress.
Results are not affected by this parameter, and always contain std. seed : int, optional (default=0) Seed used to generate the folds (passed to numpy.random.seed). callbacks : list of callables or None, optional (default=None) List of callback functions that are applied at each iteration. See Callbacks in Python API for more information. eval_train_metric : bool, optional (default=False) Whether to display the train metric in progress. The score of the metric is calculated again after each training step, so there is some impact on performance. Returns ------- eval_hist : dict Evaluation history. The dictionary has the following format: {'metric1-mean': [values], 'metric1-stdv': [values], 'metric2-mean': [values], 'metric2-stdv': [values], ...}. def cv(params, train_set, num_boost_round=100, folds=None, nfold=5, stratified=True, shuffle=True, metrics=None, fobj=None, feval=None, init_model=None, feature_name='auto', categorical_feature='auto', early_stopping_rounds=None, fpreproc=None, verbose_eval=None, show_stdv=True, seed=0, callbacks=None, eval_train_metric=False): """Perform the cross-validation with given parameters. Parameters ---------- params : dict Parameters for Booster. train_set : Dataset Data to be trained on. num_boost_round : int, optional (default=100) Number of boosting iterations. folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None) If generator or iterator, it should yield the train and test indices for each fold. If object, it should be one of the scikit-learn splitter classes (https://scikit-learn.org/stable/modules/classes.html#splitter-classes) and have a ``split`` method. This argument has the highest priority over other data split arguments. nfold : int, optional (default=5) Number of folds in CV. stratified : bool, optional (default=True) Whether to perform stratified sampling. shuffle : bool, optional (default=True) Whether to shuffle before splitting data. metrics : string, list of strings or None, optional (default=None) Evaluation metrics to be monitored during CV. If not None, the metric in ``params`` will be overridden. fobj : callable or None, optional (default=None) Custom objective function. feval : callable or None, optional (default=None) Customized evaluation function. Should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. For multi-class task, the preds are grouped by class_id first, then by row_id. To get the i-th row's prediction for the j-th class, access preds[j * num_data + i]. To ignore the default metric corresponding to the used objective, set ``metrics`` to the string ``"None"``. init_model : string, Booster or None, optional (default=None) Filename of LightGBM model or Booster instance used to continue training. feature_name : list of strings or 'auto', optional (default="auto") Feature names. If 'auto' and data is pandas DataFrame, data column names are used. categorical_feature : list of strings or int, or 'auto', optional (default="auto") Categorical features. If list of int, interpreted as indices. If list of strings, interpreted as feature names (need to specify ``feature_name`` as well). If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used. All values in categorical features should be less than int32 max value (2147483647). Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values. early_stopping_rounds : int or None, optional (default=None) Activates early stopping. CV score needs to improve at least every ``early_stopping_rounds`` round(s) to continue. Requires at least one metric. If there's more than one, will check all of them. To check only the first metric you can pass in ``callbacks`` the ``early_stopping`` callback with ``first_metric_only=True``. Last entry in evaluation history is the one from the best iteration. fpreproc : callable or None, optional (default=None) Preprocessing function that takes (dtrain, dtest, params) and returns transformed versions of those. verbose_eval : bool, int, or None, optional (default=None) Whether to display the progress. If None, progress will be displayed when np.ndarray is returned. If True, progress will be displayed at every boosting stage. If int, progress will be displayed at every given ``verbose_eval`` boosting stage. show_stdv : bool, optional (default=True) Whether to display the standard deviation in progress. Results are not affected by this parameter, and always contain std. seed : int, optional (default=0) Seed used to generate the folds (passed to numpy.random.seed). callbacks : list of callables or None, optional (default=None) List of callback functions that are applied at each iteration. See Callbacks in Python API for more information. eval_train_metric : bool, optional (default=False) Whether to display the train metric in progress. The score of the metric is calculated again after each training step, so there is some impact on performance. Returns ------- eval_hist : dict Evaluation history. The dictionary has the following format: {'metric1-mean': [values], 'metric1-stdv': [values], 'metric2-mean': [values], 'metric2-stdv': [values], ...}. """ if not isinstance(train_set, Dataset): raise TypeError("Training only accepts Dataset object") params = copy.deepcopy(params) if fobj is not None: params['objective'] = 'none' for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees", "num_round", "num_rounds", "num_boost_round", "n_estimators"]: if alias in params: warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias)) num_boost_round = params.pop(alias) break for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]: if alias in params: warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias)) early_stopping_rounds = params.pop(alias) break if num_boost_round <= 0: raise ValueError("num_boost_round should be greater than zero.") if isinstance(init_model, string_type): predictor = _InnerPredictor(model_file=init_model, pred_parameter=params) elif isinstance(init_model, Booster): predictor = init_model._to_predictor(dict(init_model.params, **params)) else: predictor = None train_set._update_params(params) \ ._set_predictor(predictor) \ .set_feature_name(feature_name) \ .set_categorical_feature(categorical_feature) if metrics is not None: params['metric'] = metrics results = collections.defaultdict(list) cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold, params=params, seed=seed, fpreproc=fpreproc, stratified=stratified, shuffle=shuffle, eval_train_metric=eval_train_metric) # setup callbacks if callbacks is None: callbacks = set() else: for i, cb in enumerate(callbacks): cb.__dict__.setdefault('order', i - len(callbacks)) callbacks = set(callbacks) if early_stopping_rounds is not None: callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=False)) if verbose_eval is True: callbacks.add(callback.print_evaluation(show_stdv=show_stdv)) elif isinstance(verbose_eval, integer_types): callbacks.add(callback.print_evaluation(verbose_eval, show_stdv=show_stdv)) callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)} callbacks_after_iter = callbacks - callbacks_before_iter callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order')) callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order')) for i in range_(num_boost_round): for cb in callbacks_before_iter: cb(callback.CallbackEnv(model=cvfolds, params=params, iteration=i, begin_iteration=0, end_iteration=num_boost_round, evaluation_result_list=None)) cvfolds.update(fobj=fobj) res = _agg_cv_result(cvfolds.eval_valid(feval), eval_train_metric) for _, key, mean, _, std in res: results[key + '-mean'].append(mean) results[key + '-stdv'].append(std) try: for cb in callbacks_after_iter: cb(callback.CallbackEnv(model=cvfolds, params=params, iteration=i, begin_iteration=0, end_iteration=num_boost_round, evaluation_result_list=res)) except callback.EarlyStopException as earlyStopException: cvfolds.best_iteration = earlyStopException.best_iteration + 1 for k in results: results[k] = results[k][:cvfolds.best_iteration] break return dict(results)
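A sketch reusing the assumed ``params``/``dtrain`` from the training example above; with early stopping enabled, the length of the returned history gives a usable ``num_boost_round``.
import lightgbm as lgb

cv_results = lgb.cv(params, dtrain, num_boost_round=500, nfold=5,
                    stratified=True, shuffle=True, seed=0,
                    early_stopping_rounds=20, verbose_eval=False)
best_rounds = len(cv_results['binary_logloss-mean'])
print('best rounds: %d, cv logloss: %.5f'
      % (best_rounds, cv_results['binary_logloss-mean'][-1]))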
Logarithmic loss with non-necessarily-binary labels. def log_loss(preds, labels): """Logarithmic loss with non-necessarily-binary labels.""" log_likelihood = np.sum(labels * np.log(preds)) / len(preds) return -log_likelihood
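A worked numeric check of the helper above (it averages only the ``labels * np.log(preds)`` term, so zero labels contribute nothing to the sum).
import numpy as np

preds = np.array([0.9, 0.2])
labels = np.array([1.0, 0.0])
# -(1.0 * log(0.9) + 0.0 * log(0.2)) / 2 = -log(0.9) / 2 ≈ 0.05268
print(log_loss(preds, labels))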
Measure performance of an objective. Parameters ---------- objective : string 'binary' or 'xentropy' Objective function. label_type : string 'binary' or 'probability' Type of the label. data : dict Data for training. Returns ------- result : dict Experiment summary stats. def experiment(objective, label_type, data): """Measure performance of an objective. Parameters ---------- objective : string 'binary' or 'xentropy' Objective function. label_type : string 'binary' or 'probability' Type of the label. data : dict Data for training. Returns ------- result : dict Experiment summary stats. """ np.random.seed(0) nrounds = 5 lgb_data = data['lgb_with_' + label_type + '_labels'] params = { 'objective': objective, 'feature_fraction': 1, 'bagging_fraction': 1, 'verbose': -1 } time_zero = time.time() gbm = lgb.train(params, lgb_data, num_boost_round=nrounds) y_fitted = gbm.predict(data['X']) y_true = data[label_type + '_labels'] duration = time.time() - time_zero return { 'time': duration, 'correlation': np.corrcoef(y_fitted, y_true)[0, 1], 'logloss': log_loss(y_fitted, y_true) }
Check that the object is a tuple of 2 elements and raise TypeError if it is not. def _check_not_tuple_of_2_elements(obj, obj_name='obj'): """Check that the object is a tuple of 2 elements and raise TypeError if it is not.""" if not isinstance(obj, tuple) or len(obj) != 2: raise TypeError('%s must be a tuple of 2 elements.' % obj_name)
Plot model's feature importances. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance which feature importance should be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. height : float, optional (default=0.2) Bar height, passed to ``ax.barh()``. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : string or None, optional (default="Feature importance") Axes title. If None, title is disabled. xlabel : string or None, optional (default="Feature importance") X-axis title label. If None, title is disabled. ylabel : string or None, optional (default="Features") Y-axis title label. If None, title is disabled. importance_type : string, optional (default="split") How the importance is calculated. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. max_num_features : int or None, optional (default=None) Max number of top features displayed on plot. If None or <1, all features will be displayed. ignore_zero : bool, optional (default=True) Whether to ignore features with zero importance. figsize : tuple of 2 elements or None, optional (default=None) Figure size. grid : bool, optional (default=True) Whether to add a grid for axes. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``ax.barh()``. Returns ------- ax : matplotlib.axes.Axes The plot with model's feature importances. def plot_importance(booster, ax=None, height=0.2, xlim=None, ylim=None, title='Feature importance', xlabel='Feature importance', ylabel='Features', importance_type='split', max_num_features=None, ignore_zero=True, figsize=None, grid=True, precision=None, **kwargs): """Plot model's feature importances. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance which feature importance should be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. height : float, optional (default=0.2) Bar height, passed to ``ax.barh()``. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.ylim()``. title : string or None, optional (default="Feature importance") Axes title. If None, title is disabled. xlabel : string or None, optional (default="Feature importance") X-axis title label. If None, title is disabled. ylabel : string or None, optional (default="Features") Y-axis title label. If None, title is disabled. importance_type : string, optional (default="split") How the importance is calculated. If "split", result contains numbers of times the feature is used in a model. If "gain", result contains total gains of splits which use the feature. max_num_features : int or None, optional (default=None) Max number of top features displayed on plot. If None or <1, all features will be displayed. ignore_zero : bool, optional (default=True) Whether to ignore features with zero importance. figsize : tuple of 2 elements or None, optional (default=None) Figure size. grid : bool, optional (default=True) Whether to add a grid for axes. 
precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``ax.barh()``. Returns ------- ax : matplotlib.axes.Axes The plot with model's feature importances. """ if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib to plot importance.') if isinstance(booster, LGBMModel): booster = booster.booster_ elif not isinstance(booster, Booster): raise TypeError('booster must be Booster or LGBMModel.') importance = booster.feature_importance(importance_type=importance_type) feature_name = booster.feature_name() if not len(importance): raise ValueError("Booster's feature_importance is empty.") tuples = sorted(zip_(feature_name, importance), key=lambda x: x[1]) if ignore_zero: tuples = [x for x in tuples if x[1] > 0] if max_num_features is not None and max_num_features > 0: tuples = tuples[-max_num_features:] labels, values = zip_(*tuples) if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize) ylocs = np.arange(len(values)) ax.barh(ylocs, values, align='center', height=height, **kwargs) for x, y in zip_(values, ylocs): ax.text(x + 1, y, _float2str(x, precision) if importance_type == 'gain' else x, va='center') ax.set_yticks(ylocs) ax.set_yticklabels(labels) if xlim is not None: _check_not_tuple_of_2_elements(xlim, 'xlim') else: xlim = (0, max(values) * 1.1) ax.set_xlim(xlim) if ylim is not None: _check_not_tuple_of_2_elements(ylim, 'ylim') else: ylim = (-1, len(values)) ax.set_ylim(ylim) if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.grid(grid) return ax
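A plotting sketch (``bst`` assumed trained, matplotlib installed).
import matplotlib.pyplot as plt
import lightgbm as lgb

ax = lgb.plot_importance(bst, importance_type='gain',
                         max_num_features=10, precision=2)
plt.tight_layout()
plt.show()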
Plot one metric during training. Parameters ---------- booster : dict or LGBMModel Dictionary returned from ``lightgbm.train()`` or LGBMModel instance. metric : string or None, optional (default=None) The metric name to plot. Only one metric is supported because different metrics have different scales. If None, the first metric found in the dictionary is used (dictionary order is not guaranteed). dataset_names : list of strings or None, optional (default=None) List of the dataset names which are used to calculate the metric to plot. If None, all datasets are used. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.set_xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.set_ylim()``. title : string or None, optional (default="Metric during training") Axes title. If None, title is disabled. xlabel : string or None, optional (default="Iterations") X-axis title label. If None, label is disabled. ylabel : string or None, optional (default="auto") Y-axis title label. If 'auto', metric name is used. If None, label is disabled. figsize : tuple of 2 elements or None, optional (default=None) Figure size. grid : bool, optional (default=True) Whether to add a grid for axes. Returns ------- ax : matplotlib.axes.Axes The plot with the metric's history over training. def plot_metric(booster, metric=None, dataset_names=None, ax=None, xlim=None, ylim=None, title='Metric during training', xlabel='Iterations', ylabel='auto', figsize=None, grid=True): """Plot one metric during training. Parameters ---------- booster : dict or LGBMModel Dictionary returned from ``lightgbm.train()`` or LGBMModel instance. metric : string or None, optional (default=None) The metric name to plot. Only one metric is supported because different metrics have different scales. If None, the first metric found in the dictionary is used (dictionary order is not guaranteed). dataset_names : list of strings or None, optional (default=None) List of the dataset names which are used to calculate the metric to plot. If None, all datasets are used. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. xlim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.set_xlim()``. ylim : tuple of 2 elements or None, optional (default=None) Tuple passed to ``ax.set_ylim()``. title : string or None, optional (default="Metric during training") Axes title. If None, title is disabled. xlabel : string or None, optional (default="Iterations") X-axis title label. If None, label is disabled. ylabel : string or None, optional (default="auto") Y-axis title label. If 'auto', metric name is used. If None, label is disabled. figsize : tuple of 2 elements or None, optional (default=None) Figure size. grid : bool, optional (default=True) Whether to add a grid for axes. Returns ------- ax : matplotlib.axes.Axes The plot with the metric's history over training.
""" if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt else: raise ImportError('You must install matplotlib to plot metric.') if isinstance(booster, LGBMModel): eval_results = deepcopy(booster.evals_result_) elif isinstance(booster, dict): eval_results = deepcopy(booster) else: raise TypeError('booster must be dict or LGBMModel.') num_data = len(eval_results) if not num_data: raise ValueError('eval results cannot be empty.') if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize) if dataset_names is None: dataset_names = iter(eval_results.keys()) elif not isinstance(dataset_names, (list, tuple, set)) or not dataset_names: raise ValueError('dataset_names should be iterable and cannot be empty') else: dataset_names = iter(dataset_names) name = next(dataset_names) # take one as sample metrics_for_one = eval_results[name] num_metric = len(metrics_for_one) if metric is None: if num_metric > 1: msg = """more than one metric available, picking one to plot.""" warnings.warn(msg, stacklevel=2) metric, results = metrics_for_one.popitem() else: if metric not in metrics_for_one: raise KeyError('No given metric in eval results.') results = metrics_for_one[metric] num_iteration, max_result, min_result = len(results), max(results), min(results) x_ = range_(num_iteration) ax.plot(x_, results, label=name) for name in dataset_names: metrics_for_one = eval_results[name] results = metrics_for_one[metric] max_result, min_result = max(max(results), max_result), min(min(results), min_result) ax.plot(x_, results, label=name) ax.legend(loc='best') if xlim is not None: _check_not_tuple_of_2_elements(xlim, 'xlim') else: xlim = (0, num_iteration) ax.set_xlim(xlim) if ylim is not None: _check_not_tuple_of_2_elements(ylim, 'ylim') else: range_result = max_result - min_result ylim = (min_result - range_result * 0.2, max_result + range_result * 0.2) ax.set_ylim(ylim) if ylabel == 'auto': ylabel = metric if title is not None: ax.set_title(title) if xlabel is not None: ax.set_xlabel(xlabel) if ylabel is not None: ax.set_ylabel(ylabel) ax.grid(grid) return ax
Convert specified tree to graphviz instance. See: - https://graphviz.readthedocs.io/en/stable/api.html#digraph def _to_graphviz(tree_info, show_info, feature_names, precision=None, **kwargs): """Convert specified tree to graphviz instance. See: - https://graphviz.readthedocs.io/en/stable/api.html#digraph """ if GRAPHVIZ_INSTALLED: from graphviz import Digraph else: raise ImportError('You must install graphviz to plot tree.') def add(root, parent=None, decision=None): """Recursively add node or edge.""" if 'split_index' in root: # non-leaf name = 'split{0}'.format(root['split_index']) if feature_names is not None: label = 'split_feature_name: {0}'.format(feature_names[root['split_feature']]) else: label = 'split_feature_index: {0}'.format(root['split_feature']) label += r'\nthreshold: {0}'.format(_float2str(root['threshold'], precision)) for info in show_info: if info in {'split_gain', 'internal_value'}: label += r'\n{0}: {1}'.format(info, _float2str(root[info], precision)) elif info == 'internal_count': label += r'\n{0}: {1}'.format(info, root[info]) graph.node(name, label=label) if root['decision_type'] == '<=': l_dec, r_dec = '<=', '>' elif root['decision_type'] == '==': l_dec, r_dec = 'is', "isn't" else: raise ValueError('Invalid decision type in tree model.') add(root['left_child'], name, l_dec) add(root['right_child'], name, r_dec) else: # leaf name = 'leaf{0}'.format(root['leaf_index']) label = 'leaf_index: {0}'.format(root['leaf_index']) label += r'\nleaf_value: {0}'.format(_float2str(root['leaf_value'], precision)) if 'leaf_count' in show_info: label += r'\nleaf_count: {0}'.format(root['leaf_count']) graph.node(name, label=label) if parent is not None: graph.edge(parent, name, decision) graph = Digraph(**kwargs) add(tree_info['tree_structure']) return graph
Create a digraph representation of the specified tree. Note ---- For more information please visit https://graphviz.readthedocs.io/en/stable/api.html#digraph. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be converted. tree_index : int, optional (default=0) The index of a target tree to convert. show_info : list of strings or None, optional (default=None) What information should be shown in nodes. Possible values of list items: 'split_gain', 'internal_value', 'internal_count', 'leaf_count'. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``Digraph`` constructor. Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters. Returns ------- graph : graphviz.Digraph The digraph representation of the specified tree. def create_tree_digraph(booster, tree_index=0, show_info=None, precision=None, old_name=None, old_comment=None, old_filename=None, old_directory=None, old_format=None, old_engine=None, old_encoding=None, old_graph_attr=None, old_node_attr=None, old_edge_attr=None, old_body=None, old_strict=False, **kwargs): """Create a digraph representation of the specified tree. Note ---- For more information please visit https://graphviz.readthedocs.io/en/stable/api.html#digraph. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be converted. tree_index : int, optional (default=0) The index of a target tree to convert. show_info : list of strings or None, optional (default=None) What information should be shown in nodes. Possible values of list items: 'split_gain', 'internal_value', 'internal_count', 'leaf_count'. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``Digraph`` constructor. Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters. Returns ------- graph : graphviz.Digraph The digraph representation of the specified tree. """ if isinstance(booster, LGBMModel): booster = booster.booster_ elif not isinstance(booster, Booster): raise TypeError('booster must be Booster or LGBMModel.') for param_name in ['old_name', 'old_comment', 'old_filename', 'old_directory', 'old_format', 'old_engine', 'old_encoding', 'old_graph_attr', 'old_node_attr', 'old_edge_attr', 'old_body']: param = locals().get(param_name) if param is not None: warnings.warn('{0} parameter is deprecated and will be removed in 2.4 version.\n' 'Please use **kwargs to pass {1} parameter.'.format(param_name, param_name[4:]), LGBMDeprecationWarning) if param_name[4:] not in kwargs: kwargs[param_name[4:]] = param if locals().get('old_strict'): warnings.warn('old_strict parameter is deprecated and will be removed in 2.4 version.\n' 'Please use **kwargs to pass strict parameter.', LGBMDeprecationWarning) if 'strict' not in kwargs: kwargs['strict'] = True model = booster.dump_model() tree_infos = model['tree_info'] if 'feature_names' in model: feature_names = model['feature_names'] else: feature_names = None if tree_index < len(tree_infos): tree_info = tree_infos[tree_index] else: raise IndexError('tree_index is out of range.') if show_info is None: show_info = [] graph = _to_graphviz(tree_info, show_info, feature_names, precision, **kwargs) return graph
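A usage sketch for create_tree_digraph (and, via it, the _to_graphviz helper above), assuming the graphviz executables are installed so the digraph can be rendered to disk:

import numpy as np
import lightgbm as lgb

X, y = np.random.rand(100, 5), np.random.rand(100)
bst = lgb.train({'objective': 'regression', 'verbose': -1},
                lgb.Dataset(X, label=y), num_boost_round=5)
graph = lgb.create_tree_digraph(bst, tree_index=0,
                                show_info=['split_gain', 'leaf_count'],
                                precision=3)
graph.format = 'png'
graph.render('tree0', cleanup=True)  # writes tree0.png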
Plot the specified tree. Note ---- It is preferable to use ``create_tree_digraph()`` because of its lossless quality, and because the returned objects can also be rendered and displayed directly inside a Jupyter notebook. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. tree_index : int, optional (default=0) The index of a target tree to plot. figsize : tuple of 2 elements or None, optional (default=None) Figure size. show_info : list of strings or None, optional (default=None) What information should be shown in nodes. Possible values of list items: 'split_gain', 'internal_value', 'internal_count', 'leaf_count'. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``Digraph`` constructor. Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters. Returns ------- ax : matplotlib.axes.Axes The plot with a single tree. def plot_tree(booster, ax=None, tree_index=0, figsize=None, old_graph_attr=None, old_node_attr=None, old_edge_attr=None, show_info=None, precision=None, **kwargs): """Plot the specified tree. Note ---- It is preferable to use ``create_tree_digraph()`` because of its lossless quality, and because the returned objects can also be rendered and displayed directly inside a Jupyter notebook. Parameters ---------- booster : Booster or LGBMModel Booster or LGBMModel instance to be plotted. ax : matplotlib.axes.Axes or None, optional (default=None) Target axes instance. If None, new figure and axes will be created. tree_index : int, optional (default=0) The index of a target tree to plot. figsize : tuple of 2 elements or None, optional (default=None) Figure size. show_info : list of strings or None, optional (default=None) What information should be shown in nodes. Possible values of list items: 'split_gain', 'internal_value', 'internal_count', 'leaf_count'. precision : int or None, optional (default=None) Used to restrict the display of floating point values to a certain precision. **kwargs Other parameters passed to ``Digraph`` constructor. Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters. Returns ------- ax : matplotlib.axes.Axes The plot with a single tree. """ if MATPLOTLIB_INSTALLED: import matplotlib.pyplot as plt import matplotlib.image as image else: raise ImportError('You must install matplotlib to plot tree.') for param_name in ['old_graph_attr', 'old_node_attr', 'old_edge_attr']: param = locals().get(param_name) if param is not None: warnings.warn('{0} parameter is deprecated and will be removed in 2.4 version.\n' 'Please use **kwargs to pass {1} parameter.'.format(param_name, param_name[4:]), LGBMDeprecationWarning) if param_name[4:] not in kwargs: kwargs[param_name[4:]] = param if ax is None: if figsize is not None: _check_not_tuple_of_2_elements(figsize, 'figsize') _, ax = plt.subplots(1, 1, figsize=figsize) graph = create_tree_digraph(booster=booster, tree_index=tree_index, show_info=show_info, precision=precision, **kwargs) s = BytesIO() s.write(graph.pipe(format='png')) s.seek(0) img = image.imread(s) ax.imshow(img) ax.axis('off') return ax
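The matplotlib-based equivalent, which rasterizes the graphviz output into an axes (same synthetic setup as in the sketch above):

import matplotlib.pyplot as plt
import numpy as np
import lightgbm as lgb

X, y = np.random.rand(100, 5), np.random.rand(100)
bst = lgb.train({'objective': 'regression', 'verbose': -1},
                lgb.Dataset(X, label=y), num_boost_round=5)
ax = lgb.plot_tree(bst, tree_index=0, figsize=(20, 8),
                   show_info=['internal_value', 'leaf_count'])
plt.show()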
Return the -std=c++[0x/11/14] compiler flag. c++14 is preferred over c++0x/11 when available. def cpp_flag(compiler): """Return the -std=c++[0x/11/14] compiler flag. c++14 is preferred over c++0x/11 when available. """ standards = ['-std=c++14', '-std=c++11', '-std=c++0x'] for standard in standards: if has_flag(compiler, [standard]): return standard raise RuntimeError( 'Unsupported compiler -- at least C++0x support ' 'is needed!' )
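A sketch of how cpp_flag is typically wired into a setuptools build, in the style of the pybind11 example setup.py; cpp_flag and the has_flag probe it calls are assumed to live in the same module:

from setuptools.command.build_ext import build_ext

class BuildExt(build_ext):
    # Custom build_ext that appends the best available C++ standard flag.
    def build_extensions(self):
        std_flag = cpp_flag(self.compiler)  # '-std=c++14', '-std=c++11' or '-std=c++0x'
        for ext in self.extensions:
            ext.extra_compile_args = list(ext.extra_compile_args) + [std_flag]
        build_ext.build_extensions(self)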
query is a 1d numpy array corresponding to the vector to which you want to find the closest vector vectors is a 2d numpy array corresponding to the vectors you want to consider ban_set is a set of indices within vectors you want to ignore for nearest match cossims is a 1d numpy array of size len(vectors), which can be passed for efficiency returns the index of the closest match to query within vectors def find_nearest_neighbor(query, vectors, ban_set, cossims=None): """ query is a 1d numpy array corresponding to the vector to which you want to find the closest vector vectors is a 2d numpy array corresponding to the vectors you want to consider ban_set is a set of indices within vectors you want to ignore for nearest match cossims is a 1d numpy array of size len(vectors), which can be passed for efficiency returns the index of the closest match to query within vectors """ if cossims is None: cossims = np.matmul(vectors, query) else: np.matmul(vectors, query, out=cossims) rank = len(cossims) - 1 result_i = np.argpartition(cossims, rank)[rank] while result_i in ban_set: rank -= 1 result_i = np.argpartition(cossims, rank)[rank] return result_i
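A tiny self-contained demo; note the dot products act as cosine similarities only if the rows of vectors are unit-normalized, which the caller is responsible for:

import numpy as np

vectors = np.array([[1.0, 0.0],
                    [0.0, 1.0],
                    [0.6, 0.8]])  # unit-length rows
query = np.array([0.6, 0.8])
print(find_nearest_neighbor(query, vectors, ban_set=set()))  # -> 2 (exact match)
print(find_nearest_neighbor(query, vectors, ban_set={2}))    # -> 1 (cossim 0.8)
cossims = np.empty(len(vectors))  # reusable buffer for repeated queries
print(find_nearest_neighbor(query, vectors, {2}, cossims=cossims))  # -> 1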
Train a supervised model and return a model object. input must be a filepath. The input text does not need to be tokenized as per the tokenize function, but it must be preprocessed and encoded as UTF-8. You might want to consult standard preprocessing scripts such as tokenizer.perl mentioned here: http://www.statmt.org/wmt07/baseline.html The input file must contain at least one label per line. For an example consult the example datasets which are part of the fastText repository such as the dataset pulled by classification-example.sh. def train_supervised( input, lr=0.1, dim=100, ws=5, epoch=5, minCount=1, minCountLabel=0, minn=0, maxn=0, neg=5, wordNgrams=1, loss="softmax", bucket=2000000, thread=multiprocessing.cpu_count() - 1, lrUpdateRate=100, t=1e-4, label="__label__", verbose=2, pretrainedVectors="", ): """ Train a supervised model and return a model object. input must be a filepath. The input text does not need to be tokenized as per the tokenize function, but it must be preprocessed and encoded as UTF-8. You might want to consult standard preprocessing scripts such as tokenizer.perl mentioned here: http://www.statmt.org/wmt07/baseline.html The input file must contain at least one label per line. For an example consult the example datasets which are part of the fastText repository such as the dataset pulled by classification-example.sh. """ model = "supervised" a = _build_args(locals()) ft = _FastText() fasttext.train(ft.f, a) return ft
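A hedged usage sketch; 'cooking.train' is a hypothetical path to a UTF-8 training file with one example per line and labels prefixed by __label__:

model = train_supervised(input='cooking.train', epoch=25, lr=1.0, wordNgrams=2)
print(model.predict("Which baking dish is best to bake a banana bread ?"))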
Get the vector representation of word. def get_word_vector(self, word): """Get the vector representation of word.""" dim = self.get_dimension() b = fasttext.Vector(dim) self.f.getWordVector(b, word) return np.array(b)
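Usage, assuming model is any loaded or trained fastText model (e.g. from train_supervised above):

vec = model.get_word_vector("king")
print(vec.shape)  # (model.get_dimension(),), e.g. (100,)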
Given a string, get a single vector representation. This function assumes it is given a single line of text. We split words on whitespace (space, newline, tab, vertical tab) and the control characters carriage return, formfeed and the null character. def get_sentence_vector(self, text): """ Given a string, get a single vector representation. This function assumes it is given a single line of text. We split words on whitespace (space, newline, tab, vertical tab) and the control characters carriage return, formfeed and the null character. """ if text.find('\n') != -1: raise ValueError( "predict processes one line at a time (remove \'\\n\')" ) text += "\n" dim = self.get_dimension() b = fasttext.Vector(dim) self.f.getSentenceVector(b, text) return np.array(b)
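Usage sketch (same hypothetical model as above); passing text that contains a newline raises ValueError, so feed one line at a time:

sent_vec = model.get_sentence_vector("the cat sat on the mat")
print(sent_vec.shape)  # (model.get_dimension(),)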
Given a word, get the subwords and their indices. def get_subwords(self, word, on_unicode_error='strict'): """ Given a word, get the subwords and their indices. """ pair = self.f.getSubwords(word, on_unicode_error) return pair[0], np.array(pair[1])
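Usage sketch (model as above):

subwords, indices = model.get_subwords("unbelievable")
print(subwords[:3])  # the word itself and/or its character n-grams, e.g. '<un', 'unb'
print(indices[:3])   # matching rows in the model's input matrix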
Given an index, get the corresponding vector of the Input Matrix. def get_input_vector(self, ind): """ Given an index, get the corresponding vector of the Input Matrix. """ dim = self.get_dimension() b = fasttext.Vector(dim) self.f.getInputVector(b, ind) return np.array(b)
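A sketch tying this to get_subwords: each returned index addresses a row of the input matrix, and a word's vector is (roughly) the average of those rows:

subwords, indices = model.get_subwords("unbelievable")
row = model.get_input_vector(int(indices[0]))
print(row.shape)  # (model.get_dimension(),)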
Given a string, get a list of labels and a list of corresponding probabilities. k controls the number of returned labels. A choice of 5 will return the 5 most probable labels. By default this returns only the most likely label and probability. threshold filters the returned labels by a threshold on probability. A choice of 0.5 will return labels with at least 0.5 probability. k and threshold will be applied together to determine the returned labels. This function assumes it is given a single line of text. We split words on whitespace (space, newline, tab, vertical tab) and the control characters carriage return, formfeed and the null character. If the model is not supervised, this function will throw a ValueError. If given a list of strings, it will return a list of results as usually received for a single line of text. def predict(self, text, k=1, threshold=0.0, on_unicode_error='strict'): """ Given a string, get a list of labels and a list of corresponding probabilities. k controls the number of returned labels. A choice of 5 will return the 5 most probable labels. By default this returns only the most likely label and probability. threshold filters the returned labels by a threshold on probability. A choice of 0.5 will return labels with at least 0.5 probability. k and threshold will be applied together to determine the returned labels. This function assumes it is given a single line of text. We split words on whitespace (space, newline, tab, vertical tab) and the control characters carriage return, formfeed and the null character. If the model is not supervised, this function will throw a ValueError. If given a list of strings, it will return a list of results as usually received for a single line of text. """ def check(entry): if entry.find('\n') != -1: raise ValueError( "predict processes one line at a time (remove \'\\n\')" ) entry += "\n" return entry if type(text) == list: text = [check(entry) for entry in text] predictions = self.f.multilinePredict(text, k, threshold, on_unicode_error) dt = np.dtype([('probability', 'float64'), ('label', 'object')]) result_as_pair = np.array(predictions, dtype=dt) return result_as_pair['label'].tolist(), result_as_pair['probability'] else: text = check(text) predictions = self.f.predict(text, k, threshold, on_unicode_error) probs, labels = zip(*predictions) return labels, np.array(probs, copy=False)
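Usage sketch for both the single-line and batched forms (model as above, trained supervised):

labels, probs = model.predict("Which baking dish is best ?", k=3)
print(labels, probs)  # top-3 labels and their probabilities
# A list of lines returns per-line results in a single call.
batch_labels, batch_probs = model.predict(["first line", "second line"], k=2)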
Get a copy of the full input matrix of a Model. This only works if the model is not quantized. def get_input_matrix(self): """ Get a copy of the full input matrix of a Model. This only works if the model is not quantized. """ if self.f.isQuant(): raise ValueError("Can't get quantized Matrix") return np.array(self.f.getInputMatrix())
Get a copy of the full output matrix of a Model. This only works if the model is not quantized. def get_output_matrix(self): """ Get a copy of the full output matrix of a Model. This only works if the model is not quantized. """ if self.f.isQuant(): raise ValueError("Can't get quantized Matrix") return np.array(self.f.getOutputMatrix())
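A sketch inspecting both matrices of a non-quantized model (model as above); quantized models raise ValueError here:

in_mat = model.get_input_matrix()    # rows: vocabulary words + subword buckets
out_mat = model.get_output_matrix()  # rows: labels (supervised) or words
print(in_mat.shape, out_mat.shape)   # both have get_dimension() columns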
Get the entire list of words of the dictionary, optionally including the frequency of the individual words. This does not include any subwords. For that please consult the function get_subwords. def get_words(self, include_freq=False, on_unicode_error='strict'): """ Get the entire list of words of the dictionary, optionally including the frequency of the individual words. This does not include any subwords. For that please consult the function get_subwords. """ pair = self.f.getVocab(on_unicode_error) if include_freq: return (pair[0], np.array(pair[1])) else: return pair[0]
Get the entire list of labels of the dictionary, optionally including the frequency of the individual labels. Unsupervised models use words as labels, which is why get_labels will call and return get_words for this type of model. def get_labels(self, include_freq=False, on_unicode_error='strict'): """ Get the entire list of labels of the dictionary, optionally including the frequency of the individual labels. Unsupervised models use words as labels, which is why get_labels will call and return get_words for this type of model. """ a = self.f.getArgs() if a.model == model_name.supervised: pair = self.f.getLabels(on_unicode_error) if include_freq: return (pair[0], np.array(pair[1])) else: return pair[0] else: return self.get_words(include_freq)
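Usage sketch for the dictionary accessors (model as above):

words, counts = model.get_words(include_freq=True)
print(words[:5], counts[:5])  # vocabulary entries and their corpus counts
labels = model.get_labels()   # falls back to get_words() for unsupervised models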