diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__init__.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e2418395425d7ecce9e1a4da68985c8fde93bc1c --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__init__.py @@ -0,0 +1,20 @@ +from .main import minimize +from .utils import show_versions + +# PEP0440 compatible formatted version, see: +# https://www.python.org/dev/peps/pep-0440/ +# +# Final release markers: +# X.Y.0 # For first release after an increment in Y +# X.Y.Z # For bugfix releases +# +# Admissible pre-release markers: +# X.YaN # Alpha release +# X.YbN # Beta release +# X.YrcN # Release Candidate +# +# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. +# 'X.Y.dev0' is the canonical version of 'X.Y.dev'. +__version__ = "1.1.2" + +__all__ = ["minimize", "show_versions"] diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/__init__.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcaa64bac388eb3bdc024ca9b8a31a587b39719b Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/__init__.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/framework.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/framework.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9222dafadc1f6f23f299f4906f3fbe796bacaf1f Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/framework.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/main.cpython-310.pyc 
b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/main.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a86ff30e24ab1b019d259612f94bb9584972dfb3 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/main.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/models.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cb07edd9eafbd8a7f1574b30cd4bc7ef45731be Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/models.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/problem.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/problem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55416569bdf08fa7113fbc04378ff51f3755ec2e Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/problem.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/settings.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/settings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53b641ff74fbd358ed17d7142617bcbfb63d34b9 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/__pycache__/settings.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/framework.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/framework.py new file mode 100644 index 0000000000000000000000000000000000000000..9afea66281067e27a486ff317a4bffa03ec3e68b --- /dev/null +++ 
b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/framework.py @@ -0,0 +1,1240 @@ +import warnings + +import numpy as np +from scipy.optimize import lsq_linear + +from .models import Models, Quadratic +from .settings import Options, Constants +from .subsolvers import ( + cauchy_geometry, + spider_geometry, + normal_byrd_omojokun, + tangential_byrd_omojokun, + constrained_tangential_byrd_omojokun, +) +from .subsolvers.optim import qr_tangential_byrd_omojokun +from .utils import get_arrays_tol + + +TINY = np.finfo(float).tiny +EPS = np.finfo(float).eps + + +class TrustRegion: + """ + Trust-region framework. + """ + + def __init__(self, pb, options, constants): + """ + Initialize the trust-region framework. + + Parameters + ---------- + pb : `cobyqa.problem.Problem` + Problem to solve. + options : dict + Options of the solver. + constants : dict + Constants of the solver. + + Raises + ------ + `cobyqa.utils.MaxEvalError` + If the maximum number of evaluations is reached. + `cobyqa.utils.TargetSuccess` + If a nearly feasible point has been found with an objective + function value below the target. + `cobyqa.utils.FeasibleSuccess` + If a feasible point has been found for a feasibility problem. + `numpy.linalg.LinAlgError` + If the initial interpolation system is ill-defined. + """ + # Set the initial penalty parameter. + self._penalty = 0.0 + + # Initialize the models. + self._pb = pb + self._models = Models(self._pb, options, self.penalty) + self._constants = constants + + # Set the index of the best interpolation point. + self._best_index = 0 + self.set_best_index() + + # Set the initial Lagrange multipliers. + self._lm_linear_ub = np.zeros(self.m_linear_ub) + self._lm_linear_eq = np.zeros(self.m_linear_eq) + self._lm_nonlinear_ub = np.zeros(self.m_nonlinear_ub) + self._lm_nonlinear_eq = np.zeros(self.m_nonlinear_eq) + self.set_multipliers(self.x_best) + + # Set the initial trust-region radius and the resolution. 
+ self._resolution = options[Options.RHOBEG] + self._radius = self.resolution + + @property + def n(self): + """ + Number of variables. + + Returns + ------- + int + Number of variables. + """ + return self._pb.n + + @property + def m_linear_ub(self): + """ + Number of linear inequality constraints. + + Returns + ------- + int + Number of linear inequality constraints. + """ + return self._pb.m_linear_ub + + @property + def m_linear_eq(self): + """ + Number of linear equality constraints. + + Returns + ------- + int + Number of linear equality constraints. + """ + return self._pb.m_linear_eq + + @property + def m_nonlinear_ub(self): + """ + Number of nonlinear inequality constraints. + + Returns + ------- + int + Number of nonlinear inequality constraints. + """ + return self._pb.m_nonlinear_ub + + @property + def m_nonlinear_eq(self): + """ + Number of nonlinear equality constraints. + + Returns + ------- + int + Number of nonlinear equality constraints. + """ + return self._pb.m_nonlinear_eq + + @property + def radius(self): + """ + Trust-region radius. + + Returns + ------- + float + Trust-region radius. + """ + return self._radius + + @radius.setter + def radius(self, radius): + """ + Set the trust-region radius. + + Parameters + ---------- + radius : float + New trust-region radius. + """ + self._radius = radius + if ( + self.radius + <= self._constants[Constants.DECREASE_RADIUS_THRESHOLD] + * self.resolution + ): + self._radius = self.resolution + + @property + def resolution(self): + """ + Resolution of the trust-region framework. + + The resolution is a lower bound on the trust-region radius. + + Returns + ------- + float + Resolution of the trust-region framework. + """ + return self._resolution + + @resolution.setter + def resolution(self, resolution): + """ + Set the resolution of the trust-region framework. + + Parameters + ---------- + resolution : float + New resolution of the trust-region framework. 
+ """ + self._resolution = resolution + + @property + def penalty(self): + """ + Penalty parameter. + + Returns + ------- + float + Penalty parameter. + """ + return self._penalty + + @property + def models(self): + """ + Models of the objective function and constraints. + + Returns + ------- + `cobyqa.models.Models` + Models of the objective function and constraints. + """ + return self._models + + @property + def best_index(self): + """ + Index of the best interpolation point. + + Returns + ------- + int + Index of the best interpolation point. + """ + return self._best_index + + @property + def x_best(self): + """ + Best interpolation point. + + Its value is interpreted as relative to the origin, not the base point. + + Returns + ------- + `numpy.ndarray` + Best interpolation point. + """ + return self.models.interpolation.point(self.best_index) + + @property + def fun_best(self): + """ + Value of the objective function at `x_best`. + + Returns + ------- + float + Value of the objective function at `x_best`. + """ + return self.models.fun_val[self.best_index] + + @property + def cub_best(self): + """ + Values of the nonlinear inequality constraints at `x_best`. + + Returns + ------- + `numpy.ndarray`, shape (m_nonlinear_ub,) + Values of the nonlinear inequality constraints at `x_best`. + """ + return self.models.cub_val[self.best_index, :] + + @property + def ceq_best(self): + """ + Values of the nonlinear equality constraints at `x_best`. + + Returns + ------- + `numpy.ndarray`, shape (m_nonlinear_eq,) + Values of the nonlinear equality constraints at `x_best`. + """ + return self.models.ceq_val[self.best_index, :] + + def lag_model(self, x): + """ + Evaluate the Lagrangian model at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which the Lagrangian model is evaluated. + + Returns + ------- + float + Value of the Lagrangian model at `x`. 
+ """ + return ( + self.models.fun(x) + + self._lm_linear_ub + @ (self._pb.linear.a_ub @ x - self._pb.linear.b_ub) + + self._lm_linear_eq + @ (self._pb.linear.a_eq @ x - self._pb.linear.b_eq) + + self._lm_nonlinear_ub @ self.models.cub(x) + + self._lm_nonlinear_eq @ self.models.ceq(x) + ) + + def lag_model_grad(self, x): + """ + Evaluate the gradient of the Lagrangian model at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which the gradient of the Lagrangian model is evaluated. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Gradient of the Lagrangian model at `x`. + """ + return ( + self.models.fun_grad(x) + + self._lm_linear_ub @ self._pb.linear.a_ub + + self._lm_linear_eq @ self._pb.linear.a_eq + + self._lm_nonlinear_ub @ self.models.cub_grad(x) + + self._lm_nonlinear_eq @ self.models.ceq_grad(x) + ) + + def lag_model_hess(self): + """ + Evaluate the Hessian matrix of the Lagrangian model at a given point. + + Returns + ------- + `numpy.ndarray`, shape (n, n) + Hessian matrix of the Lagrangian model at `x`. + """ + hess = self.models.fun_hess() + if self.m_nonlinear_ub > 0: + hess += self._lm_nonlinear_ub @ self.models.cub_hess() + if self.m_nonlinear_eq > 0: + hess += self._lm_nonlinear_eq @ self.models.ceq_hess() + return hess + + def lag_model_hess_prod(self, v): + """ + Evaluate the right product of the Hessian matrix of the Lagrangian + model with a given vector. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Vector with which the Hessian matrix of the Lagrangian model is + multiplied from the right. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Right product of the Hessian matrix of the Lagrangian model with + `v`. 
+ """ + return ( + self.models.fun_hess_prod(v) + + self._lm_nonlinear_ub @ self.models.cub_hess_prod(v) + + self._lm_nonlinear_eq @ self.models.ceq_hess_prod(v) + ) + + def lag_model_curv(self, v): + """ + Evaluate the curvature of the Lagrangian model along a given direction. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Direction along which the curvature of the Lagrangian model is + evaluated. + + Returns + ------- + float + Curvature of the Lagrangian model along `v`. + """ + return ( + self.models.fun_curv(v) + + self._lm_nonlinear_ub @ self.models.cub_curv(v) + + self._lm_nonlinear_eq @ self.models.ceq_curv(v) + ) + + def sqp_fun(self, step): + """ + Evaluate the objective function of the SQP subproblem. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Step along which the objective function of the SQP subproblem is + evaluated. + + Returns + ------- + float + Value of the objective function of the SQP subproblem along `step`. + """ + return step @ ( + self.models.fun_grad(self.x_best) + + 0.5 * self.lag_model_hess_prod(step) + ) + + def sqp_cub(self, step): + """ + Evaluate the linearization of the nonlinear inequality constraints. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Step along which the linearization of the nonlinear inequality + constraints is evaluated. + + Returns + ------- + `numpy.ndarray`, shape (m_nonlinear_ub,) + Value of the linearization of the nonlinear inequality constraints + along `step`. + """ + return ( + self.models.cub(self.x_best) + + self.models.cub_grad(self.x_best) @ step + ) + + def sqp_ceq(self, step): + """ + Evaluate the linearization of the nonlinear equality constraints. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Step along which the linearization of the nonlinear equality + constraints is evaluated. 
+ + Returns + ------- + `numpy.ndarray`, shape (m_nonlinear_ub,) + Value of the linearization of the nonlinear equality constraints + along `step`. + """ + return ( + self.models.ceq(self.x_best) + + self.models.ceq_grad(self.x_best) @ step + ) + + def merit(self, x, fun_val=None, cub_val=None, ceq_val=None): + """ + Evaluate the merit function at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which the merit function is evaluated. + fun_val : float, optional + Value of the objective function at `x`. If not provided, the + objective function is evaluated at `x`. + cub_val : `numpy.ndarray`, shape (m_nonlinear_ub,), optional + Values of the nonlinear inequality constraints. If not provided, + the nonlinear inequality constraints are evaluated at `x`. + ceq_val : `numpy.ndarray`, shape (m_nonlinear_eq,), optional + Values of the nonlinear equality constraints. If not provided, + the nonlinear equality constraints are evaluated at `x`. + + Returns + ------- + float + Value of the merit function at `x`. + """ + if fun_val is None or cub_val is None or ceq_val is None: + fun_val, cub_val, ceq_val = self._pb(x, self.penalty) + m_val = fun_val + if self._penalty > 0.0: + c_val = self._pb.violation(x, cub_val=cub_val, ceq_val=ceq_val) + if np.count_nonzero(c_val): + m_val += self._penalty * np.linalg.norm(c_val) + return m_val + + def get_constraint_linearizations(self, x): + """ + Get the linearizations of the constraints at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which the linearizations of the constraints are evaluated. + + Returns + ------- + `numpy.ndarray`, shape (m_linear_ub + m_nonlinear_ub, n) + Left-hand side matrix of the linearized inequality constraints. + `numpy.ndarray`, shape (m_linear_ub + m_nonlinear_ub,) + Right-hand side vector of the linearized inequality constraints. 
+ `numpy.ndarray`, shape (m_linear_eq + m_nonlinear_eq, n) + Left-hand side matrix of the linearized equality constraints. + `numpy.ndarray`, shape (m_linear_eq + m_nonlinear_eq,) + Right-hand side vector of the linearized equality constraints. + """ + aub = np.block( + [ + [self._pb.linear.a_ub], + [self.models.cub_grad(x)], + ] + ) + bub = np.block( + [ + self._pb.linear.b_ub - self._pb.linear.a_ub @ x, + -self.models.cub(x), + ] + ) + aeq = np.block( + [ + [self._pb.linear.a_eq], + [self.models.ceq_grad(x)], + ] + ) + beq = np.block( + [ + self._pb.linear.b_eq - self._pb.linear.a_eq @ x, + -self.models.ceq(x), + ] + ) + return aub, bub, aeq, beq + + def get_trust_region_step(self, options): + """ + Get the trust-region step. + + The trust-region step is computed by solving the derivative-free + trust-region SQP subproblem using a Byrd-Omojokun composite-step + approach. For more details, see Section 5.2.3 of [1]_. + + Parameters + ---------- + options : dict + Options of the solver. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Normal step. + `numpy.ndarray`, shape (n,) + Tangential step. + + References + ---------- + .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization + Methods and Software*. PhD thesis, Department of Applied + Mathematics, The Hong Kong Polytechnic University, Hong Kong, China, + 2022. URL: https://theses.lib.polyu.edu.hk/handle/200/12294. + """ + # Evaluate the linearizations of the constraints. + aub, bub, aeq, beq = self.get_constraint_linearizations(self.x_best) + xl = self._pb.bounds.xl - self.x_best + xu = self._pb.bounds.xu - self.x_best + + # Evaluate the normal step. 
+ radius = self._constants[Constants.BYRD_OMOJOKUN_FACTOR] * self.radius + normal_step = normal_byrd_omojokun( + aub, + bub, + aeq, + beq, + xl, + xu, + radius, + options[Options.DEBUG], + **self._constants, + ) + if options[Options.DEBUG]: + tol = get_arrays_tol(xl, xu) + if (np.any(normal_step + tol < xl) + or np.any(xu < normal_step - tol)): + warnings.warn( + "the normal step does not respect the bound constraint.", + RuntimeWarning, + 2, + ) + if np.linalg.norm(normal_step) > 1.1 * radius: + warnings.warn( + "the normal step does not respect the trust-region " + "constraint.", + RuntimeWarning, + 2, + ) + + # Evaluate the tangential step. + radius = np.sqrt(self.radius**2.0 - normal_step @ normal_step) + xl -= normal_step + xu -= normal_step + bub = np.maximum(bub - aub @ normal_step, 0.0) + g_best = self.models.fun_grad(self.x_best) + self.lag_model_hess_prod( + normal_step + ) + if self._pb.type in ["unconstrained", "bound-constrained"]: + tangential_step = tangential_byrd_omojokun( + g_best, + self.lag_model_hess_prod, + xl, + xu, + radius, + options[Options.DEBUG], + **self._constants, + ) + else: + tangential_step = constrained_tangential_byrd_omojokun( + g_best, + self.lag_model_hess_prod, + xl, + xu, + aub, + bub, + aeq, + radius, + options["debug"], + **self._constants, + ) + if options[Options.DEBUG]: + tol = get_arrays_tol(xl, xu) + if np.any(tangential_step + tol < xl) or np.any( + xu < tangential_step - tol + ): + warnings.warn( + "The tangential step does not respect the bound " + "constraints.", + RuntimeWarning, + 2, + ) + if ( + np.linalg.norm(normal_step + tangential_step) + > 1.1 * np.sqrt(2.0) * self.radius + ): + warnings.warn( + "The trial step does not respect the trust-region " + "constraint.", + RuntimeWarning, + 2, + ) + return normal_step, tangential_step + + def get_geometry_step(self, k_new, options): + """ + Get the geometry-improving step. + + Three different geometry-improving steps are computed and the best one + is returned. 
For more details, see Section 5.2.7 of [1]_. + + Parameters + ---------- + k_new : int + Index of the interpolation point to be modified. + options : dict + Options of the solver. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Geometry-improving step. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the computation of a determinant fails. + + References + ---------- + .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization + Methods and Software*. PhD thesis, Department of Applied + Mathematics, The Hong Kong Polytechnic University, Hong Kong, China, + 2022. URL: https://theses.lib.polyu.edu.hk/handle/200/12294. + """ + if options[Options.DEBUG]: + assert ( + k_new != self.best_index + ), "The index `k_new` must be different from the best index." + + # Build the k_new-th Lagrange polynomial. + coord_vec = np.squeeze(np.eye(1, self.models.npt, k_new)) + lag = Quadratic( + self.models.interpolation, + coord_vec, + options[Options.DEBUG], + ) + g_lag = lag.grad(self.x_best, self.models.interpolation) + + # Compute a simple constrained Cauchy step. + xl = self._pb.bounds.xl - self.x_best + xu = self._pb.bounds.xu - self.x_best + step = cauchy_geometry( + 0.0, + g_lag, + lambda v: lag.curv(v, self.models.interpolation), + xl, + xu, + self.radius, + options[Options.DEBUG], + ) + sigma = self.models.determinants(self.x_best + step, k_new) + + # Compute the solution on the straight lines joining the interpolation + # points to the k-th one, and choose it if it provides a larger value + # of the determinant of the interpolation system in absolute value. 
+ xpt = ( + self.models.interpolation.xpt + - self.models.interpolation.xpt[:, self.best_index, np.newaxis] + ) + xpt[:, [0, self.best_index]] = xpt[:, [self.best_index, 0]] + step_alt = spider_geometry( + 0.0, + g_lag, + lambda v: lag.curv(v, self.models.interpolation), + xpt[:, 1:], + xl, + xu, + self.radius, + options[Options.DEBUG], + ) + sigma_alt = self.models.determinants(self.x_best + step_alt, k_new) + if abs(sigma_alt) > abs(sigma): + step = step_alt + sigma = sigma_alt + + # Compute a Cauchy step on the tangent space of the active constraints. + if self._pb.type in [ + "linearly constrained", + "nonlinearly constrained", + ]: + aub, bub, aeq, beq = ( + self.get_constraint_linearizations(self.x_best)) + tol_bd = get_arrays_tol(xl, xu) + tol_ub = get_arrays_tol(bub) + free_xl = xl <= -tol_bd + free_xu = xu >= tol_bd + free_ub = bub >= tol_ub + + # Compute the Cauchy step. + n_act, q = qr_tangential_byrd_omojokun( + aub, + aeq, + free_xl, + free_xu, + free_ub, + ) + g_lag_proj = q[:, n_act:] @ (q[:, n_act:].T @ g_lag) + norm_g_lag_proj = np.linalg.norm(g_lag_proj) + if 0 < n_act < self._pb.n and norm_g_lag_proj > TINY * self.radius: + step_alt = (self.radius / norm_g_lag_proj) * g_lag_proj + if lag.curv(step_alt, self.models.interpolation) < 0.0: + step_alt = -step_alt + + # Evaluate the constraint violation at the Cauchy step. + cbd = np.block([xl - step_alt, step_alt - xu]) + cub = aub @ step_alt - bub + ceq = aeq @ step_alt - beq + maxcv_val = max( + np.max(array, initial=0.0) + for array in [cbd, cub, np.abs(ceq)] + ) + + # Accept the new step if it is nearly feasible and do not + # drastically worsen the determinant of the interpolation + # system in absolute value. 
+ tol = np.max(np.abs(step_alt[~free_xl]), initial=0.0) + tol = np.max(np.abs(step_alt[~free_xu]), initial=tol) + tol = np.max(np.abs(aub[~free_ub, :] @ step_alt), initial=tol) + tol = min(10.0 * tol, 1e-2 * np.linalg.norm(step_alt)) + if maxcv_val <= tol: + sigma_alt = self.models.determinants( + self.x_best + step_alt, k_new + ) + if abs(sigma_alt) >= 0.1 * abs(sigma): + step = np.clip(step_alt, xl, xu) + + if options[Options.DEBUG]: + tol = get_arrays_tol(xl, xu) + if np.any(step + tol < xl) or np.any(xu < step - tol): + warnings.warn( + "The geometry step does not respect the bound " + "constraints.", + RuntimeWarning, + 2, + ) + if np.linalg.norm(step) > 1.1 * self.radius: + warnings.warn( + "The geometry step does not respect the " + "trust-region constraint.", + RuntimeWarning, + 2, + ) + return step + + def get_second_order_correction_step(self, step, options): + """ + Get the second-order correction step. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Trust-region step. + options : dict + Options of the solver. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Second-order correction step. + """ + # Evaluate the linearizations of the constraints. 
+ aub, bub, aeq, beq = self.get_constraint_linearizations(self.x_best) + xl = self._pb.bounds.xl - self.x_best + xu = self._pb.bounds.xu - self.x_best + radius = np.linalg.norm(step) + soc_step = normal_byrd_omojokun( + aub, + bub, + aeq, + beq, + xl, + xu, + radius, + options[Options.DEBUG], + **self._constants, + ) + if options[Options.DEBUG]: + tol = get_arrays_tol(xl, xu) + if np.any(soc_step + tol < xl) or np.any(xu < soc_step - tol): + warnings.warn( + "The second-order correction step does not " + "respect the bound constraints.", + RuntimeWarning, + 2, + ) + if np.linalg.norm(soc_step) > 1.1 * radius: + warnings.warn( + "The second-order correction step does not " + "respect the trust-region constraint.", + RuntimeWarning, + 2, + ) + return soc_step + + def get_reduction_ratio(self, step, fun_val, cub_val, ceq_val): + """ + Get the reduction ratio. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Trust-region step. + fun_val : float + Objective function value at the trial point. + cub_val : `numpy.ndarray`, shape (m_nonlinear_ub,) + Nonlinear inequality constraint values at the trial point. + ceq_val : `numpy.ndarray`, shape (m_nonlinear_eq,) + Nonlinear equality constraint values at the trial point. + + Returns + ------- + float + Reduction ratio. + """ + merit_old = self.merit( + self.x_best, + self.fun_best, + self.cub_best, + self.ceq_best, + ) + merit_new = self.merit(self.x_best + step, fun_val, cub_val, ceq_val) + merit_model_old = self.merit( + self.x_best, + 0.0, + self.models.cub(self.x_best), + self.models.ceq(self.x_best), + ) + merit_model_new = self.merit( + self.x_best + step, + self.sqp_fun(step), + self.sqp_cub(step), + self.sqp_ceq(step), + ) + if abs(merit_model_old - merit_model_new) > TINY * abs( + merit_old - merit_new + ): + return (merit_old - merit_new) / abs( + merit_model_old - merit_model_new + ) + else: + return -1.0 + + def increase_penalty(self, step): + """ + Increase the penalty parameter. 
+ + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Trust-region step. + """ + aub, bub, aeq, beq = self.get_constraint_linearizations(self.x_best) + viol_diff = max( + np.linalg.norm( + np.block( + [ + np.maximum(0.0, -bub), + beq, + ] + ) + ) + - np.linalg.norm( + np.block( + [ + np.maximum(0.0, aub @ step - bub), + aeq @ step - beq, + ] + ) + ), + 0.0, + ) + sqp_val = self.sqp_fun(step) + + threshold = np.linalg.norm( + np.block( + [ + self._lm_linear_ub, + self._lm_linear_eq, + self._lm_nonlinear_ub, + self._lm_nonlinear_eq, + ] + ) + ) + if abs(viol_diff) > TINY * abs(sqp_val): + threshold = max(threshold, sqp_val / viol_diff) + best_index_save = self.best_index + if ( + self._penalty + <= self._constants[Constants.PENALTY_INCREASE_THRESHOLD] + * threshold + ): + self._penalty = max( + self._constants[Constants.PENALTY_INCREASE_FACTOR] * threshold, + 1.0, + ) + self.set_best_index() + return best_index_save == self.best_index + + def decrease_penalty(self): + """ + Decrease the penalty parameter. + """ + self._penalty = min(self._penalty, self._get_low_penalty()) + self.set_best_index() + + def set_best_index(self): + """ + Set the index of the best point. 
+ """ + best_index = self.best_index + m_best = self.merit( + self.x_best, + self.models.fun_val[best_index], + self.models.cub_val[best_index, :], + self.models.ceq_val[best_index, :], + ) + r_best = self._pb.maxcv( + self.x_best, + self.models.cub_val[best_index, :], + self.models.ceq_val[best_index, :], + ) + tol = ( + 10.0 + * EPS + * max(self.models.n, self.models.npt) + * max(abs(m_best), 1.0) + ) + for k in range(self.models.npt): + if k != self.best_index: + x_val = self.models.interpolation.point(k) + m_val = self.merit( + x_val, + self.models.fun_val[k], + self.models.cub_val[k, :], + self.models.ceq_val[k, :], + ) + r_val = self._pb.maxcv( + x_val, + self.models.cub_val[k, :], + self.models.ceq_val[k, :], + ) + if m_val < m_best or (m_val < m_best + tol and r_val < r_best): + best_index = k + m_best = m_val + r_best = r_val + self._best_index = best_index + + def get_index_to_remove(self, x_new=None): + """ + Get the index of the interpolation point to remove. + + If `x_new` is not provided, the index returned should be used during + the geometry-improvement phase. Otherwise, the index returned is the + best index for included `x_new` in the interpolation set. + + Parameters + ---------- + x_new : `numpy.ndarray`, shape (n,), optional + New point to be included in the interpolation set. + + Returns + ------- + int + Index of the interpolation point to remove. + float + Distance between `x_best` and the removed point. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the computation of a determinant fails. 
+ """ + dist_sq = np.sum( + ( + self.models.interpolation.xpt + - self.models.interpolation.xpt[:, self.best_index, np.newaxis] + ) + ** 2.0, + axis=0, + ) + if x_new is None: + sigma = 1.0 + weights = dist_sq + else: + sigma = self.models.determinants(x_new) + weights = ( + np.maximum( + 1.0, + dist_sq + / max( + self._constants[Constants.LOW_RADIUS_FACTOR] + * self.radius, + self.resolution, + ) + ** 2.0, + ) + ** 3.0 + ) + weights[self.best_index] = -1.0 # do not remove the best point + k_max = np.argmax(weights * np.abs(sigma)) + return k_max, np.sqrt(dist_sq[k_max]) + + def update_radius(self, step, ratio): + """ + Update the trust-region radius. + + Parameters + ---------- + step : `numpy.ndarray`, shape (n,) + Trust-region step. + ratio : float + Reduction ratio. + """ + s_norm = np.linalg.norm(step) + if ratio <= self._constants[Constants.LOW_RATIO]: + self.radius *= self._constants[Constants.DECREASE_RADIUS_FACTOR] + elif ratio <= self._constants[Constants.HIGH_RATIO]: + self.radius = max( + self._constants[Constants.DECREASE_RADIUS_FACTOR] + * self.radius, + s_norm, + ) + else: + self.radius = min( + self._constants[Constants.INCREASE_RADIUS_FACTOR] + * self.radius, + max( + self._constants[Constants.DECREASE_RADIUS_FACTOR] + * self.radius, + self._constants[Constants.INCREASE_RADIUS_THRESHOLD] + * s_norm, + ), + ) + + def enhance_resolution(self, options): + """ + Enhance the resolution of the trust-region framework. + + Parameters + ---------- + options : dict + Options of the solver. 
+ """ + if ( + self._constants[Constants.LARGE_RESOLUTION_THRESHOLD] + * options[Options.RHOEND] + < self.resolution + ): + self.resolution *= self._constants[ + Constants.DECREASE_RESOLUTION_FACTOR + ] + elif ( + self._constants[Constants.MODERATE_RESOLUTION_THRESHOLD] + * options[Options.RHOEND] + < self.resolution + ): + self.resolution = np.sqrt(self.resolution + * options[Options.RHOEND]) + else: + self.resolution = options[Options.RHOEND] + + # Reduce the trust-region radius. + self._radius = max( + self._constants[Constants.DECREASE_RADIUS_FACTOR] * self._radius, + self.resolution, + ) + + def shift_x_base(self, options): + """ + Shift the base point to `x_best`. + + Parameters + ---------- + options : dict + Options of the solver. + """ + self.models.shift_x_base(np.copy(self.x_best), options) + + def set_multipliers(self, x): + """ + Set the Lagrange multipliers. + + This method computes and set the Lagrange multipliers of the linear and + nonlinear constraints to be the QP multipliers. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which the Lagrange multipliers are computed. + """ + # Build the constraints of the least-squares problem. + incl_linear_ub = self._pb.linear.a_ub @ x >= self._pb.linear.b_ub + incl_nonlinear_ub = self.cub_best >= 0.0 + incl_xl = self._pb.bounds.xl >= x + incl_xu = self._pb.bounds.xu <= x + m_linear_ub = np.count_nonzero(incl_linear_ub) + m_nonlinear_ub = np.count_nonzero(incl_nonlinear_ub) + m_xl = np.count_nonzero(incl_xl) + m_xu = np.count_nonzero(incl_xu) + + if ( + m_linear_ub + m_nonlinear_ub + self.m_linear_eq + + self.m_nonlinear_eq > 0 + ): + identity = np.eye(self._pb.n) + c_jac = np.r_[ + -identity[incl_xl, :], + identity[incl_xu, :], + self._pb.linear.a_ub[incl_linear_ub, :], + self.models.cub_grad(x, incl_nonlinear_ub), + self._pb.linear.a_eq, + self.models.ceq_grad(x), + ] + + # Solve the least-squares problem. 
+ g_best = self.models.fun_grad(x) + xl_lm = np.full(c_jac.shape[0], -np.inf) + xl_lm[: m_xl + m_xu + m_linear_ub + m_nonlinear_ub] = 0.0 + res = lsq_linear( + c_jac.T, + -g_best, + bounds=(xl_lm, np.inf), + method="bvls", + ) + + # Extract the Lagrange multipliers. + self._lm_linear_ub[incl_linear_ub] = res.x[ + m_xl + m_xu:m_xl + m_xu + m_linear_ub + ] + self._lm_linear_ub[~incl_linear_ub] = 0.0 + self._lm_nonlinear_ub[incl_nonlinear_ub] = res.x[ + m_xl + + m_xu + + m_linear_ub:m_xl + + m_xu + + m_linear_ub + + m_nonlinear_ub + ] + self._lm_nonlinear_ub[~incl_nonlinear_ub] = 0.0 + self._lm_linear_eq[:] = res.x[ + m_xl + + m_xu + + m_linear_ub + + m_nonlinear_ub:m_xl + + m_xu + + m_linear_ub + + m_nonlinear_ub + + self.m_linear_eq + ] + self._lm_nonlinear_eq[:] = res.x[ + m_xl + m_xu + m_linear_ub + m_nonlinear_ub + self.m_linear_eq: + ] + + def _get_low_penalty(self): + r_val_ub = np.c_[ + ( + self.models.interpolation.x_base[np.newaxis, :] + + self.models.interpolation.xpt.T + ) + @ self._pb.linear.a_ub.T + - self._pb.linear.b_ub[np.newaxis, :], + self.models.cub_val, + ] + r_val_eq = ( + self.models.interpolation.x_base[np.newaxis, :] + + self.models.interpolation.xpt.T + ) @ self._pb.linear.a_eq.T - self._pb.linear.b_eq[np.newaxis, :] + r_val_eq = np.block( + [ + r_val_eq, + -r_val_eq, + self.models.ceq_val, + -self.models.ceq_val, + ] + ) + r_val = np.block([r_val_ub, r_val_eq]) + c_min = np.nanmin(r_val, axis=0) + c_max = np.nanmax(r_val, axis=0) + indices = ( + c_min + < self._constants[Constants.THRESHOLD_RATIO_CONSTRAINTS] * c_max + ) + if np.any(indices): + f_min = np.nanmin(self.models.fun_val) + f_max = np.nanmax(self.models.fun_val) + c_min_neg = np.minimum(0.0, c_min[indices]) + c_diff = np.min(c_max[indices] - c_min_neg) + if c_diff > TINY * (f_max - f_min): + penalty = (f_max - f_min) / c_diff + else: + penalty = np.inf + else: + penalty = 0.0 + return penalty diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/main.py 
b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/main.py new file mode 100644 index 0000000000000000000000000000000000000000..01e5159e0dfebed9a78c6948cb99bfb1d744b6c7 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/main.py @@ -0,0 +1,1506 @@ +import warnings + +import numpy as np +from scipy.optimize import ( + Bounds, + LinearConstraint, + NonlinearConstraint, + OptimizeResult, +) + +from .framework import TrustRegion +from .problem import ( + ObjectiveFunction, + BoundConstraints, + LinearConstraints, + NonlinearConstraints, + Problem, +) +from .utils import ( + MaxEvalError, + TargetSuccess, + CallbackSuccess, + FeasibleSuccess, + exact_1d_array, +) +from .settings import ( + ExitStatus, + Options, + Constants, + DEFAULT_OPTIONS, + DEFAULT_CONSTANTS, + PRINT_OPTIONS, +) + + +def minimize( + fun, + x0, + args=(), + bounds=None, + constraints=(), + callback=None, + options=None, + **kwargs, +): + r""" + Minimize a scalar function using the COBYQA method. + + The Constrained Optimization BY Quadratic Approximations (COBYQA) method is + a derivative-free optimization method designed to solve general nonlinear + optimization problems. A complete description of COBYQA is given in [3]_. + + Parameters + ---------- + fun : {callable, None} + Objective function to be minimized. + + ``fun(x, *args) -> float`` + + where ``x`` is an array with shape (n,) and `args` is a tuple. If `fun` + is ``None``, the objective function is assumed to be the zero function, + resulting in a feasibility problem. + x0 : array_like, shape (n,) + Initial guess. + args : tuple, optional + Extra arguments passed to the objective function. + bounds : {`scipy.optimize.Bounds`, array_like, shape (n, 2)}, optional + Bound constraints of the problem. It can be one of the cases below. + + #. An instance of `scipy.optimize.Bounds`. 
For the time being, the + argument ``keep_feasible`` is disregarded, and all the constraints + are considered unrelaxable and will be enforced. + #. An array with shape (n, 2). The bound constraints for ``x[i]`` are + ``bounds[i][0] <= x[i] <= bounds[i][1]``. Set ``bounds[i][0]`` to + :math:`-\infty` if there is no lower bound, and set ``bounds[i][1]`` + to :math:`\infty` if there is no upper bound. + + The COBYQA method always respect the bound constraints. + constraints : {Constraint, list}, optional + General constraints of the problem. It can be one of the cases below. + + #. An instance of `scipy.optimize.LinearConstraint`. The argument + ``keep_feasible`` is disregarded. + #. An instance of `scipy.optimize.NonlinearConstraint`. The arguments + ``jac``, ``hess``, ``keep_feasible``, ``finite_diff_rel_step``, and + ``finite_diff_jac_sparsity`` are disregarded. + + #. A list, each of whose elements are described in the cases above. + + callback : callable, optional + A callback executed at each objective function evaluation. The method + terminates if a ``StopIteration`` exception is raised by the callback + function. Its signature can be one of the following: + + ``callback(intermediate_result)`` + + where ``intermediate_result`` is a keyword parameter that contains an + instance of `scipy.optimize.OptimizeResult`, with attributes ``x`` + and ``fun``, being the point at which the objective function is + evaluated and the value of the objective function, respectively. The + name of the parameter must be ``intermediate_result`` for the callback + to be passed an instance of `scipy.optimize.OptimizeResult`. + + Alternatively, the callback function can have the signature: + + ``callback(xk)`` + + where ``xk`` is the point at which the objective function is evaluated. + Introspection is used to determine which of the signatures to invoke. + options : dict, optional + Options passed to the solver. 
Accepted keys are: + + disp : bool, optional + Whether to print information about the optimization procedure. + Default is ``False``. + maxfev : int, optional + Maximum number of function evaluations. Default is ``500 * n``. + maxiter : int, optional + Maximum number of iterations. Default is ``1000 * n``. + target : float, optional + Target on the objective function value. The optimization + procedure is terminated when the objective function value of a + feasible point is less than or equal to this target. Default is + ``-numpy.inf``. + feasibility_tol : float, optional + Tolerance on the constraint violation. If the maximum + constraint violation at a point is less than or equal to this + tolerance, the point is considered feasible. Default is + ``numpy.sqrt(numpy.finfo(float).eps)``. + radius_init : float, optional + Initial trust-region radius. Typically, this value should be in + the order of one tenth of the greatest expected change to `x0`. + Default is ``1.0``. + radius_final : float, optional + Final trust-region radius. It should indicate the accuracy + required in the final values of the variables. Default is + ``1e-6``. + nb_points : int, optional + Number of interpolation points used to build the quadratic + models of the objective and constraint functions. Default is + ``2 * n + 1``. + scale : bool, optional + Whether to scale the variables according to the bounds. Default + is ``False``. + filter_size : int, optional + Maximum number of points in the filter. The filter is used to + select the best point returned by the optimization procedure. + Default is ``sys.maxsize``. + store_history : bool, optional + Whether to store the history of the function evaluations. + Default is ``False``. + history_size : int, optional + Maximum number of function evaluations to store in the history. + Default is ``sys.maxsize``. + debug : bool, optional + Whether to perform additional checks during the optimization + procedure. 
This option should be used only for debugging + purposes and is highly discouraged to general users. Default is + ``False``. + + Other constants (from the keyword arguments) are described below. They + are not intended to be changed by general users. They should only be + changed by users with a deep understanding of the algorithm, who want + to experiment with different settings. + + Returns + ------- + `scipy.optimize.OptimizeResult` + Result of the optimization procedure, with the following fields: + + message : str + Description of the cause of the termination. + success : bool + Whether the optimization procedure terminated successfully. + status : int + Termination status of the optimization procedure. + x : `numpy.ndarray`, shape (n,) + Solution point. + fun : float + Objective function value at the solution point. + maxcv : float + Maximum constraint violation at the solution point. + nfev : int + Number of function evaluations. + nit : int + Number of iterations. + + If ``store_history`` is True, the result also has the following fields: + + fun_history : `numpy.ndarray`, shape (nfev,) + History of the objective function values. + maxcv_history : `numpy.ndarray`, shape (nfev,) + History of the maximum constraint violations. + + A description of the termination statuses is given below. + + .. list-table:: + :widths: 25 75 + :header-rows: 1 + + * - Exit status + - Description + * - 0 + - The lower bound for the trust-region radius has been reached. + * - 1 + - The target objective function value has been reached. + * - 2 + - All variables are fixed by the bound constraints. + * - 3 + - The callback requested to stop the optimization procedure. + * - 4 + - The feasibility problem received has been solved successfully. + * - 5 + - The maximum number of function evaluations has been exceeded. + * - 6 + - The maximum number of iterations has been exceeded. + * - -1 + - The bound constraints are infeasible. + * - -2 + - A linear algebra error occurred. 
+ + Other Parameters + ---------------- + decrease_radius_factor : float, optional + Factor by which the trust-region radius is reduced when the reduction + ratio is low or negative. Default is ``0.5``. + increase_radius_factor : float, optional + Factor by which the trust-region radius is increased when the reduction + ratio is large. Default is ``numpy.sqrt(2.0)``. + increase_radius_threshold : float, optional + Threshold that controls the increase of the trust-region radius when + the reduction ratio is large. Default is ``2.0``. + decrease_radius_threshold : float, optional + Threshold used to determine whether the trust-region radius should be + reduced to the resolution. Default is ``1.4``. + decrease_resolution_factor : float, optional + Factor by which the resolution is reduced when the current value is far + from its final value. Default is ``0.1``. + large_resolution_threshold : float, optional + Threshold used to determine whether the resolution is far from its + final value. Default is ``250.0``. + moderate_resolution_threshold : float, optional + Threshold used to determine whether the resolution is close to its + final value. Default is ``16.0``. + low_ratio : float, optional + Threshold used to determine whether the reduction ratio is low. Default + is ``0.1``. + high_ratio : float, optional + Threshold used to determine whether the reduction ratio is high. + Default is ``0.7``. + very_low_ratio : float, optional + Threshold used to determine whether the reduction ratio is very low. + This is used to determine whether the models should be reset. Default + is ``0.01``. + penalty_increase_threshold : float, optional + Threshold used to determine whether the penalty parameter should be + increased. Default is ``1.5``. + penalty_increase_factor : float, optional + Factor by which the penalty parameter is increased. Default is ``2.0``. + short_step_threshold : float, optional + Factor used to determine whether the trial step is too short. 
Default + is ``0.5``. + low_radius_factor : float, optional + Factor used to determine which interpolation point should be removed + from the interpolation set at each iteration. Default is ``0.1``. + byrd_omojokun_factor : float, optional + Factor by which the trust-region radius is reduced for the computations + of the normal step in the Byrd-Omojokun composite-step approach. + Default is ``0.8``. + threshold_ratio_constraints : float, optional + Threshold used to determine which constraints should be taken into + account when decreasing the penalty parameter. Default is ``2.0``. + large_shift_factor : float, optional + Factor used to determine whether the point around which the quadratic + models are built should be updated. Default is ``10.0``. + large_gradient_factor : float, optional + Factor used to determine whether the models should be reset. Default is + ``10.0``. + resolution_factor : float, optional + Factor by which the resolution is decreased. Default is ``2.0``. + improve_tcg : bool, optional + Whether to improve the steps computed by the truncated conjugate + gradient method when the trust-region boundary is reached. Default is + ``True``. + + References + ---------- + .. [1] J. Nocedal and S. J. Wright. *Numerical Optimization*. Springer Ser. + Oper. Res. Financ. Eng. Springer, New York, NY, USA, second edition, + 2006. `doi:10.1007/978-0-387-40065-5 + `_. + .. [2] M. J. D. Powell. A direct search optimization method that models the + objective and constraint functions by linear interpolation. In S. Gomez + and J.-P. Hennart, editors, *Advances in Optimization and Numerical + Analysis*, volume 275 of Math. Appl., pages 51--67. Springer, Dordrecht, + Netherlands, 1994. `doi:10.1007/978-94-015-8330-5_4 + `_. + .. [3] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods + and Software*. PhD thesis, Department of Applied Mathematics, The Hong + Kong Polytechnic University, Hong Kong, China, 2022. 
URL: + https://theses.lib.polyu.edu.hk/handle/200/12294. + + Examples + -------- + To demonstrate how to use `minimize`, we first minimize the Rosenbrock + function implemented in `scipy.optimize` in an unconstrained setting. + + .. testsetup:: + + import numpy as np + np.set_printoptions(precision=3, suppress=True) + + >>> from cobyqa import minimize + >>> from scipy.optimize import rosen + + To solve the problem using COBYQA, run: + + >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2] + >>> res = minimize(rosen, x0) + >>> res.x + array([1., 1., 1., 1., 1.]) + + To see how bound and constraints are handled using `minimize`, we solve + Example 16.4 of [1]_, defined as + + .. math:: + + \begin{aligned} + \min_{x \in \mathbb{R}^2} & \quad (x_1 - 1)^2 + (x_2 - 2.5)^2\\ + \text{s.t.} & \quad -x_1 + 2x_2 \le 2,\\ + & \quad x_1 + 2x_2 \le 6,\\ + & \quad x_1 - 2x_2 \le 2,\\ + & \quad x_1 \ge 0,\\ + & \quad x_2 \ge 0. + \end{aligned} + + >>> import numpy as np + >>> from scipy.optimize import Bounds, LinearConstraint + + Its objective function can be implemented as: + + >>> def fun(x): + ... return (x[0] - 1.0)**2 + (x[1] - 2.5)**2 + + This problem can be solved using `minimize` as: + + >>> x0 = [2.0, 0.0] + >>> bounds = Bounds([0.0, 0.0], np.inf) + >>> constraints = LinearConstraint([ + ... [-1.0, 2.0], + ... [1.0, 2.0], + ... [1.0, -2.0], + ... ], -np.inf, [2.0, 6.0, 2.0]) + >>> res = minimize(fun, x0, bounds=bounds, constraints=constraints) + >>> res.x + array([1.4, 1.7]) + + To see how nonlinear constraints are handled, we solve Problem (F) of [2]_, + defined as + + .. math:: + + \begin{aligned} + \min_{x \in \mathbb{R}^2} & \quad -x_1 - x_2\\ + \text{s.t.} & \quad x_1^2 - x_2 \le 0,\\ + & \quad x_1^2 + x_2^2 \le 1. + \end{aligned} + + >>> from scipy.optimize import NonlinearConstraint + + Its objective and constraint functions can be implemented as: + + >>> def fun(x): + ... return -x[0] - x[1] + >>> + >>> def cub(x): + ... 
return [x[0]**2 - x[1], x[0]**2 + x[1]**2] + + This problem can be solved using `minimize` as: + + >>> x0 = [1.0, 1.0] + >>> constraints = NonlinearConstraint(cub, -np.inf, [0.0, 1.0]) + >>> res = minimize(fun, x0, constraints=constraints) + >>> res.x + array([0.707, 0.707]) + + Finally, to see how to supply linear and nonlinear constraints + simultaneously, we solve Problem (G) of [2]_, defined as + + .. math:: + + \begin{aligned} + \min_{x \in \mathbb{R}^3} & \quad x_3\\ + \text{s.t.} & \quad 5x_1 - x_2 + x_3 \ge 0,\\ + & \quad -5x_1 - x_2 + x_3 \ge 0,\\ + & \quad x_1^2 + x_2^2 + 4x_2 \le x_3. + \end{aligned} + + Its objective and nonlinear constraint functions can be implemented as: + + >>> def fun(x): + ... return x[2] + >>> + >>> def cub(x): + ... return x[0]**2 + x[1]**2 + 4.0*x[1] - x[2] + + This problem can be solved using `minimize` as: + + >>> x0 = [1.0, 1.0, 1.0] + >>> constraints = [ + ... LinearConstraint( + ... [[5.0, -1.0, 1.0], [-5.0, -1.0, 1.0]], + ... [0.0, 0.0], + ... np.inf, + ... ), + ... NonlinearConstraint(cub, -np.inf, 0.0), + ... ] + >>> res = minimize(fun, x0, constraints=constraints) + >>> res.x + array([ 0., -3., -3.]) + """ + # Get basic options that are needed for the initialization. 
+ if options is None: + options = {} + else: + options = dict(options) + verbose = options.get(Options.VERBOSE, DEFAULT_OPTIONS[Options.VERBOSE]) + verbose = bool(verbose) + feasibility_tol = options.get( + Options.FEASIBILITY_TOL, + DEFAULT_OPTIONS[Options.FEASIBILITY_TOL], + ) + feasibility_tol = float(feasibility_tol) + scale = options.get(Options.SCALE, DEFAULT_OPTIONS[Options.SCALE]) + scale = bool(scale) + store_history = options.get( + Options.STORE_HISTORY, + DEFAULT_OPTIONS[Options.STORE_HISTORY], + ) + store_history = bool(store_history) + if Options.HISTORY_SIZE in options and options[Options.HISTORY_SIZE] <= 0: + raise ValueError("The size of the history must be positive.") + history_size = options.get( + Options.HISTORY_SIZE, + DEFAULT_OPTIONS[Options.HISTORY_SIZE], + ) + history_size = int(history_size) + if Options.FILTER_SIZE in options and options[Options.FILTER_SIZE] <= 0: + raise ValueError("The size of the filter must be positive.") + filter_size = options.get( + Options.FILTER_SIZE, + DEFAULT_OPTIONS[Options.FILTER_SIZE], + ) + filter_size = int(filter_size) + debug = options.get(Options.DEBUG, DEFAULT_OPTIONS[Options.DEBUG]) + debug = bool(debug) + + # Initialize the objective function. + if not isinstance(args, tuple): + args = (args,) + obj = ObjectiveFunction(fun, verbose, debug, *args) + + # Initialize the bound constraints. + if not hasattr(x0, "__len__"): + x0 = [x0] + n_orig = len(x0) + bounds = BoundConstraints(_get_bounds(bounds, n_orig)) + + # Initialize the constraints. + linear_constraints, nonlinear_constraints = _get_constraints(constraints) + linear = LinearConstraints(linear_constraints, n_orig, debug) + nonlinear = NonlinearConstraints(nonlinear_constraints, verbose, debug) + + # Initialize the problem (and remove the fixed variables). + pb = Problem( + obj, + x0, + bounds, + linear, + nonlinear, + callback, + feasibility_tol, + scale, + store_history, + history_size, + filter_size, + debug, + ) + + # Set the default options. 
+ _set_default_options(options, pb.n) + constants = _set_default_constants(**kwargs) + + # Initialize the models and skip the computations whenever possible. + if not pb.bounds.is_feasible: + # The bound constraints are infeasible. + return _build_result( + pb, + 0.0, + False, + ExitStatus.INFEASIBLE_ERROR, + 0, + options, + ) + elif pb.n == 0: + # All variables are fixed by the bound constraints. + return _build_result( + pb, + 0.0, + True, + ExitStatus.FIXED_SUCCESS, + 0, + options, + ) + if verbose: + print("Starting the optimization procedure.") + print(f"Initial trust-region radius: {options[Options.RHOBEG]}.") + print(f"Final trust-region radius: {options[Options.RHOEND]}.") + print( + f"Maximum number of function evaluations: " + f"{options[Options.MAX_EVAL]}." + ) + print(f"Maximum number of iterations: {options[Options.MAX_ITER]}.") + print() + try: + framework = TrustRegion(pb, options, constants) + except TargetSuccess: + # The target on the objective function value has been reached + return _build_result( + pb, + 0.0, + True, + ExitStatus.TARGET_SUCCESS, + 0, + options, + ) + except CallbackSuccess: + # The callback raised a StopIteration exception. + return _build_result( + pb, + 0.0, + True, + ExitStatus.CALLBACK_SUCCESS, + 0, + options, + ) + except FeasibleSuccess: + # The feasibility problem has been solved successfully. + return _build_result( + pb, + 0.0, + True, + ExitStatus.FEASIBLE_SUCCESS, + 0, + options, + ) + except MaxEvalError: + # The maximum number of function evaluations has been exceeded. + return _build_result( + pb, + 0.0, + False, + ExitStatus.MAX_ITER_WARNING, + 0, + options, + ) + except np.linalg.LinAlgError: + # The construction of the initial interpolation set failed. + return _build_result( + pb, + 0.0, + False, + ExitStatus.LINALG_ERROR, + 0, + options, + ) + + # Start the optimization procedure. 
+ success = False + n_iter = 0 + k_new = None + n_short_steps = 0 + n_very_short_steps = 0 + n_alt_models = 0 + while True: + # Stop the optimization procedure if the maximum number of iterations + # has been exceeded. We do not write the main loop as a for loop + # because we want to access the number of iterations outside the loop. + if n_iter >= options[Options.MAX_ITER]: + status = ExitStatus.MAX_ITER_WARNING + break + n_iter += 1 + + # Update the point around which the quadratic models are built. + if ( + np.linalg.norm( + framework.x_best - framework.models.interpolation.x_base + ) + >= constants[Constants.LARGE_SHIFT_FACTOR] * framework.radius + ): + framework.shift_x_base(options) + + # Evaluate the trial step. + radius_save = framework.radius + normal_step, tangential_step = framework.get_trust_region_step(options) + step = normal_step + tangential_step + s_norm = np.linalg.norm(step) + + # If the trial step is too short, we do not attempt to evaluate the + # objective and constraint functions. Instead, we reduce the + # trust-region radius and check whether the resolution should be + # enhanced and whether the geometry of the interpolation set should be + # improved. Otherwise, we entertain a classical iteration. The + # criterion for performing an exceptional jump is taken from NEWUOA. 
+ if ( + s_norm + <= constants[Constants.SHORT_STEP_THRESHOLD] * framework.resolution + ): + framework.radius *= constants[Constants.DECREASE_RESOLUTION_FACTOR] + if radius_save > framework.resolution: + n_short_steps = 0 + n_very_short_steps = 0 + else: + n_short_steps += 1 + n_very_short_steps += 1 + if s_norm > 0.1 * framework.resolution: + n_very_short_steps = 0 + enhance_resolution = n_short_steps >= 5 or n_very_short_steps >= 3 + if enhance_resolution: + n_short_steps = 0 + n_very_short_steps = 0 + improve_geometry = False + else: + try: + k_new, dist_new = framework.get_index_to_remove() + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + improve_geometry = dist_new > max( + framework.radius, + constants[Constants.RESOLUTION_FACTOR] + * framework.resolution, + ) + else: + # Increase the penalty parameter if necessary. + same_best_point = framework.increase_penalty(step) + if same_best_point: + # Evaluate the objective and constraint functions. + try: + fun_val, cub_val, ceq_val = _eval( + pb, + framework, + step, + options, + ) + except TargetSuccess: + status = ExitStatus.TARGET_SUCCESS + success = True + break + except FeasibleSuccess: + status = ExitStatus.FEASIBLE_SUCCESS + success = True + break + except CallbackSuccess: + status = ExitStatus.CALLBACK_SUCCESS + success = True + break + except MaxEvalError: + status = ExitStatus.MAX_EVAL_WARNING + break + + # Perform a second-order correction step if necessary. 
+ merit_old = framework.merit( + framework.x_best, + framework.fun_best, + framework.cub_best, + framework.ceq_best, + ) + merit_new = framework.merit( + framework.x_best + step, fun_val, cub_val, ceq_val + ) + if ( + pb.type == "nonlinearly constrained" + and merit_new > merit_old + and np.linalg.norm(normal_step) + > constants[Constants.BYRD_OMOJOKUN_FACTOR] ** 2.0 + * framework.radius + ): + soc_step = framework.get_second_order_correction_step( + step, options + ) + if np.linalg.norm(soc_step) > 0.0: + step += soc_step + + # Evaluate the objective and constraint functions. + try: + fun_val, cub_val, ceq_val = _eval( + pb, + framework, + step, + options, + ) + except TargetSuccess: + status = ExitStatus.TARGET_SUCCESS + success = True + break + except FeasibleSuccess: + status = ExitStatus.FEASIBLE_SUCCESS + success = True + break + except CallbackSuccess: + status = ExitStatus.CALLBACK_SUCCESS + success = True + break + except MaxEvalError: + status = ExitStatus.MAX_EVAL_WARNING + break + + # Calculate the reduction ratio. + ratio = framework.get_reduction_ratio( + step, + fun_val, + cub_val, + ceq_val, + ) + + # Choose an interpolation point to remove. + try: + k_new = framework.get_index_to_remove( + framework.x_best + step + )[0] + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + + # Update the interpolation set. + try: + ill_conditioned = framework.models.update_interpolation( + k_new, framework.x_best + step, fun_val, cub_val, + ceq_val + ) + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + framework.set_best_index() + + # Update the trust-region radius. + framework.update_radius(step, ratio) + + # Attempt to replace the models by the alternative ones. 
+ if framework.radius <= framework.resolution: + if ratio >= constants[Constants.VERY_LOW_RATIO]: + n_alt_models = 0 + else: + n_alt_models += 1 + grad = framework.models.fun_grad(framework.x_best) + try: + grad_alt = framework.models.fun_alt_grad( + framework.x_best + ) + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + if np.linalg.norm(grad) < constants[ + Constants.LARGE_GRADIENT_FACTOR + ] * np.linalg.norm(grad_alt): + n_alt_models = 0 + if n_alt_models >= 3: + try: + framework.models.reset_models() + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + n_alt_models = 0 + + # Update the Lagrange multipliers. + framework.set_multipliers(framework.x_best + step) + + # Check whether the resolution should be enhanced. + try: + k_new, dist_new = framework.get_index_to_remove() + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + improve_geometry = ( + ill_conditioned + or ratio <= constants[Constants.LOW_RATIO] + and dist_new + > max( + framework.radius, + constants[Constants.RESOLUTION_FACTOR] + * framework.resolution, + ) + ) + enhance_resolution = ( + radius_save <= framework.resolution + and ratio <= constants[Constants.LOW_RATIO] + and not improve_geometry + ) + else: + # When increasing the penalty parameter, the best point so far + # may change. In this case, we restart the iteration. + enhance_resolution = False + improve_geometry = False + + # Reduce the resolution if necessary. 
+ if enhance_resolution: + if framework.resolution <= options[Options.RHOEND]: + success = True + status = ExitStatus.RADIUS_SUCCESS + break + framework.enhance_resolution(options) + framework.decrease_penalty() + + if verbose: + maxcv_val = pb.maxcv( + framework.x_best, framework.cub_best, framework.ceq_best + ) + _print_step( + f"New trust-region radius: {framework.resolution}", + pb, + pb.build_x(framework.x_best), + framework.fun_best, + maxcv_val, + pb.n_eval, + n_iter, + ) + print() + + # Improve the geometry of the interpolation set if necessary. + if improve_geometry: + try: + step = framework.get_geometry_step(k_new, options) + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + + # Evaluate the objective and constraint functions. + try: + fun_val, cub_val, ceq_val = _eval(pb, framework, step, options) + except TargetSuccess: + status = ExitStatus.TARGET_SUCCESS + success = True + break + except FeasibleSuccess: + status = ExitStatus.FEASIBLE_SUCCESS + success = True + break + except CallbackSuccess: + status = ExitStatus.CALLBACK_SUCCESS + success = True + break + except MaxEvalError: + status = ExitStatus.MAX_EVAL_WARNING + break + + # Update the interpolation set. + try: + framework.models.update_interpolation( + k_new, + framework.x_best + step, + fun_val, + cub_val, + ceq_val, + ) + except np.linalg.LinAlgError: + status = ExitStatus.LINALG_ERROR + break + framework.set_best_index() + + return _build_result( + pb, + framework.penalty, + success, + status, + n_iter, + options, + ) + + +def _get_bounds(bounds, n): + """ + Uniformize the bounds. 
+ """ + if bounds is None: + return Bounds(np.full(n, -np.inf), np.full(n, np.inf)) + elif isinstance(bounds, Bounds): + if bounds.lb.shape != (n,) or bounds.ub.shape != (n,): + raise ValueError(f"The bounds must have {n} elements.") + return Bounds(bounds.lb, bounds.ub) + elif hasattr(bounds, "__len__"): + bounds = np.asarray(bounds) + if bounds.shape != (n, 2): + raise ValueError( + "The shape of the bounds is not compatible with " + "the number of variables." + ) + return Bounds(bounds[:, 0], bounds[:, 1]) + else: + raise TypeError( + "The bounds must be an instance of " + "scipy.optimize.Bounds or an array-like object." + ) + + +def _get_constraints(constraints): + """ + Extract the linear and nonlinear constraints. + """ + if isinstance(constraints, dict) or not hasattr(constraints, "__len__"): + constraints = (constraints,) + + # Extract the linear and nonlinear constraints. + linear_constraints = [] + nonlinear_constraints = [] + for constraint in constraints: + if isinstance(constraint, LinearConstraint): + lb = exact_1d_array( + constraint.lb, + "The lower bound of the linear constraints must be a vector.", + ) + ub = exact_1d_array( + constraint.ub, + "The upper bound of the linear constraints must be a vector.", + ) + linear_constraints.append( + LinearConstraint( + constraint.A, + *np.broadcast_arrays(lb, ub), + ) + ) + elif isinstance(constraint, NonlinearConstraint): + lb = exact_1d_array( + constraint.lb, + "The lower bound of the " + "nonlinear constraints must be a " + "vector.", + ) + ub = exact_1d_array( + constraint.ub, + "The upper bound of the " + "nonlinear constraints must be a " + "vector.", + ) + nonlinear_constraints.append( + NonlinearConstraint( + constraint.fun, + *np.broadcast_arrays(lb, ub), + ) + ) + elif isinstance(constraint, dict): + if "type" not in constraint or constraint["type"] not in ( + "eq", + "ineq", + ): + raise ValueError('The constraint type must be "eq" or "ineq".') + if "fun" not in constraint or not 
callable(constraint["fun"]): + raise ValueError("The constraint function must be callable.") + nonlinear_constraints.append( + { + "fun": constraint["fun"], + "type": constraint["type"], + "args": constraint.get("args", ()), + } + ) + else: + raise TypeError( + "The constraints must be instances of " + "scipy.optimize.LinearConstraint, " + "scipy.optimize.NonlinearConstraint, or dict." + ) + return linear_constraints, nonlinear_constraints + + +def _set_default_options(options, n): + """ + Set the default options. + """ + if Options.RHOBEG in options and options[Options.RHOBEG] <= 0.0: + raise ValueError("The initial trust-region radius must be positive.") + if Options.RHOEND in options and options[Options.RHOEND] < 0.0: + raise ValueError("The final trust-region radius must be nonnegative.") + if Options.RHOBEG in options and Options.RHOEND in options: + if options[Options.RHOBEG] < options[Options.RHOEND]: + raise ValueError( + "The initial trust-region radius must be greater " + "than or equal to the final trust-region radius." + ) + elif Options.RHOBEG in options: + options[Options.RHOEND.value] = np.min( + [ + DEFAULT_OPTIONS[Options.RHOEND], + options[Options.RHOBEG], + ] + ) + elif Options.RHOEND in options: + options[Options.RHOBEG.value] = np.max( + [ + DEFAULT_OPTIONS[Options.RHOBEG], + options[Options.RHOEND], + ] + ) + else: + options[Options.RHOBEG.value] = DEFAULT_OPTIONS[Options.RHOBEG] + options[Options.RHOEND.value] = DEFAULT_OPTIONS[Options.RHOEND] + options[Options.RHOBEG.value] = float(options[Options.RHOBEG]) + options[Options.RHOEND.value] = float(options[Options.RHOEND]) + if Options.NPT in options and options[Options.NPT] <= 0: + raise ValueError("The number of interpolation points must be " + "positive.") + if ( + Options.NPT in options + and options[Options.NPT] > ((n + 1) * (n + 2)) // 2 + ): + raise ValueError( + f"The number of interpolation points must be at most " + f"{((n + 1) * (n + 2)) // 2}." 
+ ) + options.setdefault(Options.NPT.value, DEFAULT_OPTIONS[Options.NPT](n)) + options[Options.NPT.value] = int(options[Options.NPT]) + if Options.MAX_EVAL in options and options[Options.MAX_EVAL] <= 0: + raise ValueError( + "The maximum number of function evaluations must be positive." + ) + options.setdefault( + Options.MAX_EVAL.value, + np.max( + [ + DEFAULT_OPTIONS[Options.MAX_EVAL](n), + options[Options.NPT] + 1, + ] + ), + ) + options[Options.MAX_EVAL.value] = int(options[Options.MAX_EVAL]) + if Options.MAX_ITER in options and options[Options.MAX_ITER] <= 0: + raise ValueError("The maximum number of iterations must be positive.") + options.setdefault( + Options.MAX_ITER.value, + DEFAULT_OPTIONS[Options.MAX_ITER](n), + ) + options[Options.MAX_ITER.value] = int(options[Options.MAX_ITER]) + options.setdefault(Options.TARGET.value, DEFAULT_OPTIONS[Options.TARGET]) + options[Options.TARGET.value] = float(options[Options.TARGET]) + options.setdefault( + Options.FEASIBILITY_TOL.value, + DEFAULT_OPTIONS[Options.FEASIBILITY_TOL], + ) + options[Options.FEASIBILITY_TOL.value] = float( + options[Options.FEASIBILITY_TOL] + ) + options.setdefault(Options.VERBOSE.value, DEFAULT_OPTIONS[Options.VERBOSE]) + options[Options.VERBOSE.value] = bool(options[Options.VERBOSE]) + options.setdefault(Options.SCALE.value, DEFAULT_OPTIONS[Options.SCALE]) + options[Options.SCALE.value] = bool(options[Options.SCALE]) + options.setdefault( + Options.FILTER_SIZE.value, + DEFAULT_OPTIONS[Options.FILTER_SIZE], + ) + options[Options.FILTER_SIZE.value] = int(options[Options.FILTER_SIZE]) + options.setdefault( + Options.STORE_HISTORY.value, + DEFAULT_OPTIONS[Options.STORE_HISTORY], + ) + options[Options.STORE_HISTORY.value] = bool(options[Options.STORE_HISTORY]) + options.setdefault( + Options.HISTORY_SIZE.value, + DEFAULT_OPTIONS[Options.HISTORY_SIZE], + ) + options[Options.HISTORY_SIZE.value] = int(options[Options.HISTORY_SIZE]) + options.setdefault(Options.DEBUG.value, 
DEFAULT_OPTIONS[Options.DEBUG]) + options[Options.DEBUG.value] = bool(options[Options.DEBUG]) + + # Check whether they are any unknown options. + for key in options: + if key not in Options.__members__.values(): + warnings.warn(f"Unknown option: {key}.", RuntimeWarning, 3) + + +def _set_default_constants(**kwargs): + """ + Set the default constants. + """ + constants = dict(kwargs) + constants.setdefault( + Constants.DECREASE_RADIUS_FACTOR.value, + DEFAULT_CONSTANTS[Constants.DECREASE_RADIUS_FACTOR], + ) + constants[Constants.DECREASE_RADIUS_FACTOR.value] = float( + constants[Constants.DECREASE_RADIUS_FACTOR] + ) + if ( + constants[Constants.DECREASE_RADIUS_FACTOR] <= 0.0 + or constants[Constants.DECREASE_RADIUS_FACTOR] >= 1.0 + ): + raise ValueError( + "The constant decrease_radius_factor must be in the interval " + "(0, 1)." + ) + constants.setdefault( + Constants.INCREASE_RADIUS_THRESHOLD.value, + DEFAULT_CONSTANTS[Constants.INCREASE_RADIUS_THRESHOLD], + ) + constants[Constants.INCREASE_RADIUS_THRESHOLD.value] = float( + constants[Constants.INCREASE_RADIUS_THRESHOLD] + ) + if constants[Constants.INCREASE_RADIUS_THRESHOLD] <= 1.0: + raise ValueError( + "The constant increase_radius_threshold must be greater than 1." + ) + if ( + Constants.INCREASE_RADIUS_FACTOR in constants + and constants[Constants.INCREASE_RADIUS_FACTOR] <= 1.0 + ): + raise ValueError( + "The constant increase_radius_factor must be greater than 1." + ) + if ( + Constants.DECREASE_RADIUS_THRESHOLD in constants + and constants[Constants.DECREASE_RADIUS_THRESHOLD] <= 1.0 + ): + raise ValueError( + "The constant decrease_radius_threshold must be greater than 1." + ) + if ( + Constants.INCREASE_RADIUS_FACTOR in constants + and Constants.DECREASE_RADIUS_THRESHOLD in constants + ): + if ( + constants[Constants.DECREASE_RADIUS_THRESHOLD] + >= constants[Constants.INCREASE_RADIUS_FACTOR] + ): + raise ValueError( + "The constant decrease_radius_threshold must be " + "less than increase_radius_factor." 
+ ) + elif Constants.INCREASE_RADIUS_FACTOR in constants: + constants[Constants.DECREASE_RADIUS_THRESHOLD.value] = np.min( + [ + DEFAULT_CONSTANTS[Constants.DECREASE_RADIUS_THRESHOLD], + 0.5 * (1.0 + constants[Constants.INCREASE_RADIUS_FACTOR]), + ] + ) + elif Constants.DECREASE_RADIUS_THRESHOLD in constants: + constants[Constants.INCREASE_RADIUS_FACTOR.value] = np.max( + [ + DEFAULT_CONSTANTS[Constants.INCREASE_RADIUS_FACTOR], + 2.0 * constants[Constants.DECREASE_RADIUS_THRESHOLD], + ] + ) + else: + constants[Constants.INCREASE_RADIUS_FACTOR.value] = DEFAULT_CONSTANTS[ + Constants.INCREASE_RADIUS_FACTOR + ] + constants[Constants.DECREASE_RADIUS_THRESHOLD.value] = ( + DEFAULT_CONSTANTS[Constants.DECREASE_RADIUS_THRESHOLD]) + constants.setdefault( + Constants.DECREASE_RESOLUTION_FACTOR.value, + DEFAULT_CONSTANTS[Constants.DECREASE_RESOLUTION_FACTOR], + ) + constants[Constants.DECREASE_RESOLUTION_FACTOR.value] = float( + constants[Constants.DECREASE_RESOLUTION_FACTOR] + ) + if ( + constants[Constants.DECREASE_RESOLUTION_FACTOR] <= 0.0 + or constants[Constants.DECREASE_RESOLUTION_FACTOR] >= 1.0 + ): + raise ValueError( + "The constant decrease_resolution_factor must be in the interval " + "(0, 1)." + ) + if ( + Constants.LARGE_RESOLUTION_THRESHOLD in constants + and constants[Constants.LARGE_RESOLUTION_THRESHOLD] <= 1.0 + ): + raise ValueError( + "The constant large_resolution_threshold must be greater than 1." + ) + if ( + Constants.MODERATE_RESOLUTION_THRESHOLD in constants + and constants[Constants.MODERATE_RESOLUTION_THRESHOLD] <= 1.0 + ): + raise ValueError( + "The constant moderate_resolution_threshold must be greater than " + "1." 
+ ) + if ( + Constants.LARGE_RESOLUTION_THRESHOLD in constants + and Constants.MODERATE_RESOLUTION_THRESHOLD in constants + ): + if ( + constants[Constants.MODERATE_RESOLUTION_THRESHOLD] + > constants[Constants.LARGE_RESOLUTION_THRESHOLD] + ): + raise ValueError( + "The constant moderate_resolution_threshold " + "must be at most large_resolution_threshold." + ) + elif Constants.LARGE_RESOLUTION_THRESHOLD in constants: + constants[Constants.MODERATE_RESOLUTION_THRESHOLD.value] = np.min( + [ + DEFAULT_CONSTANTS[Constants.MODERATE_RESOLUTION_THRESHOLD], + constants[Constants.LARGE_RESOLUTION_THRESHOLD], + ] + ) + elif Constants.MODERATE_RESOLUTION_THRESHOLD in constants: + constants[Constants.LARGE_RESOLUTION_THRESHOLD.value] = np.max( + [ + DEFAULT_CONSTANTS[Constants.LARGE_RESOLUTION_THRESHOLD], + constants[Constants.MODERATE_RESOLUTION_THRESHOLD], + ] + ) + else: + constants[Constants.LARGE_RESOLUTION_THRESHOLD.value] = ( + DEFAULT_CONSTANTS[Constants.LARGE_RESOLUTION_THRESHOLD] + ) + constants[Constants.MODERATE_RESOLUTION_THRESHOLD.value] = ( + DEFAULT_CONSTANTS[Constants.MODERATE_RESOLUTION_THRESHOLD] + ) + if Constants.LOW_RATIO in constants and ( + constants[Constants.LOW_RATIO] <= 0.0 + or constants[Constants.LOW_RATIO] >= 1.0 + ): + raise ValueError( + "The constant low_ratio must be in the interval (0, 1)." + ) + if Constants.HIGH_RATIO in constants and ( + constants[Constants.HIGH_RATIO] <= 0.0 + or constants[Constants.HIGH_RATIO] >= 1.0 + ): + raise ValueError( + "The constant high_ratio must be in the interval (0, 1)." + ) + if Constants.LOW_RATIO in constants and Constants.HIGH_RATIO in constants: + if constants[Constants.LOW_RATIO] > constants[Constants.HIGH_RATIO]: + raise ValueError( + "The constant low_ratio must be at most high_ratio." 
+ ) + elif Constants.LOW_RATIO in constants: + constants[Constants.HIGH_RATIO.value] = np.max( + [ + DEFAULT_CONSTANTS[Constants.HIGH_RATIO], + constants[Constants.LOW_RATIO], + ] + ) + elif Constants.HIGH_RATIO in constants: + constants[Constants.LOW_RATIO.value] = np.min( + [ + DEFAULT_CONSTANTS[Constants.LOW_RATIO], + constants[Constants.HIGH_RATIO], + ] + ) + else: + constants[Constants.LOW_RATIO.value] = DEFAULT_CONSTANTS[ + Constants.LOW_RATIO + ] + constants[Constants.HIGH_RATIO.value] = DEFAULT_CONSTANTS[ + Constants.HIGH_RATIO + ] + constants.setdefault( + Constants.VERY_LOW_RATIO.value, + DEFAULT_CONSTANTS[Constants.VERY_LOW_RATIO], + ) + constants[Constants.VERY_LOW_RATIO.value] = float( + constants[Constants.VERY_LOW_RATIO] + ) + if ( + constants[Constants.VERY_LOW_RATIO] <= 0.0 + or constants[Constants.VERY_LOW_RATIO] >= 1.0 + ): + raise ValueError( + "The constant very_low_ratio must be in the interval (0, 1)." + ) + if ( + Constants.PENALTY_INCREASE_THRESHOLD in constants + and constants[Constants.PENALTY_INCREASE_THRESHOLD] < 1.0 + ): + raise ValueError( + "The constant penalty_increase_threshold must be " + "greater than or equal to 1." + ) + if ( + Constants.PENALTY_INCREASE_FACTOR in constants + and constants[Constants.PENALTY_INCREASE_FACTOR] <= 1.0 + ): + raise ValueError( + "The constant penalty_increase_factor must be greater than 1." + ) + if ( + Constants.PENALTY_INCREASE_THRESHOLD in constants + and Constants.PENALTY_INCREASE_FACTOR in constants + ): + if ( + constants[Constants.PENALTY_INCREASE_FACTOR] + < constants[Constants.PENALTY_INCREASE_THRESHOLD] + ): + raise ValueError( + "The constant penalty_increase_factor must be " + "greater than or equal to " + "penalty_increase_threshold." 
+ ) + elif Constants.PENALTY_INCREASE_THRESHOLD in constants: + constants[Constants.PENALTY_INCREASE_FACTOR.value] = np.max( + [ + DEFAULT_CONSTANTS[Constants.PENALTY_INCREASE_FACTOR], + constants[Constants.PENALTY_INCREASE_THRESHOLD], + ] + ) + elif Constants.PENALTY_INCREASE_FACTOR in constants: + constants[Constants.PENALTY_INCREASE_THRESHOLD.value] = np.min( + [ + DEFAULT_CONSTANTS[Constants.PENALTY_INCREASE_THRESHOLD], + constants[Constants.PENALTY_INCREASE_FACTOR], + ] + ) + else: + constants[Constants.PENALTY_INCREASE_THRESHOLD.value] = ( + DEFAULT_CONSTANTS[Constants.PENALTY_INCREASE_THRESHOLD] + ) + constants[Constants.PENALTY_INCREASE_FACTOR.value] = DEFAULT_CONSTANTS[ + Constants.PENALTY_INCREASE_FACTOR + ] + constants.setdefault( + Constants.SHORT_STEP_THRESHOLD.value, + DEFAULT_CONSTANTS[Constants.SHORT_STEP_THRESHOLD], + ) + constants[Constants.SHORT_STEP_THRESHOLD.value] = float( + constants[Constants.SHORT_STEP_THRESHOLD] + ) + if ( + constants[Constants.SHORT_STEP_THRESHOLD] <= 0.0 + or constants[Constants.SHORT_STEP_THRESHOLD] >= 1.0 + ): + raise ValueError( + "The constant short_step_threshold must be in the interval (0, 1)." + ) + constants.setdefault( + Constants.LOW_RADIUS_FACTOR.value, + DEFAULT_CONSTANTS[Constants.LOW_RADIUS_FACTOR], + ) + constants[Constants.LOW_RADIUS_FACTOR.value] = float( + constants[Constants.LOW_RADIUS_FACTOR] + ) + if ( + constants[Constants.LOW_RADIUS_FACTOR] <= 0.0 + or constants[Constants.LOW_RADIUS_FACTOR] >= 1.0 + ): + raise ValueError( + "The constant low_radius_factor must be in the interval (0, 1)." 
+ ) + constants.setdefault( + Constants.BYRD_OMOJOKUN_FACTOR.value, + DEFAULT_CONSTANTS[Constants.BYRD_OMOJOKUN_FACTOR], + ) + constants[Constants.BYRD_OMOJOKUN_FACTOR.value] = float( + constants[Constants.BYRD_OMOJOKUN_FACTOR] + ) + if ( + constants[Constants.BYRD_OMOJOKUN_FACTOR] <= 0.0 + or constants[Constants.BYRD_OMOJOKUN_FACTOR] >= 1.0 + ): + raise ValueError( + "The constant byrd_omojokun_factor must be in the interval (0, 1)." + ) + constants.setdefault( + Constants.THRESHOLD_RATIO_CONSTRAINTS.value, + DEFAULT_CONSTANTS[Constants.THRESHOLD_RATIO_CONSTRAINTS], + ) + constants[Constants.THRESHOLD_RATIO_CONSTRAINTS.value] = float( + constants[Constants.THRESHOLD_RATIO_CONSTRAINTS] + ) + if constants[Constants.THRESHOLD_RATIO_CONSTRAINTS] <= 1.0: + raise ValueError( + "The constant threshold_ratio_constraints must be greater than 1." + ) + constants.setdefault( + Constants.LARGE_SHIFT_FACTOR.value, + DEFAULT_CONSTANTS[Constants.LARGE_SHIFT_FACTOR], + ) + constants[Constants.LARGE_SHIFT_FACTOR.value] = float( + constants[Constants.LARGE_SHIFT_FACTOR] + ) + if constants[Constants.LARGE_SHIFT_FACTOR] < 0.0: + raise ValueError("The constant large_shift_factor must be " + "nonnegative.") + constants.setdefault( + Constants.LARGE_GRADIENT_FACTOR.value, + DEFAULT_CONSTANTS[Constants.LARGE_GRADIENT_FACTOR], + ) + constants[Constants.LARGE_GRADIENT_FACTOR.value] = float( + constants[Constants.LARGE_GRADIENT_FACTOR] + ) + if constants[Constants.LARGE_GRADIENT_FACTOR] <= 1.0: + raise ValueError( + "The constant large_gradient_factor must be greater than 1." + ) + constants.setdefault( + Constants.RESOLUTION_FACTOR.value, + DEFAULT_CONSTANTS[Constants.RESOLUTION_FACTOR], + ) + constants[Constants.RESOLUTION_FACTOR.value] = float( + constants[Constants.RESOLUTION_FACTOR] + ) + if constants[Constants.RESOLUTION_FACTOR] <= 1.0: + raise ValueError( + "The constant resolution_factor must be greater than 1." 
+ ) + constants.setdefault( + Constants.IMPROVE_TCG.value, + DEFAULT_CONSTANTS[Constants.IMPROVE_TCG], + ) + constants[Constants.IMPROVE_TCG.value] = bool( + constants[Constants.IMPROVE_TCG] + ) + + # Check whether they are any unknown options. + for key in kwargs: + if key not in Constants.__members__.values(): + warnings.warn(f"Unknown constant: {key}.", RuntimeWarning, 3) + return constants + + +def _eval(pb, framework, step, options): + """ + Evaluate the objective and constraint functions. + """ + if pb.n_eval >= options[Options.MAX_EVAL]: + raise MaxEvalError + x_eval = framework.x_best + step + fun_val, cub_val, ceq_val = pb(x_eval, framework.penalty) + r_val = pb.maxcv(x_eval, cub_val, ceq_val) + if ( + fun_val <= options[Options.TARGET] + and r_val <= options[Options.FEASIBILITY_TOL] + ): + raise TargetSuccess + if pb.is_feasibility and r_val <= options[Options.FEASIBILITY_TOL]: + raise FeasibleSuccess + return fun_val, cub_val, ceq_val + + +def _build_result(pb, penalty, success, status, n_iter, options): + """ + Build the result of the optimization process. + """ + # Build the result. 
+ x, fun, maxcv = pb.best_eval(penalty) + success = success and np.isfinite(fun) and np.isfinite(maxcv) + if status not in [ExitStatus.TARGET_SUCCESS, ExitStatus.FEASIBLE_SUCCESS]: + success = success and maxcv <= options[Options.FEASIBILITY_TOL] + result = OptimizeResult() + result.message = { + ExitStatus.RADIUS_SUCCESS: "The lower bound for the trust-region " + "radius has been reached", + ExitStatus.TARGET_SUCCESS: "The target objective function value has " + "been reached", + ExitStatus.FIXED_SUCCESS: "All variables are fixed by the bound " + "constraints", + ExitStatus.CALLBACK_SUCCESS: "The callback requested to stop the " + "optimization procedure", + ExitStatus.FEASIBLE_SUCCESS: "The feasibility problem received has " + "been solved successfully", + ExitStatus.MAX_EVAL_WARNING: "The maximum number of function " + "evaluations has been exceeded", + ExitStatus.MAX_ITER_WARNING: "The maximum number of iterations has " + "been exceeded", + ExitStatus.INFEASIBLE_ERROR: "The bound constraints are infeasible", + ExitStatus.LINALG_ERROR: "A linear algebra error occurred", + }.get(status, "Unknown exit status") + result.success = success + result.status = status.value + result.x = pb.build_x(x) + result.fun = fun + result.maxcv = maxcv + result.nfev = pb.n_eval + result.nit = n_iter + if options[Options.STORE_HISTORY]: + result.fun_history = pb.fun_history + result.maxcv_history = pb.maxcv_history + + # Print the result if requested. + if options[Options.VERBOSE]: + _print_step( + result.message, + pb, + result.x, + result.fun, + result.maxcv, + result.nfev, + result.nit, + ) + return result + + +def _print_step(message, pb, x, fun_val, r_val, n_eval, n_iter): + """ + Print information about the current state of the optimization process. 
+ """ + print() + print(f"{message}.") + print(f"Number of function evaluations: {n_eval}.") + print(f"Number of iterations: {n_iter}.") + if not pb.is_feasibility: + print(f"Least value of {pb.fun_name}: {fun_val}.") + print(f"Maximum constraint violation: {r_val}.") + with np.printoptions(**PRINT_OPTIONS): + print(f"Corresponding point: {x}.") diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/models.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/models.py new file mode 100644 index 0000000000000000000000000000000000000000..4891b074bfd6dd3f7d43fa95b0b845a764cac114 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/models.py @@ -0,0 +1,1529 @@ +import warnings + +import numpy as np +from scipy.linalg import eigh + +from .settings import Options +from .utils import MaxEvalError, TargetSuccess, FeasibleSuccess + + +EPS = np.finfo(float).eps + + +class Interpolation: + """ + Interpolation set. + + This class stores a base point around which the models are expanded and the + interpolation points. The coordinates of the interpolation points are + relative to the base point. + """ + + def __init__(self, pb, options): + """ + Initialize the interpolation set. + + Parameters + ---------- + pb : `cobyqa.problem.Problem` + Problem to be solved. + options : dict + Options of the solver. + """ + # Reduce the initial trust-region radius if necessary. + self._debug = options[Options.DEBUG] + max_radius = 0.5 * np.min(pb.bounds.xu - pb.bounds.xl) + if options[Options.RHOBEG] > max_radius: + options[Options.RHOBEG.value] = max_radius + options[Options.RHOEND.value] = np.min( + [ + options[Options.RHOEND], + max_radius, + ] + ) + + # Set the initial point around which the models are expanded. 
+ self._x_base = np.copy(pb.x0) + very_close_xl_idx = ( + self.x_base <= pb.bounds.xl + 0.5 * options[Options.RHOBEG] + ) + self.x_base[very_close_xl_idx] = pb.bounds.xl[very_close_xl_idx] + close_xl_idx = ( + pb.bounds.xl + 0.5 * options[Options.RHOBEG] < self.x_base + ) & (self.x_base <= pb.bounds.xl + options[Options.RHOBEG]) + self.x_base[close_xl_idx] = np.minimum( + pb.bounds.xl[close_xl_idx] + options[Options.RHOBEG], + pb.bounds.xu[close_xl_idx], + ) + very_close_xu_idx = ( + self.x_base >= pb.bounds.xu - 0.5 * options[Options.RHOBEG] + ) + self.x_base[very_close_xu_idx] = pb.bounds.xu[very_close_xu_idx] + close_xu_idx = ( + self.x_base < pb.bounds.xu - 0.5 * options[Options.RHOBEG] + ) & (pb.bounds.xu - options[Options.RHOBEG] <= self.x_base) + self.x_base[close_xu_idx] = np.maximum( + pb.bounds.xu[close_xu_idx] - options[Options.RHOBEG], + pb.bounds.xl[close_xu_idx], + ) + + # Set the initial interpolation set. + self._xpt = np.zeros((pb.n, options[Options.NPT])) + for k in range(1, options[Options.NPT]): + if k <= pb.n: + if very_close_xu_idx[k - 1]: + self.xpt[k - 1, k] = -options[Options.RHOBEG] + else: + self.xpt[k - 1, k] = options[Options.RHOBEG] + elif k <= 2 * pb.n: + if very_close_xl_idx[k - pb.n - 1]: + self.xpt[k - pb.n - 1, k] = 2.0 * options[Options.RHOBEG] + elif very_close_xu_idx[k - pb.n - 1]: + self.xpt[k - pb.n - 1, k] = -2.0 * options[Options.RHOBEG] + else: + self.xpt[k - pb.n - 1, k] = -options[Options.RHOBEG] + else: + spread = (k - pb.n - 1) // pb.n + k1 = k - (1 + spread) * pb.n - 1 + k2 = (k1 + spread) % pb.n + self.xpt[k1, k] = self.xpt[k1, k1 + 1] + self.xpt[k2, k] = self.xpt[k2, k2 + 1] + + @property + def n(self): + """ + Number of variables. + + Returns + ------- + int + Number of variables. + """ + return self.xpt.shape[0] + + @property + def npt(self): + """ + Number of interpolation points. + + Returns + ------- + int + Number of interpolation points. 
+ """ + return self.xpt.shape[1] + + @property + def xpt(self): + """ + Interpolation points. + + Returns + ------- + `numpy.ndarray`, shape (n, npt) + Interpolation points. + """ + return self._xpt + + @xpt.setter + def xpt(self, xpt): + """ + Set the interpolation points. + + Parameters + ---------- + xpt : `numpy.ndarray`, shape (n, npt) + New interpolation points. + """ + if self._debug: + assert xpt.shape == ( + self.n, + self.npt, + ), "The shape of `xpt` is not valid." + self._xpt = xpt + + @property + def x_base(self): + """ + Base point around which the models are expanded. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Base point around which the models are expanded. + """ + return self._x_base + + @x_base.setter + def x_base(self, x_base): + """ + Set the base point around which the models are expanded. + + Parameters + ---------- + x_base : `numpy.ndarray`, shape (n,) + New base point around which the models are expanded. + """ + if self._debug: + assert x_base.shape == ( + self.n, + ), "The shape of `x_base` is not valid." + self._x_base = x_base + + def point(self, k): + """ + Get the `k`-th interpolation point. + + The return point is relative to the origin. + + Parameters + ---------- + k : int + Index of the interpolation point. + + Returns + ------- + `numpy.ndarray`, shape (n,) + `k`-th interpolation point. + """ + if self._debug: + assert 0 <= k < self.npt, "The index `k` is not valid." + return self.x_base + self.xpt[:, k] + + +_cache = {"xpt": None, "a": None, "right_scaling": None, "eigh": None} + + +def build_system(interpolation): + """ + Build the left-hand side matrix of the interpolation system. The + matrix below stores W * diag(right_scaling), + where W is the theoretical matrix of the interpolation system. The + right scaling matrices is chosen to keep the elements in + the matrix well-balanced. + + Parameters + ---------- + interpolation : `cobyqa.models.Interpolation` + Interpolation set. 
+ """ + + # Compute the scaled directions from the base point to the + # interpolation points. We scale the directions to avoid numerical + # difficulties. + if _cache["xpt"] is not None and np.array_equal( + interpolation.xpt, _cache["xpt"] + ): + return _cache["a"], _cache["right_scaling"], _cache["eigh"] + + scale = np.max(np.linalg.norm(interpolation.xpt, axis=0), initial=EPS) + xpt_scale = interpolation.xpt / scale + + n, npt = xpt_scale.shape + a = np.zeros((npt + n + 1, npt + n + 1)) + a[:npt, :npt] = 0.5 * (xpt_scale.T @ xpt_scale) ** 2.0 + a[:npt, npt] = 1.0 + a[:npt, npt + 1:] = xpt_scale.T + a[npt, :npt] = 1.0 + a[npt + 1:, :npt] = xpt_scale + + # Build the left and right scaling diagonal matrices. + right_scaling = np.empty(npt + n + 1) + right_scaling[:npt] = 1.0 / scale**2.0 + right_scaling[npt] = scale**2.0 + right_scaling[npt + 1:] = scale + + eig_values, eig_vectors = eigh(a, check_finite=False) + + _cache["xpt"] = np.copy(interpolation.xpt) + _cache["a"] = np.copy(a) + _cache["right_scaling"] = np.copy(right_scaling) + _cache["eigh"] = (eig_values, eig_vectors) + + return a, right_scaling, (eig_values, eig_vectors) + + +class Quadratic: + """ + Quadratic model. + + This class stores the Hessian matrix of the quadratic model using the + implicit/explicit representation designed by Powell for NEWUOA [1]_. + + References + ---------- + .. [1] M. J. D. Powell. The NEWUOA software for unconstrained optimization + without derivatives. In G. Di Pillo and M. Roma, editors, *Large-Scale + Nonlinear Optimization*, volume 83 of Nonconvex Optim. Appl., pages + 255--297. Springer, Boston, MA, USA, 2006. `doi:10.1007/0-387-30065-1_16 + `_. + """ + + def __init__(self, interpolation, values, debug): + """ + Initialize the quadratic model. + + Parameters + ---------- + interpolation : `cobyqa.models.Interpolation` + Interpolation set. + values : `numpy.ndarray`, shape (npt,) + Values of the interpolated function at the interpolation points. 
+ debug : bool + Whether to make debugging tests during the execution. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the interpolation system is ill-defined. + """ + self._debug = debug + if self._debug: + assert values.shape == ( + interpolation.npt, + ), "The shape of `values` is not valid." + if interpolation.npt < interpolation.n + 1: + raise ValueError( + f"The number of interpolation points must be at least " + f"{interpolation.n + 1}." + ) + self._const, self._grad, self._i_hess, _ = self._get_model( + interpolation, + values, + ) + self._e_hess = np.zeros((self.n, self.n)) + + def __call__(self, x, interpolation): + """ + Evaluate the quadratic model at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which the quadratic model is evaluated. + interpolation : `cobyqa.models.Interpolation` + Interpolation set. + + Returns + ------- + float + Value of the quadratic model at `x`. + """ + if self._debug: + assert x.shape == (self.n,), "The shape of `x` is not valid." + x_diff = x - interpolation.x_base + return ( + self._const + + self._grad @ x_diff + + 0.5 + * ( + self._i_hess @ (interpolation.xpt.T @ x_diff) ** 2.0 + + x_diff @ self._e_hess @ x_diff + ) + ) + + @property + def n(self): + """ + Number of variables. + + Returns + ------- + int + Number of variables. + """ + return self._grad.size + + @property + def npt(self): + """ + Number of interpolation points used to define the quadratic model. + + Returns + ------- + int + Number of interpolation points used to define the quadratic model. + """ + return self._i_hess.size + + def grad(self, x, interpolation): + """ + Evaluate the gradient of the quadratic model at a given point. + + Parameters + ---------- + x : `numpy.ndarray`, shape (n,) + Point at which the gradient of the quadratic model is evaluated. + interpolation : `cobyqa.models.Interpolation` + Interpolation set. 
+ + Returns + ------- + `numpy.ndarray`, shape (n,) + Gradient of the quadratic model at `x`. + """ + if self._debug: + assert x.shape == (self.n,), "The shape of `x` is not valid." + x_diff = x - interpolation.x_base + return self._grad + self.hess_prod(x_diff, interpolation) + + def hess(self, interpolation): + """ + Evaluate the Hessian matrix of the quadratic model. + + Parameters + ---------- + interpolation : `cobyqa.models.Interpolation` + Interpolation set. + + Returns + ------- + `numpy.ndarray`, shape (n, n) + Hessian matrix of the quadratic model. + """ + return self._e_hess + interpolation.xpt @ ( + self._i_hess[:, np.newaxis] * interpolation.xpt.T + ) + + def hess_prod(self, v, interpolation): + """ + Evaluate the right product of the Hessian matrix of the quadratic model + with a given vector. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Vector with which the Hessian matrix of the quadratic model is + multiplied from the right. + interpolation : `cobyqa.models.Interpolation` + Interpolation set. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Right product of the Hessian matrix of the quadratic model with + `v`. + """ + if self._debug: + assert v.shape == (self.n,), "The shape of `v` is not valid." + return self._e_hess @ v + interpolation.xpt @ ( + self._i_hess * (interpolation.xpt.T @ v) + ) + + def curv(self, v, interpolation): + """ + Evaluate the curvature of the quadratic model along a given direction. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Direction along which the curvature of the quadratic model is + evaluated. + interpolation : `cobyqa.models.Interpolation` + Interpolation set. + + Returns + ------- + float + Curvature of the quadratic model along `v`. + """ + if self._debug: + assert v.shape == (self.n,), "The shape of `v` is not valid." 
+ return ( + v @ self._e_hess @ v + + self._i_hess @ (interpolation.xpt.T @ v) ** 2.0 + ) + + def update(self, interpolation, k_new, dir_old, values_diff): + """ + Update the quadratic model. + + This method applies the derivative-free symmetric Broyden update to the + quadratic model. The `knew`-th interpolation point must be updated + before calling this method. + + Parameters + ---------- + interpolation : `cobyqa.models.Interpolation` + Updated interpolation set. + k_new : int + Index of the updated interpolation point. + dir_old : `numpy.ndarray`, shape (n,) + Value of ``interpolation.xpt[:, k_new]`` before the update. + values_diff : `numpy.ndarray`, shape (npt,) + Differences between the values of the interpolated nonlinear + function and the previous quadratic model at the updated + interpolation points. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the interpolation system is ill-defined. + """ + if self._debug: + assert 0 <= k_new < self.npt, "The index `k_new` is not valid." + assert dir_old.shape == ( + self.n, + ), "The shape of `dir_old` is not valid." + assert values_diff.shape == ( + self.npt, + ), "The shape of `values_diff` is not valid." + + # Forward the k_new-th element of the implicit Hessian matrix to the + # explicit Hessian matrix. This must be done because the implicit + # Hessian matrix is related to the interpolation points, and the + # k_new-th interpolation point is modified. + self._e_hess += self._i_hess[k_new] * np.outer(dir_old, dir_old) + self._i_hess[k_new] = 0.0 + + # Update the quadratic model. + const, grad, i_hess, ill_conditioned = self._get_model( + interpolation, + values_diff, + ) + self._const += const + self._grad += grad + self._i_hess += i_hess + return ill_conditioned + + def shift_x_base(self, interpolation, new_x_base): + """ + Shift the point around which the quadratic model is defined. + + Parameters + ---------- + interpolation : `cobyqa.models.Interpolation` + Previous interpolation set. 
+ new_x_base : `numpy.ndarray`, shape (n,) + Point that will replace ``interpolation.x_base``. + """ + if self._debug: + assert new_x_base.shape == ( + self.n, + ), "The shape of `new_x_base` is not valid." + self._const = self(new_x_base, interpolation) + self._grad = self.grad(new_x_base, interpolation) + shift = new_x_base - interpolation.x_base + update = np.outer( + shift, + (interpolation.xpt - 0.5 * shift[:, np.newaxis]) @ self._i_hess, + ) + self._e_hess += update + update.T + + @staticmethod + def solve_systems(interpolation, rhs): + """ + Solve the interpolation systems. + + Parameters + ---------- + interpolation : `cobyqa.models.Interpolation` + Interpolation set. + rhs : `numpy.ndarray`, shape (npt + n + 1, m) + Right-hand side vectors of the ``m`` interpolation systems. + + Returns + ------- + `numpy.ndarray`, shape (npt + n + 1, m) + Solutions of the interpolation systems. + `numpy.ndarray`, shape (m, ) + Whether the interpolation systems are ill-conditioned. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the interpolation systems are ill-defined. + """ + n, npt = interpolation.xpt.shape + assert ( + rhs.ndim == 2 and rhs.shape[0] == npt + n + 1 + ), "The shape of `rhs` is not valid." + + # Build the left-hand side matrix of the interpolation system. The + # matrix below stores diag(left_scaling) * W * diag(right_scaling), + # where W is the theoretical matrix of the interpolation system. The + # left and right scaling matrices are chosen to keep the elements in + # the matrix well-balanced. + a, right_scaling, eig = build_system(interpolation) + + # Build the solution. After a discussion with Mike Saunders and Alexis + # Montoison during their visit to the Hong Kong Polytechnic University + # in 2024, we decided to use the eigendecomposition of the symmetric + # matrix a. 
        # decomposition, and allows us to directly detect ill-conditioning of
        # the system and to build the least-squares solution if necessary.
        # Numerical experiments have shown that this strategy improves the
        # performance of the solver.
        rhs_scaled = rhs * right_scaling[:, np.newaxis]
        if not (np.all(np.isfinite(a)) and np.all(np.isfinite(rhs_scaled))):
            raise np.linalg.LinAlgError(
                "The interpolation system is ill-defined."
            )

        # Eigendecomposition of the scaled system matrix, calculated in
        # build_system.
        eig_values, eig_vectors = eig

        # Keep only eigenvalues of sufficiently large magnitude; discarding
        # the remaining directions yields the least-squares solution when the
        # system is rank-deficient.
        large_eig_values = np.abs(eig_values) > EPS
        eig_vectors = eig_vectors[:, large_eig_values]
        inv_eig_values = 1.0 / eig_values[large_eig_values]
        # Flag ill-conditioning if any eigenvalue was discarded above.
        ill_conditioned = ~np.all(large_eig_values, 0)
        left_scaled_solutions = eig_vectors @ (
            (eig_vectors.T @ rhs_scaled) * inv_eig_values[:, np.newaxis]
        )
        # Undo the right scaling to recover the solutions of the original
        # (unscaled) systems.
        return (
            left_scaled_solutions * right_scaling[:, np.newaxis],
            ill_conditioned,
        )

    @staticmethod
    def _get_model(interpolation, values):
        """
        Solve the interpolation system.

        Parameters
        ----------
        interpolation : `cobyqa.models.Interpolation`
            Interpolation set.
        values : `numpy.ndarray`, shape (npt,)
            Values of the interpolated function at the interpolation points.

        Returns
        -------
        float
            Constant term of the quadratic model.
        `numpy.ndarray`, shape (n,)
            Gradient of the quadratic model at ``interpolation.x_base``.
        `numpy.ndarray`, shape (npt,)
            Implicit Hessian matrix of the quadratic model.
        bool
            Whether the interpolation system is ill-conditioned.

        Raises
        ------
        `numpy.linalg.LinAlgError`
            If the interpolation system is ill-defined.
        """
        assert values.shape == (
            interpolation.npt,
        ), "The shape of `values` is not valid."
        n, npt = interpolation.xpt.shape
        # Right-hand side of the interpolation system: the interpolated
        # values followed by n + 1 zeros (constant and linear parts).
        x, ill_conditioned = Quadratic.solve_systems(
            interpolation,
            np.block(
                [
                    [
                        values,
                        np.zeros(n + 1),
                    ]
                ]
            ).T,
        )
        # Solution layout: entries [:npt] hold the implicit Hessian, entry
        # npt the constant term, and entries [npt + 1:] the gradient.
        return x[npt, 0], x[npt + 1:, 0], x[:npt, 0], ill_conditioned


class Models:
    """
    Models for a nonlinear optimization problem.
    """

    def __init__(self, pb, options, penalty):
        """
        Initialize the models.

        Parameters
        ----------
        pb : `cobyqa.problem.Problem`
            Problem to be solved.
        options : dict
            Options of the solver.
        penalty : float
            Penalty parameter used to select the point in the filter to forward
            to the callback function.

        Raises
        ------
        `cobyqa.utils.MaxEvalError`
            If the maximum number of evaluations is reached.
        `cobyqa.utils.TargetSuccess`
            If a nearly feasible point has been found with an objective
            function value below the target.
        `cobyqa.utils.FeasibleSuccess`
            If a feasible point has been found for a feasibility problem.
        `numpy.linalg.LinAlgError`
            If the interpolation system is ill-defined.
        """
        # Set the initial interpolation set.
        self._debug = options[Options.DEBUG]
        self._interpolation = Interpolation(pb, options)

        # Evaluate the nonlinear functions at the initial interpolation points.
        # The first point was already evaluated when building the
        # interpolation set; its values are recycled below (k == 0).
        x_eval = self.interpolation.point(0)
        fun_init, cub_init, ceq_init = pb(x_eval, penalty)
        self._fun_val = np.full(options[Options.NPT], np.nan)
        self._cub_val = np.full((options[Options.NPT], cub_init.size), np.nan)
        self._ceq_val = np.full((options[Options.NPT], ceq_init.size), np.nan)
        for k in range(options[Options.NPT]):
            if k >= options[Options.MAX_EVAL]:
                # The evaluation budget may be smaller than npt.
                raise MaxEvalError
            if k == 0:
                self.fun_val[k] = fun_init
                self.cub_val[k, :] = cub_init
                self.ceq_val[k, :] = ceq_init
            else:
                x_eval = self.interpolation.point(k)
                self.fun_val[k], self.cub_val[k, :], self.ceq_val[k, :] = pb(
                    x_eval,
                    penalty,
                )

            # Stop the iterations if the problem is a feasibility problem and
            # the current interpolation point is feasible.
            if (
                pb.is_feasibility
                and pb.maxcv(
                    self.interpolation.point(k),
                    self.cub_val[k, :],
                    self.ceq_val[k, :],
                )
                <= options[Options.FEASIBILITY_TOL]
            ):
                raise FeasibleSuccess

            # Stop the iterations if the current interpolation point is nearly
            # feasible and has an objective function value below the target.
            if (
                self._fun_val[k] <= options[Options.TARGET]
                and pb.maxcv(
                    self.interpolation.point(k),
                    self.cub_val[k, :],
                    self.ceq_val[k, :],
                )
                <= options[Options.FEASIBILITY_TOL]
            ):
                raise TargetSuccess

        # Build the initial quadratic models (one per objective/constraint).
        self._fun = Quadratic(
            self.interpolation,
            self._fun_val,
            options[Options.DEBUG],
        )
        self._cub = np.empty(self.m_nonlinear_ub, dtype=Quadratic)
        self._ceq = np.empty(self.m_nonlinear_eq, dtype=Quadratic)
        for i in range(self.m_nonlinear_ub):
            self._cub[i] = Quadratic(
                self.interpolation,
                self.cub_val[:, i],
                options[Options.DEBUG],
            )
        for i in range(self.m_nonlinear_eq):
            self._ceq[i] = Quadratic(
                self.interpolation,
                self.ceq_val[:, i],
                options[Options.DEBUG],
            )
        if self._debug:
            self._check_interpolation_conditions()

    @property
    def n(self):
        """
        Dimension of the problem.

        Returns
        -------
        int
            Dimension of the problem.
        """
        return self.interpolation.n

    @property
    def npt(self):
        """
        Number of interpolation points.

        Returns
        -------
        int
            Number of interpolation points.
        """
        return self.interpolation.npt

    @property
    def m_nonlinear_ub(self):
        """
        Number of nonlinear inequality constraints.

        Returns
        -------
        int
            Number of nonlinear inequality constraints.
        """
        return self.cub_val.shape[1]

    @property
    def m_nonlinear_eq(self):
        """
        Number of nonlinear equality constraints.

        Returns
        -------
        int
            Number of nonlinear equality constraints.
        """
        return self.ceq_val.shape[1]

    @property
    def interpolation(self):
        """
        Interpolation set.

        Returns
        -------
        `cobyqa.models.Interpolation`
            Interpolation set.
        """
        return self._interpolation

    @property
    def fun_val(self):
        """
        Values of the objective function at the interpolation points.

        Returns
        -------
        `numpy.ndarray`, shape (npt,)
            Values of the objective function at the interpolation points.
        """
        return self._fun_val

    @property
    def cub_val(self):
        """
        Values of the nonlinear inequality constraint functions at the
        interpolation points.

        Returns
        -------
        `numpy.ndarray`, shape (npt, m_nonlinear_ub)
            Values of the nonlinear inequality constraint functions at the
            interpolation points.
        """
        return self._cub_val

    @property
    def ceq_val(self):
        """
        Values of the nonlinear equality constraint functions at the
        interpolation points.

        Returns
        -------
        `numpy.ndarray`, shape (npt, m_nonlinear_eq)
            Values of the nonlinear equality constraint functions at the
            interpolation points.
        """
        return self._ceq_val

    def fun(self, x):
        """
        Evaluate the quadratic model of the objective function at a given
        point.

        Parameters
        ----------
        x : `numpy.ndarray`, shape (n,)
            Point at which to evaluate the quadratic model of the objective
            function.

        Returns
        -------
        float
            Value of the quadratic model of the objective function at `x`.
        """
        if self._debug:
            assert x.shape == (self.n,), "The shape of `x` is not valid."
        return self._fun(x, self.interpolation)

    def fun_grad(self, x):
        """
        Evaluate the gradient of the quadratic model of the objective function
        at a given point.

        Parameters
        ----------
        x : `numpy.ndarray`, shape (n,)
            Point at which to evaluate the gradient of the quadratic model of
            the objective function.

        Returns
        -------
        `numpy.ndarray`, shape (n,)
            Gradient of the quadratic model of the objective function at `x`.
        """
        if self._debug:
            assert x.shape == (self.n,), "The shape of `x` is not valid."
        return self._fun.grad(x, self.interpolation)

    def fun_hess(self):
        """
        Evaluate the Hessian matrix of the quadratic model of the objective
        function.

        Returns
        -------
        `numpy.ndarray`, shape (n, n)
            Hessian matrix of the quadratic model of the objective function.
        """
        return self._fun.hess(self.interpolation)

    def fun_hess_prod(self, v):
        """
        Evaluate the right product of the Hessian matrix of the quadratic model
        of the objective function with a given vector.

        Parameters
        ----------
        v : `numpy.ndarray`, shape (n,)
            Vector with which the Hessian matrix of the quadratic model of the
            objective function is multiplied from the right.

        Returns
        -------
        `numpy.ndarray`, shape (n,)
            Right product of the Hessian matrix of the quadratic model of the
            objective function with `v`.
        """
        if self._debug:
            assert v.shape == (self.n,), "The shape of `v` is not valid."
        return self._fun.hess_prod(v, self.interpolation)

    def fun_curv(self, v):
        """
        Evaluate the curvature of the quadratic model of the objective function
        along a given direction.

        Parameters
        ----------
        v : `numpy.ndarray`, shape (n,)
            Direction along which the curvature of the quadratic model of the
            objective function is evaluated.

        Returns
        -------
        float
            Curvature of the quadratic model of the objective function along
            `v`.
        """
        if self._debug:
            assert v.shape == (self.n,), "The shape of `v` is not valid."
        return self._fun.curv(v, self.interpolation)

    def fun_alt_grad(self, x):
        """
        Evaluate the gradient of the alternative quadratic model of the
        objective function at a given point.

        Parameters
        ----------
        x : `numpy.ndarray`, shape (n,)
            Point at which to evaluate the gradient of the alternative
            quadratic model of the objective function.

        Returns
        -------
        `numpy.ndarray`, shape (n,)
            Gradient of the alternative quadratic model of the objective
            function at `x`.

        Raises
        ------
        `numpy.linalg.LinAlgError`
            If the interpolation system is ill-defined.
        """
        if self._debug:
            assert x.shape == (self.n,), "The shape of `x` is not valid."
        # The alternative model is rebuilt from the raw function values, i.e.,
        # the minimum-Frobenius-norm interpolant of the current set.
        model = Quadratic(self.interpolation, self.fun_val, self._debug)
        return model.grad(x, self.interpolation)

    def cub(self, x, mask=None):
        """
        Evaluate the quadratic models of the nonlinear inequality functions at
        a given point.

        Parameters
        ----------
        x : `numpy.ndarray`, shape (n,)
            Point at which to evaluate the quadratic models of the nonlinear
            inequality functions.
        mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
            Mask of the quadratic models to consider.

        Returns
        -------
        `numpy.ndarray`
            Values of the quadratic model of the nonlinear inequality
            functions.
        """
        if self._debug:
            assert x.shape == (self.n,), "The shape of `x` is not valid."
            assert mask is None or mask.shape == (
                self.m_nonlinear_ub,
            ), "The shape of `mask` is not valid."
        return np.array(
            [model(x, self.interpolation) for model in self._get_cub(mask)]
        )

    def cub_grad(self, x, mask=None):
        """
        Evaluate the gradients of the quadratic models of the nonlinear
        inequality functions at a given point.

        Parameters
        ----------
        x : `numpy.ndarray`, shape (n,)
            Point at which to evaluate the gradients of the quadratic models of
            the nonlinear inequality functions.
        mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
            Mask of the quadratic models to consider.

        Returns
        -------
        `numpy.ndarray`
            Gradients of the quadratic model of the nonlinear inequality
            functions.
        """
        if self._debug:
            assert x.shape == (self.n,), "The shape of `x` is not valid."
            assert mask is None or mask.shape == (
                self.m_nonlinear_ub,
            ), "The shape of `mask` is not valid."
        # np.reshape with -1 keeps a (0, n) result when the mask is empty.
        return np.reshape(
            [model.grad(x, self.interpolation)
             for model in self._get_cub(mask)],
            (-1, self.n),
        )

    def cub_hess(self, mask=None):
        """
        Evaluate the Hessian matrices of the quadratic models of the nonlinear
        inequality functions.

        Parameters
        ----------
        mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
            Mask of the quadratic models to consider.

        Returns
        -------
        `numpy.ndarray`
            Hessian matrices of the quadratic models of the nonlinear
            inequality functions.
        """
        if self._debug:
            assert mask is None or mask.shape == (
                self.m_nonlinear_ub,
            ), "The shape of `mask` is not valid."
        return np.reshape(
            [model.hess(self.interpolation) for model in self._get_cub(mask)],
            (-1, self.n, self.n),
        )

    def cub_hess_prod(self, v, mask=None):
        """
        Evaluate the right product of the Hessian matrices of the quadratic
        models of the nonlinear inequality functions with a given vector.

        Parameters
        ----------
        v : `numpy.ndarray`, shape (n,)
            Vector with which the Hessian matrices of the quadratic models of
            the nonlinear inequality functions are multiplied from the right.
        mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
            Mask of the quadratic models to consider.

        Returns
        -------
        `numpy.ndarray`
            Right products of the Hessian matrices of the quadratic models of
            the nonlinear inequality functions with `v`.
        """
        if self._debug:
            assert v.shape == (self.n,), "The shape of `v` is not valid."
            assert mask is None or mask.shape == (
                self.m_nonlinear_ub,
            ), "The shape of `mask` is not valid."
        return np.reshape(
            [
                model.hess_prod(v, self.interpolation)
                for model in self._get_cub(mask)
            ],
            (-1, self.n),
        )

    def cub_curv(self, v, mask=None):
        """
        Evaluate the curvature of the quadratic models of the nonlinear
        inequality functions along a given direction.

        Parameters
        ----------
        v : `numpy.ndarray`, shape (n,)
            Direction along which the curvature of the quadratic models of the
            nonlinear inequality functions is evaluated.
        mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
            Mask of the quadratic models to consider.

        Returns
        -------
        `numpy.ndarray`
            Curvature of the quadratic models of the nonlinear inequality
            functions along `v`.
        """
        if self._debug:
            assert v.shape == (self.n,), "The shape of `v` is not valid."
            assert mask is None or mask.shape == (
                self.m_nonlinear_ub,
            ), "The shape of `mask` is not valid."
        return np.array(
            [model.curv(v, self.interpolation)
             for model in self._get_cub(mask)]
        )

    def ceq(self, x, mask=None):
        """
        Evaluate the quadratic models of the nonlinear equality functions at a
        given point.

        Parameters
        ----------
        x : `numpy.ndarray`, shape (n,)
            Point at which to evaluate the quadratic models of the nonlinear
            equality functions.
        mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
            Mask of the quadratic models to consider.

        Returns
        -------
        `numpy.ndarray`
            Values of the quadratic model of the nonlinear equality functions.
        """
        if self._debug:
            assert x.shape == (self.n,), "The shape of `x` is not valid."
            assert mask is None or mask.shape == (
                self.m_nonlinear_eq,
            ), "The shape of `mask` is not valid."
        return np.array(
            [model(x, self.interpolation) for model in self._get_ceq(mask)]
        )

    def ceq_grad(self, x, mask=None):
        """
        Evaluate the gradients of the quadratic models of the nonlinear
        equality functions at a given point.

        Parameters
        ----------
        x : `numpy.ndarray`, shape (n,)
            Point at which to evaluate the gradients of the quadratic models of
            the nonlinear equality functions.
        mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
            Mask of the quadratic models to consider.

        Returns
        -------
        `numpy.ndarray`
            Gradients of the quadratic model of the nonlinear equality
            functions.
        """
        if self._debug:
            assert x.shape == (self.n,), "The shape of `x` is not valid."
            assert mask is None or mask.shape == (
                self.m_nonlinear_eq,
            ), "The shape of `mask` is not valid."
        return np.reshape(
            [model.grad(x, self.interpolation)
             for model in self._get_ceq(mask)],
            (-1, self.n),
        )

    def ceq_hess(self, mask=None):
        """
        Evaluate the Hessian matrices of the quadratic models of the nonlinear
        equality functions.

        Parameters
        ----------
        mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
            Mask of the quadratic models to consider.

        Returns
        -------
        `numpy.ndarray`
            Hessian matrices of the quadratic models of the nonlinear equality
            functions.
        """
        if self._debug:
            assert mask is None or mask.shape == (
                self.m_nonlinear_eq,
            ), "The shape of `mask` is not valid."
        return np.reshape(
            [model.hess(self.interpolation) for model in self._get_ceq(mask)],
            (-1, self.n, self.n),
        )

    def ceq_hess_prod(self, v, mask=None):
        """
        Evaluate the right product of the Hessian matrices of the quadratic
        models of the nonlinear equality functions with a given vector.

        Parameters
        ----------
        v : `numpy.ndarray`, shape (n,)
            Vector with which the Hessian matrices of the quadratic models of
            the nonlinear equality functions are multiplied from the right.
        mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
            Mask of the quadratic models to consider.

        Returns
        -------
        `numpy.ndarray`
            Right products of the Hessian matrices of the quadratic models of
            the nonlinear equality functions with `v`.
        """
        if self._debug:
            assert v.shape == (self.n,), "The shape of `v` is not valid."
            assert mask is None or mask.shape == (
                self.m_nonlinear_eq,
            ), "The shape of `mask` is not valid."
+ return np.reshape( + [ + model.hess_prod(v, self.interpolation) + for model in self._get_ceq(mask) + ], + (-1, self.n), + ) + + def ceq_curv(self, v, mask=None): + """ + Evaluate the curvature of the quadratic models of the nonlinear + equality functions along a given direction. + + Parameters + ---------- + v : `numpy.ndarray`, shape (n,) + Direction along which the curvature of the quadratic models of the + nonlinear equality functions is evaluated. + mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional + Mask of the quadratic models to consider. + + Returns + ------- + `numpy.ndarray` + Curvature of the quadratic models of the nonlinear equality + functions along `v`. + """ + if self._debug: + assert v.shape == (self.n,), "The shape of `v` is not valid." + assert mask is None or mask.shape == ( + self.m_nonlinear_eq, + ), "The shape of `mask` is not valid." + return np.array( + [model.curv(v, self.interpolation) + for model in self._get_ceq(mask)] + ) + + def reset_models(self): + """ + Set the quadratic models of the objective function, nonlinear + inequality constraints, and nonlinear equality constraints to the + alternative quadratic models. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the interpolation system is ill-defined. + """ + self._fun = Quadratic(self.interpolation, self.fun_val, self._debug) + for i in range(self.m_nonlinear_ub): + self._cub[i] = Quadratic( + self.interpolation, + self.cub_val[:, i], + self._debug, + ) + for i in range(self.m_nonlinear_eq): + self._ceq[i] = Quadratic( + self.interpolation, + self.ceq_val[:, i], + self._debug, + ) + if self._debug: + self._check_interpolation_conditions() + + def update_interpolation(self, k_new, x_new, fun_val, cub_val, ceq_val): + """ + Update the interpolation set. + + This method updates the interpolation set by replacing the `knew`-th + interpolation point with `xnew`. It also updates the function values + and the quadratic models. 
+ + Parameters + ---------- + k_new : int + Index of the updated interpolation point. + x_new : `numpy.ndarray`, shape (n,) + New interpolation point. Its value is interpreted as relative to + the origin, not the base point. + fun_val : float + Value of the objective function at `x_new`. + Objective function value at `x_new`. + cub_val : `numpy.ndarray`, shape (m_nonlinear_ub,) + Values of the nonlinear inequality constraints at `x_new`. + ceq_val : `numpy.ndarray`, shape (m_nonlinear_eq,) + Values of the nonlinear equality constraints at `x_new`. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the interpolation system is ill-defined. + """ + if self._debug: + assert 0 <= k_new < self.npt, "The index `k_new` is not valid." + assert x_new.shape == (self.n,), \ + "The shape of `x_new` is not valid." + assert isinstance(fun_val, float), \ + "The function value is not valid." + assert cub_val.shape == ( + self.m_nonlinear_ub, + ), "The shape of `cub_val` is not valid." + assert ceq_val.shape == ( + self.m_nonlinear_eq, + ), "The shape of `ceq_val` is not valid." + + # Compute the updates in the interpolation conditions. + fun_diff = np.zeros(self.npt) + cub_diff = np.zeros(self.cub_val.shape) + ceq_diff = np.zeros(self.ceq_val.shape) + fun_diff[k_new] = fun_val - self.fun(x_new) + cub_diff[k_new, :] = cub_val - self.cub(x_new) + ceq_diff[k_new, :] = ceq_val - self.ceq(x_new) + + # Update the function values. + self.fun_val[k_new] = fun_val + self.cub_val[k_new, :] = cub_val + self.ceq_val[k_new, :] = ceq_val + + # Update the interpolation set. + dir_old = np.copy(self.interpolation.xpt[:, k_new]) + self.interpolation.xpt[:, k_new] = x_new - self.interpolation.x_base + + # Update the quadratic models. 
+ ill_conditioned = self._fun.update( + self.interpolation, + k_new, + dir_old, + fun_diff, + ) + for i in range(self.m_nonlinear_ub): + ill_conditioned = ill_conditioned or self._cub[i].update( + self.interpolation, + k_new, + dir_old, + cub_diff[:, i], + ) + for i in range(self.m_nonlinear_eq): + ill_conditioned = ill_conditioned or self._ceq[i].update( + self.interpolation, + k_new, + dir_old, + ceq_diff[:, i], + ) + if self._debug: + self._check_interpolation_conditions() + return ill_conditioned + + def determinants(self, x_new, k_new=None): + """ + Compute the normalized determinants of the new interpolation systems. + + Parameters + ---------- + x_new : `numpy.ndarray`, shape (n,) + New interpolation point. Its value is interpreted as relative to + the origin, not the base point. + k_new : int, optional + Index of the updated interpolation point. If `k_new` is not + specified, all the possible determinants are computed. + + Returns + ------- + {float, `numpy.ndarray`, shape (npt,)} + Determinant(s) of the new interpolation system. + + Raises + ------ + `numpy.linalg.LinAlgError` + If the interpolation system is ill-defined. + + Notes + ----- + The determinants are normalized by the determinant of the current + interpolation system. For stability reasons, the calculations are done + using the formula (2.12) in [1]_. + + References + ---------- + .. [1] M. J. D. Powell. On updating the inverse of a KKT matrix. + Technical Report DAMTP 2004/NA01, Department of Applied Mathematics + and Theoretical Physics, University of Cambridge, Cambridge, UK, + 2004. + """ + if self._debug: + assert x_new.shape == (self.n,), \ + "The shape of `x_new` is not valid." + assert ( + k_new is None or 0 <= k_new < self.npt + ), "The index `k_new` is not valid." + + # Compute the values independent of k_new. 
        shift = x_new - self.interpolation.x_base
        # Column of the KKT matrix corresponding to the tentative point,
        # expressed relative to the base point.
        new_col = np.empty((self.npt + self.n + 1, 1))
        new_col[: self.npt, 0] = (
            0.5 * (self.interpolation.xpt.T @ shift) ** 2.0)
        new_col[self.npt, 0] = 1.0
        new_col[self.npt + 1:, 0] = shift
        inv_new_col = Quadratic.solve_systems(self.interpolation, new_col)[0]
        beta = 0.5 * (shift @ shift) ** 2.0 - new_col[:, 0] @ inv_new_col[:, 0]

        # Compute the values that depend on k (formula (2.12) in Powell,
        # "On updating the inverse of a KKT matrix", 2004).
        if k_new is None:
            coord_vec = np.eye(self.npt + self.n + 1, self.npt)
            alpha = np.diag(
                Quadratic.solve_systems(
                    self.interpolation,
                    coord_vec,
                )[0]
            )
            tau = inv_new_col[: self.npt, 0]
        else:
            coord_vec = np.eye(self.npt + self.n + 1, 1, -k_new)
            alpha = Quadratic.solve_systems(
                self.interpolation,
                coord_vec,
            )[
                0
            ][k_new, 0]
            tau = inv_new_col[k_new, 0]
        return alpha * beta + tau**2.0

    def shift_x_base(self, new_x_base, options):
        """
        Shift the base point without changing the interpolation set.

        Parameters
        ----------
        new_x_base : `numpy.ndarray`, shape (n,)
            New base point.
        options : dict
            Options of the solver.
        """
        if self._debug:
            assert new_x_base.shape == (
                self.n,
            ), "The shape of `new_x_base` is not valid."

        # Update the models before moving the base point, so that they are
        # re-expressed about the new base.
        self._fun.shift_x_base(self.interpolation, new_x_base)
        for model in self._cub:
            model.shift_x_base(self.interpolation, new_x_base)
        for model in self._ceq:
            model.shift_x_base(self.interpolation, new_x_base)

        # Update the base point and the interpolation points. The absolute
        # positions of the interpolation points are unchanged.
        shift = new_x_base - self.interpolation.x_base
        self.interpolation.x_base += shift
        self.interpolation.xpt -= shift[:, np.newaxis]
        if options[Options.DEBUG]:
            self._check_interpolation_conditions()

    def _get_cub(self, mask=None):
        """
        Get the quadratic models of the nonlinear inequality constraints.

        Parameters
        ----------
        mask : `numpy.ndarray`, shape (m_nonlinear_ub,), optional
            Mask of the quadratic models to return.

        Returns
        -------
        `numpy.ndarray`
            Quadratic models of the nonlinear inequality constraints.
        """
        return self._cub if mask is None else self._cub[mask]

    def _get_ceq(self, mask=None):
        """
        Get the quadratic models of the nonlinear equality constraints.

        Parameters
        ----------
        mask : `numpy.ndarray`, shape (m_nonlinear_eq,), optional
            Mask of the quadratic models to return.

        Returns
        -------
        `numpy.ndarray`
            Quadratic models of the nonlinear equality constraints.
        """
        return self._ceq if mask is None else self._ceq[mask]

    def _check_interpolation_conditions(self):
        """
        Check the interpolation conditions of all quadratic models.

        A `RuntimeWarning` is issued for each group of models (objective,
        inequality, equality) whose worst interpolation error exceeds a
        tolerance scaled by the magnitude of the interpolated values.
        """
        error_fun = 0.0
        error_cub = 0.0
        error_ceq = 0.0
        # Track the worst absolute interpolation error over all points.
        for k in range(self.npt):
            error_fun = np.max(
                [
                    error_fun,
                    np.abs(
                        self.fun(self.interpolation.point(k)) - self.fun_val[k]
                    ),
                ]
            )
            error_cub = np.max(
                np.abs(
                    self.cub(self.interpolation.point(k)) - self.cub_val[k, :]
                ),
                initial=error_cub,
            )
            error_ceq = np.max(
                np.abs(
                    self.ceq(self.interpolation.point(k)) - self.ceq_val[k, :]
                ),
                initial=error_ceq,
            )
        tol = 10.0 * np.sqrt(EPS) * max(self.n, self.npt)
        if error_fun > tol * np.max(np.abs(self.fun_val), initial=1.0):
            warnings.warn(
                "The interpolation conditions for the objective function are "
                "not satisfied.",
                RuntimeWarning,
                2,
            )
        if error_cub > tol * np.max(np.abs(self.cub_val), initial=1.0):
            warnings.warn(
                "The interpolation conditions for the inequality constraint "
                "function are not satisfied.",
                RuntimeWarning,
                2,
            )
        if error_ceq > tol * np.max(np.abs(self.ceq_val), initial=1.0):
            warnings.warn(
                "The interpolation conditions for the equality constraint "
                "function are not satisfied.",
                RuntimeWarning,
                2,
            )
class ObjectiveFunction:
    """
    Real-valued objective function.

    Wraps a user-supplied callable (or ``None``, for feasibility problems),
    counting the number of evaluations and optionally echoing each one.
    """

    def __init__(self, fun, verbose, debug, *args):
        """
        Initialize the objective function.

        Parameters
        ----------
        fun : {callable, None}
            Function to evaluate, or None.

                ``fun(x, *args) -> float``

            where ``x`` is an array with shape (n,) and `args` is a tuple.
        verbose : bool
            Whether to print the function evaluations.
        debug : bool
            Whether to make debugging tests during the execution.
        *args : tuple
            Additional arguments to be passed to the function.
        """
        if debug:
            assert fun is None or callable(fun)
            assert isinstance(verbose, bool)
            assert isinstance(debug, bool)

        self._fun = fun
        self._verbose = verbose
        self._args = args
        self._n_eval = 0

    def __call__(self, x):
        """
        Evaluate the objective function.

        Parameters
        ----------
        x : array_like, shape (n,)
            Point at which the objective function is evaluated.

        Returns
        -------
        float
            Function value at `x`.
        """
        point = np.array(x, dtype=float)
        # A missing objective (feasibility problem) evaluates to zero; the
        # evaluation is still counted.
        value = (
            0.0
            if self._fun is None
            else float(np.squeeze(self._fun(point, *self._args)))
        )
        self._n_eval += 1
        if self._verbose:
            with np.printoptions(**PRINT_OPTIONS):
                print(f"{self.name}({point}) = {value}")
        return value

    @property
    def n_eval(self):
        """
        Number of function evaluations.

        Returns
        -------
        int
            Number of function evaluations.
        """
        return self._n_eval

    @property
    def name(self):
        """
        Name of the objective function.

        Returns
        -------
        str
            Name of the objective function, or an empty string if no function
            was supplied. Callables without a ``__name__`` attribute are
            reported as ``"fun"``.
        """
        if self._fun is None:
            return ""
        return getattr(self._fun, "__name__", "fun")

        Returns
        -------
        `numpy.ndarray`, shape (n,)
            Projection of `x` onto the feasible set.
        """
        # When the bounds are inconsistent (is_feasible False), the point is
        # returned unchanged rather than clipped.
        return np.clip(x, self.xl, self.xu) if self.is_feasible else x


class LinearConstraints:
    """
    Linear constraints ``a_ub @ x <= b_ub`` and ``a_eq @ x == b_eq``.
    """

    def __init__(self, constraints, n, debug):
        """
        Initialize the linear constraints.

        Parameters
        ----------
        constraints : list of LinearConstraint
            Linear constraints.
        n : int
            Number of variables.
        debug : bool
            Whether to make debugging tests during the execution.
        """
        if debug:
            assert isinstance(constraints, list)
            for constraint in constraints:
                assert isinstance(constraint, LinearConstraint)
            assert isinstance(debug, bool)

        self._a_ub = np.empty((0, n))
        self._b_ub = np.empty(0)
        self._a_eq = np.empty((0, n))
        self._b_eq = np.empty(0)
        for constraint in constraints:
            # Rows whose lower and upper bounds coincide (up to a tolerance)
            # are treated as equalities; the remaining two-sided inequalities
            # are split into the pair a @ x <= ub and -a @ x <= -lb.
            is_equality = np.abs(
                constraint.ub - constraint.lb
            ) <= get_arrays_tol(constraint.lb, constraint.ub)
            if np.any(is_equality):
                self._a_eq = np.vstack((self.a_eq, constraint.A[is_equality]))
                self._b_eq = np.concatenate(
                    (
                        self.b_eq,
                        # The midpoint of lb and ub is used as the target.
                        0.5
                        * (
                            constraint.lb[is_equality]
                            + constraint.ub[is_equality]
                        ),
                    )
                )
            if not np.all(is_equality):
                self._a_ub = np.vstack(
                    (
                        self.a_ub,
                        constraint.A[~is_equality],
                        -constraint.A[~is_equality],
                    )
                )
                self._b_ub = np.concatenate(
                    (
                        self.b_ub,
                        constraint.ub[~is_equality],
                        -constraint.lb[~is_equality],
                    )
                )

        # Remove the ill-defined constraints.
        self.a_ub[np.isnan(self.a_ub)] = 0.0
        self.a_eq[np.isnan(self.a_eq)] = 0.0
        undef_ub = np.isnan(self.b_ub) | np.isinf(self.b_ub)
        undef_eq = np.isnan(self.b_eq)
        self._a_ub = self.a_ub[~undef_ub, :]
        self._b_ub = self.b_ub[~undef_ub]
        self._a_eq = self.a_eq[~undef_eq, :]
        self._b_eq = self.b_eq[~undef_eq]
        # Prepared constraints, used for fast violation evaluations.
        # Constraints with an empty matrix are skipped.
        self.pcs = [
            PreparedConstraint(c, np.ones(n)) for c in constraints if c.A.size
        ]

    @property
    def a_ub(self):
        """
        Left-hand side matrix of the linear inequality constraints.

        Returns
        -------
        `numpy.ndarray`, shape (m_ub, n)
            Left-hand side matrix of the linear inequality constraints.
        """
        return self._a_ub

    @property
    def b_ub(self):
        """
        Right-hand side vector of the linear inequality constraints.

        Returns
        -------
        `numpy.ndarray`, shape (m_ub,)
            Right-hand side vector of the linear inequality constraints.
        """
        return self._b_ub

    @property
    def a_eq(self):
        """
        Left-hand side matrix of the linear equality constraints.

        Returns
        -------
        `numpy.ndarray`, shape (m_eq, n)
            Left-hand side matrix of the linear equality constraints.
        """
        return self._a_eq

    @property
    def b_eq(self):
        """
        Right-hand side vector of the linear equality constraints.

        Returns
        -------
        `numpy.ndarray`, shape (m_eq,)
            Right-hand side vector of the linear equality constraints.
        """
        return self._b_eq

    @property
    def m_ub(self):
        """
        Number of linear inequality constraints.

        Returns
        -------
        int
            Number of linear inequality constraints.
        """
        return self.b_ub.size

    @property
    def m_eq(self):
        """
        Number of linear equality constraints.

        Returns
        -------
        int
            Number of linear equality constraints.
        """
        return self.b_eq.size

    def maxcv(self, x):
        """
        Evaluate the maximum constraint violation.

        Parameters
        ----------
        x : array_like, shape (n,)
            Point at which the maximum constraint violation is evaluated.

        Returns
        -------
        float
            Maximum constraint violation at `x`.
+ """ + return np.max(self.violation(x), initial=0.0) + + def violation(self, x): + if len(self.pcs): + return np.concatenate([pc.violation(x) for pc in self.pcs]) + return np.array([]) + + +class NonlinearConstraints: + """ + Nonlinear constraints ``c_ub(x) <= 0`` and ``c_eq(x) == b_eq``. + """ + + def __init__(self, constraints, verbose, debug): + """ + Initialize the nonlinear constraints. + + Parameters + ---------- + constraints : list + Nonlinear constraints. + verbose : bool + Whether to print the function evaluations. + debug : bool + Whether to make debugging tests during the execution. + """ + if debug: + assert isinstance(constraints, list) + for constraint in constraints: + assert isinstance(constraint, NonlinearConstraint) + assert isinstance(verbose, bool) + assert isinstance(debug, bool) + + self._constraints = constraints + self.pcs = [] + self._verbose = verbose + + # map of indexes for equality and inequality constraints + self._map_ub = None + self._map_eq = None + self._m_ub = self._m_eq = None + + def __call__(self, x): + """ + Calculates the residual (slack) for the constraints. + + Parameters + ---------- + x : array_like, shape (n,) + Point at which the constraints are evaluated. + + Returns + ------- + `numpy.ndarray`, shape (m_nonlinear_ub,) + Nonlinear inequality constraint slack values. + `numpy.ndarray`, shape (m_nonlinear_eq,) + Nonlinear equality constraint slack values. 
+ """ + if not len(self._constraints): + self._m_eq = self._m_ub = 0 + return np.array([]), np.array([]) + + x = np.array(x, dtype=float) + # first time around the constraints haven't been prepared + if not len(self.pcs): + self._map_ub = [] + self._map_eq = [] + self._m_eq = 0 + self._m_ub = 0 + + for constraint in self._constraints: + if not callable(constraint.jac): + # having a callable constraint function prevents + # constraint.fun from being evaluated when preparing + # constraint + c = copy.copy(constraint) + c.jac = lambda x0: x0 + c.hess = lambda x0, v: 0.0 + pc = PreparedConstraint(c, x) + else: + pc = PreparedConstraint(constraint, x) + # we're going to be using the same x value again immediately + # after this initialisation + pc.fun.f_updated = True + + self.pcs.append(pc) + idx = np.arange(pc.fun.m) + + # figure out equality and inequality maps + lb, ub = pc.bounds[0], pc.bounds[1] + arr_tol = get_arrays_tol(lb, ub) + is_equality = np.abs(ub - lb) <= arr_tol + self._map_eq.append(idx[is_equality]) + self._map_ub.append(idx[~is_equality]) + + # these values will be corrected to their proper values later + self._m_eq += np.count_nonzero(is_equality) + self._m_ub += np.count_nonzero(~is_equality) + + c_ub = [] + c_eq = [] + for i, pc in enumerate(self.pcs): + val = pc.fun.fun(x) + if self._verbose: + with np.printoptions(**PRINT_OPTIONS): + with suppress(AttributeError): + fun_name = self._constraints[i].fun.__name__ + print(f"{fun_name}({x}) = {val}") + + # separate violations into c_eq and c_ub + eq_idx = self._map_eq[i] + ub_idx = self._map_ub[i] + + ub_val = val[ub_idx] + if len(ub_idx): + xl = pc.bounds[0][ub_idx] + xu = pc.bounds[1][ub_idx] + + # calculate slack within lower bound + finite_xl = xl > -np.inf + _v = xl[finite_xl] - ub_val[finite_xl] + c_ub.append(_v) + + # calculate slack within lower bound + finite_xu = xu < np.inf + _v = ub_val[finite_xu] - xu[finite_xu] + c_ub.append(_v) + + # equality constraints taken from midpoint between lb 
and ub + eq_val = val[eq_idx] + if len(eq_idx): + midpoint = 0.5 * (pc.bounds[1][eq_idx] + pc.bounds[0][eq_idx]) + eq_val -= midpoint + c_eq.append(eq_val) + + if self._m_eq: + c_eq = np.concatenate(c_eq) + else: + c_eq = np.array([]) + + if self._m_ub: + c_ub = np.concatenate(c_ub) + else: + c_ub = np.array([]) + + self._m_ub = c_ub.size + self._m_eq = c_eq.size + + return c_ub, c_eq + + @property + def m_ub(self): + """ + Number of nonlinear inequality constraints. + + Returns + ------- + int + Number of nonlinear inequality constraints. + + Raises + ------ + ValueError + If the number of nonlinear inequality constraints is unknown. + """ + if self._m_ub is None: + raise ValueError( + "The number of nonlinear inequality constraints is unknown." + ) + else: + return self._m_ub + + @property + def m_eq(self): + """ + Number of nonlinear equality constraints. + + Returns + ------- + int + Number of nonlinear equality constraints. + + Raises + ------ + ValueError + If the number of nonlinear equality constraints is unknown. + """ + if self._m_eq is None: + raise ValueError( + "The number of nonlinear equality constraints is unknown." + ) + else: + return self._m_eq + + @property + def n_eval(self): + """ + Number of function evaluations. + + Returns + ------- + int + Number of function evaluations. + """ + if len(self.pcs): + return self.pcs[0].fun.nfev + else: + return 0 + + def maxcv(self, x, cub_val=None, ceq_val=None): + """ + Evaluate the maximum constraint violation. + + Parameters + ---------- + x : array_like, shape (n,) + Point at which the maximum constraint violation is evaluated. + cub_val : array_like, shape (m_nonlinear_ub,), optional + Values of the nonlinear inequality constraints. If not provided, + the nonlinear inequality constraints are evaluated at `x`. + ceq_val : array_like, shape (m_nonlinear_eq,), optional + Values of the nonlinear equality constraints. If not provided, + the nonlinear equality constraints are evaluated at `x`. 
+ + Returns + ------- + float + Maximum constraint violation at `x`. + """ + return np.max( + self.violation(x, cub_val=cub_val, ceq_val=ceq_val), initial=0.0 + ) + + def violation(self, x, cub_val=None, ceq_val=None): + return np.concatenate([pc.violation(x) for pc in self.pcs]) + + +class Problem: + """ + Optimization problem. + """ + + def __init__( + self, + obj, + x0, + bounds, + linear, + nonlinear, + callback, + feasibility_tol, + scale, + store_history, + history_size, + filter_size, + debug, + ): + """ + Initialize the nonlinear problem. + + The problem is preprocessed to remove all the variables that are fixed + by the bound constraints. + + Parameters + ---------- + obj : ObjectiveFunction + Objective function. + x0 : array_like, shape (n,) + Initial guess. + bounds : BoundConstraints + Bound constraints. + linear : LinearConstraints + Linear constraints. + nonlinear : NonlinearConstraints + Nonlinear constraints. + callback : {callable, None} + Callback function. + feasibility_tol : float + Tolerance on the constraint violation. + scale : bool + Whether to scale the problem according to the bounds. + store_history : bool + Whether to store the function evaluations. + history_size : int + Maximum number of function evaluations to store. + filter_size : int + Maximum number of points in the filter. + debug : bool + Whether to make debugging tests during the execution. 
+ """ + if debug: + assert isinstance(obj, ObjectiveFunction) + assert isinstance(bounds, BoundConstraints) + assert isinstance(linear, LinearConstraints) + assert isinstance(nonlinear, NonlinearConstraints) + assert isinstance(feasibility_tol, float) + assert isinstance(scale, bool) + assert isinstance(store_history, bool) + assert isinstance(history_size, int) + if store_history: + assert history_size > 0 + assert isinstance(filter_size, int) + assert filter_size > 0 + assert isinstance(debug, bool) + + self._obj = obj + self._linear = linear + self._nonlinear = nonlinear + if callback is not None: + if not callable(callback): + raise TypeError("The callback must be a callable function.") + self._callback = callback + + # Check the consistency of the problem. + x0 = exact_1d_array(x0, "The initial guess must be a vector.") + n = x0.size + if bounds.xl.size != n: + raise ValueError(f"The bounds must have {n} elements.") + if linear.a_ub.shape[1] != n: + raise ValueError( + f"The left-hand side matrices of the linear constraints must " + f"have {n} columns." + ) + + # Check which variables are fixed. + tol = get_arrays_tol(bounds.xl, bounds.xu) + self._fixed_idx = (bounds.xl <= bounds.xu) & ( + np.abs(bounds.xl - bounds.xu) < tol + ) + self._fixed_val = 0.5 * ( + bounds.xl[self._fixed_idx] + bounds.xu[self._fixed_idx] + ) + self._fixed_val = np.clip( + self._fixed_val, + bounds.xl[self._fixed_idx], + bounds.xu[self._fixed_idx], + ) + + # Set the bound constraints. + self._orig_bounds = bounds + self._bounds = BoundConstraints( + Bounds(bounds.xl[~self._fixed_idx], bounds.xu[~self._fixed_idx]) + ) + + # Set the initial guess. + self._x0 = self._bounds.project(x0[~self._fixed_idx]) + + # Set the linear constraints. 
+ b_eq = linear.b_eq - linear.a_eq[:, self._fixed_idx] @ self._fixed_val + self._linear = LinearConstraints( + [ + LinearConstraint( + linear.a_ub[:, ~self._fixed_idx], + -np.inf, + linear.b_ub + - linear.a_ub[:, self._fixed_idx] @ self._fixed_val, + ), + LinearConstraint(linear.a_eq[:, ~self._fixed_idx], b_eq, b_eq), + ], + self.n, + debug, + ) + + # Scale the problem if necessary. + scale = ( + scale + and self._bounds.is_feasible + and np.all(np.isfinite(self._bounds.xl)) + and np.all(np.isfinite(self._bounds.xu)) + ) + if scale: + self._scaling_factor = 0.5 * (self._bounds.xu - self._bounds.xl) + self._scaling_shift = 0.5 * (self._bounds.xu + self._bounds.xl) + self._bounds = BoundConstraints( + Bounds(-np.ones(self.n), np.ones(self.n)) + ) + b_eq = self._linear.b_eq - self._linear.a_eq @ self._scaling_shift + self._linear = LinearConstraints( + [ + LinearConstraint( + self._linear.a_ub @ np.diag(self._scaling_factor), + -np.inf, + self._linear.b_ub + - self._linear.a_ub @ self._scaling_shift, + ), + LinearConstraint( + self._linear.a_eq @ np.diag(self._scaling_factor), + b_eq, + b_eq, + ), + ], + self.n, + debug, + ) + self._x0 = (self._x0 - self._scaling_shift) / self._scaling_factor + else: + self._scaling_factor = np.ones(self.n) + self._scaling_shift = np.zeros(self.n) + + # Set the initial filter. + self._feasibility_tol = feasibility_tol + self._filter_size = filter_size + self._fun_filter = [] + self._maxcv_filter = [] + self._x_filter = [] + + # Set the initial history. + self._store_history = store_history + self._history_size = history_size + self._fun_history = [] + self._maxcv_history = [] + self._x_history = [] + + def __call__(self, x, penalty=0.0): + """ + Evaluate the objective and nonlinear constraint functions. + + Parameters + ---------- + x : array_like, shape (n,) + Point at which the functions are evaluated. + penalty : float, optional + Penalty parameter used to select the point in the filter to forward + to the callback function. 
+ + Returns + ------- + float + Objective function value. + `numpy.ndarray`, shape (m_nonlinear_ub,) + Nonlinear inequality constraint function values. + `numpy.ndarray`, shape (m_nonlinear_eq,) + Nonlinear equality constraint function values. + + Raises + ------ + `cobyqa.utils.CallbackSuccess` + If the callback function raises a ``StopIteration``. + """ + # Evaluate the objective and nonlinear constraint functions. + x = np.asarray(x, dtype=float) + x_full = self.build_x(x) + fun_val = self._obj(x_full) + cub_val, ceq_val = self._nonlinear(x_full) + maxcv_val = self.maxcv(x, cub_val, ceq_val) + if self._store_history: + self._fun_history.append(fun_val) + self._maxcv_history.append(maxcv_val) + self._x_history.append(x) + if len(self._fun_history) > self._history_size: + self._fun_history.pop(0) + self._maxcv_history.pop(0) + self._x_history.pop(0) + + # Add the point to the filter if it is not dominated by any point. + if np.isnan(fun_val) and np.isnan(maxcv_val): + include_point = len(self._fun_filter) == 0 + elif np.isnan(fun_val): + include_point = all( + np.isnan(fun_filter) + and maxcv_val < maxcv_filter + or np.isnan(maxcv_filter) + for fun_filter, maxcv_filter in zip( + self._fun_filter, + self._maxcv_filter, + ) + ) + elif np.isnan(maxcv_val): + include_point = all( + np.isnan(maxcv_filter) + and fun_val < fun_filter + or np.isnan(fun_filter) + for fun_filter, maxcv_filter in zip( + self._fun_filter, + self._maxcv_filter, + ) + ) + else: + include_point = all( + fun_val < fun_filter or maxcv_val < maxcv_filter + for fun_filter, maxcv_filter in zip( + self._fun_filter, + self._maxcv_filter, + ) + ) + if include_point: + self._fun_filter.append(fun_val) + self._maxcv_filter.append(maxcv_val) + self._x_filter.append(x) + + # Remove the points in the filter that are dominated by the new + # point. We must iterate in reverse order to avoid problems when + # removing elements from the list. 
+ for k in range(len(self._fun_filter) - 2, -1, -1): + if np.isnan(fun_val): + remove_point = np.isnan(self._fun_filter[k]) + elif np.isnan(maxcv_val): + remove_point = np.isnan(self._maxcv_filter[k]) + else: + remove_point = ( + np.isnan(self._fun_filter[k]) + or np.isnan(self._maxcv_filter[k]) + or fun_val <= self._fun_filter[k] + and maxcv_val <= self._maxcv_filter[k] + ) + if remove_point: + self._fun_filter.pop(k) + self._maxcv_filter.pop(k) + self._x_filter.pop(k) + + # Keep only the most recent points in the filter. + if len(self._fun_filter) > self._filter_size: + self._fun_filter.pop(0) + self._maxcv_filter.pop(0) + self._x_filter.pop(0) + + # Evaluate the callback function after updating the filter to ensure + # that the current point can be returned by the method. + if self._callback is not None: + sig = signature(self._callback) + try: + x_best, fun_best, _ = self.best_eval(penalty) + x_best = self.build_x(x_best) + if set(sig.parameters) == {"intermediate_result"}: + intermediate_result = OptimizeResult( + x=x_best, + fun=fun_best, + # maxcv=maxcv_best, + ) + self._callback(intermediate_result=intermediate_result) + else: + self._callback(x_best) + except StopIteration as exc: + raise CallbackSuccess from exc + + # Apply the extreme barriers and return. + if np.isnan(fun_val): + fun_val = BARRIER + cub_val[np.isnan(cub_val)] = BARRIER + ceq_val[np.isnan(ceq_val)] = BARRIER + fun_val = max(min(fun_val, BARRIER), -BARRIER) + cub_val = np.maximum(np.minimum(cub_val, BARRIER), -BARRIER) + ceq_val = np.maximum(np.minimum(ceq_val, BARRIER), -BARRIER) + return fun_val, cub_val, ceq_val + + @property + def n(self): + """ + Number of variables. + + Returns + ------- + int + Number of variables. + """ + return self.x0.size + + @property + def n_orig(self): + """ + Number of variables in the original problem (with fixed variables). + + Returns + ------- + int + Number of variables in the original problem (with fixed variables). 
+ """ + return self._fixed_idx.size + + @property + def x0(self): + """ + Initial guess. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Initial guess. + """ + return self._x0 + + @property + def n_eval(self): + """ + Number of function evaluations. + + Returns + ------- + int + Number of function evaluations. + """ + return self._obj.n_eval + + @property + def fun_name(self): + """ + Name of the objective function. + + Returns + ------- + str + Name of the objective function. + """ + return self._obj.name + + @property + def bounds(self): + """ + Bound constraints. + + Returns + ------- + BoundConstraints + Bound constraints. + """ + return self._bounds + + @property + def linear(self): + """ + Linear constraints. + + Returns + ------- + LinearConstraints + Linear constraints. + """ + return self._linear + + @property + def m_bounds(self): + """ + Number of bound constraints. + + Returns + ------- + int + Number of bound constraints. + """ + return self.bounds.m + + @property + def m_linear_ub(self): + """ + Number of linear inequality constraints. + + Returns + ------- + int + Number of linear inequality constraints. + """ + return self.linear.m_ub + + @property + def m_linear_eq(self): + """ + Number of linear equality constraints. + + Returns + ------- + int + Number of linear equality constraints. + """ + return self.linear.m_eq + + @property + def m_nonlinear_ub(self): + """ + Number of nonlinear inequality constraints. + + Returns + ------- + int + Number of nonlinear inequality constraints. + + Raises + ------ + ValueError + If the number of nonlinear inequality constraints is not known. + """ + return self._nonlinear.m_ub + + @property + def m_nonlinear_eq(self): + """ + Number of nonlinear equality constraints. + + Returns + ------- + int + Number of nonlinear equality constraints. + + Raises + ------ + ValueError + If the number of nonlinear equality constraints is not known. 
+ """ + return self._nonlinear.m_eq + + @property + def fun_history(self): + """ + History of objective function evaluations. + + Returns + ------- + `numpy.ndarray`, shape (n_eval,) + History of objective function evaluations. + """ + return np.array(self._fun_history, dtype=float) + + @property + def maxcv_history(self): + """ + History of maximum constraint violations. + + Returns + ------- + `numpy.ndarray`, shape (n_eval,) + History of maximum constraint violations. + """ + return np.array(self._maxcv_history, dtype=float) + + @property + def type(self): + """ + Type of the problem. + + The problem can be either 'unconstrained', 'bound-constrained', + 'linearly constrained', or 'nonlinearly constrained'. + + Returns + ------- + str + Type of the problem. + """ + try: + if self.m_nonlinear_ub > 0 or self.m_nonlinear_eq > 0: + return "nonlinearly constrained" + elif self.m_linear_ub > 0 or self.m_linear_eq > 0: + return "linearly constrained" + elif self.m_bounds > 0: + return "bound-constrained" + else: + return "unconstrained" + except ValueError: + # The number of nonlinear constraints is not known. It may be zero + # if the user provided a nonlinear inequality and/or equality + # constraint function that returns an empty array. However, as this + # is not known before the first call to the function, we assume + # that the problem is nonlinearly constrained. + return "nonlinearly constrained" + + @property + def is_feasibility(self): + """ + Whether the problem is a feasibility problem. + + Returns + ------- + bool + Whether the problem is a feasibility problem. + """ + return self.fun_name == "" + + def build_x(self, x): + """ + Build the full vector of variables from the reduced vector. + + Parameters + ---------- + x : array_like, shape (n,) + Reduced vector of variables. + + Returns + ------- + `numpy.ndarray`, shape (n_orig,) + Full vector of variables. 
+ """ + x_full = np.empty(self.n_orig) + x_full[self._fixed_idx] = self._fixed_val + x_full[~self._fixed_idx] = (x * self._scaling_factor + + self._scaling_shift) + return self._orig_bounds.project(x_full) + + def maxcv(self, x, cub_val=None, ceq_val=None): + """ + Evaluate the maximum constraint violation. + + Parameters + ---------- + x : array_like, shape (n,) + Point at which the maximum constraint violation is evaluated. + cub_val : array_like, shape (m_nonlinear_ub,), optional + Values of the nonlinear inequality constraints. If not provided, + the nonlinear inequality constraints are evaluated at `x`. + ceq_val : array_like, shape (m_nonlinear_eq,), optional + Values of the nonlinear equality constraints. If not provided, + the nonlinear equality constraints are evaluated at `x`. + + Returns + ------- + float + Maximum constraint violation at `x`. + """ + violation = self.violation(x, cub_val=cub_val, ceq_val=ceq_val) + if np.count_nonzero(violation): + return np.max(violation, initial=0.0) + else: + return 0.0 + + def violation(self, x, cub_val=None, ceq_val=None): + violation = [] + if not self.bounds.is_feasible: + b = self.bounds.violation(x) + violation.append(b) + + if len(self.linear.pcs): + lc = self.linear.violation(x) + violation.append(lc) + if len(self._nonlinear.pcs): + nlc = self._nonlinear.violation(x, cub_val, ceq_val) + violation.append(nlc) + + if len(violation): + return np.concatenate(violation) + + def best_eval(self, penalty): + """ + Return the best point in the filter and the corresponding objective and + nonlinear constraint function evaluations. + + Parameters + ---------- + penalty : float + Penalty parameter + + Returns + ------- + `numpy.ndarray`, shape (n,) + Best point. + float + Corresponding objective function value. + float + Corresponding maximum constraint violation. 
+ """ + # If the filter is empty, i.e., if no function evaluation has been + # performed, we evaluate the objective and nonlinear constraint + # functions at the initial guess. + if len(self._fun_filter) == 0: + self(self.x0) + + # Find the best point in the filter. + fun_filter = np.array(self._fun_filter) + maxcv_filter = np.array(self._maxcv_filter) + x_filter = np.array(self._x_filter) + finite_idx = np.isfinite(maxcv_filter) + if np.any(finite_idx): + # At least one point has a finite maximum constraint violation. + feasible_idx = maxcv_filter <= self._feasibility_tol + if np.any(feasible_idx) and not np.all( + np.isnan(fun_filter[feasible_idx]) + ): + # At least one point is feasible and has a well-defined + # objective function value. We select the point with the least + # objective function value. If there is a tie, we select the + # point with the least maximum constraint violation. If there + # is still a tie, we select the most recent point. + fun_min_idx = feasible_idx & ( + fun_filter <= np.nanmin(fun_filter[feasible_idx]) + ) + if np.count_nonzero(fun_min_idx) > 1: + fun_min_idx &= maxcv_filter <= np.min( + maxcv_filter[fun_min_idx] + ) + i = np.flatnonzero(fun_min_idx)[-1] + elif np.any(feasible_idx): + # At least one point is feasible but no feasible point has a + # well-defined objective function value. We select the most + # recent feasible point. + i = np.flatnonzero(feasible_idx)[-1] + else: + # No point is feasible. We first compute the merit function + # value for each point. + merit_filter = np.full_like(fun_filter, np.nan) + merit_filter[finite_idx] = ( + fun_filter[finite_idx] + penalty * maxcv_filter[finite_idx] + ) + if np.all(np.isnan(merit_filter)): + # No point has a well-defined merit function value. In + # other words, among the points with a well-defined maximum + # constraint violation, none has a well-defined objective + # function value. We select the point with the least + # maximum constraint violation. 
If there is a tie, we + # select the most recent point. + min_maxcv_idx = maxcv_filter <= np.nanmin(maxcv_filter) + i = np.flatnonzero(min_maxcv_idx)[-1] + else: + # At least one point has a well-defined merit function + # value. We select the point with the least merit function + # value. If there is a tie, we select the point with the + # least maximum constraint violation. If there is still a + # tie, we select the point with the least objective + # function value. If there is still a tie, we select the + # most recent point. + merit_min_idx = merit_filter <= np.nanmin(merit_filter) + if np.count_nonzero(merit_min_idx) > 1: + merit_min_idx &= maxcv_filter <= np.min( + maxcv_filter[merit_min_idx] + ) + + if np.count_nonzero(merit_min_idx) > 1: + merit_min_idx &= fun_filter <= np.min( + fun_filter[merit_min_idx] + ) + i = np.flatnonzero(merit_min_idx)[-1] + elif not np.all(np.isnan(fun_filter)): + # No maximum constraint violation is well-defined but at least one + # point has a well-defined objective function value. We select the + # point with the least objective function value. If there is a tie, + # we select the most recent point. + fun_min_idx = fun_filter <= np.nanmin(fun_filter) + i = np.flatnonzero(fun_min_idx)[-1] + else: + # No point has a well-defined maximum constraint violation or + # objective function value. We select the most recent point. + i = len(fun_filter) - 1 + return ( + self.bounds.project(x_filter[i, :]), + fun_filter[i], + maxcv_filter[i], + ) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/settings.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/settings.py new file mode 100644 index 0000000000000000000000000000000000000000..6394822826e094a803a485556a298e342bf260ac --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/settings.py @@ -0,0 +1,132 @@ +import sys +from enum import Enum + +import numpy as np + + +# Exit status. 
+class ExitStatus(Enum): + """ + Exit statuses. + """ + + RADIUS_SUCCESS = 0 + TARGET_SUCCESS = 1 + FIXED_SUCCESS = 2 + CALLBACK_SUCCESS = 3 + FEASIBLE_SUCCESS = 4 + MAX_EVAL_WARNING = 5 + MAX_ITER_WARNING = 6 + INFEASIBLE_ERROR = -1 + LINALG_ERROR = -2 + + +class Options(str, Enum): + """ + Options. + """ + + DEBUG = "debug" + FEASIBILITY_TOL = "feasibility_tol" + FILTER_SIZE = "filter_size" + HISTORY_SIZE = "history_size" + MAX_EVAL = "maxfev" + MAX_ITER = "maxiter" + NPT = "nb_points" + RHOBEG = "radius_init" + RHOEND = "radius_final" + SCALE = "scale" + STORE_HISTORY = "store_history" + TARGET = "target" + VERBOSE = "disp" + + +class Constants(str, Enum): + """ + Constants. + """ + + DECREASE_RADIUS_FACTOR = "decrease_radius_factor" + INCREASE_RADIUS_FACTOR = "increase_radius_factor" + INCREASE_RADIUS_THRESHOLD = "increase_radius_threshold" + DECREASE_RADIUS_THRESHOLD = "decrease_radius_threshold" + DECREASE_RESOLUTION_FACTOR = "decrease_resolution_factor" + LARGE_RESOLUTION_THRESHOLD = "large_resolution_threshold" + MODERATE_RESOLUTION_THRESHOLD = "moderate_resolution_threshold" + LOW_RATIO = "low_ratio" + HIGH_RATIO = "high_ratio" + VERY_LOW_RATIO = "very_low_ratio" + PENALTY_INCREASE_THRESHOLD = "penalty_increase_threshold" + PENALTY_INCREASE_FACTOR = "penalty_increase_factor" + SHORT_STEP_THRESHOLD = "short_step_threshold" + LOW_RADIUS_FACTOR = "low_radius_factor" + BYRD_OMOJOKUN_FACTOR = "byrd_omojokun_factor" + THRESHOLD_RATIO_CONSTRAINTS = "threshold_ratio_constraints" + LARGE_SHIFT_FACTOR = "large_shift_factor" + LARGE_GRADIENT_FACTOR = "large_gradient_factor" + RESOLUTION_FACTOR = "resolution_factor" + IMPROVE_TCG = "improve_tcg" + + +# Default options. 
+DEFAULT_OPTIONS = { + Options.DEBUG.value: False, + Options.FEASIBILITY_TOL.value: np.sqrt(np.finfo(float).eps), + Options.FILTER_SIZE.value: sys.maxsize, + Options.HISTORY_SIZE.value: sys.maxsize, + Options.MAX_EVAL.value: lambda n: 500 * n, + Options.MAX_ITER.value: lambda n: 1000 * n, + Options.NPT.value: lambda n: 2 * n + 1, + Options.RHOBEG.value: 1.0, + Options.RHOEND.value: 1e-6, + Options.SCALE.value: False, + Options.STORE_HISTORY.value: False, + Options.TARGET.value: -np.inf, + Options.VERBOSE.value: False, +} + +# Default constants. +DEFAULT_CONSTANTS = { + Constants.DECREASE_RADIUS_FACTOR.value: 0.5, + Constants.INCREASE_RADIUS_FACTOR.value: np.sqrt(2.0), + Constants.INCREASE_RADIUS_THRESHOLD.value: 2.0, + Constants.DECREASE_RADIUS_THRESHOLD.value: 1.4, + Constants.DECREASE_RESOLUTION_FACTOR.value: 0.1, + Constants.LARGE_RESOLUTION_THRESHOLD.value: 250.0, + Constants.MODERATE_RESOLUTION_THRESHOLD.value: 16.0, + Constants.LOW_RATIO.value: 0.1, + Constants.HIGH_RATIO.value: 0.7, + Constants.VERY_LOW_RATIO.value: 0.01, + Constants.PENALTY_INCREASE_THRESHOLD.value: 1.5, + Constants.PENALTY_INCREASE_FACTOR.value: 2.0, + Constants.SHORT_STEP_THRESHOLD.value: 0.5, + Constants.LOW_RADIUS_FACTOR.value: 0.1, + Constants.BYRD_OMOJOKUN_FACTOR.value: 0.8, + Constants.THRESHOLD_RATIO_CONSTRAINTS.value: 2.0, + Constants.LARGE_SHIFT_FACTOR.value: 10.0, + Constants.LARGE_GRADIENT_FACTOR.value: 10.0, + Constants.RESOLUTION_FACTOR.value: 2.0, + Constants.IMPROVE_TCG.value: True, +} + +# Printing options. +PRINT_OPTIONS = { + "threshold": 6, + "edgeitems": 2, + "linewidth": sys.maxsize, + "formatter": { + "float_kind": lambda x: np.format_float_scientific( + x, + precision=3, + unique=False, + pad_left=2, + ) + }, +} + +# Constants. 
+BARRIER = 2.0 ** min( + 100, + np.finfo(float).maxexp // 2, + -np.finfo(float).minexp // 2, +) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__init__.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..01a1ad3c6f4cb5c0c9b99d1ce35fea92e7618ff5 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__init__.py @@ -0,0 +1,14 @@ +from .geometry import cauchy_geometry, spider_geometry +from .optim import ( + tangential_byrd_omojokun, + constrained_tangential_byrd_omojokun, + normal_byrd_omojokun, +) + +__all__ = [ + "cauchy_geometry", + "spider_geometry", + "tangential_byrd_omojokun", + "constrained_tangential_byrd_omojokun", + "normal_byrd_omojokun", +] diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/__init__.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56983ed316230ecfba11c541c752014275ef4700 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/__init__.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/geometry.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/geometry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..13d7889e61a31e2de8ca58077cbe6fa76bf0c895 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/geometry.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/optim.cpython-310.pyc 
b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/optim.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..954ea2420ab137bd7c09b485090bd47dda4dbeae Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/__pycache__/optim.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/geometry.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/geometry.py new file mode 100644 index 0000000000000000000000000000000000000000..7b67fd7c813ee493b18720d1daf71324d72330b6 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/subsolvers/geometry.py @@ -0,0 +1,387 @@ +import inspect + +import numpy as np + +from ..utils import get_arrays_tol + + +TINY = np.finfo(float).tiny + + +def cauchy_geometry(const, grad, curv, xl, xu, delta, debug): + r""" + Maximize approximately the absolute value of a quadratic function subject + to bound constraints in a trust region. + + This function solves approximately + + .. math:: + + \max_{s \in \mathbb{R}^n} \quad \bigg\lvert c + g^{\mathsf{T}} s + + \frac{1}{2} s^{\mathsf{T}} H s \bigg\rvert \quad \text{s.t.} \quad + \left\{ \begin{array}{l} + l \le s \le u,\\ + \lVert s \rVert \le \Delta, + \end{array} \right. + + by maximizing the objective function along the constrained Cauchy + direction. + + Parameters + ---------- + const : float + Constant :math:`c` as shown above. + grad : `numpy.ndarray`, shape (n,) + Gradient :math:`g` as shown above. + curv : callable + Curvature of :math:`H` along any vector. + + ``curv(s) -> float`` + + returns :math:`s^{\mathsf{T}} H s`. + xl : `numpy.ndarray`, shape (n,) + Lower bounds :math:`l` as shown above. + xu : `numpy.ndarray`, shape (n,) + Upper bounds :math:`u` as shown above. + delta : float + Trust-region radius :math:`\Delta` as shown above. 
+ debug : bool + Whether to make debugging tests during the execution. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Approximate solution :math:`s`. + + Notes + ----- + This function is described as the first alternative in Section 6.5 of [1]_. + It is assumed that the origin is feasible with respect to the bound + constraints and that `delta` is finite and positive. + + References + ---------- + .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods + and Software*. PhD thesis, Department of Applied Mathematics, The Hong + Kong Polytechnic University, Hong Kong, China, 2022. URL: + https://theses.lib.polyu.edu.hk/handle/200/12294. + """ + if debug: + assert isinstance(const, float) + assert isinstance(grad, np.ndarray) and grad.ndim == 1 + assert inspect.signature(curv).bind(grad) + assert isinstance(xl, np.ndarray) and xl.shape == grad.shape + assert isinstance(xu, np.ndarray) and xu.shape == grad.shape + assert isinstance(delta, float) + assert isinstance(debug, bool) + tol = get_arrays_tol(xl, xu) + assert np.all(xl <= tol) + assert np.all(xu >= -tol) + assert np.isfinite(delta) and delta > 0.0 + xl = np.minimum(xl, 0.0) + xu = np.maximum(xu, 0.0) + + # To maximize the absolute value of a quadratic function, we maximize the + # function itself or its negative, and we choose the solution that provides + # the largest function value. + step1, q_val1 = _cauchy_geom(const, grad, curv, xl, xu, delta, debug) + step2, q_val2 = _cauchy_geom( + -const, + -grad, + lambda x: -curv(x), + xl, + xu, + delta, + debug, + ) + step = step1 if abs(q_val1) >= abs(q_val2) else step2 + + if debug: + assert np.all(xl <= step) + assert np.all(step <= xu) + assert np.linalg.norm(step) < 1.1 * delta + return step + + +def spider_geometry(const, grad, curv, xpt, xl, xu, delta, debug): + r""" + Maximize approximately the absolute value of a quadratic function subject + to bound constraints in a trust region. + + This function solves approximately + + .. 
math:: + + \max_{s \in \mathbb{R}^n} \quad \bigg\lvert c + g^{\mathsf{T}} s + + \frac{1}{2} s^{\mathsf{T}} H s \bigg\rvert \quad \text{s.t.} \quad + \left\{ \begin{array}{l} + l \le s \le u,\\ + \lVert s \rVert \le \Delta, + \end{array} \right. + + by maximizing the objective function along given straight lines. + + Parameters + ---------- + const : float + Constant :math:`c` as shown above. + grad : `numpy.ndarray`, shape (n,) + Gradient :math:`g` as shown above. + curv : callable + Curvature of :math:`H` along any vector. + + ``curv(s) -> float`` + + returns :math:`s^{\mathsf{T}} H s`. + xpt : `numpy.ndarray`, shape (n, npt) + Points defining the straight lines. The straight lines considered are + the ones passing through the origin and the points in `xpt`. + xl : `numpy.ndarray`, shape (n,) + Lower bounds :math:`l` as shown above. + xu : `numpy.ndarray`, shape (n,) + Upper bounds :math:`u` as shown above. + delta : float + Trust-region radius :math:`\Delta` as shown above. + debug : bool + Whether to make debugging tests during the execution. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Approximate solution :math:`s`. + + Notes + ----- + This function is described as the second alternative in Section 6.5 of + [1]_. It is assumed that the origin is feasible with respect to the bound + constraints and that `delta` is finite and positive. + + References + ---------- + .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods + and Software*. PhD thesis, Department of Applied Mathematics, The Hong + Kong Polytechnic University, Hong Kong, China, 2022. URL: + https://theses.lib.polyu.edu.hk/handle/200/12294. 
+ """ + if debug: + assert isinstance(const, float) + assert isinstance(grad, np.ndarray) and grad.ndim == 1 + assert inspect.signature(curv).bind(grad) + assert ( + isinstance(xpt, np.ndarray) + and xpt.ndim == 2 + and xpt.shape[0] == grad.size + ) + assert isinstance(xl, np.ndarray) and xl.shape == grad.shape + assert isinstance(xu, np.ndarray) and xu.shape == grad.shape + assert isinstance(delta, float) + assert isinstance(debug, bool) + tol = get_arrays_tol(xl, xu) + assert np.all(xl <= tol) + assert np.all(xu >= -tol) + assert np.isfinite(delta) and delta > 0.0 + xl = np.minimum(xl, 0.0) + xu = np.maximum(xu, 0.0) + + # Iterate through the straight lines. + step = np.zeros_like(grad) + q_val = const + s_norm = np.linalg.norm(xpt, axis=0) + + # Set alpha_xl to the step size for the lower-bound constraint and + # alpha_xu to the step size for the upper-bound constraint. + + # xl.shape = (N,) + # xpt.shape = (N, M) + # i_xl_pos.shape = (M, N) + i_xl_pos = (xl > -np.inf) & (xpt.T > -TINY * xl) + i_xl_neg = (xl > -np.inf) & (xpt.T < TINY * xl) + i_xu_pos = (xu < np.inf) & (xpt.T > TINY * xu) + i_xu_neg = (xu < np.inf) & (xpt.T < -TINY * xu) + + # (M, N) + alpha_xl_pos = np.atleast_2d( + np.broadcast_to(xl, i_xl_pos.shape)[i_xl_pos] / xpt.T[i_xl_pos] + ) + # (M,) + alpha_xl_pos = np.max(alpha_xl_pos, axis=1, initial=-np.inf) + # make sure it's (M,) + alpha_xl_pos = np.broadcast_to(np.atleast_1d(alpha_xl_pos), xpt.shape[1]) + + alpha_xl_neg = np.atleast_2d( + np.broadcast_to(xl, i_xl_neg.shape)[i_xl_neg] / xpt.T[i_xl_neg] + ) + alpha_xl_neg = np.max(alpha_xl_neg, axis=1, initial=np.inf) + alpha_xl_neg = np.broadcast_to(np.atleast_1d(alpha_xl_neg), xpt.shape[1]) + + alpha_xu_neg = np.atleast_2d( + np.broadcast_to(xu, i_xu_neg.shape)[i_xu_neg] / xpt.T[i_xu_neg] + ) + alpha_xu_neg = np.max(alpha_xu_neg, axis=1, initial=-np.inf) + alpha_xu_neg = np.broadcast_to(np.atleast_1d(alpha_xu_neg), xpt.shape[1]) + + alpha_xu_pos = np.atleast_2d( + np.broadcast_to(xu, 
i_xu_pos.shape)[i_xu_pos] / xpt.T[i_xu_pos] + ) + alpha_xu_pos = np.max(alpha_xu_pos, axis=1, initial=np.inf) + alpha_xu_pos = np.broadcast_to(np.atleast_1d(alpha_xu_pos), xpt.shape[1]) + + for k in range(xpt.shape[1]): + # Set alpha_tr to the step size for the trust-region constraint. + if s_norm[k] > TINY * delta: + alpha_tr = max(delta / s_norm[k], 0.0) + else: + # The current straight line is basically zero. + continue + + alpha_bd_pos = max(min(alpha_xu_pos[k], alpha_xl_neg[k]), 0.0) + alpha_bd_neg = min(max(alpha_xl_pos[k], alpha_xu_neg[k]), 0.0) + + # Set alpha_quad_pos and alpha_quad_neg to the step size to the extrema + # of the quadratic function along the positive and negative directions. + grad_step = grad @ xpt[:, k] + curv_step = curv(xpt[:, k]) + if ( + grad_step >= 0.0 + and curv_step < -TINY * grad_step + or grad_step <= 0.0 + and curv_step > -TINY * grad_step + ): + alpha_quad_pos = max(-grad_step / curv_step, 0.0) + else: + alpha_quad_pos = np.inf + if ( + grad_step >= 0.0 + and curv_step > TINY * grad_step + or grad_step <= 0.0 + and curv_step < TINY * grad_step + ): + alpha_quad_neg = min(-grad_step / curv_step, 0.0) + else: + alpha_quad_neg = -np.inf + + # Select the step that provides the largest value of the objective + # function if it improves the current best. The best positive step is + # either the one that reaches the constraints or the one that reaches + # the extremum of the objective function along the current direction + # (only possible if the resulting step is feasible). We test both, and + # we perform similar calculations along the negative step. + # N.B.: we select the largest possible step among all the ones that + # maximize the objective function. This is to avoid returning the zero + # step in some extreme cases. 
def _cauchy_geom(const, grad, curv, xl, xu, delta, debug):
    """
    Same as `bound_constrained_cauchy_step` without the absolute value.

    Approximately maximize the quadratic ``const + grad @ s + 0.5 * curv(s)``
    along a bound-projected Cauchy direction, subject to ``xl <= s <= xu`` and
    ``norm(s) <= delta``. Returns the pair ``(step, q_val)`` where ``q_val``
    is the quadratic's value at ``step``.

    NOTE(review): callers are assumed to pass bounds for which the origin is
    feasible (``xl <= 0 <= xu``) and a finite positive `delta`, as in the
    sibling subsolvers — confirm against the call sites.
    """
    # Calculate the initial active set: components whose bound is free but
    # whose gradient pushes the maximization the "wrong" way are fixed at
    # the corresponding bound right away.
    fixed_xl = (xl < 0.0) & (grad > 0.0)
    fixed_xu = (xu > 0.0) & (grad < 0.0)

    # Calculate the Cauchy step.
    cauchy_step = np.zeros_like(grad)
    cauchy_step[fixed_xl] = xl[fixed_xl]
    cauchy_step[fixed_xu] = xu[fixed_xu]
    if np.linalg.norm(cauchy_step) > delta:
        working = fixed_xl | fixed_xu
        while True:
            # Calculate the Cauchy step for the directions in the working set.
            # delta_reduced is the trust-region budget left after accounting
            # for the components already frozen outside the working set.
            g_norm = np.linalg.norm(grad[working])
            delta_reduced = np.sqrt(
                delta**2.0 - cauchy_step[~working] @ cauchy_step[~working]
            )
            if g_norm > TINY * abs(delta_reduced):
                mu = max(delta_reduced / g_norm, 0.0)
            else:
                # Gradient on the working set is (numerically) zero; no
                # further scaling is possible.
                break
            cauchy_step[working] = mu * grad[working]

            # Update the working set: components of the scaled step that
            # violate a bound get clipped to it and leave the working set.
            fixed_xl = working & (cauchy_step < xl)
            fixed_xu = working & (cauchy_step > xu)
            if not np.any(fixed_xl) and not np.any(fixed_xu):
                # Stop the calculations as the Cauchy step is now feasible.
                break
            cauchy_step[fixed_xl] = xl[fixed_xl]
            cauchy_step[fixed_xu] = xu[fixed_xu]
            working = working & ~(fixed_xl | fixed_xu)

    # Calculate the step that maximizes the quadratic along the Cauchy step.
    grad_step = grad @ cauchy_step
    if grad_step >= 0.0:
        # Set alpha_tr to the step size for the trust-region constraint.
        s_norm = np.linalg.norm(cauchy_step)
        if s_norm > TINY * delta:
            alpha_tr = max(delta / s_norm, 0.0)
        else:
            # The Cauchy step is basically zero.
            alpha_tr = 0.0

        # Set alpha_quad to the step size for the maximization problem
        # (the stationary point of the 1-D quadratic, when it is a maximum).
        curv_step = curv(cauchy_step)
        if curv_step < -TINY * grad_step:
            alpha_quad = max(-grad_step / curv_step, 0.0)
        else:
            alpha_quad = np.inf

        # Set alpha_bd to the step size for the bound constraints. The TINY
        # guards avoid dividing by (near-)zero step components.
        i_xl = (xl > -np.inf) & (cauchy_step < TINY * xl)
        i_xu = (xu < np.inf) & (cauchy_step > TINY * xu)
        alpha_xl = np.min(xl[i_xl] / cauchy_step[i_xl], initial=np.inf)
        alpha_xu = np.min(xu[i_xu] / cauchy_step[i_xu], initial=np.inf)
        alpha_bd = min(alpha_xl, alpha_xu)

        # Calculate the solution and the corresponding function value.
        alpha = min(alpha_tr, alpha_quad, alpha_bd)
        step = np.clip(alpha * cauchy_step, xl, xu)
        q_val = const + alpha * grad_step + 0.5 * alpha**2.0 * curv_step
    else:
        # This case is never reached in exact arithmetic. It prevents this
        # function to return a step that decreases the objective function.
        step = np.zeros_like(grad)
        q_val = const

    if debug:
        assert np.all(xl <= step)
        assert np.all(step <= xu)
        assert np.linalg.norm(step) < 1.1 * delta
    return step, q_val
def tangential_byrd_omojokun(grad, hess_prod, xl, xu, delta, debug, **kwargs):
    r"""
    Minimize approximately a quadratic function subject to bound constraints in
    a trust region.

    This function solves approximately

    .. math::

        \min_{s \in \mathbb{R}^n} \quad g^{\mathsf{T}} s + \frac{1}{2}
        s^{\mathsf{T}} H s \quad \text{s.t.} \quad
        \left\{ \begin{array}{l}
            l \le s \le u\\
            \lVert s \rVert \le \Delta,
        \end{array} \right.

    using an active-set variation of the truncated conjugate gradient method.

    Parameters
    ----------
    grad : `numpy.ndarray`, shape (n,)
        Gradient :math:`g` as shown above.
    hess_prod : callable
        Product of the Hessian matrix :math:`H` with any vector.

            ``hess_prod(s) -> `numpy.ndarray`, shape (n,)``

        returns the product :math:`H s`.
    xl : `numpy.ndarray`, shape (n,)
        Lower bounds :math:`l` as shown above.
    xu : `numpy.ndarray`, shape (n,)
        Upper bounds :math:`u` as shown above.
    delta : float
        Trust-region radius :math:`\Delta` as shown above.
    debug : bool
        Whether to make debugging tests during the execution.

    Returns
    -------
    `numpy.ndarray`, shape (n,)
        Approximate solution :math:`s`.

    Other Parameters
    ----------------
    improve_tcg : bool, optional
        If True, a solution generated by the truncated conjugate gradient
        method that is on the boundary of the trust region is improved by
        moving around the trust-region boundary on the two-dimensional space
        spanned by the solution and the gradient of the quadratic function at
        the solution (default is True).

    Notes
    -----
    This function implements Algorithm 6.2 of [1]_. It is assumed that the
    origin is feasible with respect to the bound constraints and that `delta`
    is finite and positive.

    References
    ----------
    .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods
       and Software*. PhD thesis, Department of Applied Mathematics, The Hong
       Kong Polytechnic University, Hong Kong, China, 2022. URL:
       https://theses.lib.polyu.edu.hk/handle/200/12294.
    """
    if debug:
        assert isinstance(grad, np.ndarray) and grad.ndim == 1
        assert inspect.signature(hess_prod).bind(grad)
        assert isinstance(xl, np.ndarray) and xl.shape == grad.shape
        assert isinstance(xu, np.ndarray) and xu.shape == grad.shape
        assert isinstance(delta, float)
        assert isinstance(debug, bool)
        tol = get_arrays_tol(xl, xu)
        assert np.all(xl <= tol)
        assert np.all(xu >= -tol)
        assert np.isfinite(delta) and delta > 0.0
    # Clamp the bounds so the origin is exactly feasible despite rounding.
    xl = np.minimum(xl, 0.0)
    xu = np.maximum(xu, 0.0)

    # Copy the arrays that may be modified by the code below.
    n = grad.size
    grad = np.copy(grad)
    grad_orig = np.copy(grad)

    # Calculate the initial active set. A component is free when it has room
    # to move on the side the steepest-descent direction -grad points to.
    free_bd = ((xl < 0.0) | (grad < 0.0)) & ((xu > 0.0) | (grad > 0.0))

    # Set the initial iterate and the initial search direction.
    step = np.zeros_like(grad)
    sd = np.zeros_like(step)
    sd[free_bd] = -grad[free_bd]

    # The CG loop needs at most dim(free subspace) iterations; k is reset to
    # zero whenever the active set changes.
    k = 0
    reduct = 0.0
    boundary_reached = False
    while k < np.count_nonzero(free_bd):
        # Stop the computations if sd is not a descent direction.
        grad_sd = grad @ sd
        if grad_sd >= -10.0 * EPS * n * max(1.0, np.linalg.norm(grad)):
            break

        # Set alpha_tr to the step size for the trust-region constraint.
        try:
            alpha_tr = _alpha_tr(step, sd, delta)
        except ZeroDivisionError:
            break

        # Stop the computations if a step along sd is expected to give a
        # relatively small reduction in the objective function.
        if -alpha_tr * grad_sd <= 1e-8 * reduct:
            break

        # Set alpha_quad to the step size for the minimization problem.
        hess_sd = hess_prod(sd)
        curv_sd = sd @ hess_sd
        if curv_sd > TINY * abs(grad_sd):
            alpha_quad = max(-grad_sd / curv_sd, 0.0)
        else:
            # Nonpositive (or negligible) curvature: the 1-D quadratic has no
            # interior minimizer along sd.
            alpha_quad = np.inf

        # Stop the computations if the reduction in the objective function
        # provided by an unconstrained step is small.
        alpha = min(alpha_tr, alpha_quad)
        if -alpha * (grad_sd + 0.5 * alpha * curv_sd) <= 1e-8 * reduct:
            break

        # Set alpha_bd to the step size for the bound constraints. The TINY
        # thresholds skip components whose direction cannot reach the bound.
        i_xl = (xl > -np.inf) & (sd < -TINY * np.abs(xl - step))
        i_xu = (xu < np.inf) & (sd > TINY * np.abs(xu - step))
        all_alpha_xl = np.full_like(step, np.inf)
        all_alpha_xu = np.full_like(step, np.inf)
        all_alpha_xl[i_xl] = np.maximum(
            (xl[i_xl] - step[i_xl]) / sd[i_xl],
            0.0,
        )
        all_alpha_xu[i_xu] = np.maximum(
            (xu[i_xu] - step[i_xu]) / sd[i_xu],
            0.0,
        )
        alpha_xl = np.min(all_alpha_xl)
        alpha_xu = np.min(all_alpha_xu)
        alpha_bd = min(alpha_xl, alpha_xu)

        # Update the iterate.
        alpha = min(alpha, alpha_bd)
        if alpha > 0.0:
            step[free_bd] = np.clip(
                step[free_bd] + alpha * sd[free_bd],
                xl[free_bd],
                xu[free_bd],
            )
            grad += alpha * hess_sd
            reduct -= alpha * (grad_sd + 0.5 * alpha * curv_sd)

        if alpha < min(alpha_tr, alpha_bd):
            # The current iteration is a conjugate gradient iteration. Update
            # the search direction so that it is conjugate (with respect to H)
            # to all the previous search directions.
            beta = (grad[free_bd] @ hess_sd[free_bd]) / curv_sd
            sd[free_bd] = beta * sd[free_bd] - grad[free_bd]
            sd[~free_bd] = 0.0
            k += 1
        elif alpha < alpha_tr:
            # The iterate is restricted by a bound constraint. Add this bound
            # constraint to the active set, and restart the calculations.
            if alpha_xl <= alpha:
                i_new = np.argmin(all_alpha_xl)
                step[i_new] = xl[i_new]
            else:
                i_new = np.argmin(all_alpha_xu)
                step[i_new] = xu[i_new]
            free_bd[i_new] = False
            sd[free_bd] = -grad[free_bd]
            sd[~free_bd] = 0.0
            k = 0
        else:
            # The current iterate is on the trust-region boundary. Add all the
            # active bounds to the working set to prepare for the improvement
            # of the solution, and stop the iterations.
            if alpha_xl <= alpha:
                i_new = _argmin(all_alpha_xl)
                step[i_new] = xl[i_new]
                free_bd[i_new] = False
            if alpha_xu <= alpha:
                i_new = _argmin(all_alpha_xu)
                step[i_new] = xu[i_new]
                free_bd[i_new] = False
            boundary_reached = True
            break

    # Attempt to improve the solution on the trust-region boundary.
    if kwargs.get("improve_tcg", True) and boundary_reached:
        # Remember the TCG solution and its quadratic value so the
        # improvement phase can be discarded if it does not help.
        step_base = np.copy(step)
        step_comparator = grad_orig @ step_base + 0.5 * step_base @ hess_prod(
            step_base
        )

        while np.count_nonzero(free_bd) > 0:
            # Check whether a substantial reduction in the objective function
            # is possible, and set the search direction.
            step_sq = step[free_bd] @ step[free_bd]
            grad_sq = grad[free_bd] @ grad[free_bd]
            grad_step = grad[free_bd] @ step[free_bd]
            grad_sd = -np.sqrt(max(step_sq * grad_sq - grad_step**2.0, 0.0))
            sd[free_bd] = grad_step * step[free_bd] - step_sq * grad[free_bd]
            sd[~free_bd] = 0.0
            if grad_sd >= -1e-8 * reduct or np.any(
                grad_sd >= -TINY * np.abs(sd[free_bd])
            ):
                break
            sd[free_bd] /= -grad_sd

            # Calculate an upper bound for the tangent of half the angle theta
            # of this alternative iteration. The step will be updated as:
            # step = cos(theta) * step + sin(theta) * sd.
            # The t_* quantities below are values of tan(0.5 * theta).
            temp_xl = np.zeros(n)
            temp_xu = np.zeros(n)
            temp_xl[free_bd] = (
                step[free_bd] ** 2.0 + sd[free_bd] ** 2.0 - xl[free_bd] ** 2.0
            )
            temp_xu[free_bd] = (
                step[free_bd] ** 2.0 + sd[free_bd] ** 2.0 - xu[free_bd] ** 2.0
            )
            temp_xl[temp_xl > 0.0] = (
                np.sqrt(temp_xl[temp_xl > 0.0]) - sd[temp_xl > 0.0]
            )
            temp_xu[temp_xu > 0.0] = (
                np.sqrt(temp_xu[temp_xu > 0.0]) + sd[temp_xu > 0.0]
            )
            dist_xl = np.maximum(step - xl, 0.0)
            dist_xu = np.maximum(xu - step, 0.0)
            i_xl = temp_xl > TINY * dist_xl
            i_xu = temp_xu > TINY * dist_xu
            all_t_xl = np.ones(n)
            all_t_xu = np.ones(n)
            all_t_xl[i_xl] = np.minimum(
                all_t_xl[i_xl],
                dist_xl[i_xl] / temp_xl[i_xl],
            )
            all_t_xu[i_xu] = np.minimum(
                all_t_xu[i_xu],
                dist_xu[i_xu] / temp_xu[i_xu],
            )
            t_xl = np.min(all_t_xl)
            t_xu = np.min(all_t_xu)
            t_bd = min(t_xl, t_xu)

            # Calculate some curvature information.
            hess_step = hess_prod(step)
            hess_sd = hess_prod(sd)
            curv_step = step @ hess_step
            curv_sd = sd @ hess_sd
            curv_step_sd = step @ hess_sd

            # For a range of equally spaced values of tan(0.5 * theta),
            # calculate the reduction in the objective function that would be
            # obtained by accepting the corresponding angle.
            n_samples = 20
            n_samples = int((n_samples - 3) * t_bd + 3)
            t_samples = np.linspace(t_bd / n_samples, t_bd, n_samples)
            sin_values = 2.0 * t_samples / (1.0 + t_samples**2.0)
            all_reduct = sin_values * (
                grad_step * t_samples
                - grad_sd
                - t_samples * curv_step
                + sin_values
                * (t_samples * curv_step_sd - 0.5 * (curv_sd - curv_step))
            )
            if np.all(all_reduct <= 0.0):
                # No reduction in the objective function is obtained.
                break

            # Accept the angle that provides the largest reduction in the
            # objective function, and update the iterate.
            i_max = np.argmax(all_reduct)
            cos_value = (1.0 - t_samples[i_max] ** 2.0) / (
                1.0 + t_samples[i_max] ** 2.0
            )
            step[free_bd] = (
                cos_value * step[free_bd] + sin_values[i_max] * sd[free_bd]
            )
            grad += (cos_value - 1.0) * hess_step + sin_values[i_max] * hess_sd
            reduct += all_reduct[i_max]

            # If the above angle is restricted by bound constraints, add them
            # to the working set, and restart the alternative iteration.
            # Otherwise, the calculations are terminated.
            if t_bd < 1.0 and i_max == n_samples - 1:
                if t_xl <= t_bd:
                    i_new = _argmin(all_t_xl)
                    step[i_new] = xl[i_new]
                    free_bd[i_new] = False
                if t_xu <= t_bd:
                    i_new = _argmin(all_t_xu)
                    step[i_new] = xu[i_new]
                    free_bd[i_new] = False
            else:
                break

        # Ensure that the alternative iteration improves the objective
        # function.
        if grad_orig @ step + 0.5 * step @ hess_prod(step) > step_comparator:
            step = step_base

    if debug:
        assert np.all(xl <= step)
        assert np.all(step <= xu)
        assert np.linalg.norm(step) < 1.1 * delta
    return step
def constrained_tangential_byrd_omojokun(
    grad,
    hess_prod,
    xl,
    xu,
    aub,
    bub,
    aeq,
    delta,
    debug,
    **kwargs,
):
    r"""
    Minimize approximately a quadratic function subject to bound and linear
    constraints in a trust region.

    This function solves approximately

    .. math::

        \min_{s \in \mathbb{R}^n} \quad g^{\mathsf{T}} s + \frac{1}{2}
        s^{\mathsf{T}} H s \quad \text{s.t.} \quad
        \left\{ \begin{array}{l}
            l \le s \le u,\\
            A_{\scriptscriptstyle I} s \le b_{\scriptscriptstyle I},\\
            A_{\scriptscriptstyle E} s = 0,\\
            \lVert s \rVert \le \Delta,
        \end{array} \right.

    using an active-set variation of the truncated conjugate gradient method.

    Parameters
    ----------
    grad : `numpy.ndarray`, shape (n,)
        Gradient :math:`g` as shown above.
    hess_prod : callable
        Product of the Hessian matrix :math:`H` with any vector.

            ``hess_prod(s) -> `numpy.ndarray`, shape (n,)``

        returns the product :math:`H s`.
    xl : `numpy.ndarray`, shape (n,)
        Lower bounds :math:`l` as shown above.
    xu : `numpy.ndarray`, shape (n,)
        Upper bounds :math:`u` as shown above.
    aub : `numpy.ndarray`, shape (m_linear_ub, n)
        Coefficient matrix :math:`A_{\scriptscriptstyle I}` as shown above.
    bub : `numpy.ndarray`, shape (m_linear_ub,)
        Right-hand side :math:`b_{\scriptscriptstyle I}` as shown above.
    aeq : `numpy.ndarray`, shape (m_linear_eq, n)
        Coefficient matrix :math:`A_{\scriptscriptstyle E}` as shown above.
    delta : float
        Trust-region radius :math:`\Delta` as shown above.
    debug : bool
        Whether to make debugging tests during the execution.

    Returns
    -------
    `numpy.ndarray`, shape (n,)
        Approximate solution :math:`s`.

    Other Parameters
    ----------------
    improve_tcg : bool, optional
        If True, a solution generated by the truncated conjugate gradient
        method that is on the boundary of the trust region is improved by
        moving around the trust-region boundary on the two-dimensional space
        spanned by the solution and the gradient of the quadratic function at
        the solution (default is True).

    Notes
    -----
    This function implements Algorithm 6.3 of [1]_. It is assumed that the
    origin is feasible with respect to the bound and linear constraints, and
    that `delta` is finite and positive.

    References
    ----------
    .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods
       and Software*. PhD thesis, Department of Applied Mathematics, The Hong
       Kong Polytechnic University, Hong Kong, China, 2022. URL:
       https://theses.lib.polyu.edu.hk/handle/200/12294.
    """
    if debug:
        assert isinstance(grad, np.ndarray) and grad.ndim == 1
        assert inspect.signature(hess_prod).bind(grad)
        assert isinstance(xl, np.ndarray) and xl.shape == grad.shape
        assert isinstance(xu, np.ndarray) and xu.shape == grad.shape
        assert (
            isinstance(aub, np.ndarray)
            and aub.ndim == 2
            and aub.shape[1] == grad.size
        )
        assert (
            isinstance(bub, np.ndarray)
            and bub.ndim == 1
            and bub.size == aub.shape[0]
        )
        assert (
            isinstance(aeq, np.ndarray)
            and aeq.ndim == 2
            and aeq.shape[1] == grad.size
        )
        assert isinstance(delta, float)
        assert isinstance(debug, bool)
        tol = get_arrays_tol(xl, xu)
        assert np.all(xl <= tol)
        assert np.all(xu >= -tol)
        assert np.all(bub >= -tol)
        assert np.isfinite(delta) and delta > 0.0
    # Clamp the constraints so the origin is exactly feasible despite
    # rounding.
    xl = np.minimum(xl, 0.0)
    xu = np.maximum(xu, 0.0)
    bub = np.maximum(bub, 0.0)

    # Copy the arrays that may be modified by the code below.
    n = grad.size
    grad = np.copy(grad)
    grad_orig = np.copy(grad)

    # Calculate the initial active set. A constraint is free when it has
    # slack, or when the steepest-descent direction -grad moves away from it.
    free_xl = (xl < 0.0) | (grad < 0.0)
    free_xu = (xu > 0.0) | (grad > 0.0)
    free_ub = (bub > 0.0) | (aub @ grad > 0.0)
    n_act, q = qr_tangential_byrd_omojokun(aub, aeq, free_xl, free_xu, free_ub)

    # Set the initial iterate and the initial search direction, obtained by
    # projecting -grad onto the null space of the working constraints.
    step = np.zeros_like(grad)
    sd = -q[:, n_act:] @ (q[:, n_act:].T @ grad)
    resid = np.copy(bub)

    # The CG loop needs at most n - n_act iterations; k is reset to zero
    # whenever the working set changes.
    k = 0
    reduct = 0.0
    boundary_reached = False
    while k < n - n_act:
        # Stop the computations if sd is not a descent direction.
        grad_sd = grad @ sd
        if grad_sd >= -10.0 * EPS * n * max(1.0, np.linalg.norm(grad)):
            break

        # Set alpha_tr to the step size for the trust-region constraint.
        try:
            alpha_tr = _alpha_tr(step, sd, delta)
        except ZeroDivisionError:
            break

        # Stop the computations if a step along sd is expected to give a
        # relatively small reduction in the objective function.
        if -alpha_tr * grad_sd <= 1e-8 * reduct:
            break

        # Set alpha_quad to the step size for the minimization problem.
        hess_sd = hess_prod(sd)
        curv_sd = sd @ hess_sd
        if curv_sd > TINY * abs(grad_sd):
            alpha_quad = max(-grad_sd / curv_sd, 0.0)
        else:
            # Nonpositive (or negligible) curvature: no interior minimizer
            # along sd.
            alpha_quad = np.inf

        # Stop the computations if the reduction in the objective function
        # provided by an unconstrained step is small.
        alpha = min(alpha_tr, alpha_quad)
        if -alpha * (grad_sd + 0.5 * alpha * curv_sd) <= 1e-8 * reduct:
            break

        # Set alpha_bd to the step size for the bound constraints.
        i_xl = free_xl & (xl > -np.inf) & (sd < -TINY * np.abs(xl - step))
        i_xu = free_xu & (xu < np.inf) & (sd > TINY * np.abs(xu - step))
        all_alpha_xl = np.full_like(step, np.inf)
        all_alpha_xu = np.full_like(step, np.inf)
        all_alpha_xl[i_xl] = np.maximum(
            (xl[i_xl] - step[i_xl]) / sd[i_xl],
            0.0,
        )
        all_alpha_xu[i_xu] = np.maximum(
            (xu[i_xu] - step[i_xu]) / sd[i_xu],
            0.0,
        )
        alpha_xl = np.min(all_alpha_xl)
        alpha_xu = np.min(all_alpha_xu)
        alpha_bd = min(alpha_xl, alpha_xu)

        # Set alpha_ub to the step size for the linear constraints.
        aub_sd = aub @ sd
        i_ub = free_ub & (aub_sd > TINY * np.abs(resid))
        all_alpha_ub = np.full_like(bub, np.inf)
        all_alpha_ub[i_ub] = resid[i_ub] / aub_sd[i_ub]
        alpha_ub = np.min(all_alpha_ub, initial=np.inf)

        # Update the iterate.
        alpha = min(alpha, alpha_bd, alpha_ub)
        if alpha > 0.0:
            step = np.clip(step + alpha * sd, xl, xu)
            grad += alpha * hess_sd
            resid = np.maximum(0.0, resid - alpha * aub_sd)
            reduct -= alpha * (grad_sd + 0.5 * alpha * curv_sd)

        if alpha < min(alpha_tr, alpha_bd, alpha_ub):
            # The current iteration is a conjugate gradient iteration. Update
            # the search direction so that it is conjugate (with respect to H)
            # to all the previous search directions.
            grad_proj = q[:, n_act:] @ (q[:, n_act:].T @ grad)
            beta = (grad_proj @ hess_sd) / curv_sd
            sd = beta * sd - grad_proj
            k += 1
        elif alpha < alpha_tr:
            # The iterate is restricted by a bound/linear constraint. Add this
            # constraint to the active set, and restart the calculations.
            if alpha_xl <= alpha:
                i_new = np.argmin(all_alpha_xl)
                step[i_new] = xl[i_new]
                free_xl[i_new] = False
            elif alpha_xu <= alpha:
                i_new = np.argmin(all_alpha_xu)
                step[i_new] = xu[i_new]
                free_xu[i_new] = False
            else:
                i_new = np.argmin(all_alpha_ub)
                free_ub[i_new] = False
            n_act, q = qr_tangential_byrd_omojokun(
                aub,
                aeq,
                free_xl,
                free_xu,
                free_ub,
            )
            sd = -q[:, n_act:] @ (q[:, n_act:].T @ grad)
            k = 0
        else:
            # The current iterate is on the trust-region boundary. Add all the
            # active bound/linear constraints to the working set to prepare
            # for the improvement of the solution, and stop the iterations.
            if alpha_xl <= alpha:
                i_new = _argmin(all_alpha_xl)
                step[i_new] = xl[i_new]
                free_xl[i_new] = False
            if alpha_xu <= alpha:
                i_new = _argmin(all_alpha_xu)
                step[i_new] = xu[i_new]
                free_xu[i_new] = False
            if alpha_ub <= alpha:
                i_new = _argmin(all_alpha_ub)
                free_ub[i_new] = False
            n_act, q = qr_tangential_byrd_omojokun(
                aub,
                aeq,
                free_xl,
                free_xu,
                free_ub,
            )
            boundary_reached = True
            break

    # Attempt to improve the solution on the trust-region boundary.
    if kwargs.get("improve_tcg", True) and boundary_reached and n_act < n:
        # Remember the TCG solution so the improvement phase can be discarded
        # if it does not help.
        step_base = np.copy(step)
        while n_act < n:
            # Check whether a substantial reduction in the objective function
            # is possible, and set the search direction.
            step_proj = q[:, n_act:] @ (q[:, n_act:].T @ step)
            grad_proj = q[:, n_act:] @ (q[:, n_act:].T @ grad)
            step_sq = step_proj @ step_proj
            grad_sq = grad_proj @ grad_proj
            grad_step = grad_proj @ step_proj
            grad_sd = -np.sqrt(max(step_sq * grad_sq - grad_step**2.0, 0.0))
            sd = q[:, n_act:] @ (
                q[:, n_act:].T @ (grad_step * step - step_sq * grad)
            )
            if grad_sd >= -1e-8 * reduct or np.any(
                grad_sd >= -TINY * np.abs(sd)
            ):
                break
            sd /= -grad_sd

            # Calculate an upper bound for the tangent of half the angle theta
            # of this alternative iteration for the bound constraints. The
            # step will be updated as:
            # step += (cos(theta) - 1) * step_proj + sin(theta) * sd.
            # The t_* quantities below are values of tan(0.5 * theta).
            temp_xl = np.zeros(n)
            temp_xu = np.zeros(n)
            dist_xl = np.maximum(step - xl, 0.0)
            dist_xu = np.maximum(xu - step, 0.0)
            temp_xl[free_xl] = sd[free_xl] ** 2.0 - dist_xl[free_xl] * (
                dist_xl[free_xl] - 2.0 * step_proj[free_xl]
            )
            temp_xu[free_xu] = sd[free_xu] ** 2.0 - dist_xu[free_xu] * (
                dist_xu[free_xu] + 2.0 * step_proj[free_xu]
            )
            temp_xl[temp_xl > 0.0] = (
                np.sqrt(temp_xl[temp_xl > 0.0]) - sd[temp_xl > 0.0]
            )
            temp_xu[temp_xu > 0.0] = (
                np.sqrt(temp_xu[temp_xu > 0.0]) + sd[temp_xu > 0.0]
            )
            i_xl = temp_xl > TINY * dist_xl
            i_xu = temp_xu > TINY * dist_xu
            all_t_xl = np.ones(n)
            all_t_xu = np.ones(n)
            all_t_xl[i_xl] = np.minimum(
                all_t_xl[i_xl],
                dist_xl[i_xl] / temp_xl[i_xl],
            )
            all_t_xu[i_xu] = np.minimum(
                all_t_xu[i_xu],
                dist_xu[i_xu] / temp_xu[i_xu],
            )
            t_xl = np.min(all_t_xl)
            t_xu = np.min(all_t_xu)
            t_bd = min(t_xl, t_xu)

            # Calculate an upper bound for the tangent of half the angle theta
            # of this alternative iteration for the linear constraints.
            temp_ub = np.zeros_like(resid)
            aub_step = aub @ step_proj
            aub_sd = aub @ sd
            temp_ub[free_ub] = aub_sd[free_ub] ** 2.0 - resid[free_ub] * (
                resid[free_ub] + 2.0 * aub_step[free_ub]
            )
            temp_ub[temp_ub > 0.0] = (
                np.sqrt(temp_ub[temp_ub > 0.0]) + aub_sd[temp_ub > 0.0]
            )
            i_ub = temp_ub > TINY * resid
            all_t_ub = np.ones_like(resid)
            all_t_ub[i_ub] = np.minimum(
                all_t_ub[i_ub],
                resid[i_ub] / temp_ub[i_ub],
            )
            t_ub = np.min(all_t_ub, initial=1.0)
            t_min = min(t_bd, t_ub)

            # Calculate some curvature information.
            hess_step = hess_prod(step_proj)
            hess_sd = hess_prod(sd)
            curv_step = step_proj @ hess_step
            curv_sd = sd @ hess_sd
            curv_step_sd = step_proj @ hess_sd

            # For a range of equally spaced values of tan(0.5 * theta),
            # calculate the reduction in the objective function that would be
            # obtained by accepting the corresponding angle.
            n_samples = 20
            n_samples = int((n_samples - 3) * t_min + 3)
            t_samples = np.linspace(t_min / n_samples, t_min, n_samples)
            sin_values = 2.0 * t_samples / (1.0 + t_samples**2.0)
            all_reduct = sin_values * (
                grad_step * t_samples
                - grad_sd
                - sin_values
                * (
                    0.5 * t_samples**2.0 * curv_step
                    - 2.0 * t_samples * curv_step_sd
                    + 0.5 * curv_sd
                )
            )
            if np.all(all_reduct <= 0.0):
                # No reduction in the objective function is obtained.
                break

            # Accept the angle that provides the largest reduction in the
            # objective function, and update the iterate.
            i_max = np.argmax(all_reduct)
            cos_value = (1.0 - t_samples[i_max] ** 2.0) / (
                1.0 + t_samples[i_max] ** 2.0
            )
            step = np.clip(
                step + (cos_value - 1.0) * step_proj + sin_values[i_max] * sd,
                xl,
                xu,
            )
            grad += (cos_value - 1.0) * hess_step + sin_values[i_max] * hess_sd
            resid = np.maximum(
                0.0,
                resid
                - (cos_value - 1.0) * aub_step
                - sin_values[i_max] * aub_sd,
            )
            reduct += all_reduct[i_max]

            # If the above angle is restricted by bound/linear constraints,
            # add them to the working set, and restart the alternative
            # iteration. Otherwise, the calculations are terminated.
            if t_min < 1.0 and i_max == n_samples - 1:
                if t_xl <= t_min:
                    i_new = _argmin(all_t_xl)
                    step[i_new] = xl[i_new]
                    free_xl[i_new] = False
                if t_xu <= t_min:
                    i_new = _argmin(all_t_xu)
                    step[i_new] = xu[i_new]
                    # BUG FIX: this branch previously cleared free_xl[i_new],
                    # which left the newly-active *upper* bound marked free
                    # (so the null-space basis below was built from a wrong
                    # working set) and wrongly deactivated the unrelated
                    # lower bound of the same variable. The alpha_xu branch
                    # of the main loop above and the unconstrained variant
                    # (tangential_byrd_omojokun) both clear the upper-bound
                    # flag here.
                    free_xu[i_new] = False
                if t_ub <= t_min:
                    i_new = _argmin(all_t_ub)
                    free_ub[i_new] = False
                n_act, q = qr_tangential_byrd_omojokun(
                    aub,
                    aeq,
                    free_xl,
                    free_xu,
                    free_ub,
                )
            else:
                break

        # Ensure that the alternative iteration improves the objective
        # function.
        if grad_orig @ step + 0.5 * step @ hess_prod(
            step
        ) > grad_orig @ step_base + 0.5 * step_base @ hess_prod(step_base):
            step = step_base

    if debug:
        tol = get_arrays_tol(xl, xu)
        assert np.all(xl <= step)
        assert np.all(step <= xu)
        assert np.all(aub @ step <= bub + tol)
        assert np.all(np.abs(aeq @ step) <= tol)
        assert np.linalg.norm(step) < 1.1 * delta
    return step
math:: + + \min_{s \in \mathbb{R}^n} \quad \frac{1}{2} \big( \lVert \max \{ + A_{\scriptscriptstyle I} s - b_{\scriptscriptstyle I}, 0 \} \rVert^2 + + \lVert A_{\scriptscriptstyle E} s - b_{\scriptscriptstyle E} \rVert^2 + \big) \quad \text{s.t.} + \quad + \left\{ \begin{array}{l} + l \le s \le u,\\ + \lVert s \rVert \le \Delta, + \end{array} \right. + + using a variation of the truncated conjugate gradient method. + + Parameters + ---------- + aub : `numpy.ndarray`, shape (m_linear_ub, n) + Matrix :math:`A_{\scriptscriptstyle I}` as shown above. + bub : `numpy.ndarray`, shape (m_linear_ub,) + Vector :math:`b_{\scriptscriptstyle I}` as shown above. + aeq : `numpy.ndarray`, shape (m_linear_eq, n) + Matrix :math:`A_{\scriptscriptstyle E}` as shown above. + beq : `numpy.ndarray`, shape (m_linear_eq,) + Vector :math:`b_{\scriptscriptstyle E}` as shown above. + xl : `numpy.ndarray`, shape (n,) + Lower bounds :math:`l` as shown above. + xu : `numpy.ndarray`, shape (n,) + Upper bounds :math:`u` as shown above. + delta : float + Trust-region radius :math:`\Delta` as shown above. + debug : bool + Whether to make debugging tests during the execution. + + Returns + ------- + `numpy.ndarray`, shape (n,) + Approximate solution :math:`s`. + + Other Parameters + ---------------- + improve_tcg : bool, optional + If True, a solution generated by the truncated conjugate gradient + method that is on the boundary of the trust region is improved by + moving around the trust-region boundary on the two-dimensional space + spanned by the solution and the gradient of the quadratic function at + the solution (default is True). + + Notes + ----- + This function implements Algorithm 6.4 of [1]_. It is assumed that the + origin is feasible with respect to the bound constraints and that `delta` + is finite and positive. + + References + ---------- + .. [1] T. M. Ragonneau. *Model-Based Derivative-Free Optimization Methods + and Software*. 
PhD thesis, Department of Applied Mathematics, The Hong + Kong Polytechnic University, Hong Kong, China, 2022. URL: + https://theses.lib.polyu.edu.hk/handle/200/12294. + """ + if debug: + assert isinstance(aub, np.ndarray) and aub.ndim == 2 + assert ( + isinstance(bub, np.ndarray) + and bub.ndim == 1 + and bub.size == aub.shape[0] + ) + assert ( + isinstance(aeq, np.ndarray) + and aeq.ndim == 2 + and aeq.shape[1] == aub.shape[1] + ) + assert ( + isinstance(beq, np.ndarray) + and beq.ndim == 1 + and beq.size == aeq.shape[0] + ) + assert isinstance(xl, np.ndarray) and xl.shape == (aub.shape[1],) + assert isinstance(xu, np.ndarray) and xu.shape == (aub.shape[1],) + assert isinstance(delta, float) + assert isinstance(debug, bool) + tol = get_arrays_tol(xl, xu) + assert np.all(xl <= tol) + assert np.all(xu >= -tol) + assert np.isfinite(delta) and delta > 0.0 + xl = np.minimum(xl, 0.0) + xu = np.maximum(xu, 0.0) + + # Calculate the initial active set. + m_linear_ub, n = aub.shape + grad = np.r_[aeq.T @ -beq, np.maximum(0.0, -bub)] + free_xl = (xl < 0.0) | (grad[:n] < 0.0) + free_xu = (xu > 0.0) | (grad[:n] > 0.0) + free_slack = bub < 0.0 + free_ub = (bub > 0.0) | (aub @ grad[:n] - grad[n:] > 0.0) + n_act, q = qr_normal_byrd_omojokun( + aub, + free_xl, + free_xu, + free_slack, + free_ub, + ) + + # Calculate an upper bound on the norm of the slack variables. It is not + # used in the original algorithm, but it may prevent undesired behaviors + # engendered by computer rounding errors. + delta_slack = np.sqrt(beq @ beq + grad[n:] @ grad[n:]) + + # Set the initial iterate and the initial search direction. + step = np.zeros(n) + sd = -q[:, n_act:] @ (q[:, n_act:].T @ grad) + resid = bub + grad[n:] + + k = 0 + reduct = 0.0 + boundary_reached = False + while k < n + m_linear_ub - n_act: + # Stop the computations if sd is not a descent direction. 
+ grad_sd = grad @ sd + if grad_sd >= -10.0 * EPS * n * max(1.0, np.linalg.norm(grad)): + break + + # Set alpha_tr to the step size for the trust-region constraint. + try: + alpha_tr = _alpha_tr(step, sd[:n], delta) + except ZeroDivisionError: + alpha_tr = np.inf + + # Prevent undesired behaviors engendered by computer rounding errors by + # considering the trust-region constraint on the slack variables. + try: + alpha_tr = min(alpha_tr, _alpha_tr(grad[n:], sd[n:], delta_slack)) + except ZeroDivisionError: + pass + + # Stop the computations if a step along sd is expected to give a + # relatively small reduction in the objective function. + if -alpha_tr * grad_sd <= 1e-8 * reduct: + break + + # Set alpha_quad to the step size for the minimization problem. + hess_sd = np.r_[aeq.T @ (aeq @ sd[:n]), sd[n:]] + curv_sd = sd @ hess_sd + if curv_sd > TINY * abs(grad_sd): + alpha_quad = max(-grad_sd / curv_sd, 0.0) + else: + alpha_quad = np.inf + + # Stop the computations if the reduction in the objective function + # provided by an unconstrained step is small. + alpha = min(alpha_tr, alpha_quad) + if -alpha * (grad_sd + 0.5 * alpha * curv_sd) <= 1e-8 * reduct: + break + + # Set alpha_bd to the step size for the bound constraints. 
+ i_xl = free_xl & (xl > -np.inf) & (sd[:n] < -TINY * np.abs(xl - step)) + i_xu = free_xu & (xu < np.inf) & (sd[:n] > TINY * np.abs(xu - step)) + i_slack = free_slack & (sd[n:] < -TINY * np.abs(grad[n:])) + all_alpha_xl = np.full_like(step, np.inf) + all_alpha_xu = np.full_like(step, np.inf) + all_alpha_slack = np.full_like(bub, np.inf) + all_alpha_xl[i_xl] = np.maximum( + (xl[i_xl] - step[i_xl]) / sd[:n][i_xl], + 0.0, + ) + all_alpha_xu[i_xu] = np.maximum( + (xu[i_xu] - step[i_xu]) / sd[:n][i_xu], + 0.0, + ) + all_alpha_slack[i_slack] = np.maximum( + -grad[n:][i_slack] / sd[n:][i_slack], + 0.0, + ) + alpha_xl = np.min(all_alpha_xl) + alpha_xu = np.min(all_alpha_xu) + alpha_slack = np.min(all_alpha_slack, initial=np.inf) + alpha_bd = min(alpha_xl, alpha_xu, alpha_slack) + + # Set alpha_ub to the step size for the linear constraints. + aub_sd = aub @ sd[:n] - sd[n:] + i_ub = free_ub & (aub_sd > TINY * np.abs(resid)) + all_alpha_ub = np.full_like(bub, np.inf) + all_alpha_ub[i_ub] = resid[i_ub] / aub_sd[i_ub] + alpha_ub = np.min(all_alpha_ub, initial=np.inf) + + # Update the iterate. + alpha = min(alpha, alpha_bd, alpha_ub) + if alpha > 0.0: + step = np.clip(step + alpha * sd[:n], xl, xu) + grad += alpha * hess_sd + resid = np.maximum(0.0, resid - alpha * aub_sd) + reduct -= alpha * (grad_sd + 0.5 * alpha * curv_sd) + + if alpha < min(alpha_tr, alpha_bd, alpha_ub): + # The current iteration is a conjugate gradient iteration. Update + # the search direction so that it is conjugate (with respect to H) + # to all the previous search directions. + grad_proj = q[:, n_act:] @ (q[:, n_act:].T @ grad) + beta = (grad_proj @ hess_sd) / curv_sd + sd = beta * sd - grad_proj + k += 1 + elif alpha < alpha_tr: + # The iterate is restricted by a bound/linear constraint. Add this + # constraint to the active set, and restart the calculations. 
+ if alpha_xl <= alpha: + i_new = np.argmin(all_alpha_xl) + step[i_new] = xl[i_new] + free_xl[i_new] = False + elif alpha_xu <= alpha: + i_new = np.argmin(all_alpha_xu) + step[i_new] = xu[i_new] + free_xu[i_new] = False + elif alpha_slack <= alpha: + i_new = np.argmin(all_alpha_slack) + free_slack[i_new] = False + else: + i_new = np.argmin(all_alpha_ub) + free_ub[i_new] = False + n_act, q = qr_normal_byrd_omojokun( + aub, free_xl, free_xu, free_slack, free_ub + ) + sd = -q[:, n_act:] @ (q[:, n_act:].T @ grad) + k = 0 + else: + # The current iterate is on the trust-region boundary. Add all the + # active bound constraints to the working set to prepare for the + # improvement of the solution, and stop the iterations. + if alpha_xl <= alpha: + i_new = _argmin(all_alpha_xl) + step[i_new] = xl[i_new] + free_xl[i_new] = False + if alpha_xu <= alpha: + i_new = _argmin(all_alpha_xu) + step[i_new] = xu[i_new] + free_xu[i_new] = False + boundary_reached = True + break + + # Attempt to improve the solution on the trust-region boundary. + if kwargs.get("improve_tcg", True) and boundary_reached: + step_base = np.copy(step) + free_bd = free_xl & free_xu + grad = aub.T @ np.maximum(aub @ step - bub, 0.0) + aeq.T @ ( + aeq @ step - beq + ) + sd = np.zeros(n) + while np.count_nonzero(free_bd) > 0: + # Check whether a substantial reduction in the objective function + # is possible, and set the search direction. + step_sq = step[free_bd] @ step[free_bd] + grad_sq = grad[free_bd] @ grad[free_bd] + grad_step = grad[free_bd] @ step[free_bd] + grad_sd = -np.sqrt(max(step_sq * grad_sq - grad_step**2.0, 0.0)) + sd[free_bd] = grad_step * step[free_bd] - step_sq * grad[free_bd] + sd[~free_bd] = 0.0 + if grad_sd >= -1e-8 * reduct or np.any( + grad_sd >= -TINY * np.abs(sd[free_bd]) + ): + break + sd[free_bd] /= -grad_sd + + # Calculate an upper bound for the tangent of half the angle theta + # of this alternative iteration. 
The step will be updated as: + # step = cos(theta) * step + sin(theta) * sd. + temp_xl = np.zeros(n) + temp_xu = np.zeros(n) + temp_xl[free_bd] = ( + step[free_bd] ** 2.0 + sd[free_bd] ** 2.0 - xl[free_bd] ** 2.0 + ) + temp_xu[free_bd] = ( + step[free_bd] ** 2.0 + sd[free_bd] ** 2.0 - xu[free_bd] ** 2.0 + ) + temp_xl[temp_xl > 0.0] = ( + np.sqrt(temp_xl[temp_xl > 0.0]) - sd[temp_xl > 0.0] + ) + temp_xu[temp_xu > 0.0] = ( + np.sqrt(temp_xu[temp_xu > 0.0]) + sd[temp_xu > 0.0] + ) + dist_xl = np.maximum(step - xl, 0.0) + dist_xu = np.maximum(xu - step, 0.0) + i_xl = temp_xl > TINY * dist_xl + i_xu = temp_xu > TINY * dist_xu + all_t_xl = np.ones(n) + all_t_xu = np.ones(n) + all_t_xl[i_xl] = np.minimum( + all_t_xl[i_xl], + dist_xl[i_xl] / temp_xl[i_xl], + ) + all_t_xu[i_xu] = np.minimum( + all_t_xu[i_xu], + dist_xu[i_xu] / temp_xu[i_xu], + ) + t_xl = np.min(all_t_xl) + t_xu = np.min(all_t_xu) + t_bd = min(t_xl, t_xu) + + # For a range of equally spaced values of tan(0.5 * theta), + # calculate the reduction in the objective function that would be + # obtained by accepting the corresponding angle. + n_samples = 20 + n_samples = int((n_samples - 3) * t_bd + 3) + t_samples = np.linspace(t_bd / n_samples, t_bd, n_samples) + resid_ub = np.maximum(aub @ step - bub, 0.0) + resid_eq = aeq @ step - beq + step_proj = np.copy(step) + step_proj[~free_bd] = 0.0 + all_reduct = np.empty(n_samples) + for i in range(n_samples): + sin_value = 2.0 * t_samples[i] / (1.0 + t_samples[i] ** 2.0) + step_alt = np.clip( + step + sin_value * (sd - t_samples[i] * step_proj), + xl, + xu, + ) + resid_ub_alt = np.maximum(aub @ step_alt - bub, 0.0) + resid_eq_alt = aeq @ step_alt - beq + all_reduct[i] = 0.5 * ( + resid_ub @ resid_ub + + resid_eq @ resid_eq + - resid_ub_alt @ resid_ub_alt + - resid_eq_alt @ resid_eq_alt + ) + if np.all(all_reduct <= 0.0): + # No reduction in the objective function is obtained. 
+ break + + # Accept the angle that provides the largest reduction in the + # objective function, and update the iterate. + i_max = np.argmax(all_reduct) + cos_value = (1.0 - t_samples[i_max] ** 2.0) / ( + 1.0 + t_samples[i_max] ** 2.0 + ) + sin_value = (2.0 * t_samples[i_max] + / (1.0 + t_samples[i_max] ** 2.0)) + step[free_bd] = cos_value * step[free_bd] + sin_value * sd[free_bd] + grad = aub.T @ np.maximum(aub @ step - bub, 0.0) + aeq.T @ ( + aeq @ step - beq + ) + reduct += all_reduct[i_max] + + # If the above angle is restricted by bound constraints, add them + # to the working set, and restart the alternative iteration. + # Otherwise, the calculations are terminated. + if t_bd < 1.0 and i_max == n_samples - 1: + if t_xl <= t_bd: + i_new = _argmin(all_t_xl) + step[i_new] = xl[i_new] + free_bd[i_new] = False + if t_xu <= t_bd: + i_new = _argmin(all_t_xu) + step[i_new] = xu[i_new] + free_bd[i_new] = False + else: + break + + # Ensure that the alternative iteration improves the objective + # function. 
+ resid_ub = np.maximum(aub @ step - bub, 0.0) + resid_ub_base = np.maximum(aub @ step_base - bub, 0.0) + resid_eq = aeq @ step - beq + resid_eq_base = aeq @ step_base - beq + if ( + resid_ub @ resid_ub + resid_eq @ resid_eq + > resid_ub_base @ resid_ub_base + resid_eq_base @ resid_eq_base + ): + step = step_base + + if debug: + assert np.all(xl <= step) + assert np.all(step <= xu) + assert np.linalg.norm(step) < 1.1 * delta + return step + + +def qr_tangential_byrd_omojokun(aub, aeq, free_xl, free_xu, free_ub): + n = free_xl.size + identity = np.eye(n) + q, r, _ = qr( + np.block( + [ + [aeq], + [aub[~free_ub, :]], + [-identity[~free_xl, :]], + [identity[~free_xu, :]], + ] + ).T, + pivoting=True, + ) + n_act = np.count_nonzero( + np.abs(np.diag(r)) + >= 10.0 + * EPS + * n + * np.linalg.norm(r[: np.min(r.shape), : np.min(r.shape)], axis=0) + ) + return n_act, q + + +def qr_normal_byrd_omojokun(aub, free_xl, free_xu, free_slack, free_ub): + m_linear_ub, n = aub.shape + identity_n = np.eye(n) + identity_m = np.eye(m_linear_ub) + q, r, _ = qr( + np.block( + [ + [ + aub[~free_ub, :], + -identity_m[~free_ub, :], + ], + [ + np.zeros((m_linear_ub - np.count_nonzero(free_slack), n)), + -identity_m[~free_slack, :], + ], + [ + -identity_n[~free_xl, :], + np.zeros((n - np.count_nonzero(free_xl), m_linear_ub)), + ], + [ + identity_n[~free_xu, :], + np.zeros((n - np.count_nonzero(free_xu), m_linear_ub)), + ], + ] + ).T, + pivoting=True, + ) + n_act = np.count_nonzero( + np.abs(np.diag(r)) + >= 10.0 + * EPS + * (n + m_linear_ub) + * np.linalg.norm(r[: np.min(r.shape), : np.min(r.shape)], axis=0) + ) + return n_act, q + + +def _alpha_tr(step, sd, delta): + step_sd = step @ sd + sd_sq = sd @ sd + dist_tr_sq = delta**2.0 - step @ step + temp = np.sqrt(max(step_sd**2.0 + sd_sq * dist_tr_sq, 0.0)) + if step_sd <= 0.0 and sd_sq > TINY * abs(temp - step_sd): + alpha_tr = max((temp - step_sd) / sd_sq, 0.0) + elif abs(temp + step_sd) > TINY * dist_tr_sq: + alpha_tr = max(dist_tr_sq / 
(temp + step_sd), 0.0) + else: + raise ZeroDivisionError + return alpha_tr + + +def _argmax(x): + return np.flatnonzero(x >= np.max(x)) + + +def _argmin(x): + return np.flatnonzero(x <= np.min(x)) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__init__.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fe6b4841ddff3a04bda5cbff744e30681b6963b9 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__init__.py @@ -0,0 +1,18 @@ +from .exceptions import ( + MaxEvalError, + TargetSuccess, + CallbackSuccess, + FeasibleSuccess, +) +from .math import get_arrays_tol, exact_1d_array +from .versions import show_versions + +__all__ = [ + "MaxEvalError", + "TargetSuccess", + "CallbackSuccess", + "FeasibleSuccess", + "get_arrays_tol", + "exact_1d_array", + "show_versions", +] diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/__init__.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bad5256b4075ff3d8073d3356c8fc7cb226c3273 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/exceptions.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3c26e03073de90f9e87651965d924fe3c6ca159 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/exceptions.cpython-310.pyc differ diff --git 
a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/math.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/math.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d8adfcf81362482bbece98ead069a7aa8b1a9ce Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/math.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/versions.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/versions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d452ea32a52840f10d0ba1d22bebd0597617aee Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/__pycache__/versions.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/exceptions.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..c85094894f378a8e3934ad109ea6166e33e4366b --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/exceptions.py @@ -0,0 +1,22 @@ +class MaxEvalError(Exception): + """ + Exception raised when the maximum number of evaluations is reached. + """ + + +class TargetSuccess(Exception): + """ + Exception raised when the target value is reached. + """ + + +class CallbackSuccess(StopIteration): + """ + Exception raised when the callback function raises a ``StopIteration``. + """ + + +class FeasibleSuccess(Exception): + """ + Exception raised when a feasible point of a feasible problem is found. 
+ """ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/math.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/math.py new file mode 100644 index 0000000000000000000000000000000000000000..1b16ae98a0df38752815f5a69d56da20f856f9f9 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/math.py @@ -0,0 +1,77 @@ +import numpy as np + + +EPS = np.finfo(float).eps + + +def get_arrays_tol(*arrays): + """ + Get a relative tolerance for a set of arrays. + + Parameters + ---------- + *arrays: tuple + Set of `numpy.ndarray` to get the tolerance for. + + Returns + ------- + float + Relative tolerance for the set of arrays. + + Raises + ------ + ValueError + If no array is provided. + """ + if len(arrays) == 0: + raise ValueError("At least one array must be provided.") + size = max(array.size for array in arrays) + weight = max( + np.max(np.abs(array[np.isfinite(array)]), initial=1.0) + for array in arrays + ) + return 10.0 * EPS * max(size, 1.0) * weight + + +def exact_1d_array(x, message): + """ + Preprocess a 1-dimensional array. + + Parameters + ---------- + x : array_like + Array to be preprocessed. + message : str + Error message if `x` cannot be interpreter as a 1-dimensional array. + + Returns + ------- + `numpy.ndarray` + Preprocessed array. + """ + x = np.atleast_1d(np.squeeze(x)).astype(float) + if x.ndim != 1: + raise ValueError(message) + return x + + +def exact_2d_array(x, message): + """ + Preprocess a 2-dimensional array. + + Parameters + ---------- + x : array_like + Array to be preprocessed. + message : str + Error message if `x` cannot be interpreter as a 2-dimensional array. + + Returns + ------- + `numpy.ndarray` + Preprocessed array. 
+ """ + x = np.atleast_2d(x).astype(float) + if x.ndim != 2: + raise ValueError(message) + return x diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/versions.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/versions.py new file mode 100644 index 0000000000000000000000000000000000000000..94a0f8f5cef626354f40901cbe06a84287291c1c --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/cobyqa/utils/versions.py @@ -0,0 +1,67 @@ +import os +import platform +import sys +from importlib.metadata import PackageNotFoundError, version + + +def _get_sys_info(): + """ + Get useful system information. + + Returns + ------- + dict + Useful system information. + """ + return { + "python": sys.version.replace(os.linesep, " "), + "executable": sys.executable, + "machine": platform.platform(), + } + + +def _get_deps_info(): + """ + Get the versions of the dependencies. + + Returns + ------- + dict + Versions of the dependencies. + """ + deps = ["cobyqa", "numpy", "scipy", "setuptools", "pip"] + deps_info = {} + for module in deps: + try: + deps_info[module] = version(module) + except PackageNotFoundError: + deps_info[module] = None + return deps_info + + +def show_versions(): + """ + Display useful system and dependencies information. + + When reporting issues, please include this information. 
+ """ + print("System settings") + print("---------------") + sys_info = _get_sys_info() + print( + "\n".join( + f"{k:>{max(map(len, sys_info.keys())) + 1}}: {v}" + for k, v in sys_info.items() + ) + ) + + print() + print("Python dependencies") + print("-------------------") + deps_info = _get_deps_info() + print( + "\n".join( + f"{k:>{max(map(len, deps_info.keys())) + 1}}: {v}" + for k, v in deps_info.items() + ) + ) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__init__.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fcb0cbe5f6078f2a6d48e79e39d2a8a044a89be Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_bunch.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db1750abd78594b2509a306aedb2570596dda739 Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/__pycache__/test_ccallback.cpython-310.pyc differ diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__gcutils.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__gcutils.py new file mode 100644 index 0000000000000000000000000000000000000000..0e397af4fb7e9bc69f31d1e39aa80716469d5470 --- /dev/null +++ 
b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__gcutils.py @@ -0,0 +1,110 @@ +""" Test for assert_deallocated context manager and gc utilities +""" +import gc +from threading import Lock + +from scipy._lib._gcutils import (set_gc_state, gc_state, assert_deallocated, + ReferenceError, IS_PYPY) + +from numpy.testing import assert_equal + +import pytest + + +@pytest.fixture +def gc_lock(): + return Lock() + + +def test_set_gc_state(gc_lock): + with gc_lock: + gc_status = gc.isenabled() + try: + for state in (True, False): + gc.enable() + set_gc_state(state) + assert_equal(gc.isenabled(), state) + gc.disable() + set_gc_state(state) + assert_equal(gc.isenabled(), state) + finally: + if gc_status: + gc.enable() + + +def test_gc_state(gc_lock): + # Test gc_state context manager + with gc_lock: + gc_status = gc.isenabled() + try: + for pre_state in (True, False): + set_gc_state(pre_state) + for with_state in (True, False): + # Check the gc state is with_state in with block + with gc_state(with_state): + assert_equal(gc.isenabled(), with_state) + # And returns to previous state outside block + assert_equal(gc.isenabled(), pre_state) + # Even if the gc state is set explicitly within the block + with gc_state(with_state): + assert_equal(gc.isenabled(), with_state) + set_gc_state(not with_state) + assert_equal(gc.isenabled(), pre_state) + finally: + if gc_status: + gc.enable() + + +@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") +def test_assert_deallocated(gc_lock): + # Ordinary use + class C: + def __init__(self, arg0, arg1, name='myname'): + self.name = name + with gc_lock: + for gc_current in (True, False): + with gc_state(gc_current): + # We are deleting from with-block context, so that's OK + with assert_deallocated(C, 0, 2, 'another name') as c: + assert_equal(c.name, 'another name') + del c + # Or not using the thing in with-block context, also OK + with assert_deallocated(C, 0, 2, name='third name'): + pass + 
assert_equal(gc.isenabled(), gc_current) + + +@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") +def test_assert_deallocated_nodel(): + class C: + pass + with pytest.raises(ReferenceError): + # Need to delete after using if in with-block context + # Note: assert_deallocated(C) needs to be assigned for the test + # to function correctly. It is assigned to _, but _ itself is + # not referenced in the body of the with, it is only there for + # the refcount. + with assert_deallocated(C) as _: + pass + + +@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") +def test_assert_deallocated_circular(): + class C: + def __init__(self): + self._circular = self + with pytest.raises(ReferenceError): + # Circular reference, no automatic garbage collection + with assert_deallocated(C) as c: + del c + + +@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy") +def test_assert_deallocated_circular2(): + class C: + def __init__(self): + self._circular = self + with pytest.raises(ReferenceError): + # Still circular reference, no automatic garbage collection + with assert_deallocated(C): + pass diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__pep440.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__pep440.py new file mode 100644 index 0000000000000000000000000000000000000000..7f5b71c8f1e13b42de2e8e612a005dec409fc025 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__pep440.py @@ -0,0 +1,67 @@ +from pytest import raises as assert_raises +from scipy._lib._pep440 import Version, parse + + +def test_main_versions(): + assert Version('1.8.0') == Version('1.8.0') + for ver in ['1.9.0', '2.0.0', '1.8.1']: + assert Version('1.8.0') < Version(ver) + + for ver in ['1.7.0', '1.7.1', '0.9.9']: + assert Version('1.8.0') > Version(ver) + + +def test_version_1_point_10(): + # regression test for gh-2998. 
+ assert Version('1.9.0') < Version('1.10.0') + assert Version('1.11.0') < Version('1.11.1') + assert Version('1.11.0') == Version('1.11.0') + assert Version('1.99.11') < Version('1.99.12') + + +def test_alpha_beta_rc(): + assert Version('1.8.0rc1') == Version('1.8.0rc1') + for ver in ['1.8.0', '1.8.0rc2']: + assert Version('1.8.0rc1') < Version(ver) + + for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: + assert Version('1.8.0rc1') > Version(ver) + + assert Version('1.8.0b1') > Version('1.8.0a2') + + +def test_dev_version(): + assert Version('1.9.0.dev+Unknown') < Version('1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev+ffffffff', '1.9.0.dev1']: + assert Version('1.9.0.dev+f16acvda') < Version(ver) + + assert Version('1.9.0.dev+f16acvda') == Version('1.9.0.dev+f16acvda') + + +def test_dev_a_b_rc_mixed(): + assert Version('1.9.0a2.dev+f16acvda') == Version('1.9.0a2.dev+f16acvda') + assert Version('1.9.0a2.dev+6acvda54') < Version('1.9.0a2') + + +def test_dev0_version(): + assert Version('1.9.0.dev0+Unknown') < Version('1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']: + assert Version('1.9.0.dev0+f16acvda') < Version(ver) + + assert Version('1.9.0.dev0+f16acvda') == Version('1.9.0.dev0+f16acvda') + + +def test_dev0_a_b_rc_mixed(): + assert Version('1.9.0a2.dev0+f16acvda') == Version('1.9.0a2.dev0+f16acvda') + assert Version('1.9.0a2.dev0+6acvda54') < Version('1.9.0a2') + + +def test_raises(): + for ver in ['1,9.0', '1.7.x']: + assert_raises(ValueError, Version, ver) + +def test_legacy_version(): + # Non-PEP-440 version identifiers always compare less. For NumPy this only + # occurs on dev builds prior to 1.10.0 which are unsupported anyway. 
+ assert parse('invalid') < Version('0.0.0') + assert parse('1.9.0-f16acvda') < Version('1.0.0') diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__testutils.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__testutils.py new file mode 100644 index 0000000000000000000000000000000000000000..88db113d6d5a35c96ecc0a6a36ab42d74be49153 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__testutils.py @@ -0,0 +1,32 @@ +import sys +from scipy._lib._testutils import _parse_size, _get_mem_available +import pytest + + +def test__parse_size(): + expected = { + '12': 12e6, + '12 b': 12, + '12k': 12e3, + ' 12 M ': 12e6, + ' 12 G ': 12e9, + ' 12Tb ': 12e12, + '12 Mib ': 12 * 1024.0**2, + '12Tib': 12 * 1024.0**4, + } + + for inp, outp in sorted(expected.items()): + if outp is None: + with pytest.raises(ValueError): + _parse_size(inp) + else: + assert _parse_size(inp) == outp + + +def test__mem_available(): + # May return None on non-Linux platforms + available = _get_mem_available() + if sys.platform.startswith('linux'): + assert available >= 0 + else: + assert available is None or available >= 0 diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__threadsafety.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__threadsafety.py new file mode 100644 index 0000000000000000000000000000000000000000..87ae85ef318da2b8bb104c4a87faa4e4021c01d5 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__threadsafety.py @@ -0,0 +1,51 @@ +import threading +import time +import traceback + +from numpy.testing import assert_ +from pytest import raises as assert_raises + +from scipy._lib._threadsafety import ReentrancyLock, non_reentrant, ReentrancyError + + +def test_parallel_threads(): + # Check that ReentrancyLock serializes work in parallel threads. 
+ # + # The test is not fully deterministic, and may succeed falsely if + # the timings go wrong. + + lock = ReentrancyLock("failure") + + failflag = [False] + exceptions_raised = [] + + def worker(k): + try: + with lock: + assert_(not failflag[0]) + failflag[0] = True + time.sleep(0.1 * k) + assert_(failflag[0]) + failflag[0] = False + except Exception: + exceptions_raised.append(traceback.format_exc(2)) + + threads = [threading.Thread(target=lambda k=k: worker(k)) + for k in range(3)] + for t in threads: + t.start() + for t in threads: + t.join() + + exceptions_raised = "\n".join(exceptions_raised) + assert_(not exceptions_raised, exceptions_raised) + + +def test_reentering(): + # Check that ReentrancyLock prevents re-entering from the same thread. + + @non_reentrant() + def func(x): + return func(x) + + assert_raises(ReentrancyError, func, 0) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__util.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__util.py new file mode 100644 index 0000000000000000000000000000000000000000..2a4d22ce468ec951355961cb77dda15b56899818 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test__util.py @@ -0,0 +1,657 @@ +from multiprocessing import Pool +from multiprocessing.pool import Pool as PWL +import re +import math +from fractions import Fraction + +import numpy as np +from numpy.testing import assert_equal, assert_ +import pytest +from pytest import raises as assert_raises +import hypothesis.extra.numpy as npst +from hypothesis import given, strategies, reproduce_failure # noqa: F401 +from scipy.conftest import array_api_compatible, skip_xp_invalid_arg + +from scipy._lib._array_api import (xp_assert_equal, xp_assert_close, is_numpy, + xp_copy, is_array_api_strict) +from scipy._lib._util import (_aligned_zeros, check_random_state, MapWrapper, + getfullargspec_no_self, FullArgSpec, + rng_integers, _validate_int, _rename_parameter, + _contains_nan, 
_rng_html_rewrite, _lazywhere) +from scipy import cluster, interpolate, linalg, optimize, sparse, spatial, stats + +skip_xp_backends = pytest.mark.skip_xp_backends + + +@pytest.mark.slow +def test__aligned_zeros(): + niter = 10 + + def check(shape, dtype, order, align): + err_msg = repr((shape, dtype, order, align)) + x = _aligned_zeros(shape, dtype, order, align=align) + if align is None: + align = np.dtype(dtype).alignment + assert_equal(x.__array_interface__['data'][0] % align, 0) + if hasattr(shape, '__len__'): + assert_equal(x.shape, shape, err_msg) + else: + assert_equal(x.shape, (shape,), err_msg) + assert_equal(x.dtype, dtype) + if order == "C": + assert_(x.flags.c_contiguous, err_msg) + elif order == "F": + if x.size > 0: + # Size-0 arrays get invalid flags on NumPy 1.5 + assert_(x.flags.f_contiguous, err_msg) + elif order is None: + assert_(x.flags.c_contiguous, err_msg) + else: + raise ValueError() + + # try various alignments + for align in [1, 2, 3, 4, 8, 16, 32, 64, None]: + for n in [0, 1, 3, 11]: + for order in ["C", "F", None]: + for dtype in [np.uint8, np.float64]: + for shape in [n, (1, 2, 3, n)]: + for j in range(niter): + check(shape, dtype, order, align) + + +def test_check_random_state(): + # If seed is None, return the RandomState singleton used by np.random. + # If seed is an int, return a new RandomState instance seeded with seed. + # If seed is already a RandomState instance, return it. + # Otherwise raise ValueError. 
+ rsi = check_random_state(1) + assert_equal(type(rsi), np.random.RandomState) + rsi = check_random_state(rsi) + assert_equal(type(rsi), np.random.RandomState) + rsi = check_random_state(None) + assert_equal(type(rsi), np.random.RandomState) + assert_raises(ValueError, check_random_state, 'a') + rg = np.random.Generator(np.random.PCG64()) + rsi = check_random_state(rg) + assert_equal(type(rsi), np.random.Generator) + + +def test_getfullargspec_no_self(): + p = MapWrapper(1) + argspec = getfullargspec_no_self(p.__init__) + assert_equal(argspec, FullArgSpec(['pool'], None, None, (1,), [], + None, {})) + argspec = getfullargspec_no_self(p.__call__) + assert_equal(argspec, FullArgSpec(['func', 'iterable'], None, None, None, + [], None, {})) + + class _rv_generic: + def _rvs(self, a, b=2, c=3, *args, size=None, **kwargs): + return None + + rv_obj = _rv_generic() + argspec = getfullargspec_no_self(rv_obj._rvs) + assert_equal(argspec, FullArgSpec(['a', 'b', 'c'], 'args', 'kwargs', + (2, 3), ['size'], {'size': None}, {})) + + +def test_mapwrapper_serial(): + in_arg = np.arange(10.) + out_arg = np.sin(in_arg) + + p = MapWrapper(1) + assert_(p._mapfunc is map) + assert_(p.pool is None) + assert_(p._own_pool is False) + out = list(p(np.sin, in_arg)) + assert_equal(out, out_arg) + + with assert_raises(RuntimeError): + p = MapWrapper(0) + + +def test_pool(): + with Pool(2) as p: + p.map(math.sin, [1, 2, 3, 4]) + + +def test_mapwrapper_parallel(): + in_arg = np.arange(10.) + out_arg = np.sin(in_arg) + + with MapWrapper(2) as p: + out = p(np.sin, in_arg) + assert_equal(list(out), out_arg) + + assert_(p._own_pool is True) + assert_(isinstance(p.pool, PWL)) + assert_(p._mapfunc is not None) + + # the context manager should've closed the internal pool + # check that it has by asking it to calculate again. 
+ with assert_raises(Exception) as excinfo: + p(np.sin, in_arg) + + assert_(excinfo.type is ValueError) + + # can also set a PoolWrapper up with a map-like callable instance + with Pool(2) as p: + q = MapWrapper(p.map) + + assert_(q._own_pool is False) + q.close() + + # closing the PoolWrapper shouldn't close the internal pool + # because it didn't create it + out = p.map(np.sin, in_arg) + assert_equal(list(out), out_arg) + + +def test_rng_integers(): + rng = np.random.RandomState() + + # test that numbers are inclusive of high point + arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True) + assert np.max(arr) == 5 + assert np.min(arr) == 2 + assert arr.shape == (100, ) + + # test that numbers are inclusive of high point + arr = rng_integers(rng, low=5, size=100, endpoint=True) + assert np.max(arr) == 5 + assert np.min(arr) == 0 + assert arr.shape == (100, ) + + # test that numbers are exclusive of high point + arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False) + assert np.max(arr) == 4 + assert np.min(arr) == 2 + assert arr.shape == (100, ) + + # test that numbers are exclusive of high point + arr = rng_integers(rng, low=5, size=100, endpoint=False) + assert np.max(arr) == 4 + assert np.min(arr) == 0 + assert arr.shape == (100, ) + + # now try with np.random.Generator + try: + rng = np.random.default_rng() + except AttributeError: + return + + # test that numbers are inclusive of high point + arr = rng_integers(rng, low=2, high=5, size=100, endpoint=True) + assert np.max(arr) == 5 + assert np.min(arr) == 2 + assert arr.shape == (100, ) + + # test that numbers are inclusive of high point + arr = rng_integers(rng, low=5, size=100, endpoint=True) + assert np.max(arr) == 5 + assert np.min(arr) == 0 + assert arr.shape == (100, ) + + # test that numbers are exclusive of high point + arr = rng_integers(rng, low=2, high=5, size=100, endpoint=False) + assert np.max(arr) == 4 + assert np.min(arr) == 2 + assert arr.shape == (100, ) + + # test that 
numbers are exclusive of high point + arr = rng_integers(rng, low=5, size=100, endpoint=False) + assert np.max(arr) == 4 + assert np.min(arr) == 0 + assert arr.shape == (100, ) + + +class TestValidateInt: + + @pytest.mark.parametrize('n', [4, np.uint8(4), np.int16(4), np.array(4)]) + def test_validate_int(self, n): + n = _validate_int(n, 'n') + assert n == 4 + + @pytest.mark.parametrize('n', [4.0, np.array([4]), Fraction(4, 1)]) + def test_validate_int_bad(self, n): + with pytest.raises(TypeError, match='n must be an integer'): + _validate_int(n, 'n') + + def test_validate_int_below_min(self): + with pytest.raises(ValueError, match='n must be an integer not ' + 'less than 0'): + _validate_int(-1, 'n', 0) + + +class TestRenameParameter: + # check that wrapper `_rename_parameter` for backward-compatible + # keyword renaming works correctly + + # Example method/function that still accepts keyword `old` + @_rename_parameter("old", "new") + def old_keyword_still_accepted(self, new): + return new + + # Example method/function for which keyword `old` is deprecated + @_rename_parameter("old", "new", dep_version="1.9.0") + def old_keyword_deprecated(self, new): + return new + + def test_old_keyword_still_accepted(self): + # positional argument and both keyword work identically + res1 = self.old_keyword_still_accepted(10) + res2 = self.old_keyword_still_accepted(new=10) + res3 = self.old_keyword_still_accepted(old=10) + assert res1 == res2 == res3 == 10 + + # unexpected keyword raises an error + message = re.escape("old_keyword_still_accepted() got an unexpected") + with pytest.raises(TypeError, match=message): + self.old_keyword_still_accepted(unexpected=10) + + # multiple values for the same parameter raises an error + message = re.escape("old_keyword_still_accepted() got multiple") + with pytest.raises(TypeError, match=message): + self.old_keyword_still_accepted(10, new=10) + with pytest.raises(TypeError, match=message): + self.old_keyword_still_accepted(10, old=10) + 
with pytest.raises(TypeError, match=message): + self.old_keyword_still_accepted(new=10, old=10) + + @pytest.fixture + def kwarg_lock(self): + from threading import Lock + return Lock() + + def test_old_keyword_deprecated(self, kwarg_lock): + # positional argument and both keyword work identically, + # but use of old keyword results in DeprecationWarning + dep_msg = "Use of keyword argument `old` is deprecated" + res1 = self.old_keyword_deprecated(10) + res2 = self.old_keyword_deprecated(new=10) + # pytest warning filter is not thread-safe, enforce serialization + with kwarg_lock: + with pytest.warns(DeprecationWarning, match=dep_msg): + res3 = self.old_keyword_deprecated(old=10) + assert res1 == res2 == res3 == 10 + + # unexpected keyword raises an error + message = re.escape("old_keyword_deprecated() got an unexpected") + with pytest.raises(TypeError, match=message): + self.old_keyword_deprecated(unexpected=10) + + # multiple values for the same parameter raises an error and, + # if old keyword is used, results in DeprecationWarning + message = re.escape("old_keyword_deprecated() got multiple") + with pytest.raises(TypeError, match=message): + self.old_keyword_deprecated(10, new=10) + with kwarg_lock: + with pytest.raises(TypeError, match=message), \ + pytest.warns(DeprecationWarning, match=dep_msg): + # breakpoint() + self.old_keyword_deprecated(10, old=10) + with kwarg_lock: + with pytest.raises(TypeError, match=message), \ + pytest.warns(DeprecationWarning, match=dep_msg): + self.old_keyword_deprecated(new=10, old=10) + + +class TestContainsNaNTest: + + def test_policy(self): + data = np.array([1, 2, 3, np.nan]) + + contains_nan, nan_policy = _contains_nan(data, nan_policy="propagate") + assert contains_nan + assert nan_policy == "propagate" + + contains_nan, nan_policy = _contains_nan(data, nan_policy="omit") + assert contains_nan + assert nan_policy == "omit" + + msg = "The input contains nan values" + with pytest.raises(ValueError, match=msg): + 
_contains_nan(data, nan_policy="raise") + + msg = "nan_policy must be one of" + with pytest.raises(ValueError, match=msg): + _contains_nan(data, nan_policy="nan") + + def test_contains_nan(self): + data1 = np.array([1, 2, 3]) + assert not _contains_nan(data1)[0] + + data2 = np.array([1, 2, 3, np.nan]) + assert _contains_nan(data2)[0] + + data3 = np.array([np.nan, 2, 3, np.nan]) + assert _contains_nan(data3)[0] + + data4 = np.array([[1, 2], [3, 4]]) + assert not _contains_nan(data4)[0] + + data5 = np.array([[1, 2], [3, np.nan]]) + assert _contains_nan(data5)[0] + + @skip_xp_invalid_arg + def test_contains_nan_with_strings(self): + data1 = np.array([1, 2, "3", np.nan]) # converted to string "nan" + assert not _contains_nan(data1)[0] + + data2 = np.array([1, 2, "3", np.nan], dtype='object') + assert _contains_nan(data2)[0] + + data3 = np.array([["1", 2], [3, np.nan]]) # converted to string "nan" + assert not _contains_nan(data3)[0] + + data4 = np.array([["1", 2], [3, np.nan]], dtype='object') + assert _contains_nan(data4)[0] + + @skip_xp_backends('jax.numpy', + reason="JAX arrays do not support item assignment") + @pytest.mark.usefixtures("skip_xp_backends") + @array_api_compatible + @pytest.mark.parametrize("nan_policy", ['propagate', 'omit', 'raise']) + def test_array_api(self, xp, nan_policy): + rng = np.random.default_rng(932347235892482) + x0 = rng.random(size=(2, 3, 4)) + x = xp.asarray(x0) + x_nan = xp_copy(x, xp=xp) + x_nan[1, 2, 1] = np.nan + + contains_nan, nan_policy_out = _contains_nan(x, nan_policy=nan_policy) + assert not contains_nan + assert nan_policy_out == nan_policy + + if nan_policy == 'raise': + message = 'The input contains...' + with pytest.raises(ValueError, match=message): + _contains_nan(x_nan, nan_policy=nan_policy) + elif nan_policy == 'omit' and not is_numpy(xp): + message = "`nan_policy='omit' is incompatible..." 
+ with pytest.raises(ValueError, match=message): + _contains_nan(x_nan, nan_policy=nan_policy) + elif nan_policy == 'propagate': + contains_nan, nan_policy_out = _contains_nan( + x_nan, nan_policy=nan_policy) + assert contains_nan + assert nan_policy_out == nan_policy + + +def test__rng_html_rewrite(): + def mock_str(): + lines = [ + 'np.random.default_rng(8989843)', + 'np.random.default_rng(seed)', + 'np.random.default_rng(0x9a71b21474694f919882289dc1559ca)', + ' bob ', + ] + return lines + + res = _rng_html_rewrite(mock_str)() + ref = [ + 'np.random.default_rng()', + 'np.random.default_rng(seed)', + 'np.random.default_rng()', + ' bob ', + ] + + assert res == ref + + +class TestTransitionToRNG: + def kmeans(self, **kwargs): + rng = np.random.default_rng(3458934594269824562) + return cluster.vq.kmeans2(rng.random(size=(20, 3)), 3, **kwargs) + + def kmeans2(self, **kwargs): + rng = np.random.default_rng(3458934594269824562) + return cluster.vq.kmeans2(rng.random(size=(20, 3)), 3, **kwargs) + + def barycentric(self, **kwargs): + rng = np.random.default_rng(3458934594269824562) + x1, x2, y1 = rng.random((3, 10)) + f = interpolate.BarycentricInterpolator(x1, y1, **kwargs) + return f(x2) + + def clarkson_woodruff_transform(self, **kwargs): + rng = np.random.default_rng(3458934594269824562) + return linalg.clarkson_woodruff_transform(rng.random((10, 10)), 3, **kwargs) + + def basinhopping(self, **kwargs): + rng = np.random.default_rng(3458934594269824562) + return optimize.basinhopping(optimize.rosen, rng.random(3), **kwargs).x + + def opt(self, fun, **kwargs): + rng = np.random.default_rng(3458934594269824562) + bounds = optimize.Bounds(-rng.random(3) * 10, rng.random(3) * 10) + return fun(optimize.rosen, bounds, **kwargs).x + + def differential_evolution(self, **kwargs): + return self.opt(optimize.differential_evolution, **kwargs) + + def dual_annealing(self, **kwargs): + return self.opt(optimize.dual_annealing, **kwargs) + + def check_grad(self, **kwargs): + rng = 
np.random.default_rng(3458934594269824562) + x = rng.random(3) + return optimize.check_grad(optimize.rosen, optimize.rosen_der, x, + direction='random', **kwargs) + + def random_array(self, **kwargs): + return sparse.random_array((10, 10), density=1.0, **kwargs).toarray() + + def random(self, **kwargs): + return sparse.random(10, 10, density=1.0, **kwargs).toarray() + + def rand(self, **kwargs): + return sparse.rand(10, 10, density=1.0, **kwargs).toarray() + + def svds(self, **kwargs): + rng = np.random.default_rng(3458934594269824562) + A = rng.random((10, 10)) + return sparse.linalg.svds(A, **kwargs) + + def random_rotation(self, **kwargs): + return spatial.transform.Rotation.random(3, **kwargs).as_matrix() + + def goodness_of_fit(self, **kwargs): + rng = np.random.default_rng(3458934594269824562) + data = rng.random(100) + return stats.goodness_of_fit(stats.laplace, data, **kwargs).pvalue + + def permutation_test(self, **kwargs): + rng = np.random.default_rng(3458934594269824562) + data = tuple(rng.random((2, 100))) + def statistic(x, y, axis): return np.mean(x, axis=axis) - np.mean(y, axis=axis) + return stats.permutation_test(data, statistic, **kwargs).pvalue + + def bootstrap(self, **kwargs): + rng = np.random.default_rng(3458934594269824562) + data = (rng.random(100),) + return stats.bootstrap(data, np.mean, **kwargs).confidence_interval + + def dunnett(self, **kwargs): + rng = np.random.default_rng(3458934594269824562) + x, y, control = rng.random((3, 100)) + return stats.dunnett(x, y, control=control, **kwargs).pvalue + + def sobol_indices(self, **kwargs): + def f_ishigami(x): return (np.sin(x[0]) + 7 * np.sin(x[1]) ** 2 + + 0.1 * (x[2] ** 4) * np.sin(x[0])) + dists = [stats.uniform(loc=-np.pi, scale=2 * np.pi), + stats.uniform(loc=-np.pi, scale=2 * np.pi), + stats.uniform(loc=-np.pi, scale=2 * np.pi)] + res = stats.sobol_indices(func=f_ishigami, n=1024, dists=dists, **kwargs) + return res.first_order + + def qmc_engine(self, engine, **kwargs): + qrng = 
engine(d=1, **kwargs) + return qrng.random(4) + + def halton(self, **kwargs): + return self.qmc_engine(stats.qmc.Halton, **kwargs) + + def sobol(self, **kwargs): + return self.qmc_engine(stats.qmc.Sobol, **kwargs) + + def latin_hypercube(self, **kwargs): + return self.qmc_engine(stats.qmc.LatinHypercube, **kwargs) + + def poisson_disk(self, **kwargs): + return self.qmc_engine(stats.qmc.PoissonDisk, **kwargs) + + def multivariate_normal_qmc(self, **kwargs): + X = stats.qmc.MultivariateNormalQMC([0], **kwargs) + return X.random(4) + + def multinomial_qmc(self, **kwargs): + X = stats.qmc.MultinomialQMC([0.5, 0.5], 4, **kwargs) + return X.random(4) + + def permutation_method(self, **kwargs): + rng = np.random.default_rng(3458934594269824562) + data = tuple(rng.random((2, 100))) + method = stats.PermutationMethod(**kwargs) + return stats.pearsonr(*data, method=method).pvalue + + def bootstrap_method(self, **kwargs): + rng = np.random.default_rng(3458934594269824562) + data = tuple(rng.random((2, 100))) + res = stats.pearsonr(*data) + method = stats.BootstrapMethod(**kwargs) + return res.confidence_interval(method=method) + + @pytest.mark.fail_slow(10) + @pytest.mark.slow + @pytest.mark.parametrize("method, arg_name", [ + (kmeans, "seed"), + (kmeans2, "seed"), + (barycentric, "random_state"), + (clarkson_woodruff_transform, "seed"), + (basinhopping, "seed"), + (differential_evolution, "seed"), + (dual_annealing, "seed"), + (check_grad, "seed"), + (random_array, 'random_state'), + (random, 'random_state'), + (rand, 'random_state'), + (svds, "random_state"), + (random_rotation, "random_state"), + (goodness_of_fit, "random_state"), + (permutation_test, "random_state"), + (bootstrap, "random_state"), + (permutation_method, "random_state"), + (bootstrap_method, "random_state"), + (dunnett, "random_state"), + (sobol_indices, "random_state"), + (halton, "seed"), + (sobol, "seed"), + (latin_hypercube, "seed"), + (poisson_disk, "seed"), + (multivariate_normal_qmc, "seed"), + 
(multinomial_qmc, "seed"), + ]) + def test_rng_deterministic(self, method, arg_name): + np.random.seed(None) + seed = 2949672964 + + rng = np.random.default_rng(seed) + message = "got multiple values for argument now known as `rng`" + with pytest.raises(TypeError, match=message): + method(self, **{'rng': rng, arg_name: seed}) + + rng = np.random.default_rng(seed) + res1 = method(self, rng=rng) + res2 = method(self, rng=seed) + assert_equal(res2, res1) + + if method.__name__ in {"dunnett", "sobol_indices"}: + # the two kwargs have essentially the same behavior for these functions + res3 = method(self, **{arg_name: seed}) + assert_equal(res3, res1) + return + + rng = np.random.RandomState(seed) + res1 = method(self, **{arg_name: rng}) + res2 = method(self, **{arg_name: seed}) + + if method.__name__ in {"halton", "sobol", "latin_hypercube", "poisson_disk", + "multivariate_normal_qmc", "multinomial_qmc"}: + # For these, passing `random_state=RandomState(seed)` is not the same as + # passing integer `seed`. + res1b = method(self, **{arg_name: np.random.RandomState(seed)}) + assert_equal(res1b, res1) + res2b = method(self, **{arg_name: seed}) + assert_equal(res2b, res2) + return + + np.random.seed(seed) + res3 = method(self, **{arg_name: None}) + assert_equal(res2, res1) + assert_equal(res3, res1) + + +class TestLazywhere: + n_arrays = strategies.integers(min_value=1, max_value=3) + rng_seed = strategies.integers(min_value=1000000000, max_value=9999999999) + dtype = strategies.sampled_from((np.float32, np.float64)) + p = strategies.floats(min_value=0, max_value=1) + data = strategies.data() + + @pytest.mark.fail_slow(10) + @pytest.mark.filterwarnings('ignore::RuntimeWarning') # overflows, etc. 
+ @skip_xp_backends('jax.numpy', + reason="JAX arrays do not support item assignment") + @pytest.mark.usefixtures("skip_xp_backends") + @array_api_compatible + @given(n_arrays=n_arrays, rng_seed=rng_seed, dtype=dtype, p=p, data=data) + @pytest.mark.thread_unsafe + def test_basic(self, n_arrays, rng_seed, dtype, p, data, xp): + mbs = npst.mutually_broadcastable_shapes(num_shapes=n_arrays+1, + min_side=0) + input_shapes, result_shape = data.draw(mbs) + cond_shape, *shapes = input_shapes + elements = {'allow_subnormal': False} # cupy/cupy#8382 + fillvalue = xp.asarray(data.draw(npst.arrays(dtype=dtype, shape=tuple(), + elements=elements))) + float_fillvalue = float(fillvalue) + arrays = [xp.asarray(data.draw(npst.arrays(dtype=dtype, shape=shape))) + for shape in shapes] + + def f(*args): + return sum(arg for arg in args) + + def f2(*args): + return sum(arg for arg in args) / 2 + + rng = np.random.default_rng(rng_seed) + cond = xp.asarray(rng.random(size=cond_shape) > p) + + res1 = _lazywhere(cond, arrays, f, fillvalue) + res2 = _lazywhere(cond, arrays, f, f2=f2) + if not is_array_api_strict(xp): + res3 = _lazywhere(cond, arrays, f, float_fillvalue) + + # Ensure arrays are at least 1d to follow sane type promotion rules. + # This can be removed when minimum supported NumPy is 2.0 + if xp == np: + cond, fillvalue, *arrays = np.atleast_1d(cond, fillvalue, *arrays) + + ref1 = xp.where(cond, f(*arrays), fillvalue) + ref2 = xp.where(cond, f(*arrays), f2(*arrays)) + if not is_array_api_strict(xp): + # Array API standard doesn't currently define behavior when fillvalue is a + # Python scalar. When it does, test can be run with array_api_strict, too. 
+ ref3 = xp.where(cond, f(*arrays), float_fillvalue) + + if xp == np: # because we ensured arrays are at least 1d + ref1 = ref1.reshape(result_shape) + ref2 = ref2.reshape(result_shape) + ref3 = ref3.reshape(result_shape) + + xp_assert_close(res1, ref1, rtol=2e-16) + xp_assert_equal(res2, ref2) + if not is_array_api_strict(xp): + xp_assert_equal(res3, ref3) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_array_api.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_array_api.py new file mode 100644 index 0000000000000000000000000000000000000000..1a3806af71b593c4abc363a65958b75c05365044 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_array_api.py @@ -0,0 +1,187 @@ +import numpy as np +import pytest + +from scipy.conftest import array_api_compatible +from scipy._lib._array_api import ( + _GLOBAL_CONFIG, array_namespace, _asarray, xp_copy, xp_assert_equal, is_numpy, + np_compat, +) +from scipy._lib._array_api_no_0d import xp_assert_equal as xp_assert_equal_no_0d + +skip_xp_backends = pytest.mark.skip_xp_backends + + +@pytest.mark.skipif(not _GLOBAL_CONFIG["SCIPY_ARRAY_API"], + reason="Array API test; set environment variable SCIPY_ARRAY_API=1 to run it") +class TestArrayAPI: + + def test_array_namespace(self): + x, y = np.array([0, 1, 2]), np.array([0, 1, 2]) + xp = array_namespace(x, y) + assert 'array_api_compat.numpy' in xp.__name__ + + _GLOBAL_CONFIG["SCIPY_ARRAY_API"] = False + xp = array_namespace(x, y) + assert 'array_api_compat.numpy' in xp.__name__ + _GLOBAL_CONFIG["SCIPY_ARRAY_API"] = True + + @array_api_compatible + def test_asarray(self, xp): + x, y = _asarray([0, 1, 2], xp=xp), _asarray(np.arange(3), xp=xp) + ref = xp.asarray([0, 1, 2]) + xp_assert_equal(x, ref) + xp_assert_equal(y, ref) + + @pytest.mark.filterwarnings("ignore: the matrix subclass") + def test_raises(self): + msg = "of type `numpy.ma.MaskedArray` are not supported" + with pytest.raises(TypeError, 
match=msg): + array_namespace(np.ma.array(1), np.array(1)) + + msg = "of type `numpy.matrix` are not supported" + with pytest.raises(TypeError, match=msg): + array_namespace(np.array(1), np.matrix(1)) + + msg = "only boolean and numerical dtypes are supported" + with pytest.raises(TypeError, match=msg): + array_namespace([object()]) + with pytest.raises(TypeError, match=msg): + array_namespace('abc') + + def test_array_likes(self): + # should be no exceptions + array_namespace([0, 1, 2]) + array_namespace(1, 2, 3) + array_namespace(1) + + @skip_xp_backends('jax.numpy', + reason="JAX arrays do not support item assignment") + @pytest.mark.usefixtures("skip_xp_backends") + @array_api_compatible + def test_copy(self, xp): + for _xp in [xp, None]: + x = xp.asarray([1, 2, 3]) + y = xp_copy(x, xp=_xp) + # with numpy we'd want to use np.shared_memory, but that's not specified + # in the array-api + x[0] = 10 + x[1] = 11 + x[2] = 12 + + assert x[0] != y[0] + assert x[1] != y[1] + assert x[2] != y[2] + assert id(x) != id(y) + + @array_api_compatible + @pytest.mark.parametrize('dtype', ['int32', 'int64', 'float32', 'float64']) + @pytest.mark.parametrize('shape', [(), (3,)]) + def test_strict_checks(self, xp, dtype, shape): + # Check that `_strict_check` behaves as expected + dtype = getattr(xp, dtype) + x = xp.broadcast_to(xp.asarray(1, dtype=dtype), shape) + x = x if shape else x[()] + y = np_compat.asarray(1)[()] + + kwarg_names = ["check_namespace", "check_dtype", "check_shape", "check_0d"] + options = dict(zip(kwarg_names, [True, False, False, False])) + if xp == np: + xp_assert_equal(x, y, **options) + else: + with pytest.raises(AssertionError, match="Namespaces do not match."): + xp_assert_equal(x, y, **options) + + options = dict(zip(kwarg_names, [False, True, False, False])) + if y.dtype.name in str(x.dtype): + xp_assert_equal(x, y, **options) + else: + with pytest.raises(AssertionError, match="dtypes do not match."): + xp_assert_equal(x, y, **options) + + options = 
dict(zip(kwarg_names, [False, False, True, False])) + if x.shape == y.shape: + xp_assert_equal(x, y, **options) + else: + with pytest.raises(AssertionError, match="Shapes do not match."): + xp_assert_equal(x, xp.asarray(y), **options) + + options = dict(zip(kwarg_names, [False, False, False, True])) + if is_numpy(xp) and x.shape == y.shape: + xp_assert_equal(x, y, **options) + elif is_numpy(xp): + with pytest.raises(AssertionError, match="Array-ness does not match."): + xp_assert_equal(x, y, **options) + + + @array_api_compatible + def test_check_scalar(self, xp): + if not is_numpy(xp): + pytest.skip("Scalars only exist in NumPy") + + # identity always passes + xp_assert_equal(xp.float64(0), xp.float64(0)) + xp_assert_equal(xp.asarray(0.), xp.asarray(0.)) + xp_assert_equal(xp.float64(0), xp.float64(0), check_0d=False) + xp_assert_equal(xp.asarray(0.), xp.asarray(0.), check_0d=False) + + # Check default convention: 0d-arrays are distinguished from scalars + message = "Array-ness does not match:.*" + with pytest.raises(AssertionError, match=message): + xp_assert_equal(xp.asarray(0.), xp.float64(0)) + with pytest.raises(AssertionError, match=message): + xp_assert_equal(xp.float64(0), xp.asarray(0.)) + with pytest.raises(AssertionError, match=message): + xp_assert_equal(xp.asarray(42), xp.int64(42)) + with pytest.raises(AssertionError, match=message): + xp_assert_equal(xp.int64(42), xp.asarray(42)) + + # with `check_0d=False`, scalars-vs-0d passes (if values match) + xp_assert_equal(xp.asarray(0.), xp.float64(0), check_0d=False) + xp_assert_equal(xp.float64(0), xp.asarray(0.), check_0d=False) + # also with regular python objects + xp_assert_equal(xp.asarray(0.), 0., check_0d=False) + xp_assert_equal(0., xp.asarray(0.), check_0d=False) + xp_assert_equal(xp.asarray(42), 42, check_0d=False) + xp_assert_equal(42, xp.asarray(42), check_0d=False) + + # as an alternative to `check_0d=False`, explicitly expect scalar + xp_assert_equal(xp.float64(0), xp.asarray(0.)[()]) + + + 
@array_api_compatible + def test_check_scalar_no_0d(self, xp): + if not is_numpy(xp): + pytest.skip("Scalars only exist in NumPy") + + # identity passes, if first argument is not 0d (or check_0d=True) + xp_assert_equal_no_0d(xp.float64(0), xp.float64(0)) + xp_assert_equal_no_0d(xp.float64(0), xp.float64(0), check_0d=True) + xp_assert_equal_no_0d(xp.asarray(0.), xp.asarray(0.), check_0d=True) + + # by default, 0d values are forbidden as the first argument + message = "Result is a NumPy 0d-array.*" + with pytest.raises(AssertionError, match=message): + xp_assert_equal_no_0d(xp.asarray(0.), xp.asarray(0.)) + with pytest.raises(AssertionError, match=message): + xp_assert_equal_no_0d(xp.asarray(0.), xp.float64(0)) + with pytest.raises(AssertionError, match=message): + xp_assert_equal_no_0d(xp.asarray(42), xp.int64(42)) + + # Check default convention: 0d-arrays are NOT distinguished from scalars + xp_assert_equal_no_0d(xp.float64(0), xp.asarray(0.)) + xp_assert_equal_no_0d(xp.int64(42), xp.asarray(42)) + + # opt in to 0d-check remains possible + message = "Array-ness does not match:.*" + with pytest.raises(AssertionError, match=message): + xp_assert_equal_no_0d(xp.asarray(0.), xp.float64(0), check_0d=True) + with pytest.raises(AssertionError, match=message): + xp_assert_equal_no_0d(xp.float64(0), xp.asarray(0.), check_0d=True) + with pytest.raises(AssertionError, match=message): + xp_assert_equal_no_0d(xp.asarray(42), xp.int64(0), check_0d=True) + with pytest.raises(AssertionError, match=message): + xp_assert_equal_no_0d(xp.int64(0), xp.asarray(42), check_0d=True) + + # scalars-vs-0d passes (if values match) also with regular python objects + xp_assert_equal_no_0d(0., xp.asarray(0.)) + xp_assert_equal_no_0d(42, xp.asarray(42)) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_bunch.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_bunch.py new file mode 100644 index 
0000000000000000000000000000000000000000..f19ca377129b925cad732dd25bf3089c646f923f --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_bunch.py @@ -0,0 +1,162 @@ +import pytest +import pickle +from numpy.testing import assert_equal +from scipy._lib._bunch import _make_tuple_bunch + + +# `Result` is defined at the top level of the module so it can be +# used to test pickling. +Result = _make_tuple_bunch('Result', ['x', 'y', 'z'], ['w', 'beta']) + + +class TestMakeTupleBunch: + + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + # Tests with Result + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + def setup_method(self): + # Set up an instance of Result. + self.result = Result(x=1, y=2, z=3, w=99, beta=0.5) + + def test_attribute_access(self): + assert_equal(self.result.x, 1) + assert_equal(self.result.y, 2) + assert_equal(self.result.z, 3) + assert_equal(self.result.w, 99) + assert_equal(self.result.beta, 0.5) + + def test_indexing(self): + assert_equal(self.result[0], 1) + assert_equal(self.result[1], 2) + assert_equal(self.result[2], 3) + assert_equal(self.result[-1], 3) + with pytest.raises(IndexError, match='index out of range'): + self.result[3] + + def test_unpacking(self): + x0, y0, z0 = self.result + assert_equal((x0, y0, z0), (1, 2, 3)) + assert_equal(self.result, (1, 2, 3)) + + def test_slice(self): + assert_equal(self.result[1:], (2, 3)) + assert_equal(self.result[::2], (1, 3)) + assert_equal(self.result[::-1], (3, 2, 1)) + + def test_len(self): + assert_equal(len(self.result), 3) + + def test_repr(self): + s = repr(self.result) + assert_equal(s, 'Result(x=1, y=2, z=3, w=99, beta=0.5)') + + def test_hash(self): + assert_equal(hash(self.result), hash((1, 2, 3))) + + def test_pickle(self): + s = pickle.dumps(self.result) + obj = pickle.loads(s) + assert isinstance(obj, Result) + assert_equal(obj.x, self.result.x) + assert_equal(obj.y, self.result.y) + assert_equal(obj.z, 
self.result.z) + assert_equal(obj.w, self.result.w) + assert_equal(obj.beta, self.result.beta) + + def test_read_only_existing(self): + with pytest.raises(AttributeError, match="can't set attribute"): + self.result.x = -1 + + def test_read_only_new(self): + self.result.plate_of_shrimp = "lattice of coincidence" + assert self.result.plate_of_shrimp == "lattice of coincidence" + + def test_constructor_missing_parameter(self): + with pytest.raises(TypeError, match='missing'): + # `w` is missing. + Result(x=1, y=2, z=3, beta=0.75) + + def test_constructor_incorrect_parameter(self): + with pytest.raises(TypeError, match='unexpected'): + # `foo` is not an existing field. + Result(x=1, y=2, z=3, w=123, beta=0.75, foo=999) + + def test_module(self): + m = 'scipy._lib.tests.test_bunch' + assert_equal(Result.__module__, m) + assert_equal(self.result.__module__, m) + + def test_extra_fields_per_instance(self): + # This test exists to ensure that instances of the same class + # store their own values for the extra fields. That is, the values + # are stored per instance and not in the class. + result1 = Result(x=1, y=2, z=3, w=-1, beta=0.0) + result2 = Result(x=4, y=5, z=6, w=99, beta=1.0) + assert_equal(result1.w, -1) + assert_equal(result1.beta, 0.0) + # The rest of these checks aren't essential, but let's check + # them anyway. 
+ assert_equal(result1[:], (1, 2, 3)) + assert_equal(result2.w, 99) + assert_equal(result2.beta, 1.0) + assert_equal(result2[:], (4, 5, 6)) + + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + # Other tests + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + def test_extra_field_names_is_optional(self): + Square = _make_tuple_bunch('Square', ['width', 'height']) + sq = Square(width=1, height=2) + assert_equal(sq.width, 1) + assert_equal(sq.height, 2) + s = repr(sq) + assert_equal(s, 'Square(width=1, height=2)') + + def test_tuple_like(self): + Tup = _make_tuple_bunch('Tup', ['a', 'b']) + tu = Tup(a=1, b=2) + assert isinstance(tu, tuple) + assert isinstance(tu + (1,), tuple) + + def test_explicit_module(self): + m = 'some.module.name' + Foo = _make_tuple_bunch('Foo', ['x'], ['a', 'b'], module=m) + foo = Foo(x=1, a=355, b=113) + assert_equal(Foo.__module__, m) + assert_equal(foo.__module__, m) + + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + # Argument validation + # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + @pytest.mark.parametrize('args', [('123', ['a'], ['b']), + ('Foo', ['-3'], ['x']), + ('Foo', ['a'], ['+-*/'])]) + def test_identifiers_not_allowed(self, args): + with pytest.raises(ValueError, match='identifiers'): + _make_tuple_bunch(*args) + + @pytest.mark.parametrize('args', [('Foo', ['a', 'b', 'a'], ['x']), + ('Foo', ['a', 'b'], ['b', 'x'])]) + def test_repeated_field_names(self, args): + with pytest.raises(ValueError, match='Duplicate'): + _make_tuple_bunch(*args) + + @pytest.mark.parametrize('args', [('Foo', ['_a'], ['x']), + ('Foo', ['a'], ['_x'])]) + def test_leading_underscore_not_allowed(self, args): + with pytest.raises(ValueError, match='underscore'): + _make_tuple_bunch(*args) + + @pytest.mark.parametrize('args', [('Foo', ['def'], ['x']), + ('Foo', ['a'], ['or']), + ('and', ['a'], ['x'])]) + def test_keyword_not_allowed_in_fields(self, 
args): + with pytest.raises(ValueError, match='keyword'): + _make_tuple_bunch(*args) + + def test_at_least_one_field_name_required(self): + with pytest.raises(ValueError, match='at least one name'): + _make_tuple_bunch('Qwerty', [], ['a', 'b']) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_ccallback.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_ccallback.py new file mode 100644 index 0000000000000000000000000000000000000000..82021775c294c7b881b9458b57d16deaac483cc7 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_ccallback.py @@ -0,0 +1,204 @@ +from numpy.testing import assert_equal, assert_ +from pytest import raises as assert_raises + +import time +import pytest +import ctypes +import threading +from scipy._lib import _ccallback_c as _test_ccallback_cython +from scipy._lib import _test_ccallback +from scipy._lib._ccallback import LowLevelCallable + +try: + import cffi + HAVE_CFFI = True +except ImportError: + HAVE_CFFI = False + + +ERROR_VALUE = 2.0 + + +def callback_python(a, user_data=None): + if a == ERROR_VALUE: + raise ValueError("bad value") + + if user_data is None: + return a + 1 + else: + return a + user_data + +def _get_cffi_func(base, signature): + if not HAVE_CFFI: + pytest.skip("cffi not installed") + + # Get function address + voidp = ctypes.cast(base, ctypes.c_void_p) + address = voidp.value + + # Create corresponding cffi handle + ffi = cffi.FFI() + func = ffi.cast(signature, address) + return func + + +def _get_ctypes_data(): + value = ctypes.c_double(2.0) + return ctypes.cast(ctypes.pointer(value), ctypes.c_voidp) + + +def _get_cffi_data(): + if not HAVE_CFFI: + pytest.skip("cffi not installed") + ffi = cffi.FFI() + return ffi.new('double *', 2.0) + + +CALLERS = { + 'simple': _test_ccallback.test_call_simple, + 'nodata': _test_ccallback.test_call_nodata, + 'nonlocal': _test_ccallback.test_call_nonlocal, + 'cython': 
_test_ccallback_cython.test_call_cython, +} + +# These functions have signatures known to the callers +FUNCS = { + 'python': lambda: callback_python, + 'capsule': lambda: _test_ccallback.test_get_plus1_capsule(), + 'cython': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, + "plus1_cython"), + 'ctypes': lambda: _test_ccallback_cython.plus1_ctypes, + 'cffi': lambda: _get_cffi_func(_test_ccallback_cython.plus1_ctypes, + 'double (*)(double, int *, void *)'), + 'capsule_b': lambda: _test_ccallback.test_get_plus1b_capsule(), + 'cython_b': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, + "plus1b_cython"), + 'ctypes_b': lambda: _test_ccallback_cython.plus1b_ctypes, + 'cffi_b': lambda: _get_cffi_func(_test_ccallback_cython.plus1b_ctypes, + 'double (*)(double, double, int *, void *)'), +} + +# These functions have signatures the callers don't know +BAD_FUNCS = { + 'capsule_bc': lambda: _test_ccallback.test_get_plus1bc_capsule(), + 'cython_bc': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, + "plus1bc_cython"), + 'ctypes_bc': lambda: _test_ccallback_cython.plus1bc_ctypes, + 'cffi_bc': lambda: _get_cffi_func( + _test_ccallback_cython.plus1bc_ctypes, + 'double (*)(double, double, double, int *, void *)' + ), +} + +USER_DATAS = { + 'ctypes': _get_ctypes_data, + 'cffi': _get_cffi_data, + 'capsule': _test_ccallback.test_get_data_capsule, +} + + +def test_callbacks(): + def check(caller, func, user_data): + caller = CALLERS[caller] + func = FUNCS[func]() + user_data = USER_DATAS[user_data]() + + if func is callback_python: + def func2(x): + return func(x, 2.0) + else: + func2 = LowLevelCallable(func, user_data) + func = LowLevelCallable(func) + + # Test basic call + assert_equal(caller(func, 1.0), 2.0) + + # Test 'bad' value resulting to an error + assert_raises(ValueError, caller, func, ERROR_VALUE) + + # Test passing in user_data + assert_equal(caller(func2, 1.0), 3.0) + + for caller in sorted(CALLERS.keys()): + for func in 
sorted(FUNCS.keys()): + for user_data in sorted(USER_DATAS.keys()): + check(caller, func, user_data) + + +def test_bad_callbacks(): + def check(caller, func, user_data): + caller = CALLERS[caller] + user_data = USER_DATAS[user_data]() + func = BAD_FUNCS[func]() + + if func is callback_python: + def func2(x): + return func(x, 2.0) + else: + func2 = LowLevelCallable(func, user_data) + func = LowLevelCallable(func) + + # Test that basic call fails + assert_raises(ValueError, caller, LowLevelCallable(func), 1.0) + + # Test that passing in user_data also fails + assert_raises(ValueError, caller, func2, 1.0) + + # Test error message + llfunc = LowLevelCallable(func) + try: + caller(llfunc, 1.0) + except ValueError as err: + msg = str(err) + assert_(llfunc.signature in msg, msg) + assert_('double (double, double, int *, void *)' in msg, msg) + + for caller in sorted(CALLERS.keys()): + for func in sorted(BAD_FUNCS.keys()): + for user_data in sorted(USER_DATAS.keys()): + check(caller, func, user_data) + + +def test_signature_override(): + caller = _test_ccallback.test_call_simple + func = _test_ccallback.test_get_plus1_capsule() + + llcallable = LowLevelCallable(func, signature="bad signature") + assert_equal(llcallable.signature, "bad signature") + assert_raises(ValueError, caller, llcallable, 3) + + llcallable = LowLevelCallable(func, signature="double (double, int *, void *)") + assert_equal(llcallable.signature, "double (double, int *, void *)") + assert_equal(caller(llcallable, 3), 4) + + +def test_threadsafety(): + def callback(a, caller): + if a <= 0: + return 1 + else: + res = caller(lambda x: callback(x, caller), a - 1) + return 2*res + + def check(caller): + caller = CALLERS[caller] + + results = [] + + count = 10 + + def run(): + time.sleep(0.01) + r = caller(lambda x: callback(x, caller), count) + results.append(r) + + threads = [threading.Thread(target=run) for j in range(20)] + for thread in threads: + thread.start() + for thread in threads: + thread.join() + 
+ assert_equal(results, [2.0**count]*len(threads)) + + for caller in CALLERS.keys(): + check(caller) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_config.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_config.py new file mode 100644 index 0000000000000000000000000000000000000000..794e365c0d8a5ce337765fc669d688e80240d540 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_config.py @@ -0,0 +1,45 @@ +""" +Check the SciPy config is valid. +""" +import scipy +import pytest +from unittest.mock import patch + +pytestmark = pytest.mark.skipif( + not hasattr(scipy.__config__, "_built_with_meson"), + reason="Requires Meson builds", +) + + +class TestSciPyConfigs: + REQUIRED_CONFIG_KEYS = [ + "Compilers", + "Machine Information", + "Python Information", + ] + + @pytest.mark.thread_unsafe + @patch("scipy.__config__._check_pyyaml") + def test_pyyaml_not_found(self, mock_yaml_importer): + mock_yaml_importer.side_effect = ModuleNotFoundError() + with pytest.warns(UserWarning): + scipy.show_config() + + def test_dict_mode(self): + config = scipy.show_config(mode="dicts") + + assert isinstance(config, dict) + assert all([key in config for key in self.REQUIRED_CONFIG_KEYS]), ( + "Required key missing," + " see index of `False` with `REQUIRED_CONFIG_KEYS`" + ) + + def test_invalid_mode(self): + with pytest.raises(AttributeError): + scipy.show_config(mode="foo") + + def test_warn_to_add_tests(self): + assert len(scipy.__config__.DisplayModes) == 2, ( + "New mode detected," + " please add UT if applicable and increment this count" + ) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_deprecation.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_deprecation.py new file mode 100644 index 0000000000000000000000000000000000000000..667e6ab94346fc8b22c0ea4d4624acf33124c8c0 --- /dev/null +++ 
b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_deprecation.py @@ -0,0 +1,10 @@ +import pytest + +@pytest.mark.thread_unsafe +def test_cython_api_deprecation(): + match = ("`scipy._lib._test_deprecation_def.foo_deprecated` " + "is deprecated, use `foo` instead!\n" + "Deprecated in Scipy 42.0.0") + with pytest.warns(DeprecationWarning, match=match): + from .. import _test_deprecation_call + assert _test_deprecation_call.call() == (1, 1) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_doccer.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_doccer.py new file mode 100644 index 0000000000000000000000000000000000000000..176a69698b10bd1d0d23fc57f8e8a99ce7209f0f --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_doccer.py @@ -0,0 +1,143 @@ +''' Some tests for the documenting decorator and support functions ''' + +import sys +import pytest +from numpy.testing import assert_equal, suppress_warnings + +from scipy._lib import doccer + +# python -OO strips docstrings +DOCSTRINGS_STRIPPED = sys.flags.optimize > 1 + +docstring = \ +"""Docstring + %(strtest1)s + %(strtest2)s + %(strtest3)s +""" +param_doc1 = \ +"""Another test + with some indent""" + +param_doc2 = \ +"""Another test, one line""" + +param_doc3 = \ +""" Another test + with some indent""" + +doc_dict = {'strtest1':param_doc1, + 'strtest2':param_doc2, + 'strtest3':param_doc3} + +filled_docstring = \ +"""Docstring + Another test + with some indent + Another test, one line + Another test + with some indent +""" + + +def test_unindent(): + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + assert_equal(doccer.unindent_string(param_doc1), param_doc1) + assert_equal(doccer.unindent_string(param_doc2), param_doc2) + assert_equal(doccer.unindent_string(param_doc3), param_doc1) + + +def test_unindent_dict(): + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + d2 = 
doccer.unindent_dict(doc_dict) + assert_equal(d2['strtest1'], doc_dict['strtest1']) + assert_equal(d2['strtest2'], doc_dict['strtest2']) + assert_equal(d2['strtest3'], doc_dict['strtest1']) + + +def test_docformat(): + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + udd = doccer.unindent_dict(doc_dict) + formatted = doccer.docformat(docstring, udd) + assert_equal(formatted, filled_docstring) + single_doc = 'Single line doc %(strtest1)s' + formatted = doccer.docformat(single_doc, doc_dict) + # Note - initial indent of format string does not + # affect subsequent indent of inserted parameter + assert_equal(formatted, """Single line doc Another test + with some indent""") + + +@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") +def test_decorator(): + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + # with unindentation of parameters + decorator = doccer.filldoc(doc_dict, True) + + @decorator + def func(): + """ Docstring + %(strtest3)s + """ + + def expected(): + """ Docstring + Another test + with some indent + """ + assert_equal(func.__doc__, expected.__doc__) + + # without unindentation of parameters + + # The docstring should be unindented for Python 3.13+ + # because of https://github.com/python/cpython/issues/81283 + decorator = doccer.filldoc(doc_dict, False if \ + sys.version_info < (3, 13) else True) + + @decorator + def func(): + """ Docstring + %(strtest3)s + """ + def expected(): + """ Docstring + Another test + with some indent + """ + assert_equal(func.__doc__, expected.__doc__) + + +@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") +def test_inherit_docstring_from(): + + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + + class Foo: + def func(self): + '''Do something useful.''' + return + + def func2(self): + '''Something else.''' + + class Bar(Foo): + @doccer.inherit_docstring_from(Foo) + def func(self): + '''%(super)sABC''' + 
return + + @doccer.inherit_docstring_from(Foo) + def func2(self): + # No docstring. + return + + assert_equal(Bar.func.__doc__, Foo.func.__doc__ + 'ABC') + assert_equal(Bar.func2.__doc__, Foo.func2.__doc__) + bar = Bar() + assert_equal(bar.func.__doc__, Foo.func.__doc__ + 'ABC') + assert_equal(bar.func2.__doc__, Foo.func2.__doc__) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_import_cycles.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_import_cycles.py new file mode 100644 index 0000000000000000000000000000000000000000..3a35800a8198af8215e0b5624738f9ac45b0bb96 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_import_cycles.py @@ -0,0 +1,18 @@ +import pytest +import sys +import subprocess + +from .test_public_api import PUBLIC_MODULES + +# Regression tests for gh-6793. +# Check that all modules are importable in a new Python process. +# This is not necessarily true if there are import cycles present. 
+ +@pytest.mark.fail_slow(40) +@pytest.mark.slow +@pytest.mark.thread_unsafe +def test_public_modules_importable(): + pids = [subprocess.Popen([sys.executable, '-c', f'import {module}']) + for module in PUBLIC_MODULES] + for i, pid in enumerate(pids): + assert pid.wait() == 0, f'Failed to import {PUBLIC_MODULES[i]}' diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_public_api.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_public_api.py new file mode 100644 index 0000000000000000000000000000000000000000..5332107cd21cdd2b6e40cc545c87138cee04ff97 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_public_api.py @@ -0,0 +1,469 @@ +""" +This test script is adopted from: + https://github.com/numpy/numpy/blob/main/numpy/tests/test_public_api.py +""" + +import pkgutil +import types +import importlib +import warnings +from importlib import import_module + +import pytest + +import numpy as np +import scipy + +from scipy.conftest import xp_available_backends + + +def test_dir_testing(): + """Assert that output of dir has only one "testing/tester" + attribute without duplicate""" + assert len(dir(scipy)) == len(set(dir(scipy))) + + +# Historically SciPy has not used leading underscores for private submodules +# much. This has resulted in lots of things that look like public modules +# (i.e. things that can be imported as `import scipy.somesubmodule.somefile`), +# but were never intended to be public. The PUBLIC_MODULES list contains +# modules that are either public because they were meant to be, or because they +# contain public functions/objects that aren't present in any other namespace +# for whatever reason and therefore should be treated as public. +PUBLIC_MODULES = ["scipy." 
+ s for s in [ + "cluster", + "cluster.vq", + "cluster.hierarchy", + "constants", + "datasets", + "differentiate", + "fft", + "fftpack", + "integrate", + "interpolate", + "io", + "io.arff", + "io.matlab", + "io.wavfile", + "linalg", + "linalg.blas", + "linalg.cython_blas", + "linalg.lapack", + "linalg.cython_lapack", + "linalg.interpolative", + "ndimage", + "odr", + "optimize", + "optimize.elementwise", + "signal", + "signal.windows", + "sparse", + "sparse.linalg", + "sparse.csgraph", + "spatial", + "spatial.distance", + "spatial.transform", + "special", + "stats", + "stats.contingency", + "stats.distributions", + "stats.mstats", + "stats.qmc", + "stats.sampling" +]] + +# The PRIVATE_BUT_PRESENT_MODULES list contains modules that lacked underscores +# in their name and hence looked public, but weren't meant to be. All these +# namespace were deprecated in the 1.8.0 release - see "clear split between +# public and private API" in the 1.8.0 release notes. +# These private modules support will be removed in SciPy v2.0.0, as the +# deprecation messages emitted by each of these modules say. 
+PRIVATE_BUT_PRESENT_MODULES = [ + 'scipy.constants.codata', + 'scipy.constants.constants', + 'scipy.fftpack.basic', + 'scipy.fftpack.convolve', + 'scipy.fftpack.helper', + 'scipy.fftpack.pseudo_diffs', + 'scipy.fftpack.realtransforms', + 'scipy.integrate.dop', + 'scipy.integrate.lsoda', + 'scipy.integrate.odepack', + 'scipy.integrate.quadpack', + 'scipy.integrate.vode', + 'scipy.interpolate.dfitpack', + 'scipy.interpolate.fitpack', + 'scipy.interpolate.fitpack2', + 'scipy.interpolate.interpnd', + 'scipy.interpolate.interpolate', + 'scipy.interpolate.ndgriddata', + 'scipy.interpolate.polyint', + 'scipy.interpolate.rbf', + 'scipy.io.arff.arffread', + 'scipy.io.harwell_boeing', + 'scipy.io.idl', + 'scipy.io.matlab.byteordercodes', + 'scipy.io.matlab.mio', + 'scipy.io.matlab.mio4', + 'scipy.io.matlab.mio5', + 'scipy.io.matlab.mio5_params', + 'scipy.io.matlab.mio5_utils', + 'scipy.io.matlab.mio_utils', + 'scipy.io.matlab.miobase', + 'scipy.io.matlab.streams', + 'scipy.io.mmio', + 'scipy.io.netcdf', + 'scipy.linalg.basic', + 'scipy.linalg.decomp', + 'scipy.linalg.decomp_cholesky', + 'scipy.linalg.decomp_lu', + 'scipy.linalg.decomp_qr', + 'scipy.linalg.decomp_schur', + 'scipy.linalg.decomp_svd', + 'scipy.linalg.matfuncs', + 'scipy.linalg.misc', + 'scipy.linalg.special_matrices', + 'scipy.misc', + 'scipy.misc.common', + 'scipy.misc.doccer', + 'scipy.ndimage.filters', + 'scipy.ndimage.fourier', + 'scipy.ndimage.interpolation', + 'scipy.ndimage.measurements', + 'scipy.ndimage.morphology', + 'scipy.odr.models', + 'scipy.odr.odrpack', + 'scipy.optimize.cobyla', + 'scipy.optimize.cython_optimize', + 'scipy.optimize.lbfgsb', + 'scipy.optimize.linesearch', + 'scipy.optimize.minpack', + 'scipy.optimize.minpack2', + 'scipy.optimize.moduleTNC', + 'scipy.optimize.nonlin', + 'scipy.optimize.optimize', + 'scipy.optimize.slsqp', + 'scipy.optimize.tnc', + 'scipy.optimize.zeros', + 'scipy.signal.bsplines', + 'scipy.signal.filter_design', + 'scipy.signal.fir_filter_design', + 
'scipy.signal.lti_conversion', + 'scipy.signal.ltisys', + 'scipy.signal.signaltools', + 'scipy.signal.spectral', + 'scipy.signal.spline', + 'scipy.signal.waveforms', + 'scipy.signal.wavelets', + 'scipy.signal.windows.windows', + 'scipy.sparse.base', + 'scipy.sparse.bsr', + 'scipy.sparse.compressed', + 'scipy.sparse.construct', + 'scipy.sparse.coo', + 'scipy.sparse.csc', + 'scipy.sparse.csr', + 'scipy.sparse.data', + 'scipy.sparse.dia', + 'scipy.sparse.dok', + 'scipy.sparse.extract', + 'scipy.sparse.lil', + 'scipy.sparse.linalg.dsolve', + 'scipy.sparse.linalg.eigen', + 'scipy.sparse.linalg.interface', + 'scipy.sparse.linalg.isolve', + 'scipy.sparse.linalg.matfuncs', + 'scipy.sparse.sparsetools', + 'scipy.sparse.spfuncs', + 'scipy.sparse.sputils', + 'scipy.spatial.ckdtree', + 'scipy.spatial.kdtree', + 'scipy.spatial.qhull', + 'scipy.spatial.transform.rotation', + 'scipy.special.add_newdocs', + 'scipy.special.basic', + 'scipy.special.cython_special', + 'scipy.special.orthogonal', + 'scipy.special.sf_error', + 'scipy.special.specfun', + 'scipy.special.spfun_stats', + 'scipy.stats.biasedurn', + 'scipy.stats.kde', + 'scipy.stats.morestats', + 'scipy.stats.mstats_basic', + 'scipy.stats.mstats_extras', + 'scipy.stats.mvn', + 'scipy.stats.stats', +] + + +def is_unexpected(name): + """Check if this needs to be considered.""" + if '._' in name or '.tests' in name or '.setup' in name: + return False + + if name in PUBLIC_MODULES: + return False + + if name in PRIVATE_BUT_PRESENT_MODULES: + return False + + return True + + +SKIP_LIST = [ + 'scipy.conftest', + 'scipy.version', + 'scipy.special.libsf_error_state' +] + + +# XXX: this test does more than it says on the tin - in using `pkgutil.walk_packages`, +# it will raise if it encounters any exceptions which are not handled by `ignore_errors` +# while attempting to import each discovered package. 
+# For now, `ignore_errors` only ignores what is necessary, but this could be expanded - +# for example, to all errors from private modules or git subpackages - if desired. +@pytest.mark.thread_unsafe +def test_all_modules_are_expected(): + """ + Test that we don't add anything that looks like a new public module by + accident. Check is based on filenames. + """ + + def ignore_errors(name): + # if versions of other array libraries are installed which are incompatible + # with the installed NumPy version, there can be errors on importing + # `array_api_compat`. This should only raise if SciPy is configured with + # that library as an available backend. + backends = {'cupy', 'torch', 'dask.array'} + for backend in backends: + path = f'array_api_compat.{backend}' + if path in name and backend not in xp_available_backends: + return + raise + + modnames = [] + + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning,"scipy.misc") + for _, modname, _ in pkgutil.walk_packages(path=scipy.__path__, + prefix=scipy.__name__ + '.', + onerror=ignore_errors): + if is_unexpected(modname) and modname not in SKIP_LIST: + # We have a name that is new. If that's on purpose, add it to + # PUBLIC_MODULES. We don't expect to have to add anything to + # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name! + modnames.append(modname) + + if modnames: + raise AssertionError(f'Found unexpected modules: {modnames}') + + +# Stuff that clearly shouldn't be in the API and is detected by the next test +# below +SKIP_LIST_2 = [ + 'scipy.char', + 'scipy.rec', + 'scipy.emath', + 'scipy.math', + 'scipy.random', + 'scipy.ctypeslib', + 'scipy.ma' +] + + +def test_all_modules_are_expected_2(): + """ + Method checking all objects. The pkgutil-based method in + `test_all_modules_are_expected` does not catch imports into a namespace, + only filenames. 
+ """ + + def find_unexpected_members(mod_name): + members = [] + module = importlib.import_module(mod_name) + if hasattr(module, '__all__'): + objnames = module.__all__ + else: + objnames = dir(module) + + for objname in objnames: + if not objname.startswith('_'): + fullobjname = mod_name + '.' + objname + if isinstance(getattr(module, objname), types.ModuleType): + if is_unexpected(fullobjname) and fullobjname not in SKIP_LIST_2: + members.append(fullobjname) + + return members + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "scipy.misc") + unexpected_members = find_unexpected_members("scipy") + + for modname in PUBLIC_MODULES: + unexpected_members.extend(find_unexpected_members(modname)) + + if unexpected_members: + raise AssertionError("Found unexpected object(s) that look like " + f"modules: {unexpected_members}") + + +def test_api_importable(): + """ + Check that all submodules listed higher up in this file can be imported + Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may + simply need to be removed from the list (deprecation may or may not be + needed - apply common sense). 
+ """ + def check_importable(module_name): + try: + importlib.import_module(module_name) + except (ImportError, AttributeError): + return False + + return True + + module_names = [] + for module_name in PUBLIC_MODULES: + if not check_importable(module_name): + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules in the public API that cannot be " + f"imported: {module_names}") + + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', category=DeprecationWarning) + warnings.filterwarnings('always', category=ImportWarning) + for module_name in PRIVATE_BUT_PRESENT_MODULES: + if not check_importable(module_name): + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules that are not really public but looked " + "public and can not be imported: " + f"{module_names}") + + +@pytest.mark.thread_unsafe +@pytest.mark.parametrize(("module_name", "correct_module"), + [('scipy.constants.codata', None), + ('scipy.constants.constants', None), + ('scipy.fftpack.basic', None), + ('scipy.fftpack.helper', None), + ('scipy.fftpack.pseudo_diffs', None), + ('scipy.fftpack.realtransforms', None), + ('scipy.integrate.dop', None), + ('scipy.integrate.lsoda', None), + ('scipy.integrate.odepack', None), + ('scipy.integrate.quadpack', None), + ('scipy.integrate.vode', None), + ('scipy.interpolate.fitpack', None), + ('scipy.interpolate.fitpack2', None), + ('scipy.interpolate.interpolate', None), + ('scipy.interpolate.ndgriddata', None), + ('scipy.interpolate.polyint', None), + ('scipy.interpolate.rbf', None), + ('scipy.io.harwell_boeing', None), + ('scipy.io.idl', None), + ('scipy.io.mmio', None), + ('scipy.io.netcdf', None), + ('scipy.io.arff.arffread', 'arff'), + ('scipy.io.matlab.byteordercodes', 'matlab'), + ('scipy.io.matlab.mio_utils', 'matlab'), + ('scipy.io.matlab.mio', 'matlab'), + ('scipy.io.matlab.mio4', 'matlab'), + ('scipy.io.matlab.mio5_params', 'matlab'), + ('scipy.io.matlab.mio5_utils', 
'matlab'), + ('scipy.io.matlab.mio5', 'matlab'), + ('scipy.io.matlab.miobase', 'matlab'), + ('scipy.io.matlab.streams', 'matlab'), + ('scipy.linalg.basic', None), + ('scipy.linalg.decomp', None), + ('scipy.linalg.decomp_cholesky', None), + ('scipy.linalg.decomp_lu', None), + ('scipy.linalg.decomp_qr', None), + ('scipy.linalg.decomp_schur', None), + ('scipy.linalg.decomp_svd', None), + ('scipy.linalg.matfuncs', None), + ('scipy.linalg.misc', None), + ('scipy.linalg.special_matrices', None), + ('scipy.ndimage.filters', None), + ('scipy.ndimage.fourier', None), + ('scipy.ndimage.interpolation', None), + ('scipy.ndimage.measurements', None), + ('scipy.ndimage.morphology', None), + ('scipy.odr.models', None), + ('scipy.odr.odrpack', None), + ('scipy.optimize.cobyla', None), + ('scipy.optimize.lbfgsb', None), + ('scipy.optimize.linesearch', None), + ('scipy.optimize.minpack', None), + ('scipy.optimize.minpack2', None), + ('scipy.optimize.moduleTNC', None), + ('scipy.optimize.nonlin', None), + ('scipy.optimize.optimize', None), + ('scipy.optimize.slsqp', None), + ('scipy.optimize.tnc', None), + ('scipy.optimize.zeros', None), + ('scipy.signal.bsplines', None), + ('scipy.signal.filter_design', None), + ('scipy.signal.fir_filter_design', None), + ('scipy.signal.lti_conversion', None), + ('scipy.signal.ltisys', None), + ('scipy.signal.signaltools', None), + ('scipy.signal.spectral', None), + ('scipy.signal.waveforms', None), + ('scipy.signal.wavelets', None), + ('scipy.signal.windows.windows', 'windows'), + ('scipy.sparse.lil', None), + ('scipy.sparse.linalg.dsolve', 'linalg'), + ('scipy.sparse.linalg.eigen', 'linalg'), + ('scipy.sparse.linalg.interface', 'linalg'), + ('scipy.sparse.linalg.isolve', 'linalg'), + ('scipy.sparse.linalg.matfuncs', 'linalg'), + ('scipy.sparse.sparsetools', None), + ('scipy.sparse.spfuncs', None), + ('scipy.sparse.sputils', None), + ('scipy.spatial.ckdtree', None), + ('scipy.spatial.kdtree', None), + ('scipy.spatial.qhull', None), + 
('scipy.spatial.transform.rotation', 'transform'), + ('scipy.special.add_newdocs', None), + ('scipy.special.basic', None), + ('scipy.special.orthogonal', None), + ('scipy.special.sf_error', None), + ('scipy.special.specfun', None), + ('scipy.special.spfun_stats', None), + ('scipy.stats.biasedurn', None), + ('scipy.stats.kde', None), + ('scipy.stats.morestats', None), + ('scipy.stats.mstats_basic', 'mstats'), + ('scipy.stats.mstats_extras', 'mstats'), + ('scipy.stats.mvn', None), + ('scipy.stats.stats', None)]) +def test_private_but_present_deprecation(module_name, correct_module): + # gh-18279, gh-17572, gh-17771 noted that deprecation warnings + # for imports from private modules + # were misleading. Check that this is resolved. + module = import_module(module_name) + if correct_module is None: + import_name = f'scipy.{module_name.split(".")[1]}' + else: + import_name = f'scipy.{module_name.split(".")[1]}.{correct_module}' + + correct_import = import_module(import_name) + + # Attributes that were formerly in `module_name` can still be imported from + # `module_name`, albeit with a deprecation warning. + for attr_name in module.__all__: + # ensure attribute is present where the warning is pointing + assert getattr(correct_import, attr_name, None) is not None + message = f"Please import `{attr_name}` from the `{import_name}`..." + with pytest.deprecated_call(match=message): + getattr(module, attr_name) + + # Attributes that were not in `module_name` get an error notifying the user + # that the attribute is not in `module_name` and that `module_name` is deprecated. + message = f"`{module_name}` is deprecated..." 
+ with pytest.raises(AttributeError, match=message): + getattr(module, "ekki") diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_scipy_version.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_scipy_version.py new file mode 100644 index 0000000000000000000000000000000000000000..68e1a43c3fb329b6a4274ba76b53a215738da6ad --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_scipy_version.py @@ -0,0 +1,28 @@ +import re + +import scipy +import scipy.version + + +def test_valid_scipy_version(): + # Verify that the SciPy version is a valid one (no .post suffix or other + # nonsense). See NumPy issue gh-6431 for an issue caused by an invalid + # version. + version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])" + dev_suffix = r"((.dev0)|(\.dev0+\+git[0-9]{8}.[0-9a-f]{7}))" + if scipy.version.release: + res = re.match(version_pattern, scipy.__version__) + else: + res = re.match(version_pattern + dev_suffix, scipy.__version__) + + assert res is not None + assert scipy.__version__ + + +def test_version_submodule_members(): + """`scipy.version` may not be quite public, but we install it. + + So check that we don't silently change its contents. 
+ """ + for attr in ('version', 'full_version', 'short_version', 'git_revision', 'release'): + assert hasattr(scipy.version, attr) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_tmpdirs.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_tmpdirs.py new file mode 100644 index 0000000000000000000000000000000000000000..292e7ab1739e663979f9f0b9647fb2c7c95d625c --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_tmpdirs.py @@ -0,0 +1,48 @@ +""" Test tmpdirs module """ +from os import getcwd +from os.path import realpath, abspath, dirname, isfile, join as pjoin, exists + +from scipy._lib._tmpdirs import tempdir, in_tempdir, in_dir + +from numpy.testing import assert_, assert_equal + +import pytest + + +MY_PATH = abspath(__file__) +MY_DIR = dirname(MY_PATH) + + +@pytest.mark.thread_unsafe +def test_tempdir(): + with tempdir() as tmpdir: + fname = pjoin(tmpdir, 'example_file.txt') + with open(fname, "w") as fobj: + fobj.write('a string\\n') + assert_(not exists(tmpdir)) + + +@pytest.mark.thread_unsafe +def test_in_tempdir(): + my_cwd = getcwd() + with in_tempdir() as tmpdir: + with open('test.txt', "w") as f: + f.write('some text') + assert_(isfile('test.txt')) + assert_(isfile(pjoin(tmpdir, 'test.txt'))) + assert_(not exists(tmpdir)) + assert_equal(getcwd(), my_cwd) + + +@pytest.mark.thread_unsafe +def test_given_directory(): + # Test InGivenDirectory + cwd = getcwd() + with in_dir() as tmpdir: + assert_equal(tmpdir, abspath(cwd)) + assert_equal(tmpdir, abspath(getcwd())) + with in_dir(MY_DIR) as tmpdir: + assert_equal(tmpdir, MY_DIR) + assert_equal(realpath(MY_DIR), realpath(abspath(getcwd()))) + # We were deleting the given directory! Check not so now. 
+ assert_(isfile(MY_PATH)) diff --git a/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_warnings.py b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_warnings.py new file mode 100644 index 0000000000000000000000000000000000000000..f200b1a6e9756b17c96e5b8368271bbf61878d72 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/scipy/_lib/tests/test_warnings.py @@ -0,0 +1,137 @@ +""" +Tests which scan for certain occurrences in the code, they may not find +all of these occurrences but should catch almost all. This file was adapted +from NumPy. +""" + + +import os +from pathlib import Path +import ast +import tokenize + +import scipy + +import pytest + + +class ParseCall(ast.NodeVisitor): + def __init__(self): + self.ls = [] + + def visit_Attribute(self, node): + ast.NodeVisitor.generic_visit(self, node) + self.ls.append(node.attr) + + def visit_Name(self, node): + self.ls.append(node.id) + + +class FindFuncs(ast.NodeVisitor): + def __init__(self, filename): + super().__init__() + self.__filename = filename + self.bad_filters = [] + self.bad_stacklevels = [] + + def visit_Call(self, node): + p = ParseCall() + p.visit(node.func) + ast.NodeVisitor.generic_visit(self, node) + + if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': + # get first argument of the `args` node of the filter call + match node.args[0]: + case ast.Constant() as c: + argtext = c.value + case ast.JoinedStr() as js: + # if we get an f-string, discard the templated pieces, which + # are likely the type or specific message; we're interested + # in the action, which is less likely to use a template + argtext = "".join( + x.value for x in js.values if isinstance(x, ast.Constant) + ) + case _: + raise ValueError("unknown ast node type") + # check if filter is set to ignore + if argtext == "ignore": + self.bad_filters.append( + f"{self.__filename}:{node.lineno}") + + if p.ls[-1] == 'warn' and ( + len(p.ls) == 1 or p.ls[-2] == 'warnings'): + + if 
self.__filename == "_lib/tests/test_warnings.py": + # This file + return + + # See if stacklevel exists: + if len(node.args) == 3: + return + args = {kw.arg for kw in node.keywords} + if "stacklevel" not in args: + self.bad_stacklevels.append( + f"{self.__filename}:{node.lineno}") + + +@pytest.fixture(scope="session") +def warning_calls(): + # combined "ignore" and stacklevel error + base = Path(scipy.__file__).parent + + bad_filters = [] + bad_stacklevels = [] + + for path in base.rglob("*.py"): + # use tokenize to auto-detect encoding on systems where no + # default encoding is defined (e.g., LANG='C') + with tokenize.open(str(path)) as file: + tree = ast.parse(file.read(), filename=str(path)) + finder = FindFuncs(path.relative_to(base)) + finder.visit(tree) + bad_filters.extend(finder.bad_filters) + bad_stacklevels.extend(finder.bad_stacklevels) + + return bad_filters, bad_stacklevels + + +@pytest.mark.fail_slow(40) +@pytest.mark.slow +def test_warning_calls_filters(warning_calls): + bad_filters, bad_stacklevels = warning_calls + + # We try not to add filters in the code base, because those filters aren't + # thread-safe. We aim to only filter in tests with + # np.testing.suppress_warnings. However, in some cases it may prove + # necessary to filter out warnings, because we can't (easily) fix the root + # cause for them and we don't want users to see some warnings when they use + # SciPy correctly. So we list exceptions here. Add new entries only if + # there's a good reason. 
+ allowed_filters = ( + os.path.join('datasets', '_fetchers.py'), + os.path.join('datasets', '__init__.py'), + os.path.join('optimize', '_optimize.py'), + os.path.join('optimize', '_constraints.py'), + os.path.join('optimize', '_nnls.py'), + os.path.join('signal', '_ltisys.py'), + os.path.join('sparse', '__init__.py'), # np.matrix pending-deprecation + os.path.join('special', '_basic.py'), # gh-21801 + os.path.join('stats', '_discrete_distns.py'), # gh-14901 + os.path.join('stats', '_continuous_distns.py'), + os.path.join('stats', '_binned_statistic.py'), # gh-19345 + os.path.join('stats', '_stats_py.py'), # gh-20743 + os.path.join('stats', 'tests', 'test_axis_nan_policy.py'), # gh-20694 + os.path.join('_lib', '_util.py'), # gh-19341 + os.path.join('sparse', 'linalg', '_dsolve', 'linsolve.py'), # gh-17924 + "conftest.py", + ) + bad_filters = [item for item in bad_filters if item.split(':')[0] not in + allowed_filters] + + if bad_filters: + raise AssertionError( + "warning ignore filter should not be used, instead, use\n" + "numpy.testing.suppress_warnings (in tests only);\n" + "found in:\n {}".format( + "\n ".join(bad_filters))) + diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4214f558bde146465e0a7d2c423b27fc7238062 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4be0a0f9be1ae2d7dda3661c8012b67c2c705b76 Binary files /dev/null and 
b/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cd1f2f4359620a4ba9d67b05962ab1ad10e4fe6 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7b44ab0bb7198be3852acf6c0994e79de12490b Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24ae1093eb2951ecfa2b94b2720db43fe98e7630 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/api/__init__.py b/moondream/lib/python3.10/site-packages/pandas/tests/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/conftest.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..36af1a59a760f4d01241aee16732045d55604f3d Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/conftest.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_array_ops.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_array_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a723326b3c80b3fe9f7b3bca709cec661cb32a8 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_array_ops.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_datetime64.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_datetime64.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a3b9fe7dfce532848221966e5c82f336c0f27ed Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_datetime64.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_interval.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_interval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2092cd5957cb86f5437eff888658f30879d8d9a5 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_interval.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_object.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_object.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcd14601741fe1bec8881f25f3e390b9732e9eaf Binary files 
/dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_object.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_period.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_period.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1930e50934482d68a19fa35099eefc1ee682216 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_period.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_timedelta64.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_timedelta64.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1951c0b76aad641bed820290dd6ef360639ef63d Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/__pycache__/test_timedelta64.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/conftest.py b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..c7703b34a5e38e7a3887d727b0a8c954016ad836 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/conftest.py @@ -0,0 +1,139 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import Index + + +@pytest.fixture(params=[1, np.array(1, dtype=np.int64)]) +def one(request): + """ + Several variants of integer value 1. The zero-dim integer array + behaves like an integer. + + This fixture can be used to check that datetimelike indexes handle + addition and subtraction of integers and zero-dimensional arrays + of integers. 
+ + Examples + -------- + dti = pd.date_range('2016-01-01', periods=2, freq='h') + dti + DatetimeIndex(['2016-01-01 00:00:00', '2016-01-01 01:00:00'], + dtype='datetime64[ns]', freq='h') + dti + one + DatetimeIndex(['2016-01-01 01:00:00', '2016-01-01 02:00:00'], + dtype='datetime64[ns]', freq='h') + """ + return request.param + + +zeros = [ + box_cls([0] * 5, dtype=dtype) + for box_cls in [Index, np.array, pd.array] + for dtype in [np.int64, np.uint64, np.float64] +] +zeros.extend([box_cls([-0.0] * 5, dtype=np.float64) for box_cls in [Index, np.array]]) +zeros.extend([np.array(0, dtype=dtype) for dtype in [np.int64, np.uint64, np.float64]]) +zeros.extend([np.array(-0.0, dtype=np.float64)]) +zeros.extend([0, 0.0, -0.0]) + + +@pytest.fixture(params=zeros) +def zero(request): + """ + Several types of scalar zeros and length 5 vectors of zeros. + + This fixture can be used to check that numeric-dtype indexes handle + division by any zero numeric-dtype. + + Uses vector of length 5 for broadcasting with `numeric_idx` fixture, + which creates numeric-dtype vectors also of length 5. + + Examples + -------- + arr = RangeIndex(5) + arr / zeros + Index([nan, inf, inf, inf, inf], dtype='float64') + """ + return request.param + + +# ------------------------------------------------------------------ +# Scalar Fixtures + + +@pytest.fixture( + params=[ + pd.Timedelta("10m7s").to_pytimedelta(), + pd.Timedelta("10m7s"), + pd.Timedelta("10m7s").to_timedelta64(), + ], + ids=lambda x: type(x).__name__, +) +def scalar_td(request): + """ + Several variants of Timedelta scalars representing 10 minutes and 7 seconds. 
+ """ + return request.param + + +@pytest.fixture( + params=[ + pd.offsets.Day(3), + pd.offsets.Hour(72), + pd.Timedelta(days=3).to_pytimedelta(), + pd.Timedelta("72:00:00"), + np.timedelta64(3, "D"), + np.timedelta64(72, "h"), + ], + ids=lambda x: type(x).__name__, +) +def three_days(request): + """ + Several timedelta-like and DateOffset objects that each represent + a 3-day timedelta + """ + return request.param + + +@pytest.fixture( + params=[ + pd.offsets.Hour(2), + pd.offsets.Minute(120), + pd.Timedelta(hours=2).to_pytimedelta(), + pd.Timedelta(seconds=2 * 3600), + np.timedelta64(2, "h"), + np.timedelta64(120, "m"), + ], + ids=lambda x: type(x).__name__, +) +def two_hours(request): + """ + Several timedelta-like and DateOffset objects that each represent + a 2-hour timedelta + """ + return request.param + + +_common_mismatch = [ + pd.offsets.YearBegin(2), + pd.offsets.MonthBegin(1), + pd.offsets.Minute(), +] + + +@pytest.fixture( + params=[ + np.timedelta64(4, "h"), + pd.Timedelta(hours=23).to_pytimedelta(), + pd.Timedelta("23:00:00"), + ] + + _common_mismatch +) +def not_daily(request): + """ + Several timedelta-like and DateOffset instances that are _not_ + compatible with Daily frequencies. 
+ """ + return request.param diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_array_ops.py b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_array_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..2c347d965bbf7353a6a4e81ca955341f8041b6de --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_array_ops.py @@ -0,0 +1,39 @@ +import operator + +import numpy as np +import pytest + +import pandas._testing as tm +from pandas.core.ops.array_ops import ( + comparison_op, + na_logical_op, +) + + +def test_na_logical_op_2d(): + left = np.arange(8).reshape(4, 2) + right = left.astype(object) + right[0, 0] = np.nan + + # Check that we fall back to the vec_binop branch + with pytest.raises(TypeError, match="unsupported operand type"): + operator.or_(left, right) + + result = na_logical_op(left, right, operator.or_) + expected = right + tm.assert_numpy_array_equal(result, expected) + + +def test_object_comparison_2d(): + left = np.arange(9).reshape(3, 3).astype(object) + right = left.T + + result = comparison_op(left, right, operator.eq) + expected = np.eye(3).astype(bool) + tm.assert_numpy_array_equal(result, expected) + + # Ensure that cython doesn't raise on non-writeable arg, which + # we can get from np.broadcast_to + right.flags.writeable = False + result = comparison_op(left, right, operator.ne) + tm.assert_numpy_array_equal(result, ~expected) diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_interval.py b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..0e316cf419cb0d3be489f474a9c6d889e668e7c9 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_interval.py @@ -0,0 +1,306 @@ +import operator + +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_list_like + +import pandas as 
pd +from pandas import ( + Categorical, + Index, + Interval, + IntervalIndex, + Period, + Series, + Timedelta, + Timestamp, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import ( + BooleanArray, + IntervalArray, +) +from pandas.tests.arithmetic.common import get_upcast_box + + +@pytest.fixture( + params=[ + (Index([0, 2, 4, 4]), Index([1, 3, 5, 8])), + (Index([0.0, 1.0, 2.0, np.nan]), Index([1.0, 2.0, 3.0, np.nan])), + ( + timedelta_range("0 days", periods=3).insert(3, pd.NaT), + timedelta_range("1 day", periods=3).insert(3, pd.NaT), + ), + ( + date_range("20170101", periods=3).insert(3, pd.NaT), + date_range("20170102", periods=3).insert(3, pd.NaT), + ), + ( + date_range("20170101", periods=3, tz="US/Eastern").insert(3, pd.NaT), + date_range("20170102", periods=3, tz="US/Eastern").insert(3, pd.NaT), + ), + ], + ids=lambda x: str(x[0].dtype), +) +def left_right_dtypes(request): + """ + Fixture for building an IntervalArray from various dtypes + """ + return request.param + + +@pytest.fixture +def interval_array(left_right_dtypes): + """ + Fixture to generate an IntervalArray of various dtypes containing NA if possible + """ + left, right = left_right_dtypes + return IntervalArray.from_arrays(left, right) + + +def create_categorical_intervals(left, right, closed="right"): + return Categorical(IntervalIndex.from_arrays(left, right, closed)) + + +def create_series_intervals(left, right, closed="right"): + return Series(IntervalArray.from_arrays(left, right, closed)) + + +def create_series_categorical_intervals(left, right, closed="right"): + return Series(Categorical(IntervalIndex.from_arrays(left, right, closed))) + + +class TestComparison: + @pytest.fixture(params=[operator.eq, operator.ne]) + def op(self, request): + return request.param + + @pytest.fixture( + params=[ + IntervalArray.from_arrays, + IntervalIndex.from_arrays, + create_categorical_intervals, + create_series_intervals, + 
create_series_categorical_intervals, + ], + ids=[ + "IntervalArray", + "IntervalIndex", + "Categorical[Interval]", + "Series[Interval]", + "Series[Categorical[Interval]]", + ], + ) + def interval_constructor(self, request): + """ + Fixture for all pandas native interval constructors. + To be used as the LHS of IntervalArray comparisons. + """ + return request.param + + def elementwise_comparison(self, op, interval_array, other): + """ + Helper that performs elementwise comparisons between `array` and `other` + """ + other = other if is_list_like(other) else [other] * len(interval_array) + expected = np.array([op(x, y) for x, y in zip(interval_array, other)]) + if isinstance(other, Series): + return Series(expected, index=other.index) + return expected + + def test_compare_scalar_interval(self, op, interval_array): + # matches first interval + other = interval_array[0] + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + # matches on a single endpoint but not both + other = Interval(interval_array.left[0], interval_array.right[1]) + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_scalar_interval_mixed_closed(self, op, closed, other_closed): + interval_array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed) + other = Interval(0, 1, closed=other_closed) + + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_scalar_na(self, op, interval_array, nulls_fixture, box_with_array): + box = box_with_array + obj = tm.box_expected(interval_array, box) + result = op(obj, nulls_fixture) + + if nulls_fixture is pd.NA: + # GH#31882 + exp = np.ones(interval_array.shape, dtype=bool) + expected = BooleanArray(exp, exp) + else: 
+ expected = self.elementwise_comparison(op, interval_array, nulls_fixture) + + if not (box is Index and nulls_fixture is pd.NA): + # don't cast expected from BooleanArray to ndarray[object] + xbox = get_upcast_box(obj, nulls_fixture, True) + expected = tm.box_expected(expected, xbox) + + tm.assert_equal(result, expected) + + rev = op(nulls_fixture, obj) + tm.assert_equal(rev, expected) + + @pytest.mark.parametrize( + "other", + [ + 0, + 1.0, + True, + "foo", + Timestamp("2017-01-01"), + Timestamp("2017-01-01", tz="US/Eastern"), + Timedelta("0 days"), + Period("2017-01-01", "D"), + ], + ) + def test_compare_scalar_other(self, op, interval_array, other): + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_list_like_interval(self, op, interval_array, interval_constructor): + # same endpoints + other = interval_constructor(interval_array.left, interval_array.right) + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_equal(result, expected) + + # different endpoints + other = interval_constructor( + interval_array.left[::-1], interval_array.right[::-1] + ) + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_equal(result, expected) + + # all nan endpoints + other = interval_constructor([np.nan] * 4, [np.nan] * 4) + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_equal(result, expected) + + def test_compare_list_like_interval_mixed_closed( + self, op, interval_constructor, closed, other_closed + ): + interval_array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed) + other = interval_constructor(range(2), range(1, 3), closed=other_closed) + + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) 
+ tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [ + ( + Interval(0, 1), + Interval(Timedelta("1 day"), Timedelta("2 days")), + Interval(4, 5, "both"), + Interval(10, 20, "neither"), + ), + (0, 1.5, Timestamp("20170103"), np.nan), + ( + Timestamp("20170102", tz="US/Eastern"), + Timedelta("2 days"), + "baz", + pd.NaT, + ), + ], + ) + def test_compare_list_like_object(self, op, interval_array, other): + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_list_like_nan(self, op, interval_array, nulls_fixture): + other = [nulls_fixture] * 4 + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [ + np.arange(4, dtype="int64"), + np.arange(4, dtype="float64"), + date_range("2017-01-01", periods=4), + date_range("2017-01-01", periods=4, tz="US/Eastern"), + timedelta_range("0 days", periods=4), + period_range("2017-01-01", periods=4, freq="D"), + Categorical(list("abab")), + Categorical(date_range("2017-01-01", periods=4)), + pd.array(list("abcd")), + pd.array(["foo", 3.14, None, object()], dtype=object), + ], + ids=lambda x: str(x.dtype), + ) + def test_compare_list_like_other(self, op, interval_array, other): + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("length", [1, 3, 5]) + @pytest.mark.parametrize("other_constructor", [IntervalArray, list]) + def test_compare_length_mismatch_errors(self, op, other_constructor, length): + interval_array = IntervalArray.from_arrays(range(4), range(1, 5)) + other = other_constructor([Interval(0, 1)] * length) + with pytest.raises(ValueError, match="Lengths must match to compare"): + op(interval_array, other) + + 
@pytest.mark.parametrize( + "constructor, expected_type, assert_func", + [ + (IntervalIndex, np.array, tm.assert_numpy_array_equal), + (Series, Series, tm.assert_series_equal), + ], + ) + def test_index_series_compat(self, op, constructor, expected_type, assert_func): + # IntervalIndex/Series that rely on IntervalArray for comparisons + breaks = range(4) + index = constructor(IntervalIndex.from_breaks(breaks)) + + # scalar comparisons + other = index[0] + result = op(index, other) + expected = expected_type(self.elementwise_comparison(op, index, other)) + assert_func(result, expected) + + other = breaks[0] + result = op(index, other) + expected = expected_type(self.elementwise_comparison(op, index, other)) + assert_func(result, expected) + + # list-like comparisons + other = IntervalArray.from_breaks(breaks) + result = op(index, other) + expected = expected_type(self.elementwise_comparison(op, index, other)) + assert_func(result, expected) + + other = [index[0], breaks[0], "foo"] + result = op(index, other) + expected = expected_type(self.elementwise_comparison(op, index, other)) + assert_func(result, expected) + + @pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None]) + def test_comparison_operations(self, scalars): + # GH #28981 + expected = Series([False, False]) + s = Series([Interval(0, 1), Interval(1, 2)], dtype="interval") + result = s == scalars + tm.assert_series_equal(result, expected) diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_object.py b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_object.py new file mode 100644 index 0000000000000000000000000000000000000000..4ffd76722286ab0e6729334216652f4613d9769f --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_object.py @@ -0,0 +1,420 @@ +# Arithmetic tests for DataFrame/Series/Index/Array classes that should +# behave identically. 
+# Specifically for object dtype +import datetime +from decimal import Decimal +import operator + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Series, + Timestamp, + option_context, +) +import pandas._testing as tm +from pandas.core import ops + +# ------------------------------------------------------------------ +# Comparisons + + +class TestObjectComparisons: + def test_comparison_object_numeric_nas(self, comparison_op): + ser = Series(np.random.default_rng(2).standard_normal(10), dtype=object) + shifted = ser.shift(2) + + func = comparison_op + + result = func(ser, shifted) + expected = func(ser.astype(float), shifted.astype(float)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] + ) + def test_object_comparisons(self, infer_string): + with option_context("future.infer_string", infer_string): + ser = Series(["a", "b", np.nan, "c", "a"]) + + result = ser == "a" + expected = Series([True, False, False, False, True]) + tm.assert_series_equal(result, expected) + + result = ser < "a" + expected = Series([False, False, False, False, False]) + tm.assert_series_equal(result, expected) + + result = ser != "a" + expected = -(ser == "a") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", [None, object]) + def test_more_na_comparisons(self, dtype): + left = Series(["a", np.nan, "c"], dtype=dtype) + right = Series(["a", np.nan, "d"], dtype=dtype) + + result = left == right + expected = Series([True, False, False]) + tm.assert_series_equal(result, expected) + + result = left != right + expected = Series([False, True, True]) + tm.assert_series_equal(result, expected) + + result = left == np.nan + expected = Series([False, False, False]) + tm.assert_series_equal(result, expected) + + result = left != np.nan + 
expected = Series([True, True, True]) + tm.assert_series_equal(result, expected) + + +# ------------------------------------------------------------------ +# Arithmetic + + +class TestArithmetic: + def test_add_period_to_array_of_offset(self): + # GH#50162 + per = pd.Period("2012-1-1", freq="D") + pi = pd.period_range("2012-1-1", periods=10, freq="D") + idx = per - pi + + expected = pd.Index([x + per for x in idx], dtype=object) + result = idx + per + tm.assert_index_equal(result, expected) + + result = per + idx + tm.assert_index_equal(result, expected) + + # TODO: parametrize + def test_pow_ops_object(self): + # GH#22922 + # pow is weird with masking & 1, so testing here + a = Series([1, np.nan, 1, np.nan], dtype=object) + b = Series([1, np.nan, np.nan, 1], dtype=object) + result = a**b + expected = Series(a.values**b.values, dtype=object) + tm.assert_series_equal(result, expected) + + result = b**a + expected = Series(b.values**a.values, dtype=object) + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + @pytest.mark.parametrize("other", ["category", "Int64"]) + def test_add_extension_scalar(self, other, box_with_array, op): + # GH#22378 + # Check that scalars satisfying is_extension_array_dtype(obj) + # do not incorrectly try to dispatch to an ExtensionArray operation + + arr = Series(["a", "b", "c"]) + expected = Series([op(x, other) for x in arr]) + + arr = tm.box_expected(arr, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = op(arr, other) + tm.assert_equal(result, expected) + + def test_objarr_add_str(self, box_with_array): + ser = Series(["x", np.nan, "x"]) + expected = Series(["xa", np.nan, "xa"]) + + ser = tm.box_expected(ser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = ser + "a" + tm.assert_equal(result, expected) + + def test_objarr_radd_str(self, box_with_array): + ser = Series(["x", np.nan, "x"]) + expected = Series(["ax", 
np.nan, "ax"]) + + ser = tm.box_expected(ser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = "a" + ser + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "data", + [ + [1, 2, 3], + [1.1, 2.2, 3.3], + [Timestamp("2011-01-01"), Timestamp("2011-01-02"), pd.NaT], + ["x", "y", 1], + ], + ) + @pytest.mark.parametrize("dtype", [None, object]) + def test_objarr_radd_str_invalid(self, dtype, data, box_with_array): + ser = Series(data, dtype=dtype) + + ser = tm.box_expected(ser, box_with_array) + msg = "|".join( + [ + "can only concatenate str", + "did not contain a loop with signature matching types", + "unsupported operand type", + "must be str", + ] + ) + with pytest.raises(TypeError, match=msg): + "foo_" + ser + + @pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub]) + def test_objarr_add_invalid(self, op, box_with_array): + # invalid ops + box = box_with_array + + obj_ser = Series(list("abc"), dtype=object, name="objects") + + obj_ser = tm.box_expected(obj_ser, box) + msg = "|".join( + [ + "can only concatenate str", + "unsupported operand type", + "must be str", + "has no kernel", + ] + ) + with pytest.raises(Exception, match=msg): + op(obj_ser, 1) + with pytest.raises(Exception, match=msg): + op(obj_ser, np.array(1, dtype=np.int64)) + + # TODO: Moved from tests.series.test_operators; needs cleanup + def test_operators_na_handling(self): + ser = Series(["foo", "bar", "baz", np.nan]) + result = "prefix_" + ser + expected = Series(["prefix_foo", "prefix_bar", "prefix_baz", np.nan]) + tm.assert_series_equal(result, expected) + + result = ser + "_suffix" + expected = Series(["foo_suffix", "bar_suffix", "baz_suffix", np.nan]) + tm.assert_series_equal(result, expected) + + # TODO: parametrize over box + @pytest.mark.parametrize("dtype", [None, object]) + def test_series_with_dtype_radd_timedelta(self, dtype): + # note this test is _not_ aimed at timedelta64-dtyped Series + # as of 2.0 we retain 
object dtype when ser.dtype == object + ser = Series( + [pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")], + dtype=dtype, + ) + expected = Series( + [pd.Timedelta("4 days"), pd.Timedelta("5 days"), pd.Timedelta("6 days")], + dtype=dtype, + ) + + result = pd.Timedelta("3 days") + ser + tm.assert_series_equal(result, expected) + + result = ser + pd.Timedelta("3 days") + tm.assert_series_equal(result, expected) + + # TODO: cleanup & parametrize over box + def test_mixed_timezone_series_ops_object(self): + # GH#13043 + ser = Series( + [ + Timestamp("2015-01-01", tz="US/Eastern"), + Timestamp("2015-01-01", tz="Asia/Tokyo"), + ], + name="xxx", + ) + assert ser.dtype == object + + exp = Series( + [ + Timestamp("2015-01-02", tz="US/Eastern"), + Timestamp("2015-01-02", tz="Asia/Tokyo"), + ], + name="xxx", + ) + tm.assert_series_equal(ser + pd.Timedelta("1 days"), exp) + tm.assert_series_equal(pd.Timedelta("1 days") + ser, exp) + + # object series & object series + ser2 = Series( + [ + Timestamp("2015-01-03", tz="US/Eastern"), + Timestamp("2015-01-05", tz="Asia/Tokyo"), + ], + name="xxx", + ) + assert ser2.dtype == object + exp = Series( + [pd.Timedelta("2 days"), pd.Timedelta("4 days")], name="xxx", dtype=object + ) + tm.assert_series_equal(ser2 - ser, exp) + tm.assert_series_equal(ser - ser2, -exp) + + ser = Series( + [pd.Timedelta("01:00:00"), pd.Timedelta("02:00:00")], + name="xxx", + dtype=object, + ) + assert ser.dtype == object + + exp = Series( + [pd.Timedelta("01:30:00"), pd.Timedelta("02:30:00")], + name="xxx", + dtype=object, + ) + tm.assert_series_equal(ser + pd.Timedelta("00:30:00"), exp) + tm.assert_series_equal(pd.Timedelta("00:30:00") + ser, exp) + + # TODO: cleanup & parametrize over box + def test_iadd_preserves_name(self): + # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name + ser = Series([1, 2, 3]) + ser.index.name = "foo" + + ser.index += 1 + assert ser.index.name == "foo" + + ser.index -= 1 + assert 
ser.index.name == "foo" + + def test_add_string(self): + # from bug report + index = pd.Index(["a", "b", "c"]) + index2 = index + "foo" + + assert "a" not in index2 + assert "afoo" in index2 + + def test_iadd_string(self): + index = pd.Index(["a", "b", "c"]) + # doesn't fail test unless there is a check before `+=` + assert "a" in index + + index += "_x" + assert "a_x" in index + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="add doesn't work") + def test_add(self): + index = pd.Index([str(i) for i in range(10)]) + expected = pd.Index(index.values * 2) + tm.assert_index_equal(index + index, expected) + tm.assert_index_equal(index + index.tolist(), expected) + tm.assert_index_equal(index.tolist() + index, expected) + + # test add and radd + index = pd.Index(list("abc")) + expected = pd.Index(["a1", "b1", "c1"]) + tm.assert_index_equal(index + "1", expected) + expected = pd.Index(["1a", "1b", "1c"]) + tm.assert_index_equal("1" + index, expected) + + def test_sub_fail(self, using_infer_string): + index = pd.Index([str(i) for i in range(10)]) + + if using_infer_string: + import pyarrow as pa + + err = pa.lib.ArrowNotImplementedError + msg = "has no kernel" + else: + err = TypeError + msg = "unsupported operand type|Cannot broadcast" + with pytest.raises(err, match=msg): + index - "a" + with pytest.raises(err, match=msg): + index - index + with pytest.raises(err, match=msg): + index - index.tolist() + with pytest.raises(err, match=msg): + index.tolist() - index + + def test_sub_object(self): + # GH#19369 + index = pd.Index([Decimal(1), Decimal(2)]) + expected = pd.Index([Decimal(0), Decimal(1)]) + + result = index - Decimal(1) + tm.assert_index_equal(result, expected) + + result = index - pd.Index([Decimal(1), Decimal(1)]) + tm.assert_index_equal(result, expected) + + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + index - "foo" + + with pytest.raises(TypeError, match=msg): + index - np.array([2, "foo"], dtype=object) + + def 
test_rsub_object(self, fixed_now_ts): + # GH#19369 + index = pd.Index([Decimal(1), Decimal(2)]) + expected = pd.Index([Decimal(1), Decimal(0)]) + + result = Decimal(2) - index + tm.assert_index_equal(result, expected) + + result = np.array([Decimal(2), Decimal(2)]) - index + tm.assert_index_equal(result, expected) + + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + "foo" - index + + with pytest.raises(TypeError, match=msg): + np.array([True, fixed_now_ts]) - index + + +class MyIndex(pd.Index): + # Simple index subclass that tracks ops calls. + + _calls: int + + @classmethod + def _simple_new(cls, values, name=None, dtype=None): + result = object.__new__(cls) + result._data = values + result._name = name + result._calls = 0 + result._reset_identity() + + return result + + def __add__(self, other): + self._calls += 1 + return self._simple_new(self._data) + + def __radd__(self, other): + return self.__add__(other) + + +@pytest.mark.parametrize( + "other", + [ + [datetime.timedelta(1), datetime.timedelta(2)], + [datetime.datetime(2000, 1, 1), datetime.datetime(2000, 1, 2)], + [pd.Period("2000"), pd.Period("2001")], + ["a", "b"], + ], + ids=["timedelta", "datetime", "period", "object"], +) +def test_index_ops_defer_to_unknown_subclasses(other): + # https://github.com/pandas-dev/pandas/issues/31109 + values = np.array( + [datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)], dtype=object + ) + a = MyIndex._simple_new(values) + other = pd.Index(other) + result = other + a + assert isinstance(result, MyIndex) + assert a._calls == 1 diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_period.py b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_period.py new file mode 100644 index 0000000000000000000000000000000000000000..5535fe8ff928d10b994bd6556229e0163a358ab0 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_period.py @@ -0,0 +1,1675 @@ +# Arithmetic tests 
for DataFrame/Series/Index/Array classes that should +# behave identically. +# Specifically for Period dtype +import operator + +import numpy as np +import pytest + +from pandas._libs.tslibs import ( + IncompatibleFrequency, + Period, + Timestamp, + to_offset, +) +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + PeriodIndex, + Series, + Timedelta, + TimedeltaIndex, + period_range, +) +import pandas._testing as tm +from pandas.core import ops +from pandas.core.arrays import TimedeltaArray +from pandas.tests.arithmetic.common import ( + assert_invalid_addsub_type, + assert_invalid_comparison, + get_upcast_box, +) + +_common_mismatch = [ + pd.offsets.YearBegin(2), + pd.offsets.MonthBegin(1), + pd.offsets.Minute(), +] + + +@pytest.fixture( + params=[ + Timedelta(minutes=30).to_pytimedelta(), + np.timedelta64(30, "s"), + Timedelta(seconds=30), + ] + + _common_mismatch +) +def not_hourly(request): + """ + Several timedelta-like and DateOffset instances that are _not_ + compatible with Hourly frequencies. + """ + return request.param + + +@pytest.fixture( + params=[ + np.timedelta64(365, "D"), + Timedelta(days=365).to_pytimedelta(), + Timedelta(days=365), + ] + + _common_mismatch +) +def mismatched_freq(request): + """ + Several timedelta-like and DateOffset instances that are _not_ + compatible with Monthly or Annual frequencies. + """ + return request.param + + +# ------------------------------------------------------------------ +# Comparisons + + +class TestPeriodArrayLikeComparisons: + # Comparison tests for PeriodDtype vectors fully parametrized over + # DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison + # tests will eventually end up here. 
+ + @pytest.mark.parametrize("other", ["2017", Period("2017", freq="D")]) + def test_eq_scalar(self, other, box_with_array): + idx = PeriodIndex(["2017", "2017", "2018"], freq="D") + idx = tm.box_expected(idx, box_with_array) + xbox = get_upcast_box(idx, other, True) + + expected = np.array([True, True, False]) + expected = tm.box_expected(expected, xbox) + + result = idx == other + + tm.assert_equal(result, expected) + + def test_compare_zerodim(self, box_with_array): + # GH#26689 make sure we unbox zero-dimensional arrays + + pi = period_range("2000", periods=4) + other = np.array(pi.to_numpy()[0]) + + pi = tm.box_expected(pi, box_with_array) + xbox = get_upcast_box(pi, other, True) + + result = pi <= other + expected = np.array([True, False, False, False]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "scalar", + [ + "foo", + Timestamp("2021-01-01"), + Timedelta(days=4), + 9, + 9.5, + 2000, # specifically don't consider 2000 to match Period("2000", "D") + False, + None, + ], + ) + def test_compare_invalid_scalar(self, box_with_array, scalar): + # GH#28980 + # comparison with scalar that cannot be interpreted as a Period + pi = period_range("2000", periods=4) + parr = tm.box_expected(pi, box_with_array) + assert_invalid_comparison(parr, scalar, box_with_array) + + @pytest.mark.parametrize( + "other", + [ + pd.date_range("2000", periods=4).array, + pd.timedelta_range("1D", periods=4).array, + np.arange(4), + np.arange(4).astype(np.float64), + list(range(4)), + # match Period semantics by not treating integers as Periods + [2000, 2001, 2002, 2003], + np.arange(2000, 2004), + np.arange(2000, 2004).astype(object), + pd.Index([2000, 2001, 2002, 2003]), + ], + ) + def test_compare_invalid_listlike(self, box_with_array, other): + pi = period_range("2000", periods=4) + parr = tm.box_expected(pi, box_with_array) + assert_invalid_comparison(parr, other, box_with_array) + + 
@pytest.mark.parametrize("other_box", [list, np.array, lambda x: x.astype(object)]) + def test_compare_object_dtype(self, box_with_array, other_box): + pi = period_range("2000", periods=5) + parr = tm.box_expected(pi, box_with_array) + + other = other_box(pi) + xbox = get_upcast_box(parr, other, True) + + expected = np.array([True, True, True, True, True]) + expected = tm.box_expected(expected, xbox) + + result = parr == other + tm.assert_equal(result, expected) + result = parr <= other + tm.assert_equal(result, expected) + result = parr >= other + tm.assert_equal(result, expected) + + result = parr != other + tm.assert_equal(result, ~expected) + result = parr < other + tm.assert_equal(result, ~expected) + result = parr > other + tm.assert_equal(result, ~expected) + + other = other_box(pi[::-1]) + + expected = np.array([False, False, True, False, False]) + expected = tm.box_expected(expected, xbox) + result = parr == other + tm.assert_equal(result, expected) + + expected = np.array([True, True, True, False, False]) + expected = tm.box_expected(expected, xbox) + result = parr <= other + tm.assert_equal(result, expected) + + expected = np.array([False, False, True, True, True]) + expected = tm.box_expected(expected, xbox) + result = parr >= other + tm.assert_equal(result, expected) + + expected = np.array([True, True, False, True, True]) + expected = tm.box_expected(expected, xbox) + result = parr != other + tm.assert_equal(result, expected) + + expected = np.array([True, True, False, False, False]) + expected = tm.box_expected(expected, xbox) + result = parr < other + tm.assert_equal(result, expected) + + expected = np.array([False, False, False, True, True]) + expected = tm.box_expected(expected, xbox) + result = parr > other + tm.assert_equal(result, expected) + + +class TestPeriodIndexComparisons: + # TODO: parameterize over boxes + + def test_pi_cmp_period(self): + idx = period_range("2007-01", periods=20, freq="M") + per = idx[10] + + result = idx < per + exp = 
idx.values < idx.values[10] + tm.assert_numpy_array_equal(result, exp) + + # Tests Period.__richcmp__ against ndarray[object, ndim=2] + result = idx.values.reshape(10, 2) < per + tm.assert_numpy_array_equal(result, exp.reshape(10, 2)) + + # Tests Period.__richcmp__ against ndarray[object, ndim=0] + result = idx < np.array(per) + tm.assert_numpy_array_equal(result, exp) + + # TODO: moved from test_datetime64; de-duplicate with version below + def test_parr_cmp_period_scalar2(self, box_with_array): + pi = period_range("2000-01-01", periods=10, freq="D") + + val = pi[3] + expected = [x > val for x in pi] + + ser = tm.box_expected(pi, box_with_array) + xbox = get_upcast_box(ser, val, True) + + expected = tm.box_expected(expected, xbox) + result = ser > val + tm.assert_equal(result, expected) + + val = pi[5] + result = ser > val + expected = [x > val for x in pi] + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_parr_cmp_period_scalar(self, freq, box_with_array): + # GH#13200 + base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) + base = tm.box_expected(base, box_with_array) + per = Period("2011-02", freq=freq) + xbox = get_upcast_box(base, per, True) + + exp = np.array([False, True, False, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base == per, exp) + tm.assert_equal(per == base, exp) + + exp = np.array([True, False, True, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base != per, exp) + tm.assert_equal(per != base, exp) + + exp = np.array([False, False, True, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base > per, exp) + tm.assert_equal(per < base, exp) + + exp = np.array([True, False, False, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base < per, exp) + tm.assert_equal(per > base, exp) + + exp = np.array([False, True, True, True]) + exp = tm.box_expected(exp, xbox) + 
tm.assert_equal(base >= per, exp) + tm.assert_equal(per <= base, exp) + + exp = np.array([True, True, False, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base <= per, exp) + tm.assert_equal(per >= base, exp) + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_parr_cmp_pi(self, freq, box_with_array): + # GH#13200 + base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) + base = tm.box_expected(base, box_with_array) + + # TODO: could also box idx? + idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq) + + xbox = get_upcast_box(base, idx, True) + + exp = np.array([False, False, True, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base == idx, exp) + + exp = np.array([True, True, False, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base != idx, exp) + + exp = np.array([False, True, False, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base > idx, exp) + + exp = np.array([True, False, False, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base < idx, exp) + + exp = np.array([False, True, True, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base >= idx, exp) + + exp = np.array([True, False, True, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base <= idx, exp) + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_parr_cmp_pi_mismatched_freq(self, freq, box_with_array): + # GH#13200 + # different base freq + base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) + base = tm.box_expected(base, box_with_array) + + msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period" + with pytest.raises(TypeError, match=msg): + base <= Period("2011", freq="Y") + + with pytest.raises(TypeError, match=msg): + Period("2011", freq="Y") >= base + + # TODO: Could parametrize over boxes for idx? 
+ idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="Y") + rev_msg = r"Invalid comparison between dtype=period\[Y-DEC\] and PeriodArray" + idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg + with pytest.raises(TypeError, match=idx_msg): + base <= idx + + # Different frequency + msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period" + with pytest.raises(TypeError, match=msg): + base <= Period("2011", freq="4M") + + with pytest.raises(TypeError, match=msg): + Period("2011", freq="4M") >= base + + idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M") + rev_msg = r"Invalid comparison between dtype=period\[4M\] and PeriodArray" + idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg + with pytest.raises(TypeError, match=idx_msg): + base <= idx + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_pi_cmp_nat(self, freq): + idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq) + per = idx1[1] + + result = idx1 > per + exp = np.array([False, False, False, True]) + tm.assert_numpy_array_equal(result, exp) + result = per < idx1 + tm.assert_numpy_array_equal(result, exp) + + result = idx1 == pd.NaT + exp = np.array([False, False, False, False]) + tm.assert_numpy_array_equal(result, exp) + result = pd.NaT == idx1 + tm.assert_numpy_array_equal(result, exp) + + result = idx1 != pd.NaT + exp = np.array([True, True, True, True]) + tm.assert_numpy_array_equal(result, exp) + result = pd.NaT != idx1 + tm.assert_numpy_array_equal(result, exp) + + idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq) + result = idx1 < idx2 + exp = np.array([True, False, False, False]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 == idx2 + exp = np.array([False, False, False, False]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 != idx2 + exp = np.array([True, True, True, True]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 == idx1 + exp = 
np.array([True, True, False, True]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 != idx1 + exp = np.array([False, False, True, False]) + tm.assert_numpy_array_equal(result, exp) + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_pi_cmp_nat_mismatched_freq_raises(self, freq): + idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq) + + diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M") + msg = rf"Invalid comparison between dtype=period\[{freq}\] and PeriodArray" + with pytest.raises(TypeError, match=msg): + idx1 > diff + + result = idx1 == diff + expected = np.array([False, False, False, False], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + # TODO: De-duplicate with test_pi_cmp_nat + @pytest.mark.parametrize("dtype", [object, None]) + def test_comp_nat(self, dtype): + left = PeriodIndex([Period("2011-01-01"), pd.NaT, Period("2011-01-03")]) + right = PeriodIndex([pd.NaT, pd.NaT, Period("2011-01-03")]) + + if dtype is not None: + left = left.astype(dtype) + right = right.astype(dtype) + + result = left == right + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = left != right + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(left == pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT == right, expected) + + expected = np.array([True, True, True]) + tm.assert_numpy_array_equal(left != pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT != left, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(left < pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT > left, expected) + + +class TestPeriodSeriesComparisons: + def test_cmp_series_period_series_mixed_freq(self): + # GH#13200 + base = Series( + [ + Period("2011", freq="Y"), + Period("2011-02", freq="M"), + Period("2013", 
freq="Y"), + Period("2011-04", freq="M"), + ] + ) + + ser = Series( + [ + Period("2012", freq="Y"), + Period("2011-01", freq="M"), + Period("2013", freq="Y"), + Period("2011-05", freq="M"), + ] + ) + + exp = Series([False, False, True, False]) + tm.assert_series_equal(base == ser, exp) + + exp = Series([True, True, False, True]) + tm.assert_series_equal(base != ser, exp) + + exp = Series([False, True, False, False]) + tm.assert_series_equal(base > ser, exp) + + exp = Series([True, False, False, True]) + tm.assert_series_equal(base < ser, exp) + + exp = Series([False, True, True, False]) + tm.assert_series_equal(base >= ser, exp) + + exp = Series([True, False, True, True]) + tm.assert_series_equal(base <= ser, exp) + + +class TestPeriodIndexSeriesComparisonConsistency: + """Test PeriodIndex and Period Series Ops consistency""" + + # TODO: needs parametrization+de-duplication + + def _check(self, values, func, expected): + # Test PeriodIndex and Period Series Ops consistency + + idx = PeriodIndex(values) + result = func(idx) + + # check that we don't pass an unwanted type to tm.assert_equal + assert isinstance(expected, (pd.Index, np.ndarray)) + tm.assert_equal(result, expected) + + s = Series(values) + result = func(s) + + exp = Series(expected, name=values.name) + tm.assert_series_equal(result, exp) + + def test_pi_comp_period(self): + idx = PeriodIndex( + ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" + ) + per = idx[2] + + f = lambda x: x == per + exp = np.array([False, False, True, False], dtype=np.bool_) + self._check(idx, f, exp) + f = lambda x: per == x + self._check(idx, f, exp) + + f = lambda x: x != per + exp = np.array([True, True, False, True], dtype=np.bool_) + self._check(idx, f, exp) + f = lambda x: per != x + self._check(idx, f, exp) + + f = lambda x: per >= x + exp = np.array([True, True, True, False], dtype=np.bool_) + self._check(idx, f, exp) + + f = lambda x: x > per + exp = np.array([False, False, False, True], 
dtype=np.bool_) + self._check(idx, f, exp) + + f = lambda x: per >= x + exp = np.array([True, True, True, False], dtype=np.bool_) + self._check(idx, f, exp) + + def test_pi_comp_period_nat(self): + idx = PeriodIndex( + ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx" + ) + per = idx[2] + + f = lambda x: x == per + exp = np.array([False, False, True, False], dtype=np.bool_) + self._check(idx, f, exp) + f = lambda x: per == x + self._check(idx, f, exp) + + f = lambda x: x == pd.NaT + exp = np.array([False, False, False, False], dtype=np.bool_) + self._check(idx, f, exp) + f = lambda x: pd.NaT == x + self._check(idx, f, exp) + + f = lambda x: x != per + exp = np.array([True, True, False, True], dtype=np.bool_) + self._check(idx, f, exp) + f = lambda x: per != x + self._check(idx, f, exp) + + f = lambda x: x != pd.NaT + exp = np.array([True, True, True, True], dtype=np.bool_) + self._check(idx, f, exp) + f = lambda x: pd.NaT != x + self._check(idx, f, exp) + + f = lambda x: per >= x + exp = np.array([True, False, True, False], dtype=np.bool_) + self._check(idx, f, exp) + + f = lambda x: x < per + exp = np.array([True, False, False, False], dtype=np.bool_) + self._check(idx, f, exp) + + f = lambda x: x > pd.NaT + exp = np.array([False, False, False, False], dtype=np.bool_) + self._check(idx, f, exp) + + f = lambda x: pd.NaT >= x + exp = np.array([False, False, False, False], dtype=np.bool_) + self._check(idx, f, exp) + + +# ------------------------------------------------------------------ +# Arithmetic + + +class TestPeriodFrameArithmetic: + def test_ops_frame_period(self): + # GH#13043 + df = pd.DataFrame( + { + "A": [Period("2015-01", freq="M"), Period("2015-02", freq="M")], + "B": [Period("2014-01", freq="M"), Period("2014-02", freq="M")], + } + ) + assert df["A"].dtype == "Period[M]" + assert df["B"].dtype == "Period[M]" + + p = Period("2015-03", freq="M") + off = p.freq + # dtype will be object because of original dtype + exp = pd.DataFrame( + { + 
"A": np.array([2 * off, 1 * off], dtype=object), + "B": np.array([14 * off, 13 * off], dtype=object), + } + ) + tm.assert_frame_equal(p - df, exp) + tm.assert_frame_equal(df - p, -1 * exp) + + df2 = pd.DataFrame( + { + "A": [Period("2015-05", freq="M"), Period("2015-06", freq="M")], + "B": [Period("2015-05", freq="M"), Period("2015-06", freq="M")], + } + ) + assert df2["A"].dtype == "Period[M]" + assert df2["B"].dtype == "Period[M]" + + exp = pd.DataFrame( + { + "A": np.array([4 * off, 4 * off], dtype=object), + "B": np.array([16 * off, 16 * off], dtype=object), + } + ) + tm.assert_frame_equal(df2 - df, exp) + tm.assert_frame_equal(df - df2, -1 * exp) + + +class TestPeriodIndexArithmetic: + # --------------------------------------------------------------- + # __add__/__sub__ with PeriodIndex + # PeriodIndex + other is defined for integers and timedelta-like others + # PeriodIndex - other is defined for integers, timedelta-like others, + # and PeriodIndex (with matching freq) + + def test_parr_add_iadd_parr_raises(self, box_with_array): + rng = period_range("1/1/2000", freq="D", periods=5) + other = period_range("1/6/2000", freq="D", periods=5) + # TODO: parametrize over boxes for other? + + rng = tm.box_expected(rng, box_with_array) + # An earlier implementation of PeriodIndex addition performed + # a set operation (union). This has since been changed to + # raise a TypeError. See GH#14164 and GH#13077 for historical + # reference. + msg = r"unsupported operand type\(s\) for \+: .* and .*" + with pytest.raises(TypeError, match=msg): + rng + other + + with pytest.raises(TypeError, match=msg): + rng += other + + def test_pi_sub_isub_pi(self): + # GH#20049 + # For historical reference see GH#14164, GH#13077. 
+ # PeriodIndex subtraction originally performed set difference, + # then changed to raise TypeError before being implemented in GH#20049 + rng = period_range("1/1/2000", freq="D", periods=5) + other = period_range("1/6/2000", freq="D", periods=5) + + off = rng.freq + expected = pd.Index([-5 * off] * 5) + result = rng - other + tm.assert_index_equal(result, expected) + + rng -= other + tm.assert_index_equal(rng, expected) + + def test_pi_sub_pi_with_nat(self): + rng = period_range("1/1/2000", freq="D", periods=5) + other = rng[1:].insert(0, pd.NaT) + assert other[1:].equals(rng[1:]) + + result = rng - other + off = rng.freq + expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off]) + tm.assert_index_equal(result, expected) + + def test_parr_sub_pi_mismatched_freq(self, box_with_array, box_with_array2): + rng = period_range("1/1/2000", freq="D", periods=5) + other = period_range("1/6/2000", freq="h", periods=5) + + rng = tm.box_expected(rng, box_with_array) + other = tm.box_expected(other, box_with_array2) + msg = r"Input has different freq=[hD] from PeriodArray\(freq=[Dh]\)" + with pytest.raises(IncompatibleFrequency, match=msg): + rng - other + + @pytest.mark.parametrize("n", [1, 2, 3, 4]) + def test_sub_n_gt_1_ticks(self, tick_classes, n): + # GH 23878 + p1_d = "19910905" + p2_d = "19920406" + p1 = PeriodIndex([p1_d], freq=tick_classes(n)) + p2 = PeriodIndex([p2_d], freq=tick_classes(n)) + + expected = PeriodIndex([p2_d], freq=p2.freq.base) - PeriodIndex( + [p1_d], freq=p1.freq.base + ) + + tm.assert_index_equal((p2 - p1), expected) + + @pytest.mark.parametrize("n", [1, 2, 3, 4]) + @pytest.mark.parametrize( + "offset, kwd_name", + [ + (pd.offsets.YearEnd, "month"), + (pd.offsets.QuarterEnd, "startingMonth"), + (pd.offsets.MonthEnd, None), + (pd.offsets.Week, "weekday"), + ], + ) + def test_sub_n_gt_1_offsets(self, offset, kwd_name, n): + # GH 23878 + kwds = {kwd_name: 3} if kwd_name is not None else {} + p1_d = "19910905" + p2_d = "19920406" + freq = 
offset(n, normalize=False, **kwds) + p1 = PeriodIndex([p1_d], freq=freq) + p2 = PeriodIndex([p2_d], freq=freq) + + result = p2 - p1 + expected = PeriodIndex([p2_d], freq=freq.base) - PeriodIndex( + [p1_d], freq=freq.base + ) + + tm.assert_index_equal(result, expected) + + # ------------------------------------------------------------- + # Invalid Operations + + @pytest.mark.parametrize( + "other", + [ + # datetime scalars + Timestamp("2016-01-01"), + Timestamp("2016-01-01").to_pydatetime(), + Timestamp("2016-01-01").to_datetime64(), + # datetime-like arrays + pd.date_range("2016-01-01", periods=3, freq="h"), + pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"), + pd.date_range("2016-01-01", periods=3, freq="s")._data, + pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data, + # Miscellaneous invalid types + 3.14, + np.array([2.0, 3.0, 4.0]), + ], + ) + def test_parr_add_sub_invalid(self, other, box_with_array): + # GH#23215 + rng = period_range("1/1/2000", freq="D", periods=3) + rng = tm.box_expected(rng, box_with_array) + + msg = "|".join( + [ + r"(:?cannot add PeriodArray and .*)", + r"(:?cannot subtract .* from (:?a\s)?.*)", + r"(:?unsupported operand type\(s\) for \+: .* and .*)", + r"unsupported operand type\(s\) for [+-]: .* and .*", + ] + ) + assert_invalid_addsub_type(rng, other, msg) + with pytest.raises(TypeError, match=msg): + rng + other + with pytest.raises(TypeError, match=msg): + other + rng + with pytest.raises(TypeError, match=msg): + rng - other + with pytest.raises(TypeError, match=msg): + other - rng + + # ----------------------------------------------------------------- + # __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64] + + def test_pi_add_sub_td64_array_non_tick_raises(self): + rng = period_range("1/1/2000", freq="Q", periods=3) + tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"]) + tdarr = tdi.values + + msg = r"Cannot add or subtract timedelta64\[ns\] dtype from period\[Q-DEC\]" + with 
pytest.raises(TypeError, match=msg): + rng + tdarr + with pytest.raises(TypeError, match=msg): + tdarr + rng + + with pytest.raises(TypeError, match=msg): + rng - tdarr + msg = r"cannot subtract PeriodArray from TimedeltaArray" + with pytest.raises(TypeError, match=msg): + tdarr - rng + + def test_pi_add_sub_td64_array_tick(self): + # PeriodIndex + Timedelta-like is allowed only with + # tick-like frequencies + rng = period_range("1/1/2000", freq="90D", periods=3) + tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"]) + tdarr = tdi.values + + expected = period_range("12/31/1999", freq="90D", periods=3) + result = rng + tdi + tm.assert_index_equal(result, expected) + result = rng + tdarr + tm.assert_index_equal(result, expected) + result = tdi + rng + tm.assert_index_equal(result, expected) + result = tdarr + rng + tm.assert_index_equal(result, expected) + + expected = period_range("1/2/2000", freq="90D", periods=3) + + result = rng - tdi + tm.assert_index_equal(result, expected) + result = rng - tdarr + tm.assert_index_equal(result, expected) + + msg = r"cannot subtract .* from .*" + with pytest.raises(TypeError, match=msg): + tdarr - rng + + with pytest.raises(TypeError, match=msg): + tdi - rng + + @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "h"]) + @pytest.mark.parametrize("tdi_freq", [None, "h"]) + def test_parr_sub_td64array(self, box_with_array, tdi_freq, pi_freq): + box = box_with_array + xbox = box if box not in [pd.array, tm.to_array] else pd.Index + + tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq) + dti = Timestamp("2018-03-07 17:16:40") + tdi + pi = dti.to_period(pi_freq) + + # TODO: parametrize over box for pi? 
+ td64obj = tm.box_expected(tdi, box) + + if pi_freq == "h": + result = pi - td64obj + expected = (pi.to_timestamp("s") - tdi).to_period(pi_freq) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + # Subtract from scalar + result = pi[0] - td64obj + expected = (pi[0].to_timestamp("s") - tdi).to_period(pi_freq) + expected = tm.box_expected(expected, box) + tm.assert_equal(result, expected) + + elif pi_freq == "D": + # Tick, but non-compatible + msg = ( + "Cannot add/subtract timedelta-like from PeriodArray that is " + "not an integer multiple of the PeriodArray's freq." + ) + with pytest.raises(IncompatibleFrequency, match=msg): + pi - td64obj + + with pytest.raises(IncompatibleFrequency, match=msg): + pi[0] - td64obj + + else: + # With non-Tick freq, we could not add timedelta64 array regardless + # of what its resolution is + msg = "Cannot add or subtract timedelta64" + with pytest.raises(TypeError, match=msg): + pi - td64obj + with pytest.raises(TypeError, match=msg): + pi[0] - td64obj + + # ----------------------------------------------------------------- + # operations with array/Index of DateOffset objects + + @pytest.mark.parametrize("box", [np.array, pd.Index]) + def test_pi_add_offset_array(self, box): + # GH#18849 + pi = PeriodIndex([Period("2015Q1"), Period("2016Q2")]) + offs = box( + [ + pd.offsets.QuarterEnd(n=1, startingMonth=12), + pd.offsets.QuarterEnd(n=-2, startingMonth=12), + ] + ) + expected = PeriodIndex([Period("2015Q2"), Period("2015Q4")]).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res = pi + offs + tm.assert_index_equal(res, expected) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = offs + pi + tm.assert_index_equal(res2, expected) + + unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)]) + # addition/subtraction ops with incompatible offsets should issue + # a PerformanceWarning and _then_ raise a TypeError. 
+ msg = r"Input cannot be converted to Period\(freq=Q-DEC\)" + with pytest.raises(IncompatibleFrequency, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + pi + unanchored + with pytest.raises(IncompatibleFrequency, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + unanchored + pi + + @pytest.mark.parametrize("box", [np.array, pd.Index]) + def test_pi_sub_offset_array(self, box): + # GH#18824 + pi = PeriodIndex([Period("2015Q1"), Period("2016Q2")]) + other = box( + [ + pd.offsets.QuarterEnd(n=1, startingMonth=12), + pd.offsets.QuarterEnd(n=-2, startingMonth=12), + ] + ) + + expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))]) + expected = expected.astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res = pi - other + tm.assert_index_equal(res, expected) + + anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) + + # addition/subtraction ops with anchored offsets should issue + # a PerformanceWarning and _then_ raise a TypeError. + msg = r"Input has different freq=-1M from Period\(freq=Q-DEC\)" + with pytest.raises(IncompatibleFrequency, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + pi - anchored + with pytest.raises(IncompatibleFrequency, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + anchored - pi + + def test_pi_add_iadd_int(self, one): + # Variants of `one` for #19012 + rng = period_range("2000-01-01 09:00", freq="h", periods=10) + result = rng + one + expected = period_range("2000-01-01 10:00", freq="h", periods=10) + tm.assert_index_equal(result, expected) + rng += one + tm.assert_index_equal(rng, expected) + + def test_pi_sub_isub_int(self, one): + """ + PeriodIndex.__sub__ and __isub__ with several representations of + the integer 1, e.g. int, np.int64, np.uint8, ... 
+ """ + rng = period_range("2000-01-01 09:00", freq="h", periods=10) + result = rng - one + expected = period_range("2000-01-01 08:00", freq="h", periods=10) + tm.assert_index_equal(result, expected) + rng -= one + tm.assert_index_equal(rng, expected) + + @pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)]) + def test_pi_sub_intlike(self, five): + rng = period_range("2007-01", periods=50) + + result = rng - five + exp = rng + (-five) + tm.assert_index_equal(result, exp) + + def test_pi_add_sub_int_array_freqn_gt1(self): + # GH#47209 test adding array of ints when freq.n > 1 matches + # scalar behavior + pi = period_range("2016-01-01", periods=10, freq="2D") + arr = np.arange(10) + result = pi + arr + expected = pd.Index([x + y for x, y in zip(pi, arr)]) + tm.assert_index_equal(result, expected) + + result = pi - arr + expected = pd.Index([x - y for x, y in zip(pi, arr)]) + tm.assert_index_equal(result, expected) + + def test_pi_sub_isub_offset(self): + # offset + # DateOffset + rng = period_range("2014", "2024", freq="Y") + result = rng - pd.offsets.YearEnd(5) + expected = period_range("2009", "2019", freq="Y") + tm.assert_index_equal(result, expected) + rng -= pd.offsets.YearEnd(5) + tm.assert_index_equal(rng, expected) + + rng = period_range("2014-01", "2016-12", freq="M") + result = rng - pd.offsets.MonthEnd(5) + expected = period_range("2013-08", "2016-07", freq="M") + tm.assert_index_equal(result, expected) + + rng -= pd.offsets.MonthEnd(5) + tm.assert_index_equal(rng, expected) + + @pytest.mark.parametrize("transpose", [True, False]) + def test_pi_add_offset_n_gt1(self, box_with_array, transpose): + # GH#23215 + # add offset to PeriodIndex with freq.n > 1 + + per = Period("2016-01", freq="2M") + pi = PeriodIndex([per]) + + expected = PeriodIndex(["2016-03"], freq="2M") + + pi = tm.box_expected(pi, box_with_array, transpose=transpose) + expected = tm.box_expected(expected, box_with_array, transpose=transpose) + + result = pi + per.freq + 
tm.assert_equal(result, expected) + + result = per.freq + pi + tm.assert_equal(result, expected) + + def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array): + # GH#23215 + # PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0 + pi = PeriodIndex(["2016-01"], freq="2M") + expected = PeriodIndex(["2016-04"], freq="2M") + + pi = tm.box_expected(pi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = pi + to_offset("3ME") + tm.assert_equal(result, expected) + + result = to_offset("3ME") + pi + tm.assert_equal(result, expected) + + # --------------------------------------------------------------- + # __add__/__sub__ with integer arrays + + @pytest.mark.parametrize("int_holder", [np.array, pd.Index]) + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_pi_add_intarray(self, int_holder, op): + # GH#19959 + pi = PeriodIndex([Period("2015Q1"), Period("NaT")]) + other = int_holder([4, -1]) + + result = op(pi, other) + expected = PeriodIndex([Period("2016Q1"), Period("NaT")]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("int_holder", [np.array, pd.Index]) + def test_pi_sub_intarray(self, int_holder): + # GH#19959 + pi = PeriodIndex([Period("2015Q1"), Period("NaT")]) + other = int_holder([4, -1]) + + result = pi - other + expected = PeriodIndex([Period("2014Q1"), Period("NaT")]) + tm.assert_index_equal(result, expected) + + msg = r"bad operand type for unary -: 'PeriodArray'" + with pytest.raises(TypeError, match=msg): + other - pi + + # --------------------------------------------------------------- + # Timedelta-like (timedelta, timedelta64, Timedelta, Tick) + # TODO: Some of these are misnomers because of non-Tick DateOffsets + + def test_parr_add_timedeltalike_minute_gt1(self, three_days, box_with_array): + # GH#23031 adding a time-delta-like offset to a PeriodArray that has + # minute frequency with n != 1. 
A more general case is tested below + # in test_pi_add_timedeltalike_tick_gt1, but here we write out the + # expected result more explicitly. + other = three_days + rng = period_range("2014-05-01", periods=3, freq="2D") + rng = tm.box_expected(rng, box_with_array) + + expected = PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D") + expected = tm.box_expected(expected, box_with_array) + + result = rng + other + tm.assert_equal(result, expected) + + result = other + rng + tm.assert_equal(result, expected) + + # subtraction + expected = PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D") + expected = tm.box_expected(expected, box_with_array) + result = rng - other + tm.assert_equal(result, expected) + + msg = "|".join( + [ + r"bad operand type for unary -: 'PeriodArray'", + r"cannot subtract PeriodArray from timedelta64\[[hD]\]", + ] + ) + with pytest.raises(TypeError, match=msg): + other - rng + + @pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5min", "5h", "5d"]) + def test_parr_add_timedeltalike_tick_gt1(self, three_days, freqstr, box_with_array): + # GH#23031 adding a time-delta-like offset to a PeriodArray that has + # tick-like frequency with n != 1 + other = three_days + rng = period_range("2014-05-01", periods=6, freq=freqstr) + first = rng[0] + rng = tm.box_expected(rng, box_with_array) + + expected = period_range(first + other, periods=6, freq=freqstr) + expected = tm.box_expected(expected, box_with_array) + + result = rng + other + tm.assert_equal(result, expected) + + result = other + rng + tm.assert_equal(result, expected) + + # subtraction + expected = period_range(first - other, periods=6, freq=freqstr) + expected = tm.box_expected(expected, box_with_array) + result = rng - other + tm.assert_equal(result, expected) + msg = "|".join( + [ + r"bad operand type for unary -: 'PeriodArray'", + r"cannot subtract PeriodArray from timedelta64\[[hD]\]", + ] + ) + with pytest.raises(TypeError, match=msg): + other - 
rng + + def test_pi_add_iadd_timedeltalike_daily(self, three_days): + # Tick + other = three_days + rng = period_range("2014-05-01", "2014-05-15", freq="D") + expected = period_range("2014-05-04", "2014-05-18", freq="D") + + result = rng + other + tm.assert_index_equal(result, expected) + + rng += other + tm.assert_index_equal(rng, expected) + + def test_pi_sub_isub_timedeltalike_daily(self, three_days): + # Tick-like 3 Days + other = three_days + rng = period_range("2014-05-01", "2014-05-15", freq="D") + expected = period_range("2014-04-28", "2014-05-12", freq="D") + + result = rng - other + tm.assert_index_equal(result, expected) + + rng -= other + tm.assert_index_equal(rng, expected) + + def test_parr_add_sub_timedeltalike_freq_mismatch_daily( + self, not_daily, box_with_array + ): + other = not_daily + rng = period_range("2014-05-01", "2014-05-15", freq="D") + rng = tm.box_expected(rng, box_with_array) + + msg = "|".join( + [ + # non-timedelta-like DateOffset + "Input has different freq(=.+)? 
from Period.*?\\(freq=D\\)", + # timedelta/td64/Timedelta but not a multiple of 24H + "Cannot add/subtract timedelta-like from PeriodArray that is " + "not an integer multiple of the PeriodArray's freq.", + ] + ) + with pytest.raises(IncompatibleFrequency, match=msg): + rng + other + with pytest.raises(IncompatibleFrequency, match=msg): + rng += other + with pytest.raises(IncompatibleFrequency, match=msg): + rng - other + with pytest.raises(IncompatibleFrequency, match=msg): + rng -= other + + def test_pi_add_iadd_timedeltalike_hourly(self, two_hours): + other = two_hours + rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="h") + expected = period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="h") + + result = rng + other + tm.assert_index_equal(result, expected) + + rng += other + tm.assert_index_equal(rng, expected) + + def test_parr_add_timedeltalike_mismatched_freq_hourly( + self, not_hourly, box_with_array + ): + other = not_hourly + rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="h") + rng = tm.box_expected(rng, box_with_array) + msg = "|".join( + [ + # non-timedelta-like DateOffset + "Input has different freq(=.+)? 
from Period.*?\\(freq=h\\)", + # timedelta/td64/Timedelta but not a multiple of 24H + "Cannot add/subtract timedelta-like from PeriodArray that is " + "not an integer multiple of the PeriodArray's freq.", + ] + ) + + with pytest.raises(IncompatibleFrequency, match=msg): + rng + other + + with pytest.raises(IncompatibleFrequency, match=msg): + rng += other + + def test_pi_sub_isub_timedeltalike_hourly(self, two_hours): + other = two_hours + rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="h") + expected = period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="h") + + result = rng - other + tm.assert_index_equal(result, expected) + + rng -= other + tm.assert_index_equal(rng, expected) + + def test_add_iadd_timedeltalike_annual(self): + # offset + # DateOffset + rng = period_range("2014", "2024", freq="Y") + result = rng + pd.offsets.YearEnd(5) + expected = period_range("2019", "2029", freq="Y") + tm.assert_index_equal(result, expected) + rng += pd.offsets.YearEnd(5) + tm.assert_index_equal(rng, expected) + + def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq): + other = mismatched_freq + rng = period_range("2014", "2024", freq="Y") + msg = "Input has different freq(=.+)? 
from Period.*?\\(freq=Y-DEC\\)" + with pytest.raises(IncompatibleFrequency, match=msg): + rng + other + with pytest.raises(IncompatibleFrequency, match=msg): + rng += other + with pytest.raises(IncompatibleFrequency, match=msg): + rng - other + with pytest.raises(IncompatibleFrequency, match=msg): + rng -= other + + def test_pi_add_iadd_timedeltalike_M(self): + rng = period_range("2014-01", "2016-12", freq="M") + expected = period_range("2014-06", "2017-05", freq="M") + + result = rng + pd.offsets.MonthEnd(5) + tm.assert_index_equal(result, expected) + + rng += pd.offsets.MonthEnd(5) + tm.assert_index_equal(rng, expected) + + def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq): + other = mismatched_freq + rng = period_range("2014-01", "2016-12", freq="M") + msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)" + with pytest.raises(IncompatibleFrequency, match=msg): + rng + other + with pytest.raises(IncompatibleFrequency, match=msg): + rng += other + with pytest.raises(IncompatibleFrequency, match=msg): + rng - other + with pytest.raises(IncompatibleFrequency, match=msg): + rng -= other + + @pytest.mark.parametrize("transpose", [True, False]) + def test_parr_add_sub_td64_nat(self, box_with_array, transpose): + # GH#23320 special handling for timedelta64("NaT") + pi = period_range("1994-04-01", periods=9, freq="19D") + other = np.timedelta64("NaT") + expected = PeriodIndex(["NaT"] * 9, freq="19D") + + obj = tm.box_expected(pi, box_with_array, transpose=transpose) + expected = tm.box_expected(expected, box_with_array, transpose=transpose) + + result = obj + other + tm.assert_equal(result, expected) + result = other + obj + tm.assert_equal(result, expected) + result = obj - other + tm.assert_equal(result, expected) + msg = r"cannot subtract .* from .*" + with pytest.raises(TypeError, match=msg): + other - obj + + @pytest.mark.parametrize( + "other", + [ + np.array(["NaT"] * 9, dtype="m8[ns]"), + 
TimedeltaArray._from_sequence(["NaT"] * 9, dtype="m8[ns]"), + ], + ) + def test_parr_add_sub_tdt64_nat_array(self, box_with_array, other): + pi = period_range("1994-04-01", periods=9, freq="19D") + expected = PeriodIndex(["NaT"] * 9, freq="19D") + + obj = tm.box_expected(pi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = obj + other + tm.assert_equal(result, expected) + result = other + obj + tm.assert_equal(result, expected) + result = obj - other + tm.assert_equal(result, expected) + msg = r"cannot subtract .* from .*" + with pytest.raises(TypeError, match=msg): + other - obj + + # some but not *all* NaT + other = other.copy() + other[0] = np.timedelta64(0, "ns") + expected = PeriodIndex([pi[0]] + ["NaT"] * 8, freq="19D") + expected = tm.box_expected(expected, box_with_array) + + result = obj + other + tm.assert_equal(result, expected) + result = other + obj + tm.assert_equal(result, expected) + result = obj - other + tm.assert_equal(result, expected) + with pytest.raises(TypeError, match=msg): + other - obj + + # --------------------------------------------------------------- + # Unsorted + + def test_parr_add_sub_index(self): + # Check that PeriodArray defers to Index on arithmetic ops + pi = period_range("2000-12-31", periods=3) + parr = pi.array + + result = parr - pi + expected = pi - pi + tm.assert_index_equal(result, expected) + + def test_parr_add_sub_object_array(self): + pi = period_range("2000-12-31", periods=3, freq="D") + parr = pi.array + + other = np.array([Timedelta(days=1), pd.offsets.Day(2), 3]) + + with tm.assert_produces_warning(PerformanceWarning): + result = parr + other + + expected = PeriodIndex( + ["2001-01-01", "2001-01-03", "2001-01-05"], freq="D" + )._data.astype(object) + tm.assert_equal(result, expected) + + with tm.assert_produces_warning(PerformanceWarning): + result = parr - other + + expected = PeriodIndex(["2000-12-30"] * 3, freq="D")._data.astype(object) + tm.assert_equal(result, expected) + 
+ def test_period_add_timestamp_raises(self, box_with_array): + # GH#17983 + ts = Timestamp("2017") + per = Period("2017", freq="M") + + arr = pd.Index([per], dtype="Period[M]") + arr = tm.box_expected(arr, box_with_array) + + msg = "cannot add PeriodArray and Timestamp" + with pytest.raises(TypeError, match=msg): + arr + ts + with pytest.raises(TypeError, match=msg): + ts + arr + msg = "cannot add PeriodArray and DatetimeArray" + with pytest.raises(TypeError, match=msg): + arr + Series([ts]) + with pytest.raises(TypeError, match=msg): + Series([ts]) + arr + with pytest.raises(TypeError, match=msg): + arr + pd.Index([ts]) + with pytest.raises(TypeError, match=msg): + pd.Index([ts]) + arr + + if box_with_array is pd.DataFrame: + msg = "cannot add PeriodArray and DatetimeArray" + else: + msg = r"unsupported operand type\(s\) for \+: 'Period' and 'DatetimeArray" + with pytest.raises(TypeError, match=msg): + arr + pd.DataFrame([ts]) + if box_with_array is pd.DataFrame: + msg = "cannot add PeriodArray and DatetimeArray" + else: + msg = r"unsupported operand type\(s\) for \+: 'DatetimeArray' and 'Period'" + with pytest.raises(TypeError, match=msg): + pd.DataFrame([ts]) + arr + + +class TestPeriodSeriesArithmetic: + def test_parr_add_timedeltalike_scalar(self, three_days, box_with_array): + # GH#13043 + ser = Series( + [Period("2015-01-01", freq="D"), Period("2015-01-02", freq="D")], + name="xxx", + ) + assert ser.dtype == "Period[D]" + + expected = Series( + [Period("2015-01-04", freq="D"), Period("2015-01-05", freq="D")], + name="xxx", + ) + + obj = tm.box_expected(ser, box_with_array) + if box_with_array is pd.DataFrame: + assert (obj.dtypes == "Period[D]").all() + + expected = tm.box_expected(expected, box_with_array) + + result = obj + three_days + tm.assert_equal(result, expected) + + result = three_days + obj + tm.assert_equal(result, expected) + + def test_ops_series_period(self): + # GH#13043 + ser = Series( + [Period("2015-01-01", freq="D"), Period("2015-01-02", 
freq="D")], + name="xxx", + ) + assert ser.dtype == "Period[D]" + + per = Period("2015-01-10", freq="D") + off = per.freq + # dtype will be object because of original dtype + expected = Series([9 * off, 8 * off], name="xxx", dtype=object) + tm.assert_series_equal(per - ser, expected) + tm.assert_series_equal(ser - per, -1 * expected) + + s2 = Series( + [Period("2015-01-05", freq="D"), Period("2015-01-04", freq="D")], + name="xxx", + ) + assert s2.dtype == "Period[D]" + + expected = Series([4 * off, 2 * off], name="xxx", dtype=object) + tm.assert_series_equal(s2 - ser, expected) + tm.assert_series_equal(ser - s2, -1 * expected) + + +class TestPeriodIndexSeriesMethods: + """Test PeriodIndex and Period Series Ops consistency""" + + def _check(self, values, func, expected): + idx = PeriodIndex(values) + result = func(idx) + tm.assert_equal(result, expected) + + ser = Series(values) + result = func(ser) + + exp = Series(expected, name=values.name) + tm.assert_series_equal(result, exp) + + def test_pi_ops(self): + idx = PeriodIndex( + ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" + ) + + expected = PeriodIndex( + ["2011-03", "2011-04", "2011-05", "2011-06"], freq="M", name="idx" + ) + + self._check(idx, lambda x: x + 2, expected) + self._check(idx, lambda x: 2 + x, expected) + + self._check(idx + 2, lambda x: x - 2, idx) + + result = idx - Period("2011-01", freq="M") + off = idx.freq + exp = pd.Index([0 * off, 1 * off, 2 * off, 3 * off], name="idx") + tm.assert_index_equal(result, exp) + + result = Period("2011-01", freq="M") - idx + exp = pd.Index([0 * off, -1 * off, -2 * off, -3 * off], name="idx") + tm.assert_index_equal(result, exp) + + @pytest.mark.parametrize("ng", ["str", 1.5]) + @pytest.mark.parametrize( + "func", + [ + lambda obj, ng: obj + ng, + lambda obj, ng: ng + obj, + lambda obj, ng: obj - ng, + lambda obj, ng: ng - obj, + lambda obj, ng: np.add(obj, ng), + lambda obj, ng: np.add(ng, obj), + lambda obj, ng: np.subtract(obj, ng), + 
lambda obj, ng: np.subtract(ng, obj), + ], + ) + def test_parr_ops_errors(self, ng, func, box_with_array): + idx = PeriodIndex( + ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" + ) + obj = tm.box_expected(idx, box_with_array) + msg = "|".join( + [ + r"unsupported operand type\(s\)", + "can only concatenate", + r"must be str", + "object to str implicitly", + ] + ) + + with pytest.raises(TypeError, match=msg): + func(obj, ng) + + def test_pi_ops_nat(self): + idx = PeriodIndex( + ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx" + ) + expected = PeriodIndex( + ["2011-03", "2011-04", "NaT", "2011-06"], freq="M", name="idx" + ) + + self._check(idx, lambda x: x + 2, expected) + self._check(idx, lambda x: 2 + x, expected) + self._check(idx, lambda x: np.add(x, 2), expected) + + self._check(idx + 2, lambda x: x - 2, idx) + self._check(idx + 2, lambda x: np.subtract(x, 2), idx) + + # freq with mult + idx = PeriodIndex( + ["2011-01", "2011-02", "NaT", "2011-04"], freq="2M", name="idx" + ) + expected = PeriodIndex( + ["2011-07", "2011-08", "NaT", "2011-10"], freq="2M", name="idx" + ) + + self._check(idx, lambda x: x + 3, expected) + self._check(idx, lambda x: 3 + x, expected) + self._check(idx, lambda x: np.add(x, 3), expected) + + self._check(idx + 3, lambda x: x - 3, idx) + self._check(idx + 3, lambda x: np.subtract(x, 3), idx) + + def test_pi_ops_array_int(self): + idx = PeriodIndex( + ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx" + ) + f = lambda x: x + np.array([1, 2, 3, 4]) + exp = PeriodIndex( + ["2011-02", "2011-04", "NaT", "2011-08"], freq="M", name="idx" + ) + self._check(idx, f, exp) + + f = lambda x: np.add(x, np.array([4, -1, 1, 2])) + exp = PeriodIndex( + ["2011-05", "2011-01", "NaT", "2011-06"], freq="M", name="idx" + ) + self._check(idx, f, exp) + + f = lambda x: x - np.array([1, 2, 3, 4]) + exp = PeriodIndex( + ["2010-12", "2010-12", "NaT", "2010-12"], freq="M", name="idx" + ) + self._check(idx, f, exp) + + f 
= lambda x: np.subtract(x, np.array([3, 2, 3, -2])) + exp = PeriodIndex( + ["2010-10", "2010-12", "NaT", "2011-06"], freq="M", name="idx" + ) + self._check(idx, f, exp) + + def test_pi_ops_offset(self): + idx = PeriodIndex( + ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"], + freq="D", + name="idx", + ) + f = lambda x: x + pd.offsets.Day() + exp = PeriodIndex( + ["2011-01-02", "2011-02-02", "2011-03-02", "2011-04-02"], + freq="D", + name="idx", + ) + self._check(idx, f, exp) + + f = lambda x: x + pd.offsets.Day(2) + exp = PeriodIndex( + ["2011-01-03", "2011-02-03", "2011-03-03", "2011-04-03"], + freq="D", + name="idx", + ) + self._check(idx, f, exp) + + f = lambda x: x - pd.offsets.Day(2) + exp = PeriodIndex( + ["2010-12-30", "2011-01-30", "2011-02-27", "2011-03-30"], + freq="D", + name="idx", + ) + self._check(idx, f, exp) + + def test_pi_offset_errors(self): + idx = PeriodIndex( + ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"], + freq="D", + name="idx", + ) + ser = Series(idx) + + msg = ( + "Cannot add/subtract timedelta-like from PeriodArray that is not " + "an integer multiple of the PeriodArray's freq" + ) + for obj in [idx, ser]: + with pytest.raises(IncompatibleFrequency, match=msg): + obj + pd.offsets.Hour(2) + + with pytest.raises(IncompatibleFrequency, match=msg): + pd.offsets.Hour(2) + obj + + with pytest.raises(IncompatibleFrequency, match=msg): + obj - pd.offsets.Hour(2) + + def test_pi_sub_period(self): + # GH#13071 + idx = PeriodIndex( + ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" + ) + + result = idx - Period("2012-01", freq="M") + off = idx.freq + exp = pd.Index([-12 * off, -11 * off, -10 * off, -9 * off], name="idx") + tm.assert_index_equal(result, exp) + + result = np.subtract(idx, Period("2012-01", freq="M")) + tm.assert_index_equal(result, exp) + + result = Period("2012-01", freq="M") - idx + exp = pd.Index([12 * off, 11 * off, 10 * off, 9 * off], name="idx") + tm.assert_index_equal(result, exp) + + 
result = np.subtract(Period("2012-01", freq="M"), idx) + tm.assert_index_equal(result, exp) + + exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") + result = idx - Period("NaT", freq="M") + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + result = Period("NaT", freq="M") - idx + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + def test_pi_sub_pdnat(self): + # GH#13071, GH#19389 + idx = PeriodIndex( + ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx" + ) + exp = TimedeltaIndex([pd.NaT] * 4, name="idx") + tm.assert_index_equal(pd.NaT - idx, exp) + tm.assert_index_equal(idx - pd.NaT, exp) + + def test_pi_sub_period_nat(self): + # GH#13071 + idx = PeriodIndex( + ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx" + ) + + result = idx - Period("2012-01", freq="M") + off = idx.freq + exp = pd.Index([-12 * off, pd.NaT, -10 * off, -9 * off], name="idx") + tm.assert_index_equal(result, exp) + + result = Period("2012-01", freq="M") - idx + exp = pd.Index([12 * off, pd.NaT, 10 * off, 9 * off], name="idx") + tm.assert_index_equal(result, exp) + + exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") + tm.assert_index_equal(idx - Period("NaT", freq="M"), exp) + tm.assert_index_equal(Period("NaT", freq="M") - idx, exp) diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_timedelta64.py b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_timedelta64.py new file mode 100644 index 0000000000000000000000000000000000000000..d02e827d435cf16c806b5130f5949143f51c15e3 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/arithmetic/test_timedelta64.py @@ -0,0 +1,2179 @@ +# Arithmetic tests for DataFrame/Series/Index/Array classes that should +# behave identically. 
+from datetime import ( + datetime, + timedelta, +) + +import numpy as np +import pytest + +from pandas.errors import ( + OutOfBoundsDatetime, + PerformanceWarning, +) + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + NaT, + Series, + Timedelta, + TimedeltaIndex, + Timestamp, + offsets, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import NumpyExtensionArray +from pandas.tests.arithmetic.common import ( + assert_invalid_addsub_type, + assert_invalid_comparison, + get_upcast_box, +) + + +def assert_dtype(obj, expected_dtype): + """ + Helper to check the dtype for a Series, Index, or single-column DataFrame. + """ + dtype = tm.get_dtype(obj) + + assert dtype == expected_dtype + + +def get_expected_name(box, names): + if box is DataFrame: + # Since we are operating with a DataFrame and a non-DataFrame, + # the non-DataFrame is cast to Series and its name ignored. + exname = names[0] + elif box in [tm.to_array, pd.array]: + exname = names[1] + else: + exname = names[2] + return exname + + +# ------------------------------------------------------------------ +# Timedelta64[ns] dtype Comparisons + + +class TestTimedelta64ArrayLikeComparisons: + # Comparison tests for timedelta64[ns] vectors fully parametrized over + # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison + # tests will eventually end up here. 
+ + def test_compare_timedelta64_zerodim(self, box_with_array): + # GH#26689 should unbox when comparing with zerodim array + box = box_with_array + xbox = box_with_array if box_with_array not in [Index, pd.array] else np.ndarray + + tdi = timedelta_range("2h", periods=4) + other = np.array(tdi.to_numpy()[0]) + + tdi = tm.box_expected(tdi, box) + res = tdi <= other + expected = np.array([True, False, False, False]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(res, expected) + + @pytest.mark.parametrize( + "td_scalar", + [ + timedelta(days=1), + Timedelta(days=1), + Timedelta(days=1).to_timedelta64(), + offsets.Hour(24), + ], + ) + def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar): + # regression test for GH#5963 + box = box_with_array + xbox = box if box not in [Index, pd.array] else np.ndarray + + ser = Series([timedelta(days=1), timedelta(days=2)]) + ser = tm.box_expected(ser, box) + actual = ser > td_scalar + expected = Series([False, True]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(actual, expected) + + @pytest.mark.parametrize( + "invalid", + [ + 345600000000000, + "a", + Timestamp("2021-01-01"), + Timestamp("2021-01-01").now("UTC"), + Timestamp("2021-01-01").now().to_datetime64(), + Timestamp("2021-01-01").now().to_pydatetime(), + Timestamp("2021-01-01").date(), + np.array(4), # zero-dim mismatched dtype + ], + ) + def test_td64_comparisons_invalid(self, box_with_array, invalid): + # GH#13624 for str + box = box_with_array + + rng = timedelta_range("1 days", periods=10) + obj = tm.box_expected(rng, box) + + assert_invalid_comparison(obj, invalid, box) + + @pytest.mark.parametrize( + "other", + [ + list(range(10)), + np.arange(10), + np.arange(10).astype(np.float32), + np.arange(10).astype(object), + pd.date_range("1970-01-01", periods=10, tz="UTC").array, + np.array(pd.date_range("1970-01-01", periods=10)), + list(pd.date_range("1970-01-01", periods=10)), + pd.date_range("1970-01-01", 
periods=10).astype(object), + pd.period_range("1971-01-01", freq="D", periods=10).array, + pd.period_range("1971-01-01", freq="D", periods=10).astype(object), + ], + ) + def test_td64arr_cmp_arraylike_invalid(self, other, box_with_array): + # We don't parametrize this over box_with_array because listlike + # other plays poorly with assert_invalid_comparison reversed checks + + rng = timedelta_range("1 days", periods=10)._data + rng = tm.box_expected(rng, box_with_array) + assert_invalid_comparison(rng, other, box_with_array) + + def test_td64arr_cmp_mixed_invalid(self): + rng = timedelta_range("1 days", periods=5)._data + other = np.array([0, 1, 2, rng[3], Timestamp("2021-01-01")]) + + result = rng == other + expected = np.array([False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = rng != other + tm.assert_numpy_array_equal(result, ~expected) + + msg = "Invalid comparison between|Cannot compare type|not supported between" + with pytest.raises(TypeError, match=msg): + rng < other + with pytest.raises(TypeError, match=msg): + rng > other + with pytest.raises(TypeError, match=msg): + rng <= other + with pytest.raises(TypeError, match=msg): + rng >= other + + +class TestTimedelta64ArrayComparisons: + # TODO: All of these need to be parametrized over box + + @pytest.mark.parametrize("dtype", [None, object]) + def test_comp_nat(self, dtype): + left = TimedeltaIndex([Timedelta("1 days"), NaT, Timedelta("3 days")]) + right = TimedeltaIndex([NaT, NaT, Timedelta("3 days")]) + + lhs, rhs = left, right + if dtype is object: + lhs, rhs = left.astype(object), right.astype(object) + + result = rhs == lhs + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = rhs != lhs + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(lhs == NaT, expected) + tm.assert_numpy_array_equal(NaT 
== rhs, expected) + + expected = np.array([True, True, True]) + tm.assert_numpy_array_equal(lhs != NaT, expected) + tm.assert_numpy_array_equal(NaT != lhs, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(lhs < NaT, expected) + tm.assert_numpy_array_equal(NaT > lhs, expected) + + @pytest.mark.parametrize( + "idx2", + [ + TimedeltaIndex( + ["2 day", "2 day", NaT, NaT, "1 day 00:00:02", "5 days 00:00:03"] + ), + np.array( + [ + np.timedelta64(2, "D"), + np.timedelta64(2, "D"), + np.timedelta64("nat"), + np.timedelta64("nat"), + np.timedelta64(1, "D") + np.timedelta64(2, "s"), + np.timedelta64(5, "D") + np.timedelta64(3, "s"), + ] + ), + ], + ) + def test_comparisons_nat(self, idx2): + idx1 = TimedeltaIndex( + [ + "1 day", + NaT, + "1 day 00:00:01", + NaT, + "1 day 00:00:01", + "5 day 00:00:03", + ] + ) + # Check pd.NaT is handles as the same as np.nan + result = idx1 < idx2 + expected = np.array([True, False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = idx2 > idx1 + expected = np.array([True, False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 <= idx2 + expected = np.array([True, False, False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx2 >= idx1 + expected = np.array([True, False, False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 == idx2 + expected = np.array([False, False, False, False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 != idx2 + expected = np.array([True, True, True, True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + # TODO: better name + def test_comparisons_coverage(self): + rng = timedelta_range("1 days", periods=10) + + result = rng < rng[3] + expected = np.array([True, True, True] + [False] * 7) + tm.assert_numpy_array_equal(result, expected) + + result = rng == list(rng) + exp = 
rng == rng + tm.assert_numpy_array_equal(result, exp) + + +# ------------------------------------------------------------------ +# Timedelta64[ns] dtype Arithmetic Operations + + +class TestTimedelta64ArithmeticUnsorted: + # Tests moved from type-specific test files but not + # yet sorted/parametrized/de-duplicated + + def test_ufunc_coercions(self): + # normal ops are also tested in tseries/test_timedeltas.py + idx = TimedeltaIndex(["2h", "4h", "6h", "8h", "10h"], freq="2h", name="x") + + for result in [idx * 2, np.multiply(idx, 2)]: + assert isinstance(result, TimedeltaIndex) + exp = TimedeltaIndex(["4h", "8h", "12h", "16h", "20h"], freq="4h", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "4h" + + for result in [idx / 2, np.divide(idx, 2)]: + assert isinstance(result, TimedeltaIndex) + exp = TimedeltaIndex(["1h", "2h", "3h", "4h", "5h"], freq="h", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "h" + + for result in [-idx, np.negative(idx)]: + assert isinstance(result, TimedeltaIndex) + exp = TimedeltaIndex( + ["-2h", "-4h", "-6h", "-8h", "-10h"], freq="-2h", name="x" + ) + tm.assert_index_equal(result, exp) + assert result.freq == "-2h" + + idx = TimedeltaIndex(["-2h", "-1h", "0h", "1h", "2h"], freq="h", name="x") + for result in [abs(idx), np.absolute(idx)]: + assert isinstance(result, TimedeltaIndex) + exp = TimedeltaIndex(["2h", "1h", "0h", "1h", "2h"], freq=None, name="x") + tm.assert_index_equal(result, exp) + assert result.freq is None + + def test_subtraction_ops(self): + # with datetimes/timedelta and tdi/dti + tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo") + dti = pd.date_range("20130101", periods=3, name="bar") + td = Timedelta("1 days") + dt = Timestamp("20130101") + + msg = "cannot subtract a datelike from a TimedeltaArray" + with pytest.raises(TypeError, match=msg): + tdi - dt + with pytest.raises(TypeError, match=msg): + tdi - dti + + msg = r"unsupported operand type\(s\) for -" + with 
pytest.raises(TypeError, match=msg): + td - dt + + msg = "(bad|unsupported) operand type for unary" + with pytest.raises(TypeError, match=msg): + td - dti + + result = dt - dti + expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"], name="bar") + tm.assert_index_equal(result, expected) + + result = dti - dt + expected = TimedeltaIndex(["0 days", "1 days", "2 days"], name="bar") + tm.assert_index_equal(result, expected) + + result = tdi - td + expected = TimedeltaIndex(["0 days", NaT, "1 days"], name="foo") + tm.assert_index_equal(result, expected) + + result = td - tdi + expected = TimedeltaIndex(["0 days", NaT, "-1 days"], name="foo") + tm.assert_index_equal(result, expected) + + result = dti - td + expected = DatetimeIndex( + ["20121231", "20130101", "20130102"], dtype="M8[ns]", freq="D", name="bar" + ) + tm.assert_index_equal(result, expected) + + result = dt - tdi + expected = DatetimeIndex( + ["20121231", NaT, "20121230"], dtype="M8[ns]", name="foo" + ) + tm.assert_index_equal(result, expected) + + def test_subtraction_ops_with_tz(self, box_with_array): + # check that dt/dti subtraction ops with tz are validated + dti = pd.date_range("20130101", periods=3) + dti = tm.box_expected(dti, box_with_array) + ts = Timestamp("20130101") + dt = ts.to_pydatetime() + dti_tz = pd.date_range("20130101", periods=3).tz_localize("US/Eastern") + dti_tz = tm.box_expected(dti_tz, box_with_array) + ts_tz = Timestamp("20130101").tz_localize("US/Eastern") + ts_tz2 = Timestamp("20130101").tz_localize("CET") + dt_tz = ts_tz.to_pydatetime() + td = Timedelta("1 days") + + def _check(result, expected): + assert result == expected + assert isinstance(result, Timedelta) + + # scalars + result = ts - ts + expected = Timedelta("0 days") + _check(result, expected) + + result = dt_tz - ts_tz + expected = Timedelta("0 days") + _check(result, expected) + + result = ts_tz - dt_tz + expected = Timedelta("0 days") + _check(result, expected) + + # tz mismatches + msg = "Cannot subtract 
tz-naive and tz-aware datetime-like objects." + with pytest.raises(TypeError, match=msg): + dt_tz - ts + msg = "can't subtract offset-naive and offset-aware datetimes" + with pytest.raises(TypeError, match=msg): + dt_tz - dt + msg = "can't subtract offset-naive and offset-aware datetimes" + with pytest.raises(TypeError, match=msg): + dt - dt_tz + msg = "Cannot subtract tz-naive and tz-aware datetime-like objects." + with pytest.raises(TypeError, match=msg): + ts - dt_tz + with pytest.raises(TypeError, match=msg): + ts_tz2 - ts + with pytest.raises(TypeError, match=msg): + ts_tz2 - dt + + msg = "Cannot subtract tz-naive and tz-aware" + # with dti + with pytest.raises(TypeError, match=msg): + dti - ts_tz + with pytest.raises(TypeError, match=msg): + dti_tz - ts + + result = dti_tz - dt_tz + expected = TimedeltaIndex(["0 days", "1 days", "2 days"]) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + result = dt_tz - dti_tz + expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"]) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + result = dti_tz - ts_tz + expected = TimedeltaIndex(["0 days", "1 days", "2 days"]) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + result = ts_tz - dti_tz + expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"]) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + result = td - td + expected = Timedelta("0 days") + _check(result, expected) + + result = dti_tz - td + expected = DatetimeIndex( + ["20121231", "20130101", "20130102"], tz="US/Eastern" + ).as_unit("ns") + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + def test_dti_tdi_numeric_ops(self): + # These are normally union/diff set-like ops + tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo") + dti = pd.date_range("20130101", periods=3, name="bar") + + 
result = tdi - tdi + expected = TimedeltaIndex(["0 days", NaT, "0 days"], name="foo") + tm.assert_index_equal(result, expected) + + result = tdi + tdi + expected = TimedeltaIndex(["2 days", NaT, "4 days"], name="foo") + tm.assert_index_equal(result, expected) + + result = dti - tdi # name will be reset + expected = DatetimeIndex(["20121231", NaT, "20130101"], dtype="M8[ns]") + tm.assert_index_equal(result, expected) + + def test_addition_ops(self): + # with datetimes/timedelta and tdi/dti + tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo") + dti = pd.date_range("20130101", periods=3, name="bar") + td = Timedelta("1 days") + dt = Timestamp("20130101") + + result = tdi + dt + expected = DatetimeIndex( + ["20130102", NaT, "20130103"], dtype="M8[ns]", name="foo" + ) + tm.assert_index_equal(result, expected) + + result = dt + tdi + expected = DatetimeIndex( + ["20130102", NaT, "20130103"], dtype="M8[ns]", name="foo" + ) + tm.assert_index_equal(result, expected) + + result = td + tdi + expected = TimedeltaIndex(["2 days", NaT, "3 days"], name="foo") + tm.assert_index_equal(result, expected) + + result = tdi + td + expected = TimedeltaIndex(["2 days", NaT, "3 days"], name="foo") + tm.assert_index_equal(result, expected) + + # unequal length + msg = "cannot add indices of unequal length" + with pytest.raises(ValueError, match=msg): + tdi + dti[0:1] + with pytest.raises(ValueError, match=msg): + tdi[0:1] + dti + + # random indexes + msg = "Addition/subtraction of integers and integer-arrays" + with pytest.raises(TypeError, match=msg): + tdi + Index([1, 2, 3], dtype=np.int64) + + # this is a union! 
+ # FIXME: don't leave commented-out + # pytest.raises(TypeError, lambda : Index([1,2,3]) + tdi) + + result = tdi + dti # name will be reset + expected = DatetimeIndex(["20130102", NaT, "20130105"], dtype="M8[ns]") + tm.assert_index_equal(result, expected) + + result = dti + tdi # name will be reset + expected = DatetimeIndex(["20130102", NaT, "20130105"], dtype="M8[ns]") + tm.assert_index_equal(result, expected) + + result = dt + td + expected = Timestamp("20130102") + assert result == expected + + result = td + dt + expected = Timestamp("20130102") + assert result == expected + + # TODO: Needs more informative name, probably split up into + # more targeted tests + @pytest.mark.parametrize("freq", ["D", "B"]) + def test_timedelta(self, freq): + index = pd.date_range("1/1/2000", periods=50, freq=freq) + + shifted = index + timedelta(1) + back = shifted + timedelta(-1) + back = back._with_freq("infer") + tm.assert_index_equal(index, back) + + if freq == "D": + expected = pd.tseries.offsets.Day(1) + assert index.freq == expected + assert shifted.freq == expected + assert back.freq == expected + else: # freq == 'B' + assert index.freq == pd.tseries.offsets.BusinessDay(1) + assert shifted.freq is None + assert back.freq == pd.tseries.offsets.BusinessDay(1) + + result = index - timedelta(1) + expected = index + timedelta(-1) + tm.assert_index_equal(result, expected) + + def test_timedelta_tick_arithmetic(self): + # GH#4134, buggy with timedeltas + rng = pd.date_range("2013", "2014") + s = Series(rng) + result1 = rng - offsets.Hour(1) + result2 = DatetimeIndex(s - np.timedelta64(100000000)) + result3 = rng - np.timedelta64(100000000) + result4 = DatetimeIndex(s - offsets.Hour(1)) + + assert result1.freq == rng.freq + result1 = result1._with_freq(None) + tm.assert_index_equal(result1, result4) + + assert result3.freq == rng.freq + result3 = result3._with_freq(None) + tm.assert_index_equal(result2, result3) + + def test_tda_add_sub_index(self): + # Check that 
TimedeltaArray defers to Index on arithmetic ops + tdi = TimedeltaIndex(["1 days", NaT, "2 days"]) + tda = tdi.array + + dti = pd.date_range("1999-12-31", periods=3, freq="D") + + result = tda + dti + expected = tdi + dti + tm.assert_index_equal(result, expected) + + result = tda + tdi + expected = tdi + tdi + tm.assert_index_equal(result, expected) + + result = tda - tdi + expected = tdi - tdi + tm.assert_index_equal(result, expected) + + def test_tda_add_dt64_object_array(self, box_with_array, tz_naive_fixture): + # Result should be cast back to DatetimeArray + box = box_with_array + + dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture) + dti = dti._with_freq(None) + tdi = dti - dti + + obj = tm.box_expected(tdi, box) + other = tm.box_expected(dti, box) + + with tm.assert_produces_warning(PerformanceWarning): + result = obj + other.astype(object) + tm.assert_equal(result, other.astype(object)) + + # ------------------------------------------------------------- + # Binary operations TimedeltaIndex and timedelta-like + + def test_tdi_iadd_timedeltalike(self, two_hours, box_with_array): + # only test adding/sub offsets as + is now numeric + rng = timedelta_range("1 days", "10 days") + expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D") + + rng = tm.box_expected(rng, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + orig_rng = rng + rng += two_hours + tm.assert_equal(rng, expected) + if box_with_array is not Index: + # Check that operation is actually inplace + tm.assert_equal(orig_rng, expected) + + def test_tdi_isub_timedeltalike(self, two_hours, box_with_array): + # only test adding/sub offsets as - is now numeric + rng = timedelta_range("1 days", "10 days") + expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00") + + rng = tm.box_expected(rng, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + orig_rng = rng + rng -= two_hours + tm.assert_equal(rng, expected) + if 
box_with_array is not Index: + # Check that operation is actually inplace + tm.assert_equal(orig_rng, expected) + + # ------------------------------------------------------------- + + def test_tdi_ops_attributes(self): + rng = timedelta_range("2 days", periods=5, freq="2D", name="x") + + result = rng + 1 * rng.freq + exp = timedelta_range("4 days", periods=5, freq="2D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "2D" + + result = rng - 2 * rng.freq + exp = timedelta_range("-2 days", periods=5, freq="2D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "2D" + + result = rng * 2 + exp = timedelta_range("4 days", periods=5, freq="4D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "4D" + + result = rng / 2 + exp = timedelta_range("1 days", periods=5, freq="D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "D" + + result = -rng + exp = timedelta_range("-2 days", periods=5, freq="-2D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "-2D" + + rng = timedelta_range("-2 days", periods=5, freq="D", name="x") + + result = abs(rng) + exp = TimedeltaIndex( + ["2 days", "1 days", "0 days", "1 days", "2 days"], name="x" + ) + tm.assert_index_equal(result, exp) + assert result.freq is None + + +class TestAddSubNaTMasking: + # TODO: parametrize over boxes + + @pytest.mark.parametrize("str_ts", ["1950-01-01", "1980-01-01"]) + def test_tdarr_add_timestamp_nat_masking(self, box_with_array, str_ts): + # GH#17991 checking for overflow-masking with NaT + tdinat = pd.to_timedelta(["24658 days 11:15:00", "NaT"]) + tdobj = tm.box_expected(tdinat, box_with_array) + + ts = Timestamp(str_ts) + ts_variants = [ + ts, + ts.to_pydatetime(), + ts.to_datetime64().astype("datetime64[ns]"), + ts.to_datetime64().astype("datetime64[D]"), + ] + + for variant in ts_variants: + res = tdobj + variant + if box_with_array is DataFrame: + assert res.iloc[1, 1] is NaT + else: + assert 
res[1] is NaT + + def test_tdi_add_overflow(self): + # See GH#14068 + # preliminary test scalar analogue of vectorized tests below + # TODO: Make raised error message more informative and test + with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"): + pd.to_timedelta(106580, "D") + Timestamp("2000") + with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"): + Timestamp("2000") + pd.to_timedelta(106580, "D") + + _NaT = NaT._value + 1 + msg = "Overflow in int64 addition" + with pytest.raises(OverflowError, match=msg): + pd.to_timedelta([106580], "D") + Timestamp("2000") + with pytest.raises(OverflowError, match=msg): + Timestamp("2000") + pd.to_timedelta([106580], "D") + with pytest.raises(OverflowError, match=msg): + pd.to_timedelta([_NaT]) - Timedelta("1 days") + with pytest.raises(OverflowError, match=msg): + pd.to_timedelta(["5 days", _NaT]) - Timedelta("1 days") + with pytest.raises(OverflowError, match=msg): + ( + pd.to_timedelta([_NaT, "5 days", "1 hours"]) + - pd.to_timedelta(["7 seconds", _NaT, "4 hours"]) + ) + + # These should not overflow! 
+ exp = TimedeltaIndex([NaT]) + result = pd.to_timedelta([NaT]) - Timedelta("1 days") + tm.assert_index_equal(result, exp) + + exp = TimedeltaIndex(["4 days", NaT]) + result = pd.to_timedelta(["5 days", NaT]) - Timedelta("1 days") + tm.assert_index_equal(result, exp) + + exp = TimedeltaIndex([NaT, NaT, "5 hours"]) + result = pd.to_timedelta([NaT, "5 days", "1 hours"]) + pd.to_timedelta( + ["7 seconds", NaT, "4 hours"] + ) + tm.assert_index_equal(result, exp) + + +class TestTimedeltaArraylikeAddSubOps: + # Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__ + + def test_sub_nat_retain_unit(self): + ser = pd.to_timedelta(Series(["00:00:01"])).astype("m8[s]") + + result = ser - NaT + expected = Series([NaT], dtype="m8[s]") + tm.assert_series_equal(result, expected) + + # TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs + # parametrization+de-duplication + def test_timedelta_ops_with_missing_values(self): + # setup + s1 = pd.to_timedelta(Series(["00:00:01"])) + s2 = pd.to_timedelta(Series(["00:00:02"])) + + sn = pd.to_timedelta(Series([NaT], dtype="m8[ns]")) + + df1 = DataFrame(["00:00:01"]).apply(pd.to_timedelta) + df2 = DataFrame(["00:00:02"]).apply(pd.to_timedelta) + + dfn = DataFrame([NaT._value]).apply(pd.to_timedelta) + + scalar1 = pd.to_timedelta("00:00:01") + scalar2 = pd.to_timedelta("00:00:02") + timedelta_NaT = pd.to_timedelta("NaT") + + actual = scalar1 + scalar1 + assert actual == scalar2 + actual = scalar2 - scalar1 + assert actual == scalar1 + + actual = s1 + s1 + tm.assert_series_equal(actual, s2) + actual = s2 - s1 + tm.assert_series_equal(actual, s1) + + actual = s1 + scalar1 + tm.assert_series_equal(actual, s2) + actual = scalar1 + s1 + tm.assert_series_equal(actual, s2) + actual = s2 - scalar1 + tm.assert_series_equal(actual, s1) + actual = -scalar1 + s2 + tm.assert_series_equal(actual, s1) + + actual = s1 + timedelta_NaT + tm.assert_series_equal(actual, sn) + actual = timedelta_NaT + s1 + tm.assert_series_equal(actual, 
sn) + actual = s1 - timedelta_NaT + tm.assert_series_equal(actual, sn) + actual = -timedelta_NaT + s1 + tm.assert_series_equal(actual, sn) + + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + s1 + np.nan + with pytest.raises(TypeError, match=msg): + np.nan + s1 + with pytest.raises(TypeError, match=msg): + s1 - np.nan + with pytest.raises(TypeError, match=msg): + -np.nan + s1 + + actual = s1 + NaT + tm.assert_series_equal(actual, sn) + actual = s2 - NaT + tm.assert_series_equal(actual, sn) + + actual = s1 + df1 + tm.assert_frame_equal(actual, df2) + actual = s2 - df1 + tm.assert_frame_equal(actual, df1) + actual = df1 + s1 + tm.assert_frame_equal(actual, df2) + actual = df2 - s1 + tm.assert_frame_equal(actual, df1) + + actual = df1 + df1 + tm.assert_frame_equal(actual, df2) + actual = df2 - df1 + tm.assert_frame_equal(actual, df1) + + actual = df1 + scalar1 + tm.assert_frame_equal(actual, df2) + actual = df2 - scalar1 + tm.assert_frame_equal(actual, df1) + + actual = df1 + timedelta_NaT + tm.assert_frame_equal(actual, dfn) + actual = df1 - timedelta_NaT + tm.assert_frame_equal(actual, dfn) + + msg = "cannot subtract a datelike from|unsupported operand type" + with pytest.raises(TypeError, match=msg): + df1 + np.nan + with pytest.raises(TypeError, match=msg): + df1 - np.nan + + actual = df1 + NaT # NaT is datetime, not timedelta + tm.assert_frame_equal(actual, dfn) + actual = df1 - NaT + tm.assert_frame_equal(actual, dfn) + + # TODO: moved from tests.series.test_operators, needs splitting, cleanup, + # de-duplication, box-parametrization... 
+ def test_operators_timedelta64(self): + # series ops + v1 = pd.date_range("2012-1-1", periods=3, freq="D") + v2 = pd.date_range("2012-1-2", periods=3, freq="D") + rs = Series(v2) - Series(v1) + xp = Series(1e9 * 3600 * 24, rs.index).astype("int64").astype("timedelta64[ns]") + tm.assert_series_equal(rs, xp) + assert rs.dtype == "timedelta64[ns]" + + df = DataFrame({"A": v1}) + td = Series([timedelta(days=i) for i in range(3)]) + assert td.dtype == "timedelta64[ns]" + + # series on the rhs + result = df["A"] - df["A"].shift() + assert result.dtype == "timedelta64[ns]" + + result = df["A"] + td + assert result.dtype == "M8[ns]" + + # scalar Timestamp on rhs + maxa = df["A"].max() + assert isinstance(maxa, Timestamp) + + resultb = df["A"] - df["A"].max() + assert resultb.dtype == "timedelta64[ns]" + + # timestamp on lhs + result = resultb + df["A"] + values = [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")] + expected = Series(values, dtype="M8[ns]", name="A") + tm.assert_series_equal(result, expected) + + # datetimes on rhs + result = df["A"] - datetime(2001, 1, 1) + expected = Series([timedelta(days=4017 + i) for i in range(3)], name="A") + tm.assert_series_equal(result, expected) + assert result.dtype == "m8[ns]" + + d = datetime(2001, 1, 1, 3, 4) + resulta = df["A"] - d + assert resulta.dtype == "m8[ns]" + + # roundtrip + resultb = resulta + d + tm.assert_series_equal(df["A"], resultb) + + # timedeltas on rhs + td = timedelta(days=1) + resulta = df["A"] + td + resultb = resulta - td + tm.assert_series_equal(resultb, df["A"]) + assert resultb.dtype == "M8[ns]" + + # roundtrip + td = timedelta(minutes=5, seconds=3) + resulta = df["A"] + td + resultb = resulta - td + tm.assert_series_equal(df["A"], resultb) + assert resultb.dtype == "M8[ns]" + + # inplace + value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1)) + rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1)) + assert rs[2] == value + + def test_timedelta64_ops_nat(self): + # 
GH 11349 + timedelta_series = Series([NaT, Timedelta("1s")]) + nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]") + single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]") + + # subtraction + tm.assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta) + tm.assert_series_equal(-NaT + timedelta_series, nat_series_dtype_timedelta) + + tm.assert_series_equal( + timedelta_series - single_nat_dtype_timedelta, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + -single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta + ) + + # addition + tm.assert_series_equal( + nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta + ) + + tm.assert_series_equal( + nat_series_dtype_timedelta + single_nat_dtype_timedelta, + nat_series_dtype_timedelta, + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + nat_series_dtype_timedelta, + nat_series_dtype_timedelta, + ) + + tm.assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta) + tm.assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta) + + tm.assert_series_equal( + timedelta_series + single_nat_dtype_timedelta, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta + ) + + tm.assert_series_equal( + nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta + ) + + tm.assert_series_equal( + nat_series_dtype_timedelta + single_nat_dtype_timedelta, + nat_series_dtype_timedelta, + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + nat_series_dtype_timedelta, + nat_series_dtype_timedelta, + ) + + # multiplication + tm.assert_series_equal( + nat_series_dtype_timedelta * 1.0, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + 1.0 * 
nat_series_dtype_timedelta, nat_series_dtype_timedelta + ) + + tm.assert_series_equal(timedelta_series * 1, timedelta_series) + tm.assert_series_equal(1 * timedelta_series, timedelta_series) + + tm.assert_series_equal(timedelta_series * 1.5, Series([NaT, Timedelta("1.5s")])) + tm.assert_series_equal(1.5 * timedelta_series, Series([NaT, Timedelta("1.5s")])) + + tm.assert_series_equal(timedelta_series * np.nan, nat_series_dtype_timedelta) + tm.assert_series_equal(np.nan * timedelta_series, nat_series_dtype_timedelta) + + # division + tm.assert_series_equal(timedelta_series / 2, Series([NaT, Timedelta("0.5s")])) + tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta("0.5s")])) + tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta) + + # ------------------------------------------------------------- + # Binary operations td64 arraylike and datetime-like + + @pytest.mark.parametrize("cls", [Timestamp, datetime, np.datetime64]) + def test_td64arr_add_sub_datetimelike_scalar( + self, cls, box_with_array, tz_naive_fixture + ): + # GH#11925, GH#29558, GH#23215 + tz = tz_naive_fixture + + dt_scalar = Timestamp("2012-01-01", tz=tz) + if cls is datetime: + ts = dt_scalar.to_pydatetime() + elif cls is np.datetime64: + if tz_naive_fixture is not None: + pytest.skip(f"{cls} doesn support {tz_naive_fixture}") + ts = dt_scalar.to_datetime64() + else: + ts = dt_scalar + + tdi = timedelta_range("1 day", periods=3) + expected = pd.date_range("2012-01-02", periods=3, tz=tz) + + tdarr = tm.box_expected(tdi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + tm.assert_equal(ts + tdarr, expected) + tm.assert_equal(tdarr + ts, expected) + + expected2 = pd.date_range("2011-12-31", periods=3, freq="-1D", tz=tz) + expected2 = tm.box_expected(expected2, box_with_array) + + tm.assert_equal(ts - tdarr, expected2) + tm.assert_equal(ts + (-tdarr), expected2) + + msg = "cannot subtract a datelike" + with pytest.raises(TypeError, 
match=msg): + tdarr - ts + + def test_td64arr_add_datetime64_nat(self, box_with_array): + # GH#23215 + other = np.datetime64("NaT") + + tdi = timedelta_range("1 day", periods=3) + expected = DatetimeIndex(["NaT", "NaT", "NaT"], dtype="M8[ns]") + + tdser = tm.box_expected(tdi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + tm.assert_equal(tdser + other, expected) + tm.assert_equal(other + tdser, expected) + + def test_td64arr_sub_dt64_array(self, box_with_array): + dti = pd.date_range("2016-01-01", periods=3) + tdi = TimedeltaIndex(["-1 Day"] * 3) + dtarr = dti.values + expected = DatetimeIndex(dtarr) - tdi + + tdi = tm.box_expected(tdi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + msg = "cannot subtract a datelike from" + with pytest.raises(TypeError, match=msg): + tdi - dtarr + + # TimedeltaIndex.__rsub__ + result = dtarr - tdi + tm.assert_equal(result, expected) + + def test_td64arr_add_dt64_array(self, box_with_array): + dti = pd.date_range("2016-01-01", periods=3) + tdi = TimedeltaIndex(["-1 Day"] * 3) + dtarr = dti.values + expected = DatetimeIndex(dtarr) + tdi + + tdi = tm.box_expected(tdi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = tdi + dtarr + tm.assert_equal(result, expected) + result = dtarr + tdi + tm.assert_equal(result, expected) + + # ------------------------------------------------------------------ + # Invalid __add__/__sub__ operations + + @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "h"]) + @pytest.mark.parametrize("tdi_freq", [None, "h"]) + def test_td64arr_sub_periodlike( + self, box_with_array, box_with_array2, tdi_freq, pi_freq + ): + # GH#20049 subtracting PeriodIndex should raise TypeError + tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq) + dti = Timestamp("2018-03-07 17:16:40") + tdi + pi = dti.to_period(pi_freq) + per = pi[0] + + tdi = tm.box_expected(tdi, box_with_array) + pi = tm.box_expected(pi, box_with_array2) + msg = 
"cannot subtract|unsupported operand type" + with pytest.raises(TypeError, match=msg): + tdi - pi + + # GH#13078 subtraction of Period scalar not supported + with pytest.raises(TypeError, match=msg): + tdi - per + + @pytest.mark.parametrize( + "other", + [ + # GH#12624 for str case + "a", + # GH#19123 + 1, + 1.5, + np.array(2), + ], + ) + def test_td64arr_addsub_numeric_scalar_invalid(self, box_with_array, other): + # vector-like others are tested in test_td64arr_add_sub_numeric_arr_invalid + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + tdarr = tm.box_expected(tdser, box_with_array) + + assert_invalid_addsub_type(tdarr, other) + + @pytest.mark.parametrize( + "vec", + [ + np.array([1, 2, 3]), + Index([1, 2, 3]), + Series([1, 2, 3]), + DataFrame([[1, 2, 3]]), + ], + ids=lambda x: type(x).__name__, + ) + def test_td64arr_addsub_numeric_arr_invalid( + self, box_with_array, vec, any_real_numpy_dtype + ): + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + tdarr = tm.box_expected(tdser, box_with_array) + + vector = vec.astype(any_real_numpy_dtype) + assert_invalid_addsub_type(tdarr, vector) + + def test_td64arr_add_sub_int(self, box_with_array, one): + # Variants of `one` for #19012, deprecated GH#22535 + rng = timedelta_range("1 days 09:00:00", freq="h", periods=10) + tdarr = tm.box_expected(rng, box_with_array) + + msg = "Addition/subtraction of integers" + assert_invalid_addsub_type(tdarr, one, msg) + + # TODO: get inplace ops into assert_invalid_addsub_type + with pytest.raises(TypeError, match=msg): + tdarr += one + with pytest.raises(TypeError, match=msg): + tdarr -= one + + def test_td64arr_add_sub_integer_array(self, box_with_array): + # GH#19959, deprecated GH#22535 + # GH#22696 for DataFrame case, check that we don't dispatch to numpy + # implementation, which treats int64 as m8[ns] + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = timedelta_range("1 days 09:00:00", freq="h", periods=3) + tdarr = 
tm.box_expected(rng, box) + other = tm.box_expected([4, 3, 2], xbox) + + msg = "Addition/subtraction of integers and integer-arrays" + assert_invalid_addsub_type(tdarr, other, msg) + + def test_td64arr_addsub_integer_array_no_freq(self, box_with_array): + # GH#19959 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + tdi = TimedeltaIndex(["1 Day", "NaT", "3 Hours"]) + tdarr = tm.box_expected(tdi, box) + other = tm.box_expected([14, -1, 16], xbox) + + msg = "Addition/subtraction of integers" + assert_invalid_addsub_type(tdarr, other, msg) + + # ------------------------------------------------------------------ + # Operations with timedelta-like others + + def test_td64arr_add_sub_td64_array(self, box_with_array): + box = box_with_array + dti = pd.date_range("2016-01-01", periods=3) + tdi = dti - dti.shift(1) + tdarr = tdi.values + + expected = 2 * tdi + tdi = tm.box_expected(tdi, box) + expected = tm.box_expected(expected, box) + + result = tdi + tdarr + tm.assert_equal(result, expected) + result = tdarr + tdi + tm.assert_equal(result, expected) + + expected_sub = 0 * tdi + result = tdi - tdarr + tm.assert_equal(result, expected_sub) + result = tdarr - tdi + tm.assert_equal(result, expected_sub) + + def test_td64arr_add_sub_tdi(self, box_with_array, names): + # GH#17250 make sure result dtype is correct + # GH#19043 make sure names are propagated correctly + box = box_with_array + exname = get_expected_name(box, names) + + tdi = TimedeltaIndex(["0 days", "1 day"], name=names[1]) + tdi = np.array(tdi) if box in [tm.to_array, pd.array] else tdi + ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[0]) + expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], name=exname) + + ser = tm.box_expected(ser, box) + expected = tm.box_expected(expected, box) + + result = tdi + ser + tm.assert_equal(result, expected) + assert_dtype(result, "timedelta64[ns]") + + result = ser + tdi + tm.assert_equal(result, expected) + 
assert_dtype(result, "timedelta64[ns]") + + expected = Series( + [Timedelta(hours=-3), Timedelta(days=1, hours=-4)], name=exname + ) + expected = tm.box_expected(expected, box) + + result = tdi - ser + tm.assert_equal(result, expected) + assert_dtype(result, "timedelta64[ns]") + + result = ser - tdi + tm.assert_equal(result, -expected) + assert_dtype(result, "timedelta64[ns]") + + @pytest.mark.parametrize("tdnat", [np.timedelta64("NaT"), NaT]) + def test_td64arr_add_sub_td64_nat(self, box_with_array, tdnat): + # GH#18808, GH#23320 special handling for timedelta64("NaT") + box = box_with_array + tdi = TimedeltaIndex([NaT, Timedelta("1s")]) + expected = TimedeltaIndex(["NaT"] * 2) + + obj = tm.box_expected(tdi, box) + expected = tm.box_expected(expected, box) + + result = obj + tdnat + tm.assert_equal(result, expected) + result = tdnat + obj + tm.assert_equal(result, expected) + result = obj - tdnat + tm.assert_equal(result, expected) + result = tdnat - obj + tm.assert_equal(result, expected) + + def test_td64arr_add_timedeltalike(self, two_hours, box_with_array): + # only test adding/sub offsets as + is now numeric + # GH#10699 for Tick cases + box = box_with_array + rng = timedelta_range("1 days", "10 days") + expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D") + rng = tm.box_expected(rng, box) + expected = tm.box_expected(expected, box) + + result = rng + two_hours + tm.assert_equal(result, expected) + + result = two_hours + rng + tm.assert_equal(result, expected) + + def test_td64arr_sub_timedeltalike(self, two_hours, box_with_array): + # only test adding/sub offsets as - is now numeric + # GH#10699 for Tick cases + box = box_with_array + rng = timedelta_range("1 days", "10 days") + expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00") + + rng = tm.box_expected(rng, box) + expected = tm.box_expected(expected, box) + + result = rng - two_hours + tm.assert_equal(result, expected) + + result = two_hours - rng + 
tm.assert_equal(result, -expected) + + # ------------------------------------------------------------------ + # __add__/__sub__ with DateOffsets and arrays of DateOffsets + + def test_td64arr_add_sub_offset_index(self, names, box_with_array): + # GH#18849, GH#19744 + box = box_with_array + exname = get_expected_name(box, names) + + tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0]) + other = Index([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1]) + other = np.array(other) if box in [tm.to_array, pd.array] else other + + expected = TimedeltaIndex( + [tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=exname + ) + expected_sub = TimedeltaIndex( + [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname + ) + + tdi = tm.box_expected(tdi, box) + expected = tm.box_expected(expected, box).astype(object, copy=False) + expected_sub = tm.box_expected(expected_sub, box).astype(object, copy=False) + + with tm.assert_produces_warning(PerformanceWarning): + res = tdi + other + tm.assert_equal(res, expected) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + tdi + tm.assert_equal(res2, expected) + + with tm.assert_produces_warning(PerformanceWarning): + res_sub = tdi - other + tm.assert_equal(res_sub, expected_sub) + + def test_td64arr_add_sub_offset_array(self, box_with_array): + # GH#18849, GH#18824 + box = box_with_array + tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"]) + other = np.array([offsets.Hour(n=1), offsets.Minute(n=-2)]) + + expected = TimedeltaIndex( + [tdi[n] + other[n] for n in range(len(tdi))], freq="infer" + ) + expected_sub = TimedeltaIndex( + [tdi[n] - other[n] for n in range(len(tdi))], freq="infer" + ) + + tdi = tm.box_expected(tdi, box) + expected = tm.box_expected(expected, box).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res = tdi + other + tm.assert_equal(res, expected) + + with tm.assert_produces_warning(PerformanceWarning): + 
res2 = other + tdi + tm.assert_equal(res2, expected) + + expected_sub = tm.box_expected(expected_sub, box_with_array).astype(object) + with tm.assert_produces_warning(PerformanceWarning): + res_sub = tdi - other + tm.assert_equal(res_sub, expected_sub) + + def test_td64arr_with_offset_series(self, names, box_with_array): + # GH#18849 + box = box_with_array + box2 = Series if box in [Index, tm.to_array, pd.array] else box + exname = get_expected_name(box, names) + + tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0]) + other = Series([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1]) + + expected_add = Series( + [tdi[n] + other[n] for n in range(len(tdi))], name=exname, dtype=object + ) + obj = tm.box_expected(tdi, box) + expected_add = tm.box_expected(expected_add, box2).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res = obj + other + tm.assert_equal(res, expected_add) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + obj + tm.assert_equal(res2, expected_add) + + expected_sub = Series( + [tdi[n] - other[n] for n in range(len(tdi))], name=exname, dtype=object + ) + expected_sub = tm.box_expected(expected_sub, box2).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res3 = obj - other + tm.assert_equal(res3, expected_sub) + + @pytest.mark.parametrize("obox", [np.array, Index, Series]) + def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array): + # GH#18824 + tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"]) + tdi = tm.box_expected(tdi, box_with_array) + + anchored = obox([offsets.MonthEnd(), offsets.Day(n=2)]) + + # addition/subtraction ops with anchored offsets should issue + # a PerformanceWarning and _then_ raise a TypeError. 
+ msg = "has incorrect type|cannot add the type MonthEnd" + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + tdi + anchored + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + anchored + tdi + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + tdi - anchored + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + anchored - tdi + + # ------------------------------------------------------------------ + # Unsorted + + def test_td64arr_add_sub_object_array(self, box_with_array): + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + tdi = timedelta_range("1 day", periods=3, freq="D") + tdarr = tm.box_expected(tdi, box) + + other = np.array([Timedelta(days=1), offsets.Day(2), Timestamp("2000-01-04")]) + + with tm.assert_produces_warning(PerformanceWarning): + result = tdarr + other + + expected = Index( + [Timedelta(days=2), Timedelta(days=4), Timestamp("2000-01-07")] + ) + expected = tm.box_expected(expected, xbox).astype(object) + tm.assert_equal(result, expected) + + msg = "unsupported operand type|cannot subtract a datelike" + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + tdarr - other + + with tm.assert_produces_warning(PerformanceWarning): + result = other - tdarr + + expected = Index([Timedelta(0), Timedelta(0), Timestamp("2000-01-01")]) + expected = tm.box_expected(expected, xbox).astype(object) + tm.assert_equal(result, expected) + + +class TestTimedeltaArraylikeMulDivOps: + # Tests for timedelta64[ns] + # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__ + + # ------------------------------------------------------------------ + # Multiplication + # organized with scalar others first, then array-like + + def test_td64arr_mul_int(self, box_with_array): + idx = 
TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + + result = idx * 1 + tm.assert_equal(result, idx) + + result = 1 * idx + tm.assert_equal(result, idx) + + def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array): + rng = timedelta_range("1 days", "10 days", name="foo") + rng = tm.box_expected(rng, box_with_array) + msg = "|".join( + [ + "argument must be an integer", + "cannot use operands with types dtype", + "Cannot multiply with", + ] + ) + with pytest.raises(TypeError, match=msg): + rng * two_hours + + def test_tdi_mul_int_array_zerodim(self, box_with_array): + rng5 = np.arange(5, dtype="int64") + idx = TimedeltaIndex(rng5) + expected = TimedeltaIndex(rng5 * 5) + + idx = tm.box_expected(idx, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = idx * np.array(5, dtype="int64") + tm.assert_equal(result, expected) + + def test_tdi_mul_int_array(self, box_with_array): + rng5 = np.arange(5, dtype="int64") + idx = TimedeltaIndex(rng5) + expected = TimedeltaIndex(rng5**2) + + idx = tm.box_expected(idx, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = idx * rng5 + tm.assert_equal(result, expected) + + def test_tdi_mul_int_series(self, box_with_array): + box = box_with_array + xbox = Series if box in [Index, tm.to_array, pd.array] else box + + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + expected = TimedeltaIndex(np.arange(5, dtype="int64") ** 2) + + idx = tm.box_expected(idx, box) + expected = tm.box_expected(expected, xbox) + + result = idx * Series(np.arange(5, dtype="int64")) + tm.assert_equal(result, expected) + + def test_tdi_mul_float_series(self, box_with_array): + box = box_with_array + xbox = Series if box in [Index, tm.to_array, pd.array] else box + + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box) + + rng5f = np.arange(5, dtype="float64") + expected = TimedeltaIndex(rng5f * (rng5f + 1.0)) + 
expected = tm.box_expected(expected, xbox) + + result = idx * Series(rng5f + 1.0) + tm.assert_equal(result, expected) + + # TODO: Put Series/DataFrame in others? + @pytest.mark.parametrize( + "other", + [ + np.arange(1, 11), + Index(np.arange(1, 11), np.int64), + Index(range(1, 11), np.uint64), + Index(range(1, 11), np.float64), + pd.RangeIndex(1, 11), + ], + ids=lambda x: type(x).__name__, + ) + def test_tdi_rmul_arraylike(self, other, box_with_array): + box = box_with_array + + tdi = TimedeltaIndex(["1 Day"] * 10) + expected = timedelta_range("1 days", "10 days")._with_freq(None) + + tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, other) + + expected = tm.box_expected(expected, xbox) + + result = other * tdi + tm.assert_equal(result, expected) + commute = tdi * other + tm.assert_equal(commute, expected) + + # ------------------------------------------------------------------ + # __div__, __rdiv__ + + def test_td64arr_div_nat_invalid(self, box_with_array): + # don't allow division by NaT (maybe could in the future) + rng = timedelta_range("1 days", "10 days", name="foo") + rng = tm.box_expected(rng, box_with_array) + + with pytest.raises(TypeError, match="unsupported operand type"): + rng / NaT + with pytest.raises(TypeError, match="Cannot divide NaTType by"): + NaT / rng + + dt64nat = np.datetime64("NaT", "ns") + msg = "|".join( + [ + # 'divide' on npdev as of 2021-12-18 + "ufunc '(true_divide|divide)' cannot use operands", + "cannot perform __r?truediv__", + "Cannot divide datetime64 by TimedeltaArray", + ] + ) + with pytest.raises(TypeError, match=msg): + rng / dt64nat + with pytest.raises(TypeError, match=msg): + dt64nat / rng + + def test_td64arr_div_td64nat(self, box_with_array): + # GH#23829 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = timedelta_range("1 days", "10 days") + rng = tm.box_expected(rng, box) + + other = np.timedelta64("NaT") + + expected = np.array([np.nan] * 10) + expected = 
tm.box_expected(expected, xbox) + + result = rng / other + tm.assert_equal(result, expected) + + result = other / rng + tm.assert_equal(result, expected) + + def test_td64arr_div_int(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + + result = idx / 1 + tm.assert_equal(result, idx) + + with pytest.raises(TypeError, match="Cannot divide"): + # GH#23829 + 1 / idx + + def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array): + # GH#20088, GH#22163 ensure DataFrame returns correct dtype + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = timedelta_range("1 days", "10 days", name="foo") + expected = Index((np.arange(10) + 1) * 12, dtype=np.float64, name="foo") + + rng = tm.box_expected(rng, box) + expected = tm.box_expected(expected, xbox) + + result = rng / two_hours + tm.assert_equal(result, expected) + + result = two_hours / rng + expected = 1 / expected + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("m", [1, 3, 10]) + @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"]) + def test_td64arr_div_td64_scalar(self, m, unit, box_with_array): + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + ser = Series([Timedelta(days=59)] * 3) + ser[2] = np.nan + flat = ser + ser = tm.box_expected(ser, box) + + # op + expected = Series([x / np.timedelta64(m, unit) for x in flat]) + expected = tm.box_expected(expected, xbox) + result = ser / np.timedelta64(m, unit) + tm.assert_equal(result, expected) + + # reverse op + expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in flat]) + expected = tm.box_expected(expected, xbox) + result = np.timedelta64(m, unit) / ser + tm.assert_equal(result, expected) + + def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array): + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = TimedeltaIndex(["1 days", NaT, "2 days"], 
name="foo") + expected = Index([12, np.nan, 24], dtype=np.float64, name="foo") + + rng = tm.box_expected(rng, box) + expected = tm.box_expected(expected, xbox) + + result = rng / two_hours + tm.assert_equal(result, expected) + + result = two_hours / rng + expected = 1 / expected + tm.assert_equal(result, expected) + + def test_td64arr_div_td64_ndarray(self, box_with_array): + # GH#22631 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = TimedeltaIndex(["1 days", NaT, "2 days"]) + expected = Index([12, np.nan, 24], dtype=np.float64) + + rng = tm.box_expected(rng, box) + expected = tm.box_expected(expected, xbox) + + other = np.array([2, 4, 2], dtype="m8[h]") + result = rng / other + tm.assert_equal(result, expected) + + result = rng / tm.box_expected(other, box) + tm.assert_equal(result, expected) + + result = rng / other.astype(object) + tm.assert_equal(result, expected.astype(object)) + + result = rng / list(other) + tm.assert_equal(result, expected) + + # reversed op + expected = 1 / expected + result = other / rng + tm.assert_equal(result, expected) + + result = tm.box_expected(other, box) / rng + tm.assert_equal(result, expected) + + result = other.astype(object) / rng + tm.assert_equal(result, expected) + + result = list(other) / rng + tm.assert_equal(result, expected) + + def test_tdarr_div_length_mismatch(self, box_with_array): + rng = TimedeltaIndex(["1 days", NaT, "2 days"]) + mismatched = [1, 2, 3, 4] + + rng = tm.box_expected(rng, box_with_array) + msg = "Cannot divide vectors|Unable to coerce to Series" + for obj in [mismatched, mismatched[:2]]: + # one shorter, one longer + for other in [obj, np.array(obj), Index(obj)]: + with pytest.raises(ValueError, match=msg): + rng / other + with pytest.raises(ValueError, match=msg): + other / rng + + def test_td64_div_object_mixed_result(self, box_with_array): + # Case where we having a NaT in the result inseat of timedelta64("NaT") + # is misleading + orig = timedelta_range("1 Day", 
periods=3).insert(1, NaT) + tdi = tm.box_expected(orig, box_with_array, transpose=False) + + other = np.array([orig[0], 1.5, 2.0, orig[2]], dtype=object) + other = tm.box_expected(other, box_with_array, transpose=False) + + res = tdi / other + + expected = Index([1.0, np.timedelta64("NaT", "ns"), orig[0], 1.5], dtype=object) + expected = tm.box_expected(expected, box_with_array, transpose=False) + if isinstance(expected, NumpyExtensionArray): + expected = expected.to_numpy() + tm.assert_equal(res, expected) + if box_with_array is DataFrame: + # We have a np.timedelta64(NaT), not pd.NaT + assert isinstance(res.iloc[1, 0], np.timedelta64) + + res = tdi // other + + expected = Index([1, np.timedelta64("NaT", "ns"), orig[0], 1], dtype=object) + expected = tm.box_expected(expected, box_with_array, transpose=False) + if isinstance(expected, NumpyExtensionArray): + expected = expected.to_numpy() + tm.assert_equal(res, expected) + if box_with_array is DataFrame: + # We have a np.timedelta64(NaT), not pd.NaT + assert isinstance(res.iloc[1, 0], np.timedelta64) + + # ------------------------------------------------------------------ + # __floordiv__, __rfloordiv__ + + def test_td64arr_floordiv_td64arr_with_nat( + self, box_with_array, using_array_manager + ): + # GH#35529 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + left = Series([1000, 222330, 30], dtype="timedelta64[ns]") + right = Series([1000, 222330, None], dtype="timedelta64[ns]") + + left = tm.box_expected(left, box) + right = tm.box_expected(right, box) + + expected = np.array([1.0, 1.0, np.nan], dtype=np.float64) + expected = tm.box_expected(expected, xbox) + if box is DataFrame and using_array_manager: + # INFO(ArrayManager) floordiv returns integer, and ArrayManager + # performs ops column-wise and thus preserves int64 dtype for + # columns without missing values + expected[[0, 1]] = expected[[0, 1]].astype("int64") + + with tm.maybe_produces_warning( + RuntimeWarning, box is pd.array, 
check_stacklevel=False + ): + result = left // right + + tm.assert_equal(result, expected) + + # case that goes through __rfloordiv__ with arraylike + with tm.maybe_produces_warning( + RuntimeWarning, box is pd.array, check_stacklevel=False + ): + result = np.asarray(left) // right + tm.assert_equal(result, expected) + + @pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning") + def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td): + # GH#18831, GH#19125 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + td = Timedelta("5m3s") # i.e. (scalar_td - 1sec) / 2 + + td1 = Series([td, td, NaT], dtype="m8[ns]") + td1 = tm.box_expected(td1, box, transpose=False) + + expected = Series([0, 0, np.nan]) + expected = tm.box_expected(expected, xbox, transpose=False) + + result = td1 // scalar_td + tm.assert_equal(result, expected) + + # Reversed op + expected = Series([2, 2, np.nan]) + expected = tm.box_expected(expected, xbox, transpose=False) + + result = scalar_td // td1 + tm.assert_equal(result, expected) + + # same thing buts let's be explicit about calling __rfloordiv__ + result = td1.__rfloordiv__(scalar_td) + tm.assert_equal(result, expected) + + def test_td64arr_floordiv_int(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + result = idx // 1 + tm.assert_equal(result, idx) + + pattern = "floor_divide cannot use operands|Cannot divide int by Timedelta*" + with pytest.raises(TypeError, match=pattern): + 1 // idx + + # ------------------------------------------------------------------ + # mod, divmod + # TODO: operations with timedelta-like arrays, numeric arrays, + # reversed ops + + def test_td64arr_mod_tdscalar(self, box_with_array, three_days): + tdi = timedelta_range("1 Day", "9 days") + tdarr = tm.box_expected(tdi, box_with_array) + + expected = TimedeltaIndex(["1 Day", "2 Days", "0 Days"] * 3) + expected = tm.box_expected(expected, 
box_with_array) + + result = tdarr % three_days + tm.assert_equal(result, expected) + + warn = None + if box_with_array is DataFrame and isinstance(three_days, pd.DateOffset): + warn = PerformanceWarning + # TODO: making expected be object here a result of DataFrame.__divmod__ + # being defined in a naive way that does not dispatch to the underlying + # array's __divmod__ + expected = expected.astype(object) + + with tm.assert_produces_warning(warn): + result = divmod(tdarr, three_days) + + tm.assert_equal(result[1], expected) + tm.assert_equal(result[0], tdarr // three_days) + + def test_td64arr_mod_int(self, box_with_array): + tdi = timedelta_range("1 ns", "10 ns", periods=10) + tdarr = tm.box_expected(tdi, box_with_array) + + expected = TimedeltaIndex(["1 ns", "0 ns"] * 5) + expected = tm.box_expected(expected, box_with_array) + + result = tdarr % 2 + tm.assert_equal(result, expected) + + msg = "Cannot divide int by" + with pytest.raises(TypeError, match=msg): + 2 % tdarr + + result = divmod(tdarr, 2) + tm.assert_equal(result[1], expected) + tm.assert_equal(result[0], tdarr // 2) + + def test_td64arr_rmod_tdscalar(self, box_with_array, three_days): + tdi = timedelta_range("1 Day", "9 days") + tdarr = tm.box_expected(tdi, box_with_array) + + expected = ["0 Days", "1 Day", "0 Days"] + ["3 Days"] * 6 + expected = TimedeltaIndex(expected) + expected = tm.box_expected(expected, box_with_array) + + result = three_days % tdarr + tm.assert_equal(result, expected) + + result = divmod(three_days, tdarr) + tm.assert_equal(result[1], expected) + tm.assert_equal(result[0], three_days // tdarr) + + # ------------------------------------------------------------------ + # Operations with invalid others + + def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td): + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + td1 = tm.box_expected(td1, box_with_array) + + # check that we are getting a TypeError + # with 'operate' (from 
core/ops.py) for the ops that are not + # defined + pattern = "operate|unsupported|cannot|not supported" + with pytest.raises(TypeError, match=pattern): + td1 * scalar_td + with pytest.raises(TypeError, match=pattern): + scalar_td * td1 + + def test_td64arr_mul_too_short_raises(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + msg = "|".join( + [ + "cannot use operands with types dtype", + "Cannot multiply with unequal lengths", + "Unable to coerce to Series", + ] + ) + with pytest.raises(TypeError, match=msg): + # length check before dtype check + idx * idx[:3] + with pytest.raises(ValueError, match=msg): + idx * np.array([1, 2]) + + def test_td64arr_mul_td64arr_raises(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + msg = "cannot use operands with types dtype" + with pytest.raises(TypeError, match=msg): + idx * idx + + # ------------------------------------------------------------------ + # Operations with numeric others + + def test_td64arr_mul_numeric_scalar(self, box_with_array, one): + # GH#4521 + # divide/multiply by integers + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + expected = Series(["-59 Days", "-59 Days", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = tdser * (-one) + tm.assert_equal(result, expected) + result = (-one) * tdser + tm.assert_equal(result, expected) + + expected = Series(["118 Days", "118 Days", "NaT"], dtype="timedelta64[ns]") + expected = tm.box_expected(expected, box_with_array) + + result = tdser * (2 * one) + tm.assert_equal(result, expected) + result = (2 * one) * tdser + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)]) + def test_td64arr_div_numeric_scalar(self, box_with_array, two): + # GH#4521 + # 
divide/multiply by integers + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = tdser / two + tm.assert_equal(result, expected) + + with pytest.raises(TypeError, match="Cannot divide"): + two / tdser + + @pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)]) + def test_td64arr_floordiv_numeric_scalar(self, box_with_array, two): + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = tdser // two + tm.assert_equal(result, expected) + + with pytest.raises(TypeError, match="Cannot divide"): + two // tdser + + @pytest.mark.parametrize( + "vector", + [np.array([20, 30, 40]), Index([20, 30, 40]), Series([20, 30, 40])], + ids=lambda x: type(x).__name__, + ) + def test_td64arr_rmul_numeric_array( + self, + box_with_array, + vector, + any_real_numpy_dtype, + ): + # GH#4521 + # divide/multiply by integers + + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + vector = vector.astype(any_real_numpy_dtype) + + expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + xbox = get_upcast_box(tdser, vector) + + expected = tm.box_expected(expected, xbox) + + result = tdser * vector + tm.assert_equal(result, expected) + + result = vector * tdser + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "vector", + [np.array([20, 30, 40]), Index([20, 30, 40]), Series([20, 30, 40])], + ids=lambda x: type(x).__name__, + ) + def test_td64arr_div_numeric_array( + self, box_with_array, vector, any_real_numpy_dtype + ): + # GH#4521 + # divide/multiply by integers + + tdser = Series(["59 
Days", "59 Days", "NaT"], dtype="m8[ns]") + vector = vector.astype(any_real_numpy_dtype) + + expected = Series(["2.95D", "1D 23h 12m", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + xbox = get_upcast_box(tdser, vector) + expected = tm.box_expected(expected, xbox) + + result = tdser / vector + tm.assert_equal(result, expected) + + pattern = "|".join( + [ + "true_divide'? cannot use operands", + "cannot perform __div__", + "cannot perform __truediv__", + "unsupported operand", + "Cannot divide", + "ufunc 'divide' cannot use operands with types", + ] + ) + with pytest.raises(TypeError, match=pattern): + vector / tdser + + result = tdser / vector.astype(object) + if box_with_array is DataFrame: + expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))] + expected = tm.box_expected(expected, xbox).astype(object) + # We specifically expect timedelta64("NaT") here, not pd.NA + msg = "The 'downcast' keyword in fillna" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected[2] = expected[2].fillna( + np.timedelta64("NaT", "ns"), downcast=False + ) + else: + expected = [tdser[n] / vector[n] for n in range(len(tdser))] + expected = [ + x if x is not NaT else np.timedelta64("NaT", "ns") for x in expected + ] + if xbox is tm.to_array: + expected = tm.to_array(expected).astype(object) + else: + expected = xbox(expected, dtype=object) + + tm.assert_equal(result, expected) + + with pytest.raises(TypeError, match=pattern): + vector.astype(object) / tdser + + def test_td64arr_mul_int_series(self, box_with_array, names): + # GH#19042 test for correct name attachment + box = box_with_array + exname = get_expected_name(box, names) + + tdi = TimedeltaIndex( + ["0days", "1day", "2days", "3days", "4days"], name=names[0] + ) + # TODO: Should we be parametrizing over types for `ser` too? 
+ ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1]) + + expected = Series( + ["0days", "1day", "4days", "9days", "16days"], + dtype="timedelta64[ns]", + name=exname, + ) + + tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, ser) + + expected = tm.box_expected(expected, xbox) + + result = ser * tdi + tm.assert_equal(result, expected) + + result = tdi * ser + tm.assert_equal(result, expected) + + # TODO: Should we be parametrizing over types for `ser` too? + def test_float_series_rdiv_td64arr(self, box_with_array, names): + # GH#19042 test for correct name attachment + box = box_with_array + tdi = TimedeltaIndex( + ["0days", "1day", "2days", "3days", "4days"], name=names[0] + ) + ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1]) + + xname = names[2] if box not in [tm.to_array, pd.array] else names[1] + expected = Series( + [tdi[n] / ser[n] for n in range(len(ser))], + dtype="timedelta64[ns]", + name=xname, + ) + + tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, ser) + expected = tm.box_expected(expected, xbox) + + result = ser.__rtruediv__(tdi) + if box is DataFrame: + assert result is NotImplemented + else: + tm.assert_equal(result, expected) + + def test_td64arr_all_nat_div_object_dtype_numeric(self, box_with_array): + # GH#39750 make sure we infer the result as td64 + tdi = TimedeltaIndex([NaT, NaT]) + + left = tm.box_expected(tdi, box_with_array) + right = np.array([2, 2.0], dtype=object) + + tdnat = np.timedelta64("NaT", "ns") + expected = Index([tdnat] * 2, dtype=object) + if box_with_array is not Index: + expected = tm.box_expected(expected, box_with_array).astype(object) + if box_with_array in [Series, DataFrame]: + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = expected.fillna(tdnat, downcast=False) # GH#18463 + + result = left / right + tm.assert_equal(result, expected) + + result = left // right + tm.assert_equal(result, 
expected) + + +class TestTimedelta64ArrayLikeArithmetic: + # Arithmetic tests for timedelta64[ns] vectors fully parametrized over + # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all arithmetic + # tests will eventually end up here. + + def test_td64arr_pow_invalid(self, scalar_td, box_with_array): + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + td1 = tm.box_expected(td1, box_with_array) + + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + pattern = "operate|unsupported|cannot|not supported" + with pytest.raises(TypeError, match=pattern): + scalar_td**td1 + + with pytest.raises(TypeError, match=pattern): + td1**scalar_td + + +def test_add_timestamp_to_timedelta(): + # GH: 35897 + timestamp = Timestamp("2021-01-01") + result = timestamp + timedelta_range("0s", "1s", periods=31) + expected = DatetimeIndex( + [ + timestamp + + ( + pd.to_timedelta("0.033333333s") * i + + pd.to_timedelta("0.000000001s") * divmod(i, 3)[0] + ) + for i in range(31) + ] + ) + tm.assert_index_equal(result, expected) diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/common.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d10e7a832d35b8aafd1206e0744cdf807949ce5 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/common.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_constructors.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_constructors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e3d109fa0c0ecf59d508046c9bdb95aa868df0f Binary files /dev/null and 
b/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_constructors.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_conversion.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_conversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b8daaf3b32a59e95b9b27741aacce81adb48a49 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_conversion.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_fillna.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_fillna.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50efd4a0b7dddeb7e346464840425faa4321dfec Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_fillna.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_misc.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42fe4852d7fc8ad7d7bd3b026a9acb62796607f5 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_misc.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_unique.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_unique.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00476a60bae36f20148aae4ec02735c948ef79c1 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_unique.cpython-310.pyc differ diff --git 
a/moondream/lib/python3.10/site-packages/pandas/tests/base/test_fillna.py b/moondream/lib/python3.10/site-packages/pandas/tests/base/test_fillna.py new file mode 100644 index 0000000000000000000000000000000000000000..7300d3013305a7ca08312ae85cc42ae8950acf23 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/base/test_fillna.py @@ -0,0 +1,60 @@ +""" +Though Index.fillna and Series.fillna has separate impl, +test here to confirm these works as the same +""" + +import numpy as np +import pytest + +from pandas import MultiIndex +import pandas._testing as tm +from pandas.tests.base.common import allow_na_ops + + +def test_fillna(index_or_series_obj): + # GH 11343 + obj = index_or_series_obj + + if isinstance(obj, MultiIndex): + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + obj.fillna(0) + return + + # values will not be changed + fill_value = obj.values[0] if len(obj) > 0 else 0 + result = obj.fillna(fill_value) + + tm.assert_equal(obj, result) + + # check shallow_copied + assert obj is not result + + +@pytest.mark.parametrize("null_obj", [np.nan, None]) +def test_fillna_null(null_obj, index_or_series_obj): + # GH 11343 + obj = index_or_series_obj + klass = type(obj) + + if not allow_na_ops(obj): + pytest.skip(f"{klass} doesn't allow for NA operations") + elif len(obj) < 1: + pytest.skip("Test doesn't make sense on empty data") + elif isinstance(obj, MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj._values + fill_value = values[0] + expected = values.copy() + values[0:2] = null_obj + expected[0:2] = fill_value + + expected = klass(expected) + obj = klass(values) + + result = obj.fillna(fill_value) + tm.assert_equal(result, expected) + + # check shallow_copied + assert obj is not result diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/base/test_misc.py b/moondream/lib/python3.10/site-packages/pandas/tests/base/test_misc.py new file mode 100644 index 
0000000000000000000000000000000000000000..65e234e799353844bab2a63df582adfa5842d2cd --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/base/test_misc.py @@ -0,0 +1,191 @@ +import sys + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas.compat import PYPY + +from pandas.core.dtypes.common import ( + is_dtype_equal, + is_object_dtype, +) + +import pandas as pd +from pandas import ( + Index, + Series, +) +import pandas._testing as tm + + +def test_isnull_notnull_docstrings(): + # GH#41855 make sure its clear these are aliases + doc = pd.DataFrame.notnull.__doc__ + assert doc.startswith("\nDataFrame.notnull is an alias for DataFrame.notna.\n") + doc = pd.DataFrame.isnull.__doc__ + assert doc.startswith("\nDataFrame.isnull is an alias for DataFrame.isna.\n") + + doc = Series.notnull.__doc__ + assert doc.startswith("\nSeries.notnull is an alias for Series.notna.\n") + doc = Series.isnull.__doc__ + assert doc.startswith("\nSeries.isnull is an alias for Series.isna.\n") + + +@pytest.mark.parametrize( + "op_name, op", + [ + ("add", "+"), + ("sub", "-"), + ("mul", "*"), + ("mod", "%"), + ("pow", "**"), + ("truediv", "/"), + ("floordiv", "//"), + ], +) +def test_binary_ops_docstring(frame_or_series, op_name, op): + # not using the all_arithmetic_functions fixture with _get_opstr + # as _get_opstr is used internally in the dynamic implementation of the docstring + klass = frame_or_series + + operand1 = klass.__name__.lower() + operand2 = "other" + expected_str = " ".join([operand1, op, operand2]) + assert expected_str in getattr(klass, op_name).__doc__ + + # reverse version of the binary ops + expected_str = " ".join([operand2, op, operand1]) + assert expected_str in getattr(klass, "r" + op_name).__doc__ + + +def test_ndarray_compat_properties(index_or_series_obj): + obj = index_or_series_obj + + # Check that we work. 
+ for p in ["shape", "dtype", "T", "nbytes"]: + assert getattr(obj, p, None) is not None + + # deprecated properties + for p in ["strides", "itemsize", "base", "data"]: + assert not hasattr(obj, p) + + msg = "can only convert an array of size 1 to a Python scalar" + with pytest.raises(ValueError, match=msg): + obj.item() # len > 1 + + assert obj.ndim == 1 + assert obj.size == len(obj) + + assert Index([1]).item() == 1 + assert Series([1]).item() == 1 + + +@pytest.mark.skipif( + PYPY or using_pyarrow_string_dtype(), + reason="not relevant for PyPy doesn't work properly for arrow strings", +) +def test_memory_usage(index_or_series_memory_obj): + obj = index_or_series_memory_obj + # Clear index caches so that len(obj) == 0 report 0 memory usage + if isinstance(obj, Series): + is_ser = True + obj.index._engine.clear_mapping() + else: + is_ser = False + obj._engine.clear_mapping() + + res = obj.memory_usage() + res_deep = obj.memory_usage(deep=True) + + is_object = is_object_dtype(obj) or (is_ser and is_object_dtype(obj.index)) + is_categorical = isinstance(obj.dtype, pd.CategoricalDtype) or ( + is_ser and isinstance(obj.index.dtype, pd.CategoricalDtype) + ) + is_object_string = is_dtype_equal(obj, "string[python]") or ( + is_ser and is_dtype_equal(obj.index.dtype, "string[python]") + ) + + if len(obj) == 0: + expected = 0 + assert res_deep == res == expected + elif is_object or is_categorical or is_object_string: + # only deep will pick them up + assert res_deep > res + else: + assert res == res_deep + + # sys.getsizeof will call the .memory_usage with + # deep=True, and add on some GC overhead + diff = res_deep - sys.getsizeof(obj) + assert abs(diff) < 100 + + +def test_memory_usage_components_series(series_with_simple_index): + series = series_with_simple_index + total_usage = series.memory_usage(index=True) + non_index_usage = series.memory_usage(index=False) + index_usage = series.index.memory_usage() + assert total_usage == non_index_usage + index_usage + + 
+@pytest.mark.parametrize("dtype", tm.NARROW_NP_DTYPES) +def test_memory_usage_components_narrow_series(dtype): + series = Series(range(5), dtype=dtype, index=[f"i-{i}" for i in range(5)], name="a") + total_usage = series.memory_usage(index=True) + non_index_usage = series.memory_usage(index=False) + index_usage = series.index.memory_usage() + assert total_usage == non_index_usage + index_usage + + +def test_searchsorted(request, index_or_series_obj): + # numpy.searchsorted calls obj.searchsorted under the hood. + # See gh-12238 + obj = index_or_series_obj + + if isinstance(obj, pd.MultiIndex): + # See gh-14833 + request.applymarker( + pytest.mark.xfail( + reason="np.searchsorted doesn't work on pd.MultiIndex: GH 14833" + ) + ) + elif obj.dtype.kind == "c" and isinstance(obj, Index): + # TODO: Should Series cases also raise? Looks like they use numpy + # comparison semantics https://github.com/numpy/numpy/issues/15981 + mark = pytest.mark.xfail(reason="complex objects are not comparable") + request.applymarker(mark) + + max_obj = max(obj, default=0) + index = np.searchsorted(obj, max_obj) + assert 0 <= index <= len(obj) + + index = np.searchsorted(obj, max_obj, sorter=range(len(obj))) + assert 0 <= index <= len(obj) + + +def test_access_by_position(index_flat): + index = index_flat + + if len(index) == 0: + pytest.skip("Test doesn't make sense on empty data") + + series = Series(index) + assert index[0] == series.iloc[0] + assert index[5] == series.iloc[5] + assert index[-1] == series.iloc[-1] + + size = len(index) + assert index[-1] == index[size - 1] + + msg = f"index {size} is out of bounds for axis 0 with size {size}" + if is_dtype_equal(index.dtype, "string[pyarrow]") or is_dtype_equal( + index.dtype, "string[pyarrow_numpy]" + ): + msg = "index out of bounds" + with pytest.raises(IndexError, match=msg): + index[size] + msg = "single positional indexer is out-of-bounds" + with pytest.raises(IndexError, match=msg): + series.iloc[size] diff --git 
a/moondream/lib/python3.10/site-packages/pandas/tests/base/test_transpose.py b/moondream/lib/python3.10/site-packages/pandas/tests/base/test_transpose.py new file mode 100644 index 0000000000000000000000000000000000000000..246f33d27476cb419620fb8571984619785f9b62 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/base/test_transpose.py @@ -0,0 +1,56 @@ +import numpy as np +import pytest + +from pandas import ( + CategoricalDtype, + DataFrame, +) +import pandas._testing as tm + + +def test_transpose(index_or_series_obj): + obj = index_or_series_obj + tm.assert_equal(obj.transpose(), obj) + + +def test_transpose_non_default_axes(index_or_series_obj): + msg = "the 'axes' parameter is not supported" + obj = index_or_series_obj + with pytest.raises(ValueError, match=msg): + obj.transpose(1) + with pytest.raises(ValueError, match=msg): + obj.transpose(axes=1) + + +def test_numpy_transpose(index_or_series_obj): + msg = "the 'axes' parameter is not supported" + obj = index_or_series_obj + tm.assert_equal(np.transpose(obj), obj) + + with pytest.raises(ValueError, match=msg): + np.transpose(obj, axes=1) + + +@pytest.mark.parametrize( + "data, transposed_data, index, columns, dtype", + [ + ([[1], [2]], [[1, 2]], ["a", "a"], ["b"], int), + ([[1], [2]], [[1, 2]], ["a", "a"], ["b"], CategoricalDtype([1, 2])), + ([[1, 2]], [[1], [2]], ["b"], ["a", "a"], int), + ([[1, 2]], [[1], [2]], ["b"], ["a", "a"], CategoricalDtype([1, 2])), + ([[1, 2], [3, 4]], [[1, 3], [2, 4]], ["a", "a"], ["b", "b"], int), + ( + [[1, 2], [3, 4]], + [[1, 3], [2, 4]], + ["a", "a"], + ["b", "b"], + CategoricalDtype([1, 2, 3, 4]), + ), + ], +) +def test_duplicate_labels(data, transposed_data, index, columns, dtype): + # GH 42380 + df = DataFrame(data, index=index, columns=columns, dtype=dtype) + result = df.T + expected = DataFrame(transposed_data, index=columns, columns=index, dtype=dtype) + tm.assert_frame_equal(result, expected) diff --git 
a/moondream/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py b/moondream/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..8a8643415ae12f96bbbd87ed85ff74f8813b07e4 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py @@ -0,0 +1,98 @@ +import sys +import types + +import pytest + +import pandas.util._test_decorators as td + +import pandas + + +@pytest.fixture +def dummy_backend(): + db = types.ModuleType("pandas_dummy_backend") + setattr(db, "plot", lambda *args, **kwargs: "used_dummy") + return db + + +@pytest.fixture +def restore_backend(): + """Restore the plotting backend to matplotlib""" + with pandas.option_context("plotting.backend", "matplotlib"): + yield + + +def test_backend_is_not_module(): + msg = "Could not find plotting backend 'not_an_existing_module'." + with pytest.raises(ValueError, match=msg): + pandas.set_option("plotting.backend", "not_an_existing_module") + + assert pandas.options.plotting.backend == "matplotlib" + + +def test_backend_is_correct(monkeypatch, restore_backend, dummy_backend): + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + + pandas.set_option("plotting.backend", "pandas_dummy_backend") + assert pandas.get_option("plotting.backend") == "pandas_dummy_backend" + assert ( + pandas.plotting._core._get_plot_backend("pandas_dummy_backend") is dummy_backend + ) + + +def test_backend_can_be_set_in_plot_call(monkeypatch, restore_backend, dummy_backend): + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + df = pandas.DataFrame([1, 2, 3]) + + assert pandas.get_option("plotting.backend") == "matplotlib" + assert df.plot(backend="pandas_dummy_backend") == "used_dummy" + + +def test_register_entrypoint(restore_backend, tmp_path, monkeypatch, dummy_backend): + monkeypatch.syspath_prepend(tmp_path) + monkeypatch.setitem(sys.modules, 
"pandas_dummy_backend", dummy_backend) + + dist_info = tmp_path / "my_backend-0.0.0.dist-info" + dist_info.mkdir() + # entry_point name should not match module name - otherwise pandas will + # fall back to backend lookup by module name + (dist_info / "entry_points.txt").write_bytes( + b"[pandas_plotting_backends]\nmy_ep_backend = pandas_dummy_backend\n" + ) + + assert pandas.plotting._core._get_plot_backend("my_ep_backend") is dummy_backend + + with pandas.option_context("plotting.backend", "my_ep_backend"): + assert pandas.plotting._core._get_plot_backend() is dummy_backend + + +def test_setting_backend_without_plot_raises(monkeypatch): + # GH-28163 + module = types.ModuleType("pandas_plot_backend") + monkeypatch.setitem(sys.modules, "pandas_plot_backend", module) + + assert pandas.options.plotting.backend == "matplotlib" + with pytest.raises( + ValueError, match="Could not find plotting backend 'pandas_plot_backend'." + ): + pandas.set_option("plotting.backend", "pandas_plot_backend") + + assert pandas.options.plotting.backend == "matplotlib" + + +@td.skip_if_installed("matplotlib") +def test_no_matplotlib_ok(): + msg = ( + 'matplotlib is required for plotting when the default backend "matplotlib" is ' + "selected." 
+ ) + with pytest.raises(ImportError, match=msg): + pandas.plotting._core._get_plot_backend("matplotlib") + + +def test_extra_kinds_ok(monkeypatch, restore_backend, dummy_backend): + # https://github.com/pandas-dev/pandas/pull/28647 + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + pandas.set_option("plotting.backend", "pandas_dummy_backend") + df = pandas.DataFrame({"A": [1, 2, 3]}) + df.plot(kind="not a real kind") diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py b/moondream/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..20daf5935624843af3224f991497f84fa6639a0d --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py @@ -0,0 +1,60 @@ +import pytest + +from pandas import DataFrame +from pandas.tests.plotting.common import ( + _check_plot_works, + _check_ticks_props, + _gen_two_subplots, +) + +plt = pytest.importorskip("matplotlib.pyplot") + + +class TestCommon: + def test__check_ticks_props(self): + # GH 34768 + df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]}) + ax = _check_plot_works(df.plot, rot=30) + ax.yaxis.set_tick_params(rotation=30) + msg = "expected 0.00000 but got " + with pytest.raises(AssertionError, match=msg): + _check_ticks_props(ax, xrot=0) + with pytest.raises(AssertionError, match=msg): + _check_ticks_props(ax, xlabelsize=0) + with pytest.raises(AssertionError, match=msg): + _check_ticks_props(ax, yrot=0) + with pytest.raises(AssertionError, match=msg): + _check_ticks_props(ax, ylabelsize=0) + + def test__gen_two_subplots_with_ax(self): + fig = plt.gcf() + gen = _gen_two_subplots(f=lambda **kwargs: None, fig=fig, ax="test") + # On the first yield, no subplot should be added since ax was passed + next(gen) + assert fig.get_axes() == [] + # On the second, the one axis should match fig.subplot(2, 1, 2) + next(gen) + axes = fig.get_axes() + assert len(axes) == 
1 + subplot_geometry = list(axes[0].get_subplotspec().get_geometry()[:-1]) + subplot_geometry[-1] += 1 + assert subplot_geometry == [2, 1, 2] + + def test_colorbar_layout(self): + fig = plt.figure() + + axes = fig.subplot_mosaic( + """ + AB + CC + """ + ) + + x = [1, 2, 3] + y = [1, 2, 3] + + cs0 = axes["A"].scatter(x, y) + axes["B"].scatter(x, y) + + fig.colorbar(cs0, ax=[axes["A"], axes["B"]], location="right") + DataFrame(x).plot(ax=axes["C"])