ZTWHHH commited on
Commit
6fcb0be
·
verified ·
1 Parent(s): 2051c44

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/__init__.py +0 -0
  3. llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so +0 -0
  4. llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so +3 -0
  5. llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HConst.pxd +106 -0
  6. llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/Highs.pxd +56 -0
  7. llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd +9 -0
  8. llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd +9 -0
  9. llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc +0 -0
  10. llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc +0 -0
  11. llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc +0 -0
  12. llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc +0 -0
  13. llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc +0 -0
  14. llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc +0 -0
  15. llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc +0 -0
  16. llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so +3 -0
  17. llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py +1225 -0
  18. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py +0 -0
  19. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py +529 -0
  20. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py +1699 -0
  21. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py +406 -0
  22. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py +310 -0
  23. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py +815 -0
  24. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py +228 -0
  25. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py +123 -0
  26. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py +1155 -0
  27. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py +793 -0
  28. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py +906 -0
  29. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyqa.py +246 -0
  30. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py +255 -0
  31. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py +92 -0
  32. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py +803 -0
  33. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiate.py +512 -0
  34. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py +292 -0
  35. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_isotonic_regression.py +167 -0
  36. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py +43 -0
  37. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py +128 -0
  38. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py +874 -0
  39. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py +116 -0
  40. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py +0 -0
  41. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py +297 -0
  42. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py +285 -0
  43. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_minimize_constrained.py +828 -0
  44. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_nnls.py +318 -0
  45. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_nonlin.py +534 -0
  46. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py +0 -0
  47. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py +40 -0
  48. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py +608 -0
  49. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion.py +112 -0
  50. llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py +939 -0
.gitattributes CHANGED
@@ -357,3 +357,6 @@ llava_next/lib/python3.10/site-packages/scipy/optimize/_lbfgsb.cpython-310-x86_6
357
  llava_next/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
358
  llava_next/lib/python3.10/site-packages/scipy/signal/__pycache__/_filter_design.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
359
  llava_next/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
357
  llava_next/lib/python3.10/site-packages/scipy/optimize/_pava_pybind.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
358
  llava_next/lib/python3.10/site-packages/scipy/signal/__pycache__/_filter_design.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
359
  llava_next/lib/python3.10/site-packages/scipy/optimize/_cobyla.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
360
+ llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
361
+ llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
362
+ parrot/lib/python3.10/site-packages/torch/testing/_internal/generated/__pycache__/annotated_fn_args.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_constants.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (36.1 kB). View file
 
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/_highs_wrapper.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb8b438acf50232ba67a9d01e3c922c882b37781adeed0d37ccc86bfae81f325
3
+ size 4111920
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HConst.pxd ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # cython: language_level=3
2
+
3
+ from libcpp cimport bool
4
+ from libcpp.string cimport string
5
+
6
+ cdef extern from "HConst.h" nogil:
7
+
8
+ const int HIGHS_CONST_I_INF "kHighsIInf"
9
+ const double HIGHS_CONST_INF "kHighsInf"
10
+ const double kHighsTiny
11
+ const double kHighsZero
12
+ const int kHighsThreadLimit
13
+
14
+ cdef enum HighsDebugLevel:
15
+ HighsDebugLevel_kHighsDebugLevelNone "kHighsDebugLevelNone" = 0
16
+ HighsDebugLevel_kHighsDebugLevelCheap "kHighsDebugLevelCheap"
17
+ HighsDebugLevel_kHighsDebugLevelCostly "kHighsDebugLevelCostly"
18
+ HighsDebugLevel_kHighsDebugLevelExpensive "kHighsDebugLevelExpensive"
19
+ HighsDebugLevel_kHighsDebugLevelMin "kHighsDebugLevelMin" = HighsDebugLevel_kHighsDebugLevelNone
20
+ HighsDebugLevel_kHighsDebugLevelMax "kHighsDebugLevelMax" = HighsDebugLevel_kHighsDebugLevelExpensive
21
+
22
+ ctypedef enum HighsModelStatus:
23
+ HighsModelStatusNOTSET "HighsModelStatus::kNotset" = 0
24
+ HighsModelStatusLOAD_ERROR "HighsModelStatus::kLoadError"
25
+ HighsModelStatusMODEL_ERROR "HighsModelStatus::kModelError"
26
+ HighsModelStatusPRESOLVE_ERROR "HighsModelStatus::kPresolveError"
27
+ HighsModelStatusSOLVE_ERROR "HighsModelStatus::kSolveError"
28
+ HighsModelStatusPOSTSOLVE_ERROR "HighsModelStatus::kPostsolveError"
29
+ HighsModelStatusMODEL_EMPTY "HighsModelStatus::kModelEmpty"
30
+ HighsModelStatusOPTIMAL "HighsModelStatus::kOptimal"
31
+ HighsModelStatusINFEASIBLE "HighsModelStatus::kInfeasible"
32
+ HighsModelStatus_UNBOUNDED_OR_INFEASIBLE "HighsModelStatus::kUnboundedOrInfeasible"
33
+ HighsModelStatusUNBOUNDED "HighsModelStatus::kUnbounded"
34
+ HighsModelStatusREACHED_DUAL_OBJECTIVE_VALUE_UPPER_BOUND "HighsModelStatus::kObjectiveBound"
35
+ HighsModelStatusREACHED_OBJECTIVE_TARGET "HighsModelStatus::kObjectiveTarget"
36
+ HighsModelStatusREACHED_TIME_LIMIT "HighsModelStatus::kTimeLimit"
37
+ HighsModelStatusREACHED_ITERATION_LIMIT "HighsModelStatus::kIterationLimit"
38
+ HighsModelStatusUNKNOWN "HighsModelStatus::kUnknown"
39
+ HighsModelStatusHIGHS_MODEL_STATUS_MIN "HighsModelStatus::kMin" = HighsModelStatusNOTSET
40
+ HighsModelStatusHIGHS_MODEL_STATUS_MAX "HighsModelStatus::kMax" = HighsModelStatusUNKNOWN
41
+
42
+ cdef enum HighsBasisStatus:
43
+ HighsBasisStatusLOWER "HighsBasisStatus::kLower" = 0, # (slack) variable is at its lower bound [including fixed variables]
44
+ HighsBasisStatusBASIC "HighsBasisStatus::kBasic" # (slack) variable is basic
45
+ HighsBasisStatusUPPER "HighsBasisStatus::kUpper" # (slack) variable is at its upper bound
46
+ HighsBasisStatusZERO "HighsBasisStatus::kZero" # free variable is non-basic and set to zero
47
+ HighsBasisStatusNONBASIC "HighsBasisStatus::kNonbasic" # nonbasic with no specific bound information - useful for users and postsolve
48
+
49
+ cdef enum SolverOption:
50
+ SOLVER_OPTION_SIMPLEX "SolverOption::SOLVER_OPTION_SIMPLEX" = -1
51
+ SOLVER_OPTION_CHOOSE "SolverOption::SOLVER_OPTION_CHOOSE"
52
+ SOLVER_OPTION_IPM "SolverOption::SOLVER_OPTION_IPM"
53
+
54
+ cdef enum PrimalDualStatus:
55
+ PrimalDualStatusSTATUS_NOT_SET "PrimalDualStatus::STATUS_NOT_SET" = -1
56
+ PrimalDualStatusSTATUS_MIN "PrimalDualStatus::STATUS_MIN" = PrimalDualStatusSTATUS_NOT_SET
57
+ PrimalDualStatusSTATUS_NO_SOLUTION "PrimalDualStatus::STATUS_NO_SOLUTION"
58
+ PrimalDualStatusSTATUS_UNKNOWN "PrimalDualStatus::STATUS_UNKNOWN"
59
+ PrimalDualStatusSTATUS_INFEASIBLE_POINT "PrimalDualStatus::STATUS_INFEASIBLE_POINT"
60
+ PrimalDualStatusSTATUS_FEASIBLE_POINT "PrimalDualStatus::STATUS_FEASIBLE_POINT"
61
+ PrimalDualStatusSTATUS_MAX "PrimalDualStatus::STATUS_MAX" = PrimalDualStatusSTATUS_FEASIBLE_POINT
62
+
63
+ cdef enum HighsOptionType:
64
+ HighsOptionTypeBOOL "HighsOptionType::kBool" = 0
65
+ HighsOptionTypeINT "HighsOptionType::kInt"
66
+ HighsOptionTypeDOUBLE "HighsOptionType::kDouble"
67
+ HighsOptionTypeSTRING "HighsOptionType::kString"
68
+
69
+ # workaround for lack of enum class support in Cython < 3.x
70
+ # cdef enum class ObjSense(int):
71
+ # ObjSenseMINIMIZE "ObjSense::kMinimize" = 1
72
+ # ObjSenseMAXIMIZE "ObjSense::kMaximize" = -1
73
+
74
+ cdef cppclass ObjSense:
75
+ pass
76
+
77
+ cdef ObjSense ObjSenseMINIMIZE "ObjSense::kMinimize"
78
+ cdef ObjSense ObjSenseMAXIMIZE "ObjSense::kMaximize"
79
+
80
+ # cdef enum class MatrixFormat(int):
81
+ # MatrixFormatkColwise "MatrixFormat::kColwise" = 1
82
+ # MatrixFormatkRowwise "MatrixFormat::kRowwise"
83
+ # MatrixFormatkRowwisePartitioned "MatrixFormat::kRowwisePartitioned"
84
+
85
+ cdef cppclass MatrixFormat:
86
+ pass
87
+
88
+ cdef MatrixFormat MatrixFormatkColwise "MatrixFormat::kColwise"
89
+ cdef MatrixFormat MatrixFormatkRowwise "MatrixFormat::kRowwise"
90
+ cdef MatrixFormat MatrixFormatkRowwisePartitioned "MatrixFormat::kRowwisePartitioned"
91
+
92
+ # cdef enum class HighsVarType(int):
93
+ # kContinuous "HighsVarType::kContinuous"
94
+ # kInteger "HighsVarType::kInteger"
95
+ # kSemiContinuous "HighsVarType::kSemiContinuous"
96
+ # kSemiInteger "HighsVarType::kSemiInteger"
97
+ # kImplicitInteger "HighsVarType::kImplicitInteger"
98
+
99
+ cdef cppclass HighsVarType:
100
+ pass
101
+
102
+ cdef HighsVarType kContinuous "HighsVarType::kContinuous"
103
+ cdef HighsVarType kInteger "HighsVarType::kInteger"
104
+ cdef HighsVarType kSemiContinuous "HighsVarType::kSemiContinuous"
105
+ cdef HighsVarType kSemiInteger "HighsVarType::kSemiInteger"
106
+ cdef HighsVarType kImplicitInteger "HighsVarType::kImplicitInteger"
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/Highs.pxd ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # cython: language_level=3
2
+
3
+ from libc.stdio cimport FILE
4
+
5
+ from libcpp cimport bool
6
+ from libcpp.string cimport string
7
+
8
+ from .HighsStatus cimport HighsStatus
9
+ from .HighsOptions cimport HighsOptions
10
+ from .HighsInfo cimport HighsInfo
11
+ from .HighsLp cimport (
12
+ HighsLp,
13
+ HighsSolution,
14
+ HighsBasis,
15
+ ObjSense,
16
+ )
17
+ from .HConst cimport HighsModelStatus
18
+
19
+ cdef extern from "Highs.h":
20
+ # From HiGHS/src/Highs.h
21
+ cdef cppclass Highs:
22
+ HighsStatus passHighsOptions(const HighsOptions& options)
23
+ HighsStatus passModel(const HighsLp& lp)
24
+ HighsStatus run()
25
+ HighsStatus setHighsLogfile(FILE* logfile)
26
+ HighsStatus setHighsOutput(FILE* output)
27
+ HighsStatus writeHighsOptions(const string filename, const bool report_only_non_default_values = true)
28
+
29
+ # split up for cython below
30
+ #const HighsModelStatus& getModelStatus(const bool scaled_model = False) const
31
+ const HighsModelStatus & getModelStatus() const
32
+
33
+ const HighsInfo& getHighsInfo "getInfo" () const
34
+ string modelStatusToString(const HighsModelStatus model_status) const
35
+ #HighsStatus getHighsInfoValue(const string& info, int& value)
36
+ HighsStatus getHighsInfoValue(const string& info, double& value) const
37
+ const HighsOptions& getHighsOptions() const
38
+
39
+ const HighsLp& getLp() const
40
+
41
+ HighsStatus writeSolution(const string filename, const bool pretty) const
42
+
43
+ HighsStatus setBasis()
44
+ const HighsSolution& getSolution() const
45
+ const HighsBasis& getBasis() const
46
+
47
+ bool changeObjectiveSense(const ObjSense sense)
48
+
49
+ HighsStatus setHighsOptionValueBool "setOptionValue" (const string & option, const bool value)
50
+ HighsStatus setHighsOptionValueInt "setOptionValue" (const string & option, const int value)
51
+ HighsStatus setHighsOptionValueStr "setOptionValue" (const string & option, const string & value)
52
+ HighsStatus setHighsOptionValueDbl "setOptionValue" (const string & option, const double value)
53
+
54
+ string primalDualStatusToString(const int primal_dual_status)
55
+
56
+ void resetGlobalScheduler(bool blocking)
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsLpUtils.pxd ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # cython: language_level=3
2
+
3
+ from .HighsStatus cimport HighsStatus
4
+ from .HighsLp cimport HighsLp
5
+ from .HighsOptions cimport HighsOptions
6
+
7
+ cdef extern from "HighsLpUtils.h" nogil:
8
+ # From HiGHS/src/lp_data/HighsLpUtils.h
9
+ HighsStatus assessLp(HighsLp& lp, const HighsOptions& options)
llava_next/lib/python3.10/site-packages/scipy/optimize/_highs/src/cython/HighsRuntimeOptions.pxd ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # cython: language_level=3
2
+
3
+ from libcpp cimport bool
4
+
5
+ from .HighsOptions cimport HighsOptions
6
+
7
+ cdef extern from "HighsRuntimeOptions.h" nogil:
8
+ # From HiGHS/src/lp_data/HighsRuntimeOptions.h
9
+ bool loadOptions(int argc, char** argv, HighsOptions& options)
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (336 Bytes). View file
 
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/common.cpython-310.pyc ADDED
Binary file (19.5 kB). View file
 
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/dogbox.cpython-310.pyc ADDED
Binary file (8.19 kB). View file
 
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/least_squares.cpython-310.pyc ADDED
Binary file (36.6 kB). View file
 
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/lsq_linear.cpython-310.pyc ADDED
Binary file (14 kB). View file
 
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf.cpython-310.pyc ADDED
Binary file (12.1 kB). View file
 
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/__pycache__/trf_linear.cpython-310.pyc ADDED
Binary file (5.64 kB). View file
 
llava_next/lib/python3.10/site-packages/scipy/optimize/_lsq/givens_elimination.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8105ab7b716213b672fcb44231c05c893d0f084016f877228b1673a454d689fa
3
+ size 231792
llava_next/lib/python3.10/site-packages/scipy/optimize/_shgo_lib/_complex.py ADDED
@@ -0,0 +1,1225 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Base classes for low memory simplicial complex structures."""
2
+ import copy
3
+ import logging
4
+ import itertools
5
+ import decimal
6
+ from functools import cache
7
+
8
+ import numpy as np
9
+
10
+ from ._vertex import (VertexCacheField, VertexCacheIndex)
11
+
12
+
13
+ class Complex:
14
+ """
15
+ Base class for a simplicial complex described as a cache of vertices
16
+ together with their connections.
17
+
18
+ Important methods:
19
+ Domain triangulation:
20
+ Complex.triangulate, Complex.split_generation
21
+ Triangulating arbitrary points (must be traingulable,
22
+ may exist outside domain):
23
+ Complex.triangulate(sample_set)
24
+ Converting another simplicial complex structure data type to the
25
+ structure used in Complex (ex. OBJ wavefront)
26
+ Complex.convert(datatype, data)
27
+
28
+ Important objects:
29
+ HC.V: The cache of vertices and their connection
30
+ HC.H: Storage structure of all vertex groups
31
+
32
+ Parameters
33
+ ----------
34
+ dim : int
35
+ Spatial dimensionality of the complex R^dim
36
+ domain : list of tuples, optional
37
+ The bounds [x_l, x_u]^dim of the hyperrectangle space
38
+ ex. The default domain is the hyperrectangle [0, 1]^dim
39
+ Note: The domain must be convex, non-convex spaces can be cut
40
+ away from this domain using the non-linear
41
+ g_cons functions to define any arbitrary domain
42
+ (these domains may also be disconnected from each other)
43
+ sfield :
44
+ A scalar function defined in the associated domain f: R^dim --> R
45
+ sfield_args : tuple
46
+ Additional arguments to be passed to `sfield`
47
+ vfield :
48
+ A scalar function defined in the associated domain
49
+ f: R^dim --> R^m
50
+ (for example a gradient function of the scalar field)
51
+ vfield_args : tuple
52
+ Additional arguments to be passed to vfield
53
+ symmetry : None or list
54
+ Specify if the objective function contains symmetric variables.
55
+ The search space (and therefore performance) is decreased by up to
56
+ O(n!) times in the fully symmetric case.
57
+
58
+ E.g. f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2
59
+
60
+ In this equation x_2 and x_3 are symmetric to x_1, while x_5 and
61
+ x_6 are symmetric to x_4, this can be specified to the solver as:
62
+
63
+ symmetry = [0, # Variable 1
64
+ 0, # symmetric to variable 1
65
+ 0, # symmetric to variable 1
66
+ 3, # Variable 4
67
+ 3, # symmetric to variable 4
68
+ 3, # symmetric to variable 4
69
+ ]
70
+
71
+ constraints : dict or sequence of dict, optional
72
+ Constraints definition.
73
+ Function(s) ``R**n`` in the form::
74
+
75
+ g(x) <= 0 applied as g : R^n -> R^m
76
+ h(x) == 0 applied as h : R^n -> R^p
77
+
78
+ Each constraint is defined in a dictionary with fields:
79
+
80
+ type : str
81
+ Constraint type: 'eq' for equality, 'ineq' for inequality.
82
+ fun : callable
83
+ The function defining the constraint.
84
+ jac : callable, optional
85
+ The Jacobian of `fun` (only for SLSQP).
86
+ args : sequence, optional
87
+ Extra arguments to be passed to the function and Jacobian.
88
+
89
+ Equality constraint means that the constraint function result is to
90
+ be zero whereas inequality means that it is to be
91
+ non-negative.constraints : dict or sequence of dict, optional
92
+ Constraints definition.
93
+ Function(s) ``R**n`` in the form::
94
+
95
+ g(x) <= 0 applied as g : R^n -> R^m
96
+ h(x) == 0 applied as h : R^n -> R^p
97
+
98
+ Each constraint is defined in a dictionary with fields:
99
+
100
+ type : str
101
+ Constraint type: 'eq' for equality, 'ineq' for inequality.
102
+ fun : callable
103
+ The function defining the constraint.
104
+ jac : callable, optional
105
+ The Jacobian of `fun` (unused).
106
+ args : sequence, optional
107
+ Extra arguments to be passed to the function and Jacobian.
108
+
109
+ Equality constraint means that the constraint function result is to
110
+ be zero whereas inequality means that it is to be non-negative.
111
+
112
+ workers : int optional
113
+ Uses `multiprocessing.Pool <multiprocessing>`) to compute the field
114
+ functions in parallel.
115
+ """
116
+ def __init__(self, dim, domain=None, sfield=None, sfield_args=(),
117
+ symmetry=None, constraints=None, workers=1):
118
+ self.dim = dim
119
+
120
+ # Domains
121
+ self.domain = domain
122
+ if domain is None:
123
+ self.bounds = [(0.0, 1.0), ] * dim
124
+ else:
125
+ self.bounds = domain
126
+ self.symmetry = symmetry
127
+ # here in init to avoid if checks
128
+
129
+ # Field functions
130
+ self.sfield = sfield
131
+ self.sfield_args = sfield_args
132
+
133
+ # Process constraints
134
+ # Constraints
135
+ # Process constraint dict sequence:
136
+ if constraints is not None:
137
+ self.min_cons = constraints
138
+ self.g_cons = []
139
+ self.g_args = []
140
+ if not isinstance(constraints, (tuple, list)):
141
+ constraints = (constraints,)
142
+
143
+ for cons in constraints:
144
+ if cons['type'] in ('ineq'):
145
+ self.g_cons.append(cons['fun'])
146
+ try:
147
+ self.g_args.append(cons['args'])
148
+ except KeyError:
149
+ self.g_args.append(())
150
+ self.g_cons = tuple(self.g_cons)
151
+ self.g_args = tuple(self.g_args)
152
+ else:
153
+ self.g_cons = None
154
+ self.g_args = None
155
+
156
+ # Homology properties
157
+ self.gen = 0
158
+ self.perm_cycle = 0
159
+
160
+ # Every cell is stored in a list of its generation,
161
+ # ex. the initial cell is stored in self.H[0]
162
+ # 1st get new cells are stored in self.H[1] etc.
163
+ # When a cell is sub-generated it is removed from this list
164
+
165
+ self.H = [] # Storage structure of vertex groups
166
+
167
+ # Cache of all vertices
168
+ if (sfield is not None) or (self.g_cons is not None):
169
+ # Initiate a vertex cache and an associated field cache, note that
170
+ # the field case is always initiated inside the vertex cache if an
171
+ # associated field scalar field is defined:
172
+ if sfield is not None:
173
+ self.V = VertexCacheField(field=sfield, field_args=sfield_args,
174
+ g_cons=self.g_cons,
175
+ g_cons_args=self.g_args,
176
+ workers=workers)
177
+ elif self.g_cons is not None:
178
+ self.V = VertexCacheField(field=sfield, field_args=sfield_args,
179
+ g_cons=self.g_cons,
180
+ g_cons_args=self.g_args,
181
+ workers=workers)
182
+ else:
183
+ self.V = VertexCacheIndex()
184
+
185
+ self.V_non_symm = [] # List of non-symmetric vertices
186
+
187
+ def __call__(self):
188
+ return self.H
189
+
190
+ # %% Triangulation methods
191
+ def cyclic_product(self, bounds, origin, supremum, centroid=True):
192
+ """Generate initial triangulation using cyclic product"""
193
+ # Define current hyperrectangle
194
+ vot = tuple(origin)
195
+ vut = tuple(supremum) # Hyperrectangle supremum
196
+ self.V[vot]
197
+ vo = self.V[vot]
198
+ yield vo.x
199
+ self.V[vut].connect(self.V[vot])
200
+ yield vut
201
+ # Cyclic group approach with second x_l --- x_u operation.
202
+
203
+ # These containers store the "lower" and "upper" vertices
204
+ # corresponding to the origin or supremum of every C2 group.
205
+ # It has the structure of `dim` times embedded lists each containing
206
+ # these vertices as the entire complex grows. Bounds[0] has to be done
207
+ # outside the loops before we have symmetric containers.
208
+ # NOTE: This means that bounds[0][1] must always exist
209
+ C0x = [[self.V[vot]]]
210
+ a_vo = copy.copy(list(origin))
211
+ a_vo[0] = vut[0] # Update aN Origin
212
+ a_vo = self.V[tuple(a_vo)]
213
+ # self.V[vot].connect(self.V[tuple(a_vo)])
214
+ self.V[vot].connect(a_vo)
215
+ yield a_vo.x
216
+ C1x = [[a_vo]]
217
+ # C1x = [[self.V[tuple(a_vo)]]]
218
+ ab_C = [] # Container for a + b operations
219
+
220
+ # Loop over remaining bounds
221
+ for i, x in enumerate(bounds[1:]):
222
+ # Update lower and upper containers
223
+ C0x.append([])
224
+ C1x.append([])
225
+ # try to access a second bound (if not, C1 is symmetric)
226
+ try:
227
+ # Early try so that we don't have to copy the cache before
228
+ # moving on to next C1/C2: Try to add the operation of a new
229
+ # C2 product by accessing the upper bound
230
+ x[1]
231
+ # Copy lists for iteration
232
+ cC0x = [x[:] for x in C0x[:i + 1]]
233
+ cC1x = [x[:] for x in C1x[:i + 1]]
234
+ for j, (VL, VU) in enumerate(zip(cC0x, cC1x)):
235
+ for k, (vl, vu) in enumerate(zip(VL, VU)):
236
+ # Build aN vertices for each lower-upper pair in N:
237
+ a_vl = list(vl.x)
238
+ a_vu = list(vu.x)
239
+ a_vl[i + 1] = vut[i + 1]
240
+ a_vu[i + 1] = vut[i + 1]
241
+ a_vl = self.V[tuple(a_vl)]
242
+
243
+ # Connect vertices in N to corresponding vertices
244
+ # in aN:
245
+ vl.connect(a_vl)
246
+
247
+ yield a_vl.x
248
+
249
+ a_vu = self.V[tuple(a_vu)]
250
+ # Connect vertices in N to corresponding vertices
251
+ # in aN:
252
+ vu.connect(a_vu)
253
+
254
+ # Connect new vertex pair in aN:
255
+ a_vl.connect(a_vu)
256
+
257
+ # Connect lower pair to upper (triangulation
258
+ # operation of a + b (two arbitrary operations):
259
+ vl.connect(a_vu)
260
+ ab_C.append((vl, a_vu))
261
+
262
+ # Update the containers
263
+ C0x[i + 1].append(vl)
264
+ C0x[i + 1].append(vu)
265
+ C1x[i + 1].append(a_vl)
266
+ C1x[i + 1].append(a_vu)
267
+
268
+ # Update old containers
269
+ C0x[j].append(a_vl)
270
+ C1x[j].append(a_vu)
271
+
272
+ # Yield new points
273
+ yield a_vu.x
274
+
275
+ # Try to connect aN lower source of previous a + b
276
+ # operation with a aN vertex
277
+ ab_Cc = copy.copy(ab_C)
278
+
279
+ for vp in ab_Cc:
280
+ b_v = list(vp[0].x)
281
+ ab_v = list(vp[1].x)
282
+ b_v[i + 1] = vut[i + 1]
283
+ ab_v[i + 1] = vut[i + 1]
284
+ b_v = self.V[tuple(b_v)] # b + vl
285
+ ab_v = self.V[tuple(ab_v)] # b + a_vl
286
+ # Note o---o is already connected
287
+ vp[0].connect(ab_v) # o-s
288
+ b_v.connect(ab_v) # s-s
289
+
290
+ # Add new list of cross pairs
291
+ ab_C.append((vp[0], ab_v))
292
+ ab_C.append((b_v, ab_v))
293
+
294
+ except IndexError:
295
+ cC0x = C0x[i]
296
+ cC1x = C1x[i]
297
+ VL, VU = cC0x, cC1x
298
+ for k, (vl, vu) in enumerate(zip(VL, VU)):
299
+ # Build aN vertices for each lower-upper pair in N:
300
+ a_vu = list(vu.x)
301
+ a_vu[i + 1] = vut[i + 1]
302
+ # Connect vertices in N to corresponding vertices
303
+ # in aN:
304
+ a_vu = self.V[tuple(a_vu)]
305
+ # Connect vertices in N to corresponding vertices
306
+ # in aN:
307
+ vu.connect(a_vu)
308
+ # Connect new vertex pair in aN:
309
+ # a_vl.connect(a_vu)
310
+ # Connect lower pair to upper (triangulation
311
+ # operation of a + b (two arbitrary operations):
312
+ vl.connect(a_vu)
313
+ ab_C.append((vl, a_vu))
314
+ C0x[i + 1].append(vu)
315
+ C1x[i + 1].append(a_vu)
316
+ # Yield new points
317
+ a_vu.connect(self.V[vut])
318
+ yield a_vu.x
319
+ ab_Cc = copy.copy(ab_C)
320
+ for vp in ab_Cc:
321
+ if vp[1].x[i] == vut[i]:
322
+ ab_v = list(vp[1].x)
323
+ ab_v[i + 1] = vut[i + 1]
324
+ ab_v = self.V[tuple(ab_v)] # b + a_vl
325
+ # Note o---o is already connected
326
+ vp[0].connect(ab_v) # o-s
327
+
328
+ # Add new list of cross pairs
329
+ ab_C.append((vp[0], ab_v))
330
+
331
+ # Clean class trash
332
+ try:
333
+ del C0x
334
+ del cC0x
335
+ del C1x
336
+ del cC1x
337
+ del ab_C
338
+ del ab_Cc
339
+ except UnboundLocalError:
340
+ pass
341
+
342
+ # Extra yield to ensure that the triangulation is completed
343
+ if centroid:
344
+ vo = self.V[vot]
345
+ vs = self.V[vut]
346
+ # Disconnect the origin and supremum
347
+ vo.disconnect(vs)
348
+ # Build centroid
349
+ vc = self.split_edge(vot, vut)
350
+ for v in vo.nn:
351
+ v.connect(vc)
352
+ yield vc.x
353
+ return vc.x
354
+ else:
355
+ yield vut
356
+ return vut
357
+
358
+ def triangulate(self, n=None, symmetry=None, centroid=True,
359
+ printout=False):
360
+ """
361
+ Triangulate the initial domain, if n is not None then a limited number
362
+ of points will be generated
363
+
364
+ Parameters
365
+ ----------
366
+ n : int, Number of points to be sampled.
367
+ symmetry :
368
+
369
+ Ex. Dictionary/hashtable
370
+ f(x) = (x_1 + x_2 + x_3) + (x_4)**2 + (x_5)**2 + (x_6)**2
371
+
372
+ symmetry = symmetry[0]: 0, # Variable 1
373
+ symmetry[1]: 0, # symmetric to variable 1
374
+ symmetry[2]: 0, # symmetric to variable 1
375
+ symmetry[3]: 3, # Variable 4
376
+ symmetry[4]: 3, # symmetric to variable 4
377
+ symmetry[5]: 3, # symmetric to variable 4
378
+ }
379
+ centroid : bool, if True add a central point to the hypercube
380
+ printout : bool, if True print out results
381
+
382
+ NOTES:
383
+ ------
384
+ Rather than using the combinatorial algorithm to connect vertices we
385
+ make the following observation:
386
+
387
+ The bound pairs are similar a C2 cyclic group and the structure is
388
+ formed using the cartesian product:
389
+
390
+ H = C2 x C2 x C2 ... x C2 (dim times)
391
+
392
+ So construct any normal subgroup N and consider H/N first, we connect
393
+ all vertices within N (ex. N is C2 (the first dimension), then we move
394
+ to a left coset aN (an operation moving around the defined H/N group by
395
+ for example moving from the lower bound in C2 (dimension 2) to the
396
+ higher bound in C2. During this operation connection all the vertices.
397
+ Now repeat the N connections. Note that these elements can be connected
398
+ in parallel.
399
+ """
400
+ # Inherit class arguments
401
+ if symmetry is None:
402
+ symmetry = self.symmetry
403
+ # Build origin and supremum vectors
404
+ origin = [i[0] for i in self.bounds]
405
+ self.origin = origin
406
+ supremum = [i[1] for i in self.bounds]
407
+
408
+ self.supremum = supremum
409
+
410
+ if symmetry is None:
411
+ cbounds = self.bounds
412
+ else:
413
+ cbounds = copy.copy(self.bounds)
414
+ for i, j in enumerate(symmetry):
415
+ if i is not j:
416
+ # pop second entry on second symmetry vars
417
+ cbounds[i] = [self.bounds[symmetry[i]][0]]
418
+ # Sole (first) entry is the sup value and there is no
419
+ # origin:
420
+ cbounds[i] = [self.bounds[symmetry[i]][1]]
421
+ if (self.bounds[symmetry[i]] is not
422
+ self.bounds[symmetry[j]]):
423
+ logging.warning(f"Variable {i} was specified as "
424
+ f"symmetetric to variable {j}, however"
425
+ f", the bounds {i} ="
426
+ f" {self.bounds[symmetry[i]]} and {j}"
427
+ f" ="
428
+ f" {self.bounds[symmetry[j]]} do not "
429
+ f"match, the mismatch was ignored in "
430
+ f"the initial triangulation.")
431
+ cbounds[i] = self.bounds[symmetry[j]]
432
+
433
+ if n is None:
434
+ # Build generator
435
+ self.cp = self.cyclic_product(cbounds, origin, supremum, centroid)
436
+ for i in self.cp:
437
+ i
438
+
439
+ try:
440
+ self.triangulated_vectors.append((tuple(self.origin),
441
+ tuple(self.supremum)))
442
+ except (AttributeError, KeyError):
443
+ self.triangulated_vectors = [(tuple(self.origin),
444
+ tuple(self.supremum))]
445
+
446
+ else:
447
+ # Check if generator already exists
448
+ try:
449
+ self.cp
450
+ except (AttributeError, KeyError):
451
+ self.cp = self.cyclic_product(cbounds, origin, supremum,
452
+ centroid)
453
+
454
+ try:
455
+ while len(self.V.cache) < n:
456
+ next(self.cp)
457
+ except StopIteration:
458
+ try:
459
+ self.triangulated_vectors.append((tuple(self.origin),
460
+ tuple(self.supremum)))
461
+ except (AttributeError, KeyError):
462
+ self.triangulated_vectors = [(tuple(self.origin),
463
+ tuple(self.supremum))]
464
+
465
+ if printout:
466
+ # for v in self.C0():
467
+ # v.print_out()
468
+ for v in self.V.cache:
469
+ self.V[v].print_out()
470
+
471
+ return
472
+
473
+ def refine(self, n=1):
474
+ if n is None:
475
+ try:
476
+ self.triangulated_vectors
477
+ self.refine_all()
478
+ return
479
+ except AttributeError as ae:
480
+ if str(ae) == "'Complex' object has no attribute " \
481
+ "'triangulated_vectors'":
482
+ self.triangulate(symmetry=self.symmetry)
483
+ return
484
+ else:
485
+ raise
486
+
487
+ nt = len(self.V.cache) + n # Target number of total vertices
488
+ # In the outer while loop we iterate until we have added an extra `n`
489
+ # vertices to the complex:
490
+ while len(self.V.cache) < nt: # while loop 1
491
+ try: # try 1
492
+ # Try to access triangulated_vectors, this should only be
493
+ # defined if an initial triangulation has already been
494
+ # performed:
495
+ self.triangulated_vectors
496
+ # Try a usual iteration of the current generator, if it
497
+ # does not exist or is exhausted then produce a new generator
498
+ try: # try 2
499
+ next(self.rls)
500
+ except (AttributeError, StopIteration, KeyError):
501
+ vp = self.triangulated_vectors[0]
502
+ self.rls = self.refine_local_space(*vp, bounds=self.bounds)
503
+ next(self.rls)
504
+
505
+ except (AttributeError, KeyError):
506
+ # If an initial triangulation has not been completed, then
507
+ # we start/continue the initial triangulation targeting `nt`
508
+ # vertices, if nt is greater than the initial number of
509
+ # vertices then the `refine` routine will move back to try 1.
510
+ self.triangulate(nt, self.symmetry)
511
+ return
512
+
513
+ def refine_all(self, centroids=True):
514
+ """Refine the entire domain of the current complex."""
515
+ try:
516
+ self.triangulated_vectors
517
+ tvs = copy.copy(self.triangulated_vectors)
518
+ for i, vp in enumerate(tvs):
519
+ self.rls = self.refine_local_space(*vp, bounds=self.bounds)
520
+ for i in self.rls:
521
+ i
522
+ except AttributeError as ae:
523
+ if str(ae) == "'Complex' object has no attribute " \
524
+ "'triangulated_vectors'":
525
+ self.triangulate(symmetry=self.symmetry, centroid=centroids)
526
+ else:
527
+ raise
528
+
529
+ # This adds a centroid to every new sub-domain generated and defined
530
+ # by self.triangulated_vectors, in addition the vertices ! to complete
531
+ # the triangulation
532
+ return
533
+
534
+ def refine_local_space(self, origin, supremum, bounds, centroid=1):
535
+ # Copy for later removal
536
+ origin_c = copy.copy(origin)
537
+ supremum_c = copy.copy(supremum)
538
+
539
+ # Initiate local variables redefined in later inner `for` loop:
540
+ vl, vu, a_vu = None, None, None
541
+
542
+ # Change the vector orientation so that it is only increasing
543
+ s_ov = list(origin)
544
+ s_origin = list(origin)
545
+ s_sv = list(supremum)
546
+ s_supremum = list(supremum)
547
+ for i, vi in enumerate(s_origin):
548
+ if s_ov[i] > s_sv[i]:
549
+ s_origin[i] = s_sv[i]
550
+ s_supremum[i] = s_ov[i]
551
+
552
+ vot = tuple(s_origin)
553
+ vut = tuple(s_supremum) # Hyperrectangle supremum
554
+
555
+ vo = self.V[vot] # initiate if doesn't exist yet
556
+ vs = self.V[vut]
557
+ # Start by finding the old centroid of the new space:
558
+ vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg
559
+
560
+ # Find set of extreme vertices in current local space
561
+ sup_set = copy.copy(vco.nn)
562
+ # Cyclic group approach with second x_l --- x_u operation.
563
+
564
+ # These containers store the "lower" and "upper" vertices
565
+ # corresponding to the origin or supremum of every C2 group.
566
+ # It has the structure of `dim` times embedded lists each containing
567
+ # these vertices as the entire complex grows. Bounds[0] has to be done
568
+ # outside the loops before we have symmetric containers.
569
+ # NOTE: This means that bounds[0][1] must always exist
570
+
571
+ a_vl = copy.copy(list(vot))
572
+ a_vl[0] = vut[0] # Update aN Origin
573
+ if tuple(a_vl) not in self.V.cache:
574
+ vo = self.V[vot] # initiate if doesn't exist yet
575
+ vs = self.V[vut]
576
+ # Start by finding the old centroid of the new space:
577
+ vco = self.split_edge(vo.x, vs.x) # Split in case not centroid arg
578
+
579
+ # Find set of extreme vertices in current local space
580
+ sup_set = copy.copy(vco.nn)
581
+ a_vl = copy.copy(list(vot))
582
+ a_vl[0] = vut[0] # Update aN Origin
583
+ a_vl = self.V[tuple(a_vl)]
584
+ else:
585
+ a_vl = self.V[tuple(a_vl)]
586
+
587
+ c_v = self.split_edge(vo.x, a_vl.x)
588
+ c_v.connect(vco)
589
+ yield c_v.x
590
+ Cox = [[vo]]
591
+ Ccx = [[c_v]]
592
+ Cux = [[a_vl]]
593
+ ab_C = [] # Container for a + b operations
594
+ s_ab_C = [] # Container for symmetric a + b operations
595
+
596
+ # Loop over remaining bounds
597
+ for i, x in enumerate(bounds[1:]):
598
+ # Update lower and upper containers
599
+ Cox.append([])
600
+ Ccx.append([])
601
+ Cux.append([])
602
+ # try to access a second bound (if not, C1 is symmetric)
603
+ try:
604
+ t_a_vl = list(vot)
605
+ t_a_vl[i + 1] = vut[i + 1]
606
+
607
+ # New: lists are used anyway, so copy all
608
+ # %%
609
+ # Copy lists for iteration
610
+ cCox = [x[:] for x in Cox[:i + 1]]
611
+ cCcx = [x[:] for x in Ccx[:i + 1]]
612
+ cCux = [x[:] for x in Cux[:i + 1]]
613
+ # Try to connect aN lower source of previous a + b
614
+ # operation with a aN vertex
615
+ ab_Cc = copy.copy(ab_C) # NOTE: We append ab_C in the
616
+ # (VL, VC, VU) for-loop, but we use the copy of the list in the
617
+ # ab_Cc for-loop.
618
+ s_ab_Cc = copy.copy(s_ab_C)
619
+
620
+ # Early try so that we don't have to copy the cache before
621
+ # moving on to next C1/C2: Try to add the operation of a new
622
+ # C2 product by accessing the upper bound
623
+ if tuple(t_a_vl) not in self.V.cache:
624
+ # Raise error to continue symmetric refine
625
+ raise IndexError
626
+ t_a_vu = list(vut)
627
+ t_a_vu[i + 1] = vut[i + 1]
628
+ if tuple(t_a_vu) not in self.V.cache:
629
+ # Raise error to continue symmetric refine:
630
+ raise IndexError
631
+
632
+ for vectors in s_ab_Cc:
633
+ # s_ab_C.append([c_vc, vl, vu, a_vu])
634
+ bc_vc = list(vectors[0].x)
635
+ b_vl = list(vectors[1].x)
636
+ b_vu = list(vectors[2].x)
637
+ ba_vu = list(vectors[3].x)
638
+
639
+ bc_vc[i + 1] = vut[i + 1]
640
+ b_vl[i + 1] = vut[i + 1]
641
+ b_vu[i + 1] = vut[i + 1]
642
+ ba_vu[i + 1] = vut[i + 1]
643
+
644
+ bc_vc = self.V[tuple(bc_vc)]
645
+ bc_vc.connect(vco) # NOTE: Unneeded?
646
+ yield bc_vc
647
+
648
+ # Split to centre, call this centre group "d = 0.5*a"
649
+ d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
650
+ d_bc_vc.connect(bc_vc)
651
+ d_bc_vc.connect(vectors[1]) # Connect all to centroid
652
+ d_bc_vc.connect(vectors[2]) # Connect all to centroid
653
+ d_bc_vc.connect(vectors[3]) # Connect all to centroid
654
+ yield d_bc_vc.x
655
+ b_vl = self.V[tuple(b_vl)]
656
+ bc_vc.connect(b_vl) # Connect aN cross pairs
657
+ d_bc_vc.connect(b_vl) # Connect all to centroid
658
+
659
+ yield b_vl
660
+ b_vu = self.V[tuple(b_vu)]
661
+ bc_vc.connect(b_vu) # Connect aN cross pairs
662
+ d_bc_vc.connect(b_vu) # Connect all to centroid
663
+
664
+ b_vl_c = self.split_edge(b_vu.x, b_vl.x)
665
+ bc_vc.connect(b_vl_c)
666
+
667
+ yield b_vu
668
+ ba_vu = self.V[tuple(ba_vu)]
669
+ bc_vc.connect(ba_vu) # Connect aN cross pairs
670
+ d_bc_vc.connect(ba_vu) # Connect all to centroid
671
+
672
+ # Split the a + b edge of the initial triangulation:
673
+ os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s
674
+ ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s
675
+ b_vu_c = self.split_edge(b_vu.x, ba_vu.x)
676
+ bc_vc.connect(b_vu_c)
677
+ yield os_v.x # often equal to vco, but not always
678
+ yield ss_v.x # often equal to bc_vu, but not always
679
+ yield ba_vu
680
+ # Split remaining to centre, call this centre group
681
+ # "d = 0.5*a"
682
+ d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
683
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
684
+ yield d_bc_vc.x
685
+ d_b_vl = self.split_edge(vectors[1].x, b_vl.x)
686
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
687
+ d_bc_vc.connect(d_b_vl) # Connect dN cross pairs
688
+ yield d_b_vl.x
689
+ d_b_vu = self.split_edge(vectors[2].x, b_vu.x)
690
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
691
+ d_bc_vc.connect(d_b_vu) # Connect dN cross pairs
692
+ yield d_b_vu.x
693
+ d_ba_vu = self.split_edge(vectors[3].x, ba_vu.x)
694
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
695
+ d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs
696
+ yield d_ba_vu
697
+
698
+ # comb = [c_vc, vl, vu, a_vl, a_vu,
699
+ # bc_vc, b_vl, b_vu, ba_vl, ba_vu]
700
+ comb = [vl, vu, a_vu,
701
+ b_vl, b_vu, ba_vu]
702
+ comb_iter = itertools.combinations(comb, 2)
703
+ for vecs in comb_iter:
704
+ self.split_edge(vecs[0].x, vecs[1].x)
705
+ # Add new list of cross pairs
706
+ ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu))
707
+ ab_C.append((d_bc_vc, vl, b_vl, a_vu, ba_vu)) # = prev
708
+
709
+ for vectors in ab_Cc:
710
+ bc_vc = list(vectors[0].x)
711
+ b_vl = list(vectors[1].x)
712
+ b_vu = list(vectors[2].x)
713
+ ba_vl = list(vectors[3].x)
714
+ ba_vu = list(vectors[4].x)
715
+ bc_vc[i + 1] = vut[i + 1]
716
+ b_vl[i + 1] = vut[i + 1]
717
+ b_vu[i + 1] = vut[i + 1]
718
+ ba_vl[i + 1] = vut[i + 1]
719
+ ba_vu[i + 1] = vut[i + 1]
720
+ bc_vc = self.V[tuple(bc_vc)]
721
+ bc_vc.connect(vco) # NOTE: Unneeded?
722
+ yield bc_vc
723
+
724
+ # Split to centre, call this centre group "d = 0.5*a"
725
+ d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
726
+ d_bc_vc.connect(bc_vc)
727
+ d_bc_vc.connect(vectors[1]) # Connect all to centroid
728
+ d_bc_vc.connect(vectors[2]) # Connect all to centroid
729
+ d_bc_vc.connect(vectors[3]) # Connect all to centroid
730
+ d_bc_vc.connect(vectors[4]) # Connect all to centroid
731
+ yield d_bc_vc.x
732
+ b_vl = self.V[tuple(b_vl)]
733
+ bc_vc.connect(b_vl) # Connect aN cross pairs
734
+ d_bc_vc.connect(b_vl) # Connect all to centroid
735
+ yield b_vl
736
+ b_vu = self.V[tuple(b_vu)]
737
+ bc_vc.connect(b_vu) # Connect aN cross pairs
738
+ d_bc_vc.connect(b_vu) # Connect all to centroid
739
+ yield b_vu
740
+ ba_vl = self.V[tuple(ba_vl)]
741
+ bc_vc.connect(ba_vl) # Connect aN cross pairs
742
+ d_bc_vc.connect(ba_vl) # Connect all to centroid
743
+ self.split_edge(b_vu.x, ba_vl.x)
744
+ yield ba_vl
745
+ ba_vu = self.V[tuple(ba_vu)]
746
+ bc_vc.connect(ba_vu) # Connect aN cross pairs
747
+ d_bc_vc.connect(ba_vu) # Connect all to centroid
748
+ # Split the a + b edge of the initial triangulation:
749
+ os_v = self.split_edge(vectors[1].x, ba_vu.x) # o-s
750
+ ss_v = self.split_edge(b_vl.x, ba_vu.x) # s-s
751
+ yield os_v.x # often equal to vco, but not always
752
+ yield ss_v.x # often equal to bc_vu, but not always
753
+ yield ba_vu
754
+ # Split remaining to centre, call this centre group
755
+ # "d = 0.5*a"
756
+ d_bc_vc = self.split_edge(vectors[0].x, bc_vc.x)
757
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
758
+ yield d_bc_vc.x
759
+ d_b_vl = self.split_edge(vectors[1].x, b_vl.x)
760
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
761
+ d_bc_vc.connect(d_b_vl) # Connect dN cross pairs
762
+ yield d_b_vl.x
763
+ d_b_vu = self.split_edge(vectors[2].x, b_vu.x)
764
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
765
+ d_bc_vc.connect(d_b_vu) # Connect dN cross pairs
766
+ yield d_b_vu.x
767
+ d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x)
768
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
769
+ d_bc_vc.connect(d_ba_vl) # Connect dN cross pairs
770
+ yield d_ba_vl
771
+ d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x)
772
+ d_bc_vc.connect(vco) # NOTE: Unneeded?
773
+ d_bc_vc.connect(d_ba_vu) # Connect dN cross pairs
774
+ yield d_ba_vu
775
+ c_vc, vl, vu, a_vl, a_vu = vectors
776
+
777
+ comb = [vl, vu, a_vl, a_vu,
778
+ b_vl, b_vu, ba_vl, ba_vu]
779
+ comb_iter = itertools.combinations(comb, 2)
780
+ for vecs in comb_iter:
781
+ self.split_edge(vecs[0].x, vecs[1].x)
782
+
783
+ # Add new list of cross pairs
784
+ ab_C.append((bc_vc, b_vl, b_vu, ba_vl, ba_vu))
785
+ ab_C.append((d_bc_vc, d_b_vl, d_b_vu, d_ba_vl, d_ba_vu))
786
+ ab_C.append((d_bc_vc, vectors[1], b_vl, a_vu, ba_vu))
787
+ ab_C.append((d_bc_vc, vu, b_vu, a_vl, ba_vl))
788
+
789
+ for j, (VL, VC, VU) in enumerate(zip(cCox, cCcx, cCux)):
790
+ for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)):
791
+ # Build aN vertices for each lower-upper C3 group in N:
792
+ a_vl = list(vl.x)
793
+ a_vu = list(vu.x)
794
+ a_vl[i + 1] = vut[i + 1]
795
+ a_vu[i + 1] = vut[i + 1]
796
+ a_vl = self.V[tuple(a_vl)]
797
+ a_vu = self.V[tuple(a_vu)]
798
+ # Note, build (a + vc) later for consistent yields
799
+ # Split the a + b edge of the initial triangulation:
800
+ c_vc = self.split_edge(vl.x, a_vu.x)
801
+ self.split_edge(vl.x, vu.x) # Equal to vc
802
+ # Build cN vertices for each lower-upper C3 group in N:
803
+ c_vc.connect(vco)
804
+ c_vc.connect(vc)
805
+ c_vc.connect(vl) # Connect c + ac operations
806
+ c_vc.connect(vu) # Connect c + ac operations
807
+ c_vc.connect(a_vl) # Connect c + ac operations
808
+ c_vc.connect(a_vu) # Connect c + ac operations
809
+ yield c_vc.x
810
+ c_vl = self.split_edge(vl.x, a_vl.x)
811
+ c_vl.connect(vco)
812
+ c_vc.connect(c_vl) # Connect cN group vertices
813
+ yield c_vl.x
814
+ # yield at end of loop:
815
+ c_vu = self.split_edge(vu.x, a_vu.x)
816
+ c_vu.connect(vco)
817
+ # Connect remaining cN group vertices
818
+ c_vc.connect(c_vu) # Connect cN group vertices
819
+ yield c_vu.x
820
+
821
+ a_vc = self.split_edge(a_vl.x, a_vu.x) # is (a + vc) ?
822
+ a_vc.connect(vco)
823
+ a_vc.connect(c_vc)
824
+
825
+ # Storage for connecting c + ac operations:
826
+ ab_C.append((c_vc, vl, vu, a_vl, a_vu))
827
+
828
+ # Update the containers
829
+ Cox[i + 1].append(vl)
830
+ Cox[i + 1].append(vc)
831
+ Cox[i + 1].append(vu)
832
+ Ccx[i + 1].append(c_vl)
833
+ Ccx[i + 1].append(c_vc)
834
+ Ccx[i + 1].append(c_vu)
835
+ Cux[i + 1].append(a_vl)
836
+ Cux[i + 1].append(a_vc)
837
+ Cux[i + 1].append(a_vu)
838
+
839
+ # Update old containers
840
+ Cox[j].append(c_vl) # !
841
+ Cox[j].append(a_vl)
842
+ Ccx[j].append(c_vc) # !
843
+ Ccx[j].append(a_vc) # !
844
+ Cux[j].append(c_vu) # !
845
+ Cux[j].append(a_vu)
846
+
847
+ # Yield new points
848
+ yield a_vc.x
849
+
850
+ except IndexError:
851
+ for vectors in ab_Cc:
852
+ ba_vl = list(vectors[3].x)
853
+ ba_vu = list(vectors[4].x)
854
+ ba_vl[i + 1] = vut[i + 1]
855
+ ba_vu[i + 1] = vut[i + 1]
856
+ ba_vu = self.V[tuple(ba_vu)]
857
+ yield ba_vu
858
+ d_bc_vc = self.split_edge(vectors[1].x, ba_vu.x) # o-s
859
+ yield ba_vu
860
+ d_bc_vc.connect(vectors[1]) # Connect all to centroid
861
+ d_bc_vc.connect(vectors[2]) # Connect all to centroid
862
+ d_bc_vc.connect(vectors[3]) # Connect all to centroid
863
+ d_bc_vc.connect(vectors[4]) # Connect all to centroid
864
+ yield d_bc_vc.x
865
+ ba_vl = self.V[tuple(ba_vl)]
866
+ yield ba_vl
867
+ d_ba_vl = self.split_edge(vectors[3].x, ba_vl.x)
868
+ d_ba_vu = self.split_edge(vectors[4].x, ba_vu.x)
869
+ d_ba_vc = self.split_edge(d_ba_vl.x, d_ba_vu.x)
870
+ yield d_ba_vl
871
+ yield d_ba_vu
872
+ yield d_ba_vc
873
+ c_vc, vl, vu, a_vl, a_vu = vectors
874
+ comb = [vl, vu, a_vl, a_vu,
875
+ ba_vl,
876
+ ba_vu]
877
+ comb_iter = itertools.combinations(comb, 2)
878
+ for vecs in comb_iter:
879
+ self.split_edge(vecs[0].x, vecs[1].x)
880
+
881
+ # Copy lists for iteration
882
+ cCox = Cox[i]
883
+ cCcx = Ccx[i]
884
+ cCux = Cux[i]
885
+ VL, VC, VU = cCox, cCcx, cCux
886
+ for k, (vl, vc, vu) in enumerate(zip(VL, VC, VU)):
887
+ # Build aN vertices for each lower-upper pair in N:
888
+ a_vu = list(vu.x)
889
+ a_vu[i + 1] = vut[i + 1]
890
+
891
+ # Connect vertices in N to corresponding vertices
892
+ # in aN:
893
+ a_vu = self.V[tuple(a_vu)]
894
+ yield a_vl.x
895
+ # Split the a + b edge of the initial triangulation:
896
+ c_vc = self.split_edge(vl.x, a_vu.x)
897
+ self.split_edge(vl.x, vu.x) # Equal to vc
898
+ c_vc.connect(vco)
899
+ c_vc.connect(vc)
900
+ c_vc.connect(vl) # Connect c + ac operations
901
+ c_vc.connect(vu) # Connect c + ac operations
902
+ c_vc.connect(a_vu) # Connect c + ac operations
903
+ yield (c_vc.x)
904
+ c_vu = self.split_edge(vu.x,
905
+ a_vu.x) # yield at end of loop
906
+ c_vu.connect(vco)
907
+ # Connect remaining cN group vertices
908
+ c_vc.connect(c_vu) # Connect cN group vertices
909
+ yield (c_vu.x)
910
+
911
+ # Update the containers
912
+ Cox[i + 1].append(vu)
913
+ Ccx[i + 1].append(c_vu)
914
+ Cux[i + 1].append(a_vu)
915
+
916
+ # Update old containers
917
+ s_ab_C.append([c_vc, vl, vu, a_vu])
918
+
919
+ yield a_vu.x
920
+
921
+ # Clean class trash
922
+ try:
923
+ del Cox
924
+ del Ccx
925
+ del Cux
926
+ del ab_C
927
+ del ab_Cc
928
+ except UnboundLocalError:
929
+ pass
930
+
931
+ try:
932
+ self.triangulated_vectors.remove((tuple(origin_c),
933
+ tuple(supremum_c)))
934
+ except ValueError:
935
+ # Turn this into a logging warning?
936
+ pass
937
+ # Add newly triangulated vectors:
938
+ for vs in sup_set:
939
+ self.triangulated_vectors.append((tuple(vco.x), tuple(vs.x)))
940
+
941
+ # Extra yield to ensure that the triangulation is completed
942
+ if centroid:
943
+ vcn_set = set()
944
+ c_nn_lists = []
945
+ for vs in sup_set:
946
+ # Build centroid
947
+ c_nn = self.vpool(vco.x, vs.x)
948
+ try:
949
+ c_nn.remove(vcn_set)
950
+ except KeyError:
951
+ pass
952
+ c_nn_lists.append(c_nn)
953
+
954
+ for c_nn in c_nn_lists:
955
+ try:
956
+ c_nn.remove(vcn_set)
957
+ except KeyError:
958
+ pass
959
+
960
+ for vs, c_nn in zip(sup_set, c_nn_lists):
961
+ # Build centroid
962
+ vcn = self.split_edge(vco.x, vs.x)
963
+ vcn_set.add(vcn)
964
+ try: # Shouldn't be needed?
965
+ c_nn.remove(vcn_set)
966
+ except KeyError:
967
+ pass
968
+ for vnn in c_nn:
969
+ vcn.connect(vnn)
970
+ yield vcn.x
971
+ else:
972
+ pass
973
+
974
+ yield vut
975
+ return
976
+
977
+ def refine_star(self, v):
978
+ """Refine the star domain of a vertex `v`."""
979
+ # Copy lists before iteration
980
+ vnn = copy.copy(v.nn)
981
+ v1nn = []
982
+ d_v0v1_set = set()
983
+ for v1 in vnn:
984
+ v1nn.append(copy.copy(v1.nn))
985
+
986
+ for v1, v1nn in zip(vnn, v1nn):
987
+ vnnu = v1nn.intersection(vnn)
988
+
989
+ d_v0v1 = self.split_edge(v.x, v1.x)
990
+ for o_d_v0v1 in d_v0v1_set:
991
+ d_v0v1.connect(o_d_v0v1)
992
+ d_v0v1_set.add(d_v0v1)
993
+ for v2 in vnnu:
994
+ d_v1v2 = self.split_edge(v1.x, v2.x)
995
+ d_v0v1.connect(d_v1v2)
996
+ return
997
+
998
+ @cache
999
+ def split_edge(self, v1, v2):
1000
+ v1 = self.V[v1]
1001
+ v2 = self.V[v2]
1002
+ # Destroy original edge, if it exists:
1003
+ v1.disconnect(v2)
1004
+ # Compute vertex on centre of edge:
1005
+ try:
1006
+ vct = (v2.x_a - v1.x_a) / 2.0 + v1.x_a
1007
+ except TypeError: # Allow for decimal operations
1008
+ vct = (v2.x_a - v1.x_a) / decimal.Decimal(2.0) + v1.x_a
1009
+
1010
+ vc = self.V[tuple(vct)]
1011
+ # Connect to original 2 vertices to the new centre vertex
1012
+ vc.connect(v1)
1013
+ vc.connect(v2)
1014
+ return vc
1015
+
1016
+ def vpool(self, origin, supremum):
1017
+ vot = tuple(origin)
1018
+ vst = tuple(supremum)
1019
+ # Initiate vertices in case they don't exist
1020
+ vo = self.V[vot]
1021
+ vs = self.V[vst]
1022
+
1023
+ # Remove origin - supremum disconnect
1024
+
1025
+ # Find the lower/upper bounds of the refinement hyperrectangle
1026
+ bl = list(vot)
1027
+ bu = list(vst)
1028
+ for i, (voi, vsi) in enumerate(zip(vot, vst)):
1029
+ if bl[i] > vsi:
1030
+ bl[i] = vsi
1031
+ if bu[i] < voi:
1032
+ bu[i] = voi
1033
+
1034
+ # NOTE: This is mostly done with sets/lists because we aren't sure
1035
+ # how well the numpy arrays will scale to thousands of
1036
+ # dimensions.
1037
+ vn_pool = set()
1038
+ vn_pool.update(vo.nn)
1039
+ vn_pool.update(vs.nn)
1040
+ cvn_pool = copy.copy(vn_pool)
1041
+ for vn in cvn_pool:
1042
+ for i, xi in enumerate(vn.x):
1043
+ if bl[i] <= xi <= bu[i]:
1044
+ pass
1045
+ else:
1046
+ try:
1047
+ vn_pool.remove(vn)
1048
+ except KeyError:
1049
+ pass # NOTE: Not all neigbouds are in initial pool
1050
+ return vn_pool
1051
+
1052
+ def vf_to_vv(self, vertices, simplices):
1053
+ """
1054
+ Convert a vertex-face mesh to a vertex-vertex mesh used by this class
1055
+
1056
+ Parameters
1057
+ ----------
1058
+ vertices : list
1059
+ Vertices
1060
+ simplices : list
1061
+ Simplices
1062
+ """
1063
+ if self.dim > 1:
1064
+ for s in simplices:
1065
+ edges = itertools.combinations(s, self.dim)
1066
+ for e in edges:
1067
+ self.V[tuple(vertices[e[0]])].connect(
1068
+ self.V[tuple(vertices[e[1]])])
1069
+ else:
1070
+ for e in simplices:
1071
+ self.V[tuple(vertices[e[0]])].connect(
1072
+ self.V[tuple(vertices[e[1]])])
1073
+ return
1074
+
1075
+ def connect_vertex_non_symm(self, v_x, near=None):
1076
+ """
1077
+ Adds a vertex at coords v_x to the complex that is not symmetric to the
1078
+ initial triangulation and sub-triangulation.
1079
+
1080
+ If near is specified (for example; a star domain or collections of
1081
+ cells known to contain v) then only those simplices containd in near
1082
+ will be searched, this greatly speeds up the process.
1083
+
1084
+ If near is not specified this method will search the entire simplicial
1085
+ complex structure.
1086
+
1087
+ Parameters
1088
+ ----------
1089
+ v_x : tuple
1090
+ Coordinates of non-symmetric vertex
1091
+ near : set or list
1092
+ List of vertices, these are points near v to check for
1093
+ """
1094
+ if near is None:
1095
+ star = self.V
1096
+ else:
1097
+ star = near
1098
+ # Create the vertex origin
1099
+ if tuple(v_x) in self.V.cache:
1100
+ if self.V[v_x] in self.V_non_symm:
1101
+ pass
1102
+ else:
1103
+ return
1104
+
1105
+ self.V[v_x]
1106
+ found_nn = False
1107
+ S_rows = []
1108
+ for v in star:
1109
+ S_rows.append(v.x)
1110
+
1111
+ S_rows = np.array(S_rows)
1112
+ A = np.array(S_rows) - np.array(v_x)
1113
+ # Iterate through all the possible simplices of S_rows
1114
+ for s_i in itertools.combinations(range(S_rows.shape[0]),
1115
+ r=self.dim + 1):
1116
+ # Check if connected, else s_i is not a simplex
1117
+ valid_simplex = True
1118
+ for i in itertools.combinations(s_i, r=2):
1119
+ # Every combination of vertices must be connected, we check of
1120
+ # the current iteration of all combinations of s_i are
1121
+ # connected we break the loop if it is not.
1122
+ if ((self.V[tuple(S_rows[i[1]])] not in
1123
+ self.V[tuple(S_rows[i[0]])].nn)
1124
+ and (self.V[tuple(S_rows[i[0]])] not in
1125
+ self.V[tuple(S_rows[i[1]])].nn)):
1126
+ valid_simplex = False
1127
+ break
1128
+
1129
+ S = S_rows[tuple([s_i])]
1130
+ if valid_simplex:
1131
+ if self.deg_simplex(S, proj=None):
1132
+ valid_simplex = False
1133
+
1134
+ # If s_i is a valid simplex we can test if v_x is inside si
1135
+ if valid_simplex:
1136
+ # Find the A_j0 value from the precalculated values
1137
+ A_j0 = A[tuple([s_i])]
1138
+ if self.in_simplex(S, v_x, A_j0):
1139
+ found_nn = True
1140
+ # breaks the main for loop, s_i is the target simplex:
1141
+ break
1142
+
1143
+ # Connect the simplex to point
1144
+ if found_nn:
1145
+ for i in s_i:
1146
+ self.V[v_x].connect(self.V[tuple(S_rows[i])])
1147
+ # Attached the simplex to storage for all non-symmetric vertices
1148
+ self.V_non_symm.append(self.V[v_x])
1149
+ # this bool value indicates a successful connection if True:
1150
+ return found_nn
1151
+
1152
+ def in_simplex(self, S, v_x, A_j0=None):
1153
+ """Check if a vector v_x is in simplex `S`.
1154
+
1155
+ Parameters
1156
+ ----------
1157
+ S : array_like
1158
+ Array containing simplex entries of vertices as rows
1159
+ v_x :
1160
+ A candidate vertex
1161
+ A_j0 : array, optional,
1162
+ Allows for A_j0 to be pre-calculated
1163
+
1164
+ Returns
1165
+ -------
1166
+ res : boolean
1167
+ True if `v_x` is in `S`
1168
+ """
1169
+ A_11 = np.delete(S, 0, 0) - S[0]
1170
+
1171
+ sign_det_A_11 = np.sign(np.linalg.det(A_11))
1172
+ if sign_det_A_11 == 0:
1173
+ # NOTE: We keep the variable A_11, but we loop through A_jj
1174
+ # ind=
1175
+ # while sign_det_A_11 == 0:
1176
+ # A_11 = np.delete(S, ind, 0) - S[ind]
1177
+ # sign_det_A_11 = np.sign(np.linalg.det(A_11))
1178
+
1179
+ sign_det_A_11 = -1 # TODO: Choose another det of j instead?
1180
+ # TODO: Unlikely to work in many cases
1181
+
1182
+ if A_j0 is None:
1183
+ A_j0 = S - v_x
1184
+
1185
+ for d in range(self.dim + 1):
1186
+ det_A_jj = (-1)**d * sign_det_A_11
1187
+ # TODO: Note that scipy might be faster to add as an optional
1188
+ # dependency
1189
+ sign_det_A_j0 = np.sign(np.linalg.det(np.delete(A_j0, d,
1190
+ 0)))
1191
+ # TODO: Note if sign_det_A_j0 == then the point is coplanar to the
1192
+ # current simplex facet, so perhaps return True and attach?
1193
+ if det_A_jj == sign_det_A_j0:
1194
+ continue
1195
+ else:
1196
+ return False
1197
+
1198
+ return True
1199
+
1200
+ def deg_simplex(self, S, proj=None):
1201
+ """Test a simplex S for degeneracy (linear dependence in R^dim).
1202
+
1203
+ Parameters
1204
+ ----------
1205
+ S : np.array
1206
+ Simplex with rows as vertex vectors
1207
+ proj : array, optional,
1208
+ If the projection S[1:] - S[0] is already
1209
+ computed it can be added as an optional argument.
1210
+ """
1211
+ # Strategy: we test all combination of faces, if any of the
1212
+ # determinants are zero then the vectors lie on the same face and is
1213
+ # therefore linearly dependent in the space of R^dim
1214
+ if proj is None:
1215
+ proj = S[1:] - S[0]
1216
+
1217
+ # TODO: Is checking the projection of one vertex against faces of other
1218
+ # vertices sufficient? Or do we need to check more vertices in
1219
+ # dimensions higher than 2?
1220
+ # TODO: Literature seems to suggest using proj.T, but why is this
1221
+ # needed?
1222
+ if np.linalg.det(proj) == 0.0: # TODO: Repalace with tolerance?
1223
+ return True # Simplex is degenerate
1224
+ else:
1225
+ return False # Simplex is not degenerate
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__basinhopping.py ADDED
@@ -0,0 +1,529 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unit tests for the basin hopping global minimization algorithm.
3
+ """
4
+ import copy
5
+
6
+ from numpy.testing import (assert_almost_equal, assert_equal, assert_,
7
+ assert_allclose)
8
+ import pytest
9
+ from pytest import raises as assert_raises
10
+ import numpy as np
11
+ from numpy import cos, sin
12
+
13
+ from scipy.optimize import basinhopping, OptimizeResult
14
+ from scipy.optimize._basinhopping import (
15
+ Storage, RandomDisplacement, Metropolis, AdaptiveStepsize)
16
+
17
+
18
+ def func1d(x):
19
+ f = cos(14.5 * x - 0.3) + (x + 0.2) * x
20
+ df = np.array(-14.5 * sin(14.5 * x - 0.3) + 2. * x + 0.2)
21
+ return f, df
22
+
23
+
24
+ def func2d_nograd(x):
25
+ f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
26
+ return f
27
+
28
+
29
+ def func2d(x):
30
+ f = cos(14.5 * x[0] - 0.3) + (x[1] + 0.2) * x[1] + (x[0] + 0.2) * x[0]
31
+ df = np.zeros(2)
32
+ df[0] = -14.5 * sin(14.5 * x[0] - 0.3) + 2. * x[0] + 0.2
33
+ df[1] = 2. * x[1] + 0.2
34
+ return f, df
35
+
36
+
37
+ def func2d_easyderiv(x):
38
+ f = 2.0*x[0]**2 + 2.0*x[0]*x[1] + 2.0*x[1]**2 - 6.0*x[0]
39
+ df = np.zeros(2)
40
+ df[0] = 4.0*x[0] + 2.0*x[1] - 6.0
41
+ df[1] = 2.0*x[0] + 4.0*x[1]
42
+
43
+ return f, df
44
+
45
+
46
+ class MyTakeStep1(RandomDisplacement):
47
+ """use a copy of displace, but have it set a special parameter to
48
+ make sure it's actually being used."""
49
+ def __init__(self):
50
+ self.been_called = False
51
+ super().__init__()
52
+
53
+ def __call__(self, x):
54
+ self.been_called = True
55
+ return super().__call__(x)
56
+
57
+
58
def myTakeStep2(x):
    """Function-style take_step with no ``stepsize`` attribute.

    Re-implements RandomDisplacement as a bare function so the tests can
    check that basinhopping copes with callables that do not expose
    ``stepsize`` for adaptive step-size adjustment.
    """
    half_width = 0.5
    # in-place update keeps behavior identical for array inputs
    x += np.random.uniform(-half_width, half_width, np.shape(x))
    return x
65
+
66
+
67
class MyAcceptTest:
    """Custom accept test used to confirm basinhopping invokes it.

    On successive calls it cycles once through every legal return type
    (bools, the 'force accept' string, NumPy bools, truthy/falsy
    containers, ints) and afterwards always accepts.
    """

    def __init__(self):
        self.been_called = False
        self.ncalls = 0
        self.testres = [False, 'force accept', True, np.bool_(True),
                        np.bool_(False), [], {}, 0, 1]

    def __call__(self, **kwargs):
        self.been_called = True
        self.ncalls += 1
        idx = self.ncalls - 1
        # serve the scripted return values first, then always accept
        return self.testres[idx] if idx < len(self.testres) else True
86
+
87
+
88
class MyCallBack:
    """Custom callback that counts its invocations.

    Returns True on the 10th call so basinhopping stops early; returns
    None (i.e. keep iterating) on every other call.
    """

    def __init__(self):
        self.been_called = False
        self.ncalls = 0

    def __call__(self, x, f, accepted):
        self.been_called = True
        self.ncalls += 1
        # signal early termination exactly once, on the tenth call
        return True if self.ncalls == 10 else None
104
+
105
+
106
class TestBasinHopping:
    """End-to-end tests of `basinhopping` on the 1-D/2-D test functions."""

    def setup_method(self):
        """ Tests setup.

        Run tests based on the 1-D and 2-D functions described above.
        """
        # starting points and known solutions, indexed by dimensionality - 1
        self.x0 = (1.0, [1.0, 1.0])
        self.sol = (-0.195, np.array([-0.195, -0.1]))

        self.tol = 3  # number of decimal places

        self.niter = 100
        self.disp = False

        # fix random seed
        np.random.seed(1234)

        # minimizer kwargs with and without an analytic gradient
        self.kwargs = {"method": "L-BFGS-B", "jac": True}
        self.kwargs_nograd = {"method": "L-BFGS-B"}

    def test_TypeError(self):
        # test the TypeErrors are raised on bad input
        i = 1
        # if take_step is passed, it must be callable
        assert_raises(TypeError, basinhopping, func2d, self.x0[i],
                      take_step=1)
        # if accept_test is passed, it must be callable
        assert_raises(TypeError, basinhopping, func2d, self.x0[i],
                      accept_test=1)

    def test_input_validation(self):
        # both hyperparameters must lie strictly inside (0, 1)
        msg = 'target_accept_rate has to be in range \\(0, 1\\)'
        with assert_raises(ValueError, match=msg):
            basinhopping(func1d, self.x0[0], target_accept_rate=0.)
        with assert_raises(ValueError, match=msg):
            basinhopping(func1d, self.x0[0], target_accept_rate=1.)

        msg = 'stepwise_factor has to be in range \\(0, 1\\)'
        with assert_raises(ValueError, match=msg):
            basinhopping(func1d, self.x0[0], stepwise_factor=0.)
        with assert_raises(ValueError, match=msg):
            basinhopping(func1d, self.x0[0], stepwise_factor=1.)

    def test_1d_grad(self):
        # test 1-D minimizations with gradient
        i = 0
        res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp)
        assert_almost_equal(res.x, self.sol[i], self.tol)

    def test_2d(self):
        # test 2d minimizations with gradient
        i = 1
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp)
        assert_almost_equal(res.x, self.sol[i], self.tol)
        assert_(res.nfev > 0)

    def test_njev(self):
        # test njev is returned correctly
        i = 1
        minimizer_kwargs = self.kwargs.copy()
        # L-BFGS-B doesn't use njev, but BFGS does
        minimizer_kwargs["method"] = "BFGS"
        res = basinhopping(func2d, self.x0[i],
                           minimizer_kwargs=minimizer_kwargs, niter=self.niter,
                           disp=self.disp)
        assert_(res.nfev > 0)
        assert_equal(res.nfev, res.njev)

    def test_jac(self):
        # test Jacobian returned
        minimizer_kwargs = self.kwargs.copy()
        # BFGS returns a Jacobian
        minimizer_kwargs["method"] = "BFGS"

        res = basinhopping(func2d_easyderiv, [0.0, 0.0],
                           minimizer_kwargs=minimizer_kwargs, niter=self.niter,
                           disp=self.disp)

        assert_(hasattr(res.lowest_optimization_result, "jac"))

        # in this case, the Jacobian is just [df/dx, df/dy]
        _, jacobian = func2d_easyderiv(res.x)
        assert_almost_equal(res.lowest_optimization_result.jac, jacobian,
                            self.tol)

    def test_2d_nograd(self):
        # test 2-D minimizations without gradient
        i = 1
        res = basinhopping(func2d_nograd, self.x0[i],
                           minimizer_kwargs=self.kwargs_nograd,
                           niter=self.niter, disp=self.disp)
        assert_almost_equal(res.x, self.sol[i], self.tol)

    @pytest.mark.fail_slow(5)
    def test_all_minimizers(self):
        # Test 2-D minimizations with gradient. Nelder-Mead, Powell, COBYLA, and
        # COBYQA don't accept jac=True, so aren't included here.
        i = 1
        methods = ['CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP']
        minimizer_kwargs = copy.copy(self.kwargs)
        for method in methods:
            minimizer_kwargs["method"] = method
            res = basinhopping(func2d, self.x0[i],
                               minimizer_kwargs=minimizer_kwargs,
                               niter=self.niter, disp=self.disp)
            assert_almost_equal(res.x, self.sol[i], self.tol)

    @pytest.mark.fail_slow(10)
    def test_all_nograd_minimizers(self):
        # Test 2-D minimizations without gradient. Newton-CG requires jac=True,
        # so not included here.
        i = 1
        methods = ['CG', 'BFGS', 'L-BFGS-B', 'TNC', 'SLSQP',
                   'Nelder-Mead', 'Powell', 'COBYLA', 'COBYQA']
        minimizer_kwargs = copy.copy(self.kwargs_nograd)
        for method in methods:
            # COBYQA takes extensive amount of time on this problem
            niter = 10 if method == 'COBYQA' else self.niter
            minimizer_kwargs["method"] = method
            res = basinhopping(func2d_nograd, self.x0[i],
                               minimizer_kwargs=minimizer_kwargs,
                               niter=niter, disp=self.disp)
            tol = self.tol
            # COBYLA is less precise on this surface; relax the tolerance
            if method == 'COBYLA':
                tol = 2
            assert_almost_equal(res.x, self.sol[i], decimal=tol)

    def test_pass_takestep(self):
        # test that passing a custom takestep works
        # also test that the stepsize is being adjusted
        takestep = MyTakeStep1()
        initial_step_size = takestep.stepsize
        i = 1
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp,
                           take_step=takestep)
        assert_almost_equal(res.x, self.sol[i], self.tol)
        assert_(takestep.been_called)
        # make sure that the built-in adaptive step size has been used
        assert_(initial_step_size != takestep.stepsize)

    def test_pass_simple_takestep(self):
        # test that passing a custom takestep without attribute stepsize
        takestep = myTakeStep2
        i = 1
        res = basinhopping(func2d_nograd, self.x0[i],
                           minimizer_kwargs=self.kwargs_nograd,
                           niter=self.niter, disp=self.disp,
                           take_step=takestep)
        assert_almost_equal(res.x, self.sol[i], self.tol)

    def test_pass_accept_test(self):
        # test passing a custom accept test
        # makes sure it's being used and ensures all the possible return values
        # are accepted.
        accept_test = MyAcceptTest()
        i = 1
        # there's no point in running it more than a few steps.
        basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                     niter=10, disp=self.disp, accept_test=accept_test)
        assert_(accept_test.been_called)

    def test_pass_callback(self):
        # test passing a custom callback function
        # This makes sure it's being used. It also returns True after 10 steps
        # to ensure that it's stopping early.
        callback = MyCallBack()
        i = 1
        # there's no point in running it more than a few steps.
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=30, disp=self.disp, callback=callback)
        assert_(callback.been_called)
        assert_("callback" in res.message[0])
        # One of the calls of MyCallBack is during BasinHoppingRunner
        # construction, so there are only 9 remaining before MyCallBack stops
        # the minimization.
        assert_equal(res.nit, 9)

    def test_minimizer_fail(self):
        # test if a minimizer fails
        i = 1
        # maxiter=0 forces every local minimization to fail
        self.kwargs["options"] = dict(maxiter=0)
        self.niter = 10
        res = basinhopping(func2d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp)
        # the number of failed minimizations should be the number of
        # iterations + 1
        assert_equal(res.nit + 1, res.minimization_failures)

    def test_niter_zero(self):
        # gh5915, what happens if you call basinhopping with niter=0
        i = 0
        basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
                     niter=0, disp=self.disp)

    def test_seed_reproducibility(self):
        # seed should ensure reproducibility between runs
        minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}

        f_1 = []

        def callback(x, f, accepted):
            f_1.append(f)

        basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
                     niter=10, callback=callback, seed=10)

        f_2 = []

        def callback2(x, f, accepted):
            f_2.append(f)

        basinhopping(func2d, [1.0, 1.0], minimizer_kwargs=minimizer_kwargs,
                     niter=10, callback=callback2, seed=10)
        # identical seeds must produce identical energy trajectories
        assert_equal(np.array(f_1), np.array(f_2))

    def test_random_gen(self):
        # check that np.random.Generator can be used (numpy >= 1.17)
        rng = np.random.default_rng(1)

        minimizer_kwargs = {"method": "L-BFGS-B", "jac": True}

        res1 = basinhopping(func2d, [1.0, 1.0],
                            minimizer_kwargs=minimizer_kwargs,
                            niter=10, seed=rng)

        rng = np.random.default_rng(1)
        res2 = basinhopping(func2d, [1.0, 1.0],
                            minimizer_kwargs=minimizer_kwargs,
                            niter=10, seed=rng)
        assert_equal(res1.x, res2.x)

    def test_monotonic_basin_hopping(self):
        # test 1-D minimizations with gradient and T=0
        i = 0
        res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs,
                           niter=self.niter, disp=self.disp, T=0)
        assert_almost_equal(res.x, self.sol[i], self.tol)
347
+
348
+
349
class Test_Storage:
    """Tests for Storage, which tracks the lowest minimum found so far."""

    def setup_method(self):
        self.x0 = np.array(1)
        self.f0 = 0

        seed_result = OptimizeResult(success=True)
        seed_result.x = self.x0
        seed_result.fun = self.f0

        self.storage = Storage(seed_result)

    def test_higher_f_rejected(self):
        # a candidate with a larger function value must not displace the
        # stored minimum
        candidate = OptimizeResult(success=True)
        candidate.x = self.x0 + 1
        candidate.fun = self.f0 + 1

        accepted = self.storage.update(candidate)
        lowest = self.storage.get_lowest()
        assert_equal(self.x0, lowest.x)
        assert_equal(self.f0, lowest.fun)
        assert_(not accepted)

    @pytest.mark.parametrize('success', [True, False])
    def test_lower_f_accepted(self, success):
        # a lower function value replaces the stored minimum only when the
        # local search actually succeeded
        candidate = OptimizeResult(success=success)
        candidate.x = self.x0 + 1
        candidate.fun = self.f0 - 1

        accepted = self.storage.update(candidate)
        lowest = self.storage.get_lowest()
        assert (self.x0 != lowest.x) == success  # can't use `is`
        assert (self.f0 != lowest.fun) == success  # left side is NumPy bool
        assert accepted is success
382
+
383
+
384
class Test_RandomDisplacement:
    """Statistical sanity checks for the RandomDisplacement take-step."""

    def setup_method(self):
        self.stepsize = 1.0
        self.displace = RandomDisplacement(stepsize=self.stepsize)
        self.N = 300000
        self.x0 = np.zeros([self.N])

    def test_random(self):
        # displacements are uniform on [-stepsize, stepsize], so the mean
        # should be 0 and the variance (2*stepsize)**2 / 12
        # note these tests are random, they will fail from time to time
        displaced = self.displace(self.x0)
        expected_var = (2. * self.stepsize) ** 2 / 12
        assert_almost_equal(np.mean(displaced), 0., 1)
        assert_almost_equal(np.var(displaced), expected_var, 1)
399
+
400
+
401
class Test_Metropolis:
    """Tests for the Metropolis accept/reject criterion."""

    def setup_method(self):
        self.T = 2.
        self.met = Metropolis(self.T)
        # res_new is lower in energy than res_old, so it should be accepted
        self.res_new = OptimizeResult(success=True, fun=0.)
        self.res_old = OptimizeResult(success=True, fun=1.)

    def test_boolean_return(self):
        # the return must be a bool, else an error will be raised in
        # basinhopping
        ret = self.met(res_new=self.res_new, res_old=self.res_old)
        assert isinstance(ret, bool)

    def test_lower_f_accepted(self):
        assert_(self.met(res_new=self.res_new, res_old=self.res_old))

    def test_accept(self):
        # test that steps are randomly accepted for f_new > f_old
        one_accept = False
        one_reject = False
        # loop until both outcomes have been observed at least once
        for i in range(1000):
            if one_accept and one_reject:
                break
            res_new = OptimizeResult(success=True, fun=1.)
            res_old = OptimizeResult(success=True, fun=0.5)
            ret = self.met(res_new=res_new, res_old=res_old)
            if ret:
                one_accept = True
            else:
                one_reject = True
        assert_(one_accept)
        assert_(one_reject)

    def test_GH7495(self):
        # an overflow in exp was producing a RuntimeWarning
        # create own object here in case someone changes self.T
        met = Metropolis(2)
        res_new = OptimizeResult(success=True, fun=0.)
        res_old = OptimizeResult(success=True, fun=2000)
        # errstate(over='raise') turns the old warning into a test failure
        with np.errstate(over='raise'):
            met.accept_reject(res_new=res_new, res_old=res_old)

    def test_gh7799(self):
        # gh-7799 reported a problem in which local search was successful but
        # basinhopping returned an invalid solution. Show that this is fixed.
        def func(x):
            return (x**2-8)**2+(x+2)**2

        x0 = -4
        limit = 50  # Constrain to func value >= 50
        # note: the trailing comma makes `con` a 1-tuple of constraint dicts
        con = {'type': 'ineq', 'fun': lambda x: func(x) - limit},
        res = basinhopping(func, x0, 30, minimizer_kwargs={'constraints': con})
        assert res.success
        assert_allclose(res.fun, limit, rtol=1e-6)

    def test_accept_gh7799(self):
        # Metropolis should not accept the result of an unsuccessful new local
        # search if the old local search was successful

        met = Metropolis(0)  # monotonic basin hopping
        res_new = OptimizeResult(success=True, fun=0.)
        res_old = OptimizeResult(success=True, fun=1.)

        # if new local search was successful and energy is lower, accept
        assert met(res_new=res_new, res_old=res_old)
        # if new res is unsuccessful, don't accept - even if energy is lower
        res_new.success = False
        assert not met(res_new=res_new, res_old=res_old)
        # ...unless the old res was unsuccessful, too. In that case, why not?
        res_old.success = False
        assert met(res_new=res_new, res_old=res_old)

    def test_reject_all_gh7799(self):
        # Test the behavior when there is no feasible solution
        def fun(x):
            return x@x

        def constraint(x):
            return x + 1

        # equality constraint x == -1 conflicts with bounds [0, 1], so no
        # local search can ever succeed
        kwargs = {'constraints': {'type': 'eq', 'fun': constraint},
                  'bounds': [(0, 1), (0, 1)], 'method': 'slsqp'}
        res = basinhopping(fun, x0=[2, 3], niter=10, minimizer_kwargs=kwargs)
        assert not res.success
485
+
486
+
487
class Test_AdaptiveStepsize:
    """Checks that AdaptiveStepsize grows/shrinks the wrapped stepsize."""

    def setup_method(self):
        self.stepsize = 1.
        self.ts = RandomDisplacement(stepsize=self.stepsize)
        self.target_accept_rate = 0.5
        self.takestep = AdaptiveStepsize(takestep=self.ts, verbose=False,
                                         accept_rate=self.target_accept_rate)

    def _drive(self, nsteps, accepted):
        # take nsteps steps, reporting each one as accepted or rejected
        for _ in range(nsteps):
            self.takestep(0.)
            self.takestep.report(accepted)

    def test_adaptive_increase(self):
        # a high acceptance rate should push the stepsize up
        self.takestep(0.)
        self.takestep.report(False)
        self._drive(self.takestep.interval, True)
        assert_(self.ts.stepsize > self.stepsize)

    def test_adaptive_decrease(self):
        # a high rejection rate should pull the stepsize down
        self.takestep(0.)
        self.takestep.report(True)
        self._drive(self.takestep.interval, False)
        assert_(self.ts.stepsize < self.stepsize)

    def test_all_accepted(self):
        # everything works OK if all steps were accepted
        self._drive(self.takestep.interval + 1, True)
        assert_(self.ts.stepsize > self.stepsize)

    def test_all_rejected(self):
        # everything works OK if all steps were rejected
        self._drive(self.takestep.interval + 1, False)
        assert_(self.ts.stepsize < self.stepsize)
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__differential_evolution.py ADDED
@@ -0,0 +1,1699 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unit tests for the differential global minimization algorithm.
3
+ """
4
+ import multiprocessing
5
+ from multiprocessing.dummy import Pool as ThreadPool
6
+ import platform
7
+
8
+ from scipy.optimize._differentialevolution import (DifferentialEvolutionSolver,
9
+ _ConstraintWrapper)
10
+ from scipy.optimize import differential_evolution, OptimizeResult
11
+ from scipy.optimize._constraints import (Bounds, NonlinearConstraint,
12
+ LinearConstraint)
13
+ from scipy.optimize import rosen, minimize
14
+ from scipy.sparse import csr_matrix
15
+ from scipy import stats
16
+
17
+ import numpy as np
18
+ from numpy.testing import (assert_equal, assert_allclose, assert_almost_equal,
19
+ assert_string_equal, assert_, suppress_warnings)
20
+ from pytest import raises as assert_raises, warns
21
+ import pytest
22
+
23
+
24
+ class TestDifferentialEvolutionSolver:
25
+
26
+ def setup_method(self):
27
+ self.old_seterr = np.seterr(invalid='raise')
28
+ self.limits = np.array([[0., 0.],
29
+ [2., 2.]])
30
+ self.bounds = [(0., 2.), (0., 2.)]
31
+
32
+ self.dummy_solver = DifferentialEvolutionSolver(self.quadratic,
33
+ [(0, 100)])
34
+
35
+ # dummy_solver2 will be used to test mutation strategies
36
+ self.dummy_solver2 = DifferentialEvolutionSolver(self.quadratic,
37
+ [(0, 1)],
38
+ popsize=7,
39
+ mutation=0.5)
40
+ # create a population that's only 7 members long
41
+ # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
42
+ population = np.atleast_2d(np.arange(0.1, 0.8, 0.1)).T
43
+ self.dummy_solver2.population = population
44
+
45
+ def teardown_method(self):
46
+ np.seterr(**self.old_seterr)
47
+
48
+ def quadratic(self, x):
49
+ return x[0]**2
50
+
51
+ def test__strategy_resolves(self):
52
+ # test that the correct mutation function is resolved by
53
+ # different requested strategy arguments
54
+ solver = DifferentialEvolutionSolver(rosen,
55
+ self.bounds,
56
+ strategy='best1exp')
57
+ assert_equal(solver.strategy, 'best1exp')
58
+ assert_equal(solver.mutation_func.__name__, '_best1')
59
+
60
+ solver = DifferentialEvolutionSolver(rosen,
61
+ self.bounds,
62
+ strategy='best1bin')
63
+ assert_equal(solver.strategy, 'best1bin')
64
+ assert_equal(solver.mutation_func.__name__, '_best1')
65
+
66
+ solver = DifferentialEvolutionSolver(rosen,
67
+ self.bounds,
68
+ strategy='rand1bin')
69
+ assert_equal(solver.strategy, 'rand1bin')
70
+ assert_equal(solver.mutation_func.__name__, '_rand1')
71
+
72
+ solver = DifferentialEvolutionSolver(rosen,
73
+ self.bounds,
74
+ strategy='rand1exp')
75
+ assert_equal(solver.strategy, 'rand1exp')
76
+ assert_equal(solver.mutation_func.__name__, '_rand1')
77
+
78
+ solver = DifferentialEvolutionSolver(rosen,
79
+ self.bounds,
80
+ strategy='rand2exp')
81
+ assert_equal(solver.strategy, 'rand2exp')
82
+ assert_equal(solver.mutation_func.__name__, '_rand2')
83
+
84
+ solver = DifferentialEvolutionSolver(rosen,
85
+ self.bounds,
86
+ strategy='best2bin')
87
+ assert_equal(solver.strategy, 'best2bin')
88
+ assert_equal(solver.mutation_func.__name__, '_best2')
89
+
90
+ solver = DifferentialEvolutionSolver(rosen,
91
+ self.bounds,
92
+ strategy='rand2bin')
93
+ assert_equal(solver.strategy, 'rand2bin')
94
+ assert_equal(solver.mutation_func.__name__, '_rand2')
95
+
96
+ solver = DifferentialEvolutionSolver(rosen,
97
+ self.bounds,
98
+ strategy='rand2exp')
99
+ assert_equal(solver.strategy, 'rand2exp')
100
+ assert_equal(solver.mutation_func.__name__, '_rand2')
101
+
102
+ solver = DifferentialEvolutionSolver(rosen,
103
+ self.bounds,
104
+ strategy='randtobest1bin')
105
+ assert_equal(solver.strategy, 'randtobest1bin')
106
+ assert_equal(solver.mutation_func.__name__, '_randtobest1')
107
+
108
+ solver = DifferentialEvolutionSolver(rosen,
109
+ self.bounds,
110
+ strategy='randtobest1exp')
111
+ assert_equal(solver.strategy, 'randtobest1exp')
112
+ assert_equal(solver.mutation_func.__name__, '_randtobest1')
113
+
114
+ solver = DifferentialEvolutionSolver(rosen,
115
+ self.bounds,
116
+ strategy='currenttobest1bin')
117
+ assert_equal(solver.strategy, 'currenttobest1bin')
118
+ assert_equal(solver.mutation_func.__name__, '_currenttobest1')
119
+
120
+ solver = DifferentialEvolutionSolver(rosen,
121
+ self.bounds,
122
+ strategy='currenttobest1exp')
123
+ assert_equal(solver.strategy, 'currenttobest1exp')
124
+ assert_equal(solver.mutation_func.__name__, '_currenttobest1')
125
+
126
+ def test__mutate1(self):
127
+ # strategies */1/*, i.e. rand/1/bin, best/1/exp, etc.
128
+ result = np.array([0.05])
129
+ trial = self.dummy_solver2._best1(np.array([2, 3, 4, 5, 6]))
130
+ assert_allclose(trial, result)
131
+
132
+ result = np.array([0.25])
133
+ trial = self.dummy_solver2._rand1(np.array([2, 3, 4, 5, 6]))
134
+ assert_allclose(trial, result)
135
+
136
+ def test__mutate2(self):
137
+ # strategies */2/*, i.e. rand/2/bin, best/2/exp, etc.
138
+ # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
139
+
140
+ result = np.array([-0.1])
141
+ trial = self.dummy_solver2._best2(np.array([2, 3, 4, 5, 6]))
142
+ assert_allclose(trial, result)
143
+
144
+ result = np.array([0.1])
145
+ trial = self.dummy_solver2._rand2(np.array([2, 3, 4, 5, 6]))
146
+ assert_allclose(trial, result)
147
+
148
+ def test__randtobest1(self):
149
+ # strategies randtobest/1/*
150
+ result = np.array([0.15])
151
+ trial = self.dummy_solver2._randtobest1(np.array([2, 3, 4, 5, 6]))
152
+ assert_allclose(trial, result)
153
+
154
+ def test__currenttobest1(self):
155
+ # strategies currenttobest/1/*
156
+ result = np.array([0.1])
157
+ trial = self.dummy_solver2._currenttobest1(
158
+ 1,
159
+ np.array([2, 3, 4, 5, 6])
160
+ )
161
+ assert_allclose(trial, result)
162
+
163
+ def test_can_init_with_dithering(self):
164
+ mutation = (0.5, 1)
165
+ solver = DifferentialEvolutionSolver(self.quadratic,
166
+ self.bounds,
167
+ mutation=mutation)
168
+
169
+ assert_equal(solver.dither, list(mutation))
170
+
171
+ def test_invalid_mutation_values_arent_accepted(self):
172
+ func = rosen
173
+ mutation = (0.5, 3)
174
+ assert_raises(ValueError,
175
+ DifferentialEvolutionSolver,
176
+ func,
177
+ self.bounds,
178
+ mutation=mutation)
179
+
180
+ mutation = (-1, 1)
181
+ assert_raises(ValueError,
182
+ DifferentialEvolutionSolver,
183
+ func,
184
+ self.bounds,
185
+ mutation=mutation)
186
+
187
+ mutation = (0.1, np.nan)
188
+ assert_raises(ValueError,
189
+ DifferentialEvolutionSolver,
190
+ func,
191
+ self.bounds,
192
+ mutation=mutation)
193
+
194
+ mutation = 0.5
195
+ solver = DifferentialEvolutionSolver(func,
196
+ self.bounds,
197
+ mutation=mutation)
198
+ assert_equal(0.5, solver.scale)
199
+ assert_equal(None, solver.dither)
200
+
201
+ def test_invalid_functional(self):
202
+ def func(x):
203
+ return np.array([np.sum(x ** 2), np.sum(x)])
204
+
205
+ with assert_raises(
206
+ RuntimeError,
207
+ match=r"func\(x, \*args\) must return a scalar value"):
208
+ differential_evolution(func, [(-2, 2), (-2, 2)])
209
+
210
+ def test__scale_parameters(self):
211
+ trial = np.array([0.3])
212
+ assert_equal(30, self.dummy_solver._scale_parameters(trial))
213
+
214
+ # it should also work with the limits reversed
215
+ self.dummy_solver.limits = np.array([[100], [0.]])
216
+ assert_equal(30, self.dummy_solver._scale_parameters(trial))
217
+
218
+ def test__unscale_parameters(self):
219
+ trial = np.array([30])
220
+ assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
221
+
222
+ # it should also work with the limits reversed
223
+ self.dummy_solver.limits = np.array([[100], [0.]])
224
+ assert_equal(0.3, self.dummy_solver._unscale_parameters(trial))
225
+
226
+ def test_equal_bounds(self):
227
+ with np.errstate(invalid='raise'):
228
+ solver = DifferentialEvolutionSolver(
229
+ self.quadratic,
230
+ bounds=[(2.0, 2.0), (1.0, 3.0)]
231
+ )
232
+ v = solver._unscale_parameters([2.0, 2.0])
233
+ assert_allclose(v, 0.5)
234
+
235
+ res = differential_evolution(self.quadratic, [(2.0, 2.0), (3.0, 3.0)])
236
+ assert_equal(res.x, [2.0, 3.0])
237
+
238
+ def test__ensure_constraint(self):
239
+ trial = np.array([1.1, -100, 0.9, 2., 300., -0.00001])
240
+ self.dummy_solver._ensure_constraint(trial)
241
+
242
+ assert_equal(trial[2], 0.9)
243
+ assert_(np.logical_and(trial >= 0, trial <= 1).all())
244
+
245
+ def test_differential_evolution(self):
246
+ # test that the Jmin of DifferentialEvolutionSolver
247
+ # is the same as the function evaluation
248
+ solver = DifferentialEvolutionSolver(
249
+ self.quadratic, [(-2, 2)], maxiter=1, polish=False
250
+ )
251
+ result = solver.solve()
252
+ assert_equal(result.fun, self.quadratic(result.x))
253
+
254
+ solver = DifferentialEvolutionSolver(
255
+ self.quadratic, [(-2, 2)], maxiter=1, polish=True
256
+ )
257
+ result = solver.solve()
258
+ assert_equal(result.fun, self.quadratic(result.x))
259
+
260
+ def test_best_solution_retrieval(self):
261
+ # test that the getter property method for the best solution works.
262
+ solver = DifferentialEvolutionSolver(self.quadratic, [(-2, 2)])
263
+ result = solver.solve()
264
+ assert_equal(result.x, solver.x)
265
+
266
+ def test_intermediate_result(self):
267
+ # Check that intermediate result object passed into the callback
268
+ # function contains the expected information and that raising
269
+ # `StopIteration` causes the expected behavior.
270
+ maxiter = 10
271
+
272
+ def func(x):
273
+ val = rosen(x)
274
+ if val < func.val:
275
+ func.x = x
276
+ func.val = val
277
+ return val
278
+ func.x = None
279
+ func.val = np.inf
280
+
281
+ def callback(intermediate_result):
282
+ callback.nit += 1
283
+ callback.intermediate_result = intermediate_result
284
+ assert intermediate_result.population.ndim == 2
285
+ assert intermediate_result.population.shape[1] == 2
286
+ assert intermediate_result.nit == callback.nit
287
+
288
+ # Check that `x` and `fun` attributes are the best found so far
289
+ assert_equal(intermediate_result.x, callback.func.x)
290
+ assert_equal(intermediate_result.fun, callback.func.val)
291
+
292
+ # Check for consistency between `fun`, `population_energies`,
293
+ # `x`, and `population`
294
+ assert_equal(intermediate_result.fun, rosen(intermediate_result.x))
295
+ for i in range(len(intermediate_result.population_energies)):
296
+ res = intermediate_result.population_energies[i]
297
+ ref = rosen(intermediate_result.population[i])
298
+ assert_equal(res, ref)
299
+ assert_equal(intermediate_result.x,
300
+ intermediate_result.population[0])
301
+ assert_equal(intermediate_result.fun,
302
+ intermediate_result.population_energies[0])
303
+
304
+ assert intermediate_result.message == 'in progress'
305
+ assert intermediate_result.success is True
306
+ assert isinstance(intermediate_result, OptimizeResult)
307
+ if callback.nit == maxiter:
308
+ raise StopIteration
309
+ callback.nit = 0
310
+ callback.intermediate_result = None
311
+ callback.func = func
312
+
313
+ bounds = [(0, 2), (0, 2)]
314
+ kwargs = dict(func=func, bounds=bounds, seed=838245, polish=False)
315
+ res = differential_evolution(**kwargs, callback=callback)
316
+ ref = differential_evolution(**kwargs, maxiter=maxiter)
317
+
318
+ # Check that final `intermediate_result` is equivalent to returned
319
+ # result object and that terminating with callback `StopIteration`
320
+ # after `maxiter` iterations is equivalent to terminating with
321
+ # `maxiter` parameter.
322
+ assert res.success is ref.success is False
323
+ assert callback.nit == res.nit == maxiter
324
+ assert res.message == 'callback function requested stop early'
325
+ assert ref.message == 'Maximum number of iterations has been exceeded.'
326
+ for field, val in ref.items():
327
+ if field in {'message', 'success'}: # checked separately
328
+ continue
329
+ assert_equal(callback.intermediate_result[field], val)
330
+ assert_equal(res[field], val)
331
+
332
+ # Check that polish occurs after `StopIteration` as advertised
333
+ callback.nit = 0
334
+ func.val = np.inf
335
+ kwargs['polish'] = True
336
+ res = differential_evolution(**kwargs, callback=callback)
337
+ assert res.fun < ref.fun
338
+
339
def test_callback_terminates(self):
    """Every supported truthy callback outcome must halt the solve.

    Covers the legacy ``callback(param, convergence)`` signature returning
    a truthy value, a callback raising ``StopIteration``, and callbacks
    whose return value is truthy/falsy when coerced with ``bool``.
    """
    # test that if the callback returns true, then the minimization halts
    bounds = [(0, 2), (0, 2)]
    expected_msg = 'callback function requested stop early'
    def callback_python_true(param, convergence=0.):
        return True

    result = differential_evolution(
        rosen, bounds, callback=callback_python_true
    )
    assert_string_equal(result.message, expected_msg)

    # if callback raises StopIteration then solve should be interrupted
    def callback_stop(intermediate_result):
        raise StopIteration

    result = differential_evolution(rosen, bounds, callback=callback_stop)
    assert not result.success

    def callback_evaluates_true(param, convergence=0.):
        # DE should stop if bool(self.callback) is True
        return [10]

    result = differential_evolution(rosen, bounds, callback=callback_evaluates_true)
    assert_string_equal(result.message, expected_msg)
    assert not result.success

    def callback_evaluates_false(param, convergence=0.):
        # empty list is falsy, so the solve runs to completion
        return []

    result = differential_evolution(rosen, bounds,
                                    callback=callback_evaluates_false)
    assert result.success
372
+
373
+ def test_args_tuple_is_passed(self):
374
+ # test that the args tuple is passed to the cost function properly.
375
+ bounds = [(-10, 10)]
376
+ args = (1., 2., 3.)
377
+
378
+ def quadratic(x, *args):
379
+ if type(args) != tuple:
380
+ raise ValueError('args should be a tuple')
381
+ return args[0] + args[1] * x + args[2] * x**2.
382
+
383
+ result = differential_evolution(quadratic,
384
+ bounds,
385
+ args=args,
386
+ polish=True)
387
+ assert_almost_equal(result.fun, 2 / 3.)
388
+
389
+ def test_init_with_invalid_strategy(self):
390
+ # test that passing an invalid strategy raises ValueError
391
+ func = rosen
392
+ bounds = [(-3, 3)]
393
+ assert_raises(ValueError,
394
+ differential_evolution,
395
+ func,
396
+ bounds,
397
+ strategy='abc')
398
+
399
def test_bounds_checking(self):
    """Malformed ``bounds`` raise ``ValueError``; ``Bounds`` objects work."""
    # test that the bounds checking works
    func = rosen
    bounds = [(-3)]  # not a (min, max) pair
    assert_raises(ValueError,
                  differential_evolution,
                  func,
                  bounds)
    bounds = [(-3, 3), (3, 4, 5)]  # second entry has three values
    assert_raises(ValueError,
                  differential_evolution,
                  func,
                  bounds)

    # test that we can use a new-type Bounds object
    result = differential_evolution(rosen, Bounds([0, 0], [2, 2]))
    assert_almost_equal(result.x, (1., 1.))
416
+
417
+ def test_select_samples(self):
418
+ # select_samples should return 5 separate random numbers.
419
+ limits = np.arange(12., dtype='float64').reshape(2, 6)
420
+ bounds = list(zip(limits[0, :], limits[1, :]))
421
+ solver = DifferentialEvolutionSolver(None, bounds, popsize=1)
422
+ candidate = 0
423
+ r1, r2, r3, r4, r5 = solver._select_samples(candidate, 5)
424
+ assert_equal(
425
+ len(np.unique(np.array([candidate, r1, r2, r3, r4, r5]))), 6)
426
+
427
+ def test_maxiter_stops_solve(self):
428
+ # test that if the maximum number of iterations is exceeded
429
+ # the solver stops.
430
+ solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=1)
431
+ result = solver.solve()
432
+ assert_equal(result.success, False)
433
+ assert_equal(result.message,
434
+ 'Maximum number of iterations has been exceeded.')
435
+
436
def test_maxfun_stops_solve(self):
    """Exceeding ``maxfun`` stops the solve, in both updating modes.

    Note the termination message differs between the 'immediate'
    ("been exceeded.") and 'deferred' ("been reached.") code paths.
    """
    # test that if the maximum number of function evaluations is exceeded
    # during initialisation the solver stops
    solver = DifferentialEvolutionSolver(rosen, self.bounds, maxfun=1,
                                         polish=False)
    result = solver.solve()

    assert_equal(result.nfev, 2)
    assert_equal(result.success, False)
    assert_equal(result.message,
                 'Maximum number of function evaluations has '
                 'been exceeded.')

    # test that if the maximum number of function evaluations is exceeded
    # during the actual minimisation, then the solver stops.
    # Have to turn polishing off, as this will still occur even if maxfun
    # is reached. For popsize=5 and len(bounds)=2, then there are only 10
    # function evaluations during initialisation.
    solver = DifferentialEvolutionSolver(rosen,
                                         self.bounds,
                                         popsize=5,
                                         polish=False,
                                         maxfun=40)
    result = solver.solve()

    assert_equal(result.nfev, 41)
    assert_equal(result.success, False)
    assert_equal(result.message,
                 'Maximum number of function evaluations has '
                 'been exceeded.')

    # now repeat for the updating='deferred' version
    # 47 function evaluations is not a multiple of the population size,
    # so maxfun is reached partway through a population evaluation.
    solver = DifferentialEvolutionSolver(rosen,
                                         self.bounds,
                                         popsize=5,
                                         polish=False,
                                         maxfun=47,
                                         updating='deferred')
    result = solver.solve()

    assert_equal(result.nfev, 47)
    assert_equal(result.success, False)
    assert_equal(result.message,
                 'Maximum number of function evaluations has '
                 'been reached.')
483
+
484
def test_quadratic(self):
    """Solve ``self.quadratic`` using the solver object directly."""
    # test the quadratic function from object
    solver = DifferentialEvolutionSolver(self.quadratic,
                                         [(-100, 100)],
                                         tol=0.02)
    solver.solve()
    # the best member must have been promoted to index 0
    assert_equal(np.argmin(solver.population_energies), 0)
491
+
492
def test_quadratic_from_diff_ev(self):
    """Smoke test: solve ``self.quadratic`` via the public function."""
    # test the quadratic function from differential_evolution function
    differential_evolution(self.quadratic,
                           [(-100, 100)],
                           tol=0.02)
497
+
498
+ def test_seed_gives_repeatability(self):
499
+ result = differential_evolution(self.quadratic,
500
+ [(-100, 100)],
501
+ polish=False,
502
+ seed=1,
503
+ tol=0.5)
504
+ result2 = differential_evolution(self.quadratic,
505
+ [(-100, 100)],
506
+ polish=False,
507
+ seed=1,
508
+ tol=0.5)
509
+ assert_equal(result.x, result2.x)
510
+ assert_equal(result.nfev, result2.nfev)
511
+
512
def test_random_generator(self):
    """``np.random.Generator`` seeds are accepted by every init scheme."""
    # check that np.random.Generator can be used (numpy >= 1.17)
    # obtain a np.random.Generator object
    rng = np.random.default_rng()

    inits = ['random', 'latinhypercube', 'sobol', 'halton']
    for init in inits:
        differential_evolution(self.quadratic,
                               [(-100, 100)],
                               polish=False,
                               seed=rng,
                               tol=0.5,
                               init=init)
525
+
526
+ def test_exp_runs(self):
527
+ # test whether exponential mutation loop runs
528
+ solver = DifferentialEvolutionSolver(rosen,
529
+ self.bounds,
530
+ strategy='best1exp',
531
+ maxiter=1)
532
+
533
+ solver.solve()
534
+
535
def test_gh_4511_regression(self):
    """gh-4511: certain popsize values triggered an off-by-one error."""
    # This modification of the differential evolution docstring example
    # uses a custom popsize that had triggered an off-by-one error.
    # Because we do not care about solving the optimization problem in
    # this test, we use maxiter=1 to reduce the testing time.
    bounds = [(-5, 5), (-5, 5)]
    # original (slow) reproducer, kept for reference:
    # result = differential_evolution(rosen, bounds, popsize=1815,
    #                                 maxiter=1)

    # the original issue arose because of rounding error in arange, with
    # linspace being a much better solution. 1815 is quite a large popsize
    # to use and results in a long test time (~13s). I used the original
    # issue to figure out the lowest number of samples that would cause
    # this rounding error to occur, 49.
    differential_evolution(rosen, bounds, popsize=49, maxiter=1)
550
+
551
+ def test_calculate_population_energies(self):
552
+ # if popsize is 3, then the overall generation has size (6,)
553
+ solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3)
554
+ solver._calculate_population_energies(solver.population)
555
+ solver._promote_lowest_energy()
556
+ assert_equal(np.argmin(solver.population_energies), 0)
557
+
558
+ # initial calculation of the energies should require 6 nfev.
559
+ assert_equal(solver._nfev, 6)
560
+
561
def test_iteration(self):
    """The solver is iterable; iteration respects maxfun and improves fun."""
    # test that DifferentialEvolutionSolver is iterable
    # if popsize is 3, then the overall generation has size (6,)
    solver = DifferentialEvolutionSolver(rosen, self.bounds, popsize=3,
                                         maxfun=12)
    x, fun = next(solver)
    assert_equal(np.size(x, 0), 2)

    # 6 nfev are required for initial calculation of energies, 6 nfev are
    # required for the evolution of the 6 population members.
    assert_equal(solver._nfev, 12)

    # the next generation should halt because it exceeds maxfun
    assert_raises(StopIteration, next, solver)

    # check a proper minimisation can be done by an iterable solver
    solver = DifferentialEvolutionSolver(rosen, self.bounds)
    _, fun_prev = next(solver)
    for i, soln in enumerate(solver):
        x_current, fun_current = soln
        # best energy is monotonically non-increasing across generations
        assert fun_prev >= fun_current
        _, fun_prev = x_current, fun_current
        # need to have this otherwise the solver would never stop.
        if i == 50:
            break
586
+
587
+ def test_convergence(self):
588
+ solver = DifferentialEvolutionSolver(rosen, self.bounds, tol=0.2,
589
+ polish=False)
590
+ solver.solve()
591
+ assert_(solver.convergence < 0.2)
592
+
593
def test_maxiter_none_GH5731(self):
    """gh-5731: ``maxiter=None``/``maxfun=None`` must not raise TypeError."""
    # Pre 0.17 the previous default for maxiter and maxfun was None.
    # the numerical defaults are now 1000 and np.inf. However, some scripts
    # will still supply None for both of those, this will raise a TypeError
    # in the solve method.
    solver = DifferentialEvolutionSolver(rosen, self.bounds, maxiter=None,
                                         maxfun=None)
    solver.solve()
601
+
602
def test_population_initiation(self):
    """Exercise every population-initiation mode of the solver.

    Checks that each init scheme resets the evaluation counter and
    energies, that user-supplied arrays are scaled/clipped correctly,
    and that invalid init values and shapes are rejected.
    """
    # test the different modes of population initiation

    # init must be either 'latinhypercube' or 'random'
    # raising ValueError if something else is passed in
    assert_raises(ValueError,
                  DifferentialEvolutionSolver,
                  *(rosen, self.bounds),
                  **{'init': 'rubbish'})

    solver = DifferentialEvolutionSolver(rosen, self.bounds)

    # check that population initiation:
    # 1) resets _nfev to 0
    # 2) all population energies are np.inf
    solver.init_population_random()
    assert_equal(solver._nfev, 0)
    assert_(np.all(np.isinf(solver.population_energies)))

    solver.init_population_lhs()
    assert_equal(solver._nfev, 0)
    assert_(np.all(np.isinf(solver.population_energies)))

    solver.init_population_qmc(qmc_engine='halton')
    assert_equal(solver._nfev, 0)
    assert_(np.all(np.isinf(solver.population_energies)))

    solver = DifferentialEvolutionSolver(rosen, self.bounds, init='sobol')
    solver.init_population_qmc(qmc_engine='sobol')
    assert_equal(solver._nfev, 0)
    assert_(np.all(np.isinf(solver.population_energies)))

    # we should be able to initialize with our own array
    population = np.linspace(-1, 3, 10).reshape(5, 2)
    solver = DifferentialEvolutionSolver(rosen, self.bounds,
                                         init=population,
                                         strategy='best2bin',
                                         atol=0.01, seed=1, popsize=5)

    assert_equal(solver._nfev, 0)
    assert_(np.all(np.isinf(solver.population_energies)))
    assert_(solver.num_population_members == 5)
    assert_(solver.population_shape == (5, 2))

    # check that the population was initialized correctly
    unscaled_population = np.clip(solver._unscale_parameters(population),
                                  0, 1)
    assert_almost_equal(solver.population[:5], unscaled_population)

    # population values need to be clipped to bounds
    assert_almost_equal(np.min(solver.population[:5]), 0)
    assert_almost_equal(np.max(solver.population[:5]), 1)

    # shouldn't be able to initialize with an array if it's the wrong shape
    # this would have too many parameters
    population = np.linspace(-1, 3, 15).reshape(5, 3)
    assert_raises(ValueError,
                  DifferentialEvolutionSolver,
                  *(rosen, self.bounds),
                  **{'init': population})

    # provide an initial solution
    # bounds are [(0, 2), (0, 2)]
    x0 = np.random.uniform(low=0.0, high=2.0, size=2)
    solver = DifferentialEvolutionSolver(
        rosen, self.bounds, x0=x0
    )
    # parameters are scaled to unit interval
    assert_allclose(solver.population[0], x0 / 2.0)
671
+
672
def test_x0(self):
    """``x0`` seeds the initial population; out-of-bounds x0 raises."""
    # smoke test that checks that x0 is usable.
    res = differential_evolution(rosen, self.bounds, x0=[0.2, 0.8])
    assert res.success

    # check what happens if some of the x0 lay outside the bounds
    with assert_raises(ValueError):
        differential_evolution(rosen, self.bounds, x0=[0.2, 2.1])
680
+
681
def test_infinite_objective_function(self):
    """The solver must tolerate objectives that sometimes return inf."""
    # Test that there are no problems if the objective function
    # returns inf on some runs
    def sometimes_inf(x):
        if x[0] < .5:
            return np.inf
        return x[1]
    bounds = [(0, 1), (0, 1)]
    differential_evolution(sometimes_inf, bounds=bounds, disp=False)
690
+
691
def test_deferred_updating(self):
    """``updating='deferred'`` is honoured, including with best1exp."""
    # check setting of deferred updating, with default workers
    bounds = [(0., 2.), (0., 2.)]
    solver = DifferentialEvolutionSolver(rosen, bounds, updating='deferred')
    assert_(solver._updating == 'deferred')
    # default workers means the builtin serial map is used
    assert_(solver._mapwrapper._mapfunc is map)
    res = solver.solve()
    assert res.success

    # check that deferred updating works with an exponential crossover
    res = differential_evolution(
        rosen, bounds, updating='deferred', strategy='best1exp'
    )
    assert res.success
705
+
706
def test_immediate_updating(self):
    """'immediate' is the default; a workers pool forces 'deferred'."""
    # check setting of immediate updating, with default workers
    bounds = [(0., 2.), (0., 2.)]
    solver = DifferentialEvolutionSolver(rosen, bounds)
    assert_(solver._updating == 'immediate')

    # Safely forking from a multithreaded process is
    # problematic, and deprecated in Python 3.12, so
    # we use a slower but portable alternative
    # see gh-19848
    ctx = multiprocessing.get_context("spawn")
    with ctx.Pool(2) as p:
        # should raise a UserWarning because the updating='immediate'
        # is being overridden by the workers keyword
        with warns(UserWarning):
            with DifferentialEvolutionSolver(rosen, bounds, workers=p.map) as s:
                pass
        assert s._updating == 'deferred'
724
+
725
@pytest.mark.fail_slow(5)
def test_parallel(self):
    """Smoke test: deferred updating works with pool-map and int workers."""
    # smoke test for parallelization with deferred updating
    bounds = [(0., 2.), (0., 2.)]
    # use threads instead of Process to speed things up for this simple example
    with ThreadPool(2) as p, DifferentialEvolutionSolver(
        rosen, bounds, updating='deferred', workers=p.map, tol=0.1, popsize=3
    ) as solver:
        assert solver._mapwrapper.pool is not None
        assert solver._updating == 'deferred'
        solver.solve()

    # workers given as an int creates the solver's own internal pool
    with DifferentialEvolutionSolver(
        rosen, bounds, updating='deferred', workers=2, popsize=3, tol=0.1
    ) as solver:
        assert solver._mapwrapper.pool is not None
        assert solver._updating == 'deferred'
        solver.solve()
743
+
744
def test_converged(self):
    """After a successful solve, ``converged()`` reports True."""
    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)])
    solver.solve()
    assert_(solver.converged())
748
+
749
def test_constraint_violation_fn(self):
    """_constraint_violation_fn: scalar, concatenated and vectorized paths.

    Also checks that a constraint function returning the transposed
    (S, M) shape produces a helpful RuntimeError in vectorized mode.
    """
    def constr_f(x):
        return [x[0] + x[1]]

    def constr_f2(x):
        return np.array([x[0]**2 + x[1], x[0] - x[1]])

    nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)

    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc,))

    # x[0] + x[1] = 2.0 exceeds the upper bound of 1.9 by 0.1
    cv = solver._constraint_violation_fn(np.array([1.0, 1.0]))
    assert_almost_equal(cv, 0.1)

    nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)
    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc, nlc2))

    # for multiple constraints the constraint violations should
    # be concatenated.
    xs = [(1.2, 1), (2.0, 2.0), (0.5, 0.5)]
    vs = [(0.3, 0.64, 0.0), (2.1, 4.2, 0.0), (0, 0, 0)]

    for x, v in zip(xs, vs):
        cv = solver._constraint_violation_fn(np.array(x))
        assert_allclose(cv, np.atleast_2d(v))

    # vectorized calculation of a series of solutions
    assert_allclose(
        solver._constraint_violation_fn(np.array(xs)), np.array(vs)
    )

    # the following line is used in _calculate_population_feasibilities.
    # _constraint_violation_fn returns an (1, M) array when
    # x.shape == (N,), i.e. a single solution. Therefore this list
    # comprehension should generate (S, 1, M) array.
    constraint_violation = np.array([solver._constraint_violation_fn(x)
                                     for x in np.array(xs)])
    assert constraint_violation.shape == (3, 1, 3)

    # we need reasonable error messages if the constraint function doesn't
    # return the right thing
    def constr_f3(x):
        # returns (S, M), rather than (M, S)
        return constr_f2(x).T

    nlc2 = NonlinearConstraint(constr_f3, -np.inf, 1.8)
    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc, nlc2),
                                         vectorized=False)
    solver.vectorized = True
    with pytest.raises(
        RuntimeError, match="An array returned from a Constraint"
    ):
        solver._constraint_violation_fn(np.array(xs))
805
+
806
def test_constraint_population_feasibilities(self):
    """_calculate_population_feasibilities: single and multi constraints,
    with and without vectorization."""
    def constr_f(x):
        return [x[0] + x[1]]

    def constr_f2(x):
        return [x[0]**2 + x[1], x[0] - x[1]]

    nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)

    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc,))

    # are population feasibilities correct
    # [0.5, 0.5] corresponds to scaled values of [1., 1.]
    feas, cv = solver._calculate_population_feasibilities(
        np.array([[0.5, 0.5], [1., 1.]]))
    assert_equal(feas, [False, False])
    assert_almost_equal(cv, np.array([[0.1], [2.1]]))
    assert cv.shape == (2, 1)

    nlc2 = NonlinearConstraint(constr_f2, -np.inf, 1.8)

    # both code paths must agree
    for vectorize in [False, True]:
        solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                             constraints=(nlc, nlc2),
                                             vectorized=vectorize,
                                             updating='deferred')

        feas, cv = solver._calculate_population_feasibilities(
            np.array([[0.5, 0.5], [0.6, 0.5]]))
        assert_equal(feas, [False, False])
        assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [0.3, 0.64, 0]]))

        feas, cv = solver._calculate_population_feasibilities(
            np.array([[0.5, 0.5], [1., 1.]]))
        assert_equal(feas, [False, False])
        assert_almost_equal(cv, np.array([[0.1, 0.2, 0], [2.1, 4.2, 0]]))
        assert cv.shape == (2, 3)

        feas, cv = solver._calculate_population_feasibilities(
            np.array([[0.25, 0.25], [1., 1.]]))
        assert_equal(feas, [True, False])
        assert_almost_equal(cv, np.array([[0.0, 0.0, 0.], [2.1, 4.2, 0]]))
        assert cv.shape == (2, 3)
850
+
851
def test_constraint_solve(self):
    """A constrained solve succeeds and respects the constraint."""
    def constr_f(x):
        return np.array([x[0] + x[1]])

    nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)

    solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
                                         constraints=(nlc,))

    # trust-constr warns if the constraint function is linear
    with warns(UserWarning):
        res = solver.solve()

    assert constr_f(res.x) <= 1.9
    assert res.success
866
+
867
@pytest.mark.fail_slow(5)
def test_impossible_constraint(self):
    """Unsatisfiable constraints: failure reported, lowest-cv promoted."""
    def constr_f(x):
        # x >= 0 within the bounds, so x[0] + x[1] <= -1 is impossible
        return np.array([x[0] + x[1]])

    nlc = NonlinearConstraint(constr_f, -np.inf, -1)

    solver = DifferentialEvolutionSolver(
        rosen, [(0, 2), (0, 2)], constraints=(nlc,), popsize=1, seed=1, maxiter=100
    )

    # a UserWarning is issued because the 'trust-constr' polishing is
    # attempted on the least infeasible solution found.
    with warns(UserWarning):
        res = solver.solve()

    assert res.maxcv > 0
    assert not res.success

    # test _promote_lowest_energy works when none of the population is
    # feasible. In this case, the solution with the lowest constraint
    # violation should be promoted.
    solver = DifferentialEvolutionSolver(
        rosen, [(0, 2), (0, 2)], constraints=(nlc,), polish=False)
    next(solver)
    assert not solver.feasible.all()
    assert not np.isfinite(solver.population_energies).all()

    # now swap two of the entries in the population
    l = 20
    cv = solver.constraint_violation[0]

    solver.population_energies[[0, l]] = solver.population_energies[[l, 0]]
    solver.population[[0, l], :] = solver.population[[l, 0], :]
    solver.constraint_violation[[0, l], :] = (
        solver.constraint_violation[[l, 0], :])

    # promotion must restore the lowest-violation member to slot 0
    solver._promote_lowest_energy()
    assert_equal(solver.constraint_violation[0], cv)
906
+
907
+ def test_accept_trial(self):
908
+ # _accept_trial(self, energy_trial, feasible_trial, cv_trial,
909
+ # energy_orig, feasible_orig, cv_orig)
910
+ def constr_f(x):
911
+ return [x[0] + x[1]]
912
+ nlc = NonlinearConstraint(constr_f, -np.inf, 1.9)
913
+ solver = DifferentialEvolutionSolver(rosen, [(0, 2), (0, 2)],
914
+ constraints=(nlc,))
915
+ fn = solver._accept_trial
916
+ # both solutions are feasible, select lower energy
917
+ assert fn(0.1, True, np.array([0.]), 1.0, True, np.array([0.]))
918
+ assert (fn(1.0, True, np.array([0.0]), 0.1, True, np.array([0.0])) is False)
919
+ assert fn(0.1, True, np.array([0.]), 0.1, True, np.array([0.]))
920
+
921
+ # trial is feasible, original is not
922
+ assert fn(9.9, True, np.array([0.]), 1.0, False, np.array([1.]))
923
+
924
+ # trial and original are infeasible
925
+ # cv_trial have to be <= cv_original to be better
926
+ assert (fn(0.1, False, np.array([0.5, 0.5]),
927
+ 1.0, False, np.array([1., 1.0])))
928
+ assert (fn(0.1, False, np.array([0.5, 0.5]),
929
+ 1.0, False, np.array([1., 0.50])))
930
+ assert not (fn(1.0, False, np.array([0.5, 0.5]),
931
+ 1.0, False, np.array([1.0, 0.4])))
932
+
933
def test_constraint_wrapper(self):
    """_ConstraintWrapper.violation for Bounds, Linear (dense & sparse)
    and Nonlinear constraints, scalar and vectorized."""
    lb = np.array([0, 20, 30])
    ub = np.array([0.5, np.inf, 70])
    x0 = np.array([1, 2, 3])
    pc = _ConstraintWrapper(Bounds(lb, ub), x0)
    assert (pc.violation(x0) > 0).any()
    assert (pc.violation([0.25, 21, 31]) == 0).all()

    # check vectorized Bounds constraint
    xs = np.arange(1, 16).reshape(5, 3)
    violations = []
    for x in xs:
        violations.append(pc.violation(x))
    np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)

    x0 = np.array([1, 2, 3, 4])
    A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
    pc = _ConstraintWrapper(LinearConstraint(A, -np.inf, 0), x0)
    assert (pc.violation(x0) > 0).any()
    assert (pc.violation([-10, 2, -10, 4]) == 0).all()

    # check vectorized LinearConstraint, for 7 lots of parameter vectors
    # with each parameter vector being 4 long, with 3 constraints
    # xs is the same shape as stored in the differential evolution
    # population, but it's sent to the violation function as (len(x), M)
    xs = np.arange(1, 29).reshape(7, 4)
    violations = []
    for x in xs:
        violations.append(pc.violation(x))
    np.testing.assert_allclose(pc.violation(xs.T), np.array(violations).T)

    # a sparse constraint matrix must behave like the dense one
    pc = _ConstraintWrapper(LinearConstraint(csr_matrix(A), -np.inf, 0),
                            x0)
    assert (pc.violation(x0) > 0).any()
    assert (pc.violation([-10, 2, -10, 4]) == 0).all()

    def fun(x):
        return A.dot(x)

    # the same constraint expressed nonlinearly
    nonlinear = NonlinearConstraint(fun, -np.inf, 0)
    pc = _ConstraintWrapper(nonlinear, [-10, 2, -10, 4])
    assert (pc.violation(x0) > 0).any()
    assert (pc.violation([-10, 2, -10, 4]) == 0).all()
976
+
977
+ def test_constraint_wrapper_violation(self):
978
+ def cons_f(x):
979
+ # written in vectorised form to accept an array of (N, S)
980
+ # returning (M, S)
981
+ # where N is the number of parameters,
982
+ # S is the number of solution vectors to be examined,
983
+ # and M is the number of constraint components
984
+ return np.array([x[0] ** 2 + x[1],
985
+ x[0] ** 2 - x[1]])
986
+
987
+ nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])
988
+ pc = _ConstraintWrapper(nlc, [0.5, 1])
989
+ assert np.size(pc.bounds[0]) == 2
990
+
991
+ xs = [(0.5, 1), (0.5, 1.2), (1.2, 1.2), (0.1, -1.2), (0.1, 2.0)]
992
+ vs = [(0, 0), (0, 0.1), (0.64, 0), (0.19, 0), (0.01, 1.14)]
993
+
994
+ for x, v in zip(xs, vs):
995
+ assert_allclose(pc.violation(x), v)
996
+
997
+ # now check that we can vectorize the constraint wrapper
998
+ assert_allclose(pc.violation(np.array(xs).T),
999
+ np.array(vs).T)
1000
+ assert pc.fun(np.array(xs).T).shape == (2, len(xs))
1001
+ assert pc.violation(np.array(xs).T).shape == (2, len(xs))
1002
+ assert pc.num_constr == 2
1003
+ assert pc.parameter_count == 2
1004
+
1005
+ def test_matrix_linear_constraint(self):
1006
+ # gh20041 supplying an np.matrix to construct a LinearConstraint caused
1007
+ # _ConstraintWrapper to start returning constraint violations of the
1008
+ # wrong shape.
1009
+ with suppress_warnings() as sup:
1010
+ sup.filter(PendingDeprecationWarning)
1011
+ matrix = np.matrix([[1, 1, 1, 1.],
1012
+ [2, 2, 2, 2.]])
1013
+ lc = LinearConstraint(matrix, 0, 1)
1014
+ x0 = np.ones(4)
1015
+ cw = _ConstraintWrapper(lc, x0)
1016
+ # the shape of the constraint violation should be the same as the number
1017
+ # of constraints applied.
1018
+ assert cw.violation(x0).shape == (2,)
1019
+
1020
+ # let's try a vectorised violation call.
1021
+ xtrial = np.arange(4 * 5).reshape(4, 5)
1022
+ assert cw.violation(xtrial).shape == (2, 5)
1023
+
1024
@pytest.mark.fail_slow(10)
def test_L1(self):
    """Lampinen ([5]) test problem 1, solved three ways.

    The same linear constraints are supplied as a dense array, a sparse
    matrix, and a Linear/Nonlinear mixture; all must reach the known
    optimum.
    """
    # Lampinen ([5]) test problem 1

    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = np.sum(5*x[1:5]) - 5*x[1:5]@x[1:5] - np.sum(x[5:])
        return fun

    A = np.zeros((10, 14))  # 1-indexed to match reference
    A[1, [1, 2, 10, 11]] = 2, 2, 1, 1
    A[2, [1, 10]] = -8, 1
    A[3, [4, 5, 10]] = -2, -1, 1
    A[4, [1, 3, 10, 11]] = 2, 2, 1, 1
    A[5, [2, 11]] = -8, 1
    A[6, [6, 7, 11]] = -2, -1, 1
    A[7, [2, 3, 11, 12]] = 2, 2, 1, 1
    A[8, [3, 12]] = -8, 1
    A[9, [8, 9, 12]] = -2, -1, 1
    A = A[1:, 1:]  # drop the padding row/column again

    b = np.array([10, 0, 0, 10, 0, 0, 10, 0, 0])

    L = LinearConstraint(A, -np.inf, b)

    bounds = [(0, 1)]*9 + [(0, 100)]*3 + [(0, 1)]

    # using a lower popsize to speed the test up
    res = differential_evolution(
        f, bounds, strategy='best1bin', seed=1234, constraints=(L,),
        popsize=2, tol=0.05
    )

    x_opt = (1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 1)
    f_opt = -15

    assert_allclose(f(x_opt), f_opt, atol=6e-4)
    assert res.success
    assert_allclose(res.x, x_opt, atol=6e-4)
    assert_allclose(res.fun, f_opt, atol=5e-3)
    assert_(np.all(A@res.x <= b))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))

    # now repeat the same solve, using the same overall constraints,
    # but using a sparse matrix for the LinearConstraint instead of an
    # array

    L = LinearConstraint(csr_matrix(A), -np.inf, b)

    # using a lower popsize to speed the test up
    res = differential_evolution(
        f, bounds, strategy='best1bin', seed=1234, constraints=(L,),
        popsize=2, tol=0.05
    )

    assert_allclose(f(x_opt), f_opt)
    assert res.success
    assert_allclose(res.x, x_opt, atol=5e-4)
    assert_allclose(res.fun, f_opt, atol=5e-3)
    assert_(np.all(A@res.x <= b))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))

    # now repeat the same solve, using the same overall constraints,
    # but specify half the constraints in terms of LinearConstraint,
    # and the other half by NonlinearConstraint
    def c1(x):
        x = np.hstack(([0], x))
        return [2*x[2] + 2*x[3] + x[11] + x[12],
                -8*x[3] + x[12]]

    def c2(x):
        x = np.hstack(([0], x))
        return -2*x[8] - x[9] + x[12]

    L = LinearConstraint(A[:5, :], -np.inf, b[:5])
    L2 = LinearConstraint(A[5:6, :], -np.inf, b[5:6])
    N = NonlinearConstraint(c1, -np.inf, b[6:8])
    N2 = NonlinearConstraint(c2, -np.inf, b[8:9])
    constraints = (L, N, L2, N2)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(
            f, bounds, strategy='best1bin', seed=1234,
            constraints=constraints, popsize=2, tol=0.05
        )

    assert_allclose(res.x, x_opt, atol=6e-4)
    assert_allclose(res.fun, f_opt, atol=5e-3)
    assert_(np.all(A@res.x <= b))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1118
+
1119
@pytest.mark.fail_slow(5)
def test_L2(self):
    """Lampinen ([5]) test problem 2: nonlinear constraints only."""
    # Lampinen ([5]) test problem 2

    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = ((x[1]-10)**2 + 5*(x[2]-12)**2 + x[3]**4 + 3*(x[4]-11)**2 +
               10*x[5]**6 + 7*x[6]**2 + x[7]**4 - 4*x[6]*x[7] - 10*x[6] -
               8*x[7])
        return fun

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [127 - 2*x[1]**2 - 3*x[2]**4 - x[3] - 4*x[4]**2 - 5*x[5],
                196 - 23*x[1] - x[2]**2 - 6*x[6]**2 + 8*x[7],
                282 - 7*x[1] - 3*x[2] - 10*x[3]**2 - x[4] + x[5],
                -4*x[1]**2 - x[2]**2 + 3*x[1]*x[2] - 2*x[3]**2 -
                5*x[6] + 11*x[7]]

    N = NonlinearConstraint(c1, 0, np.inf)
    bounds = [(-10, 10)]*7
    # NOTE(review): (N) is not a tuple — a single bare constraint is passed
    constraints = (N)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(f, bounds, strategy='best1bin',
                                     seed=1234, constraints=constraints)

    f_opt = 680.6300599487869
    x_opt = (2.330499, 1.951372, -0.4775414, 4.365726,
             -0.6244870, 1.038131, 1.594227)

    assert_allclose(f(x_opt), f_opt)
    assert_allclose(res.fun, f_opt)
    assert_allclose(res.x, x_opt, atol=1e-5)
    assert res.success
    assert_(np.all(np.array(c1(res.x)) >= 0))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1158
+
1159
@pytest.mark.fail_slow(5)
def test_L3(self):
    """Lampinen ([5]) test problem 3: mixed linear/nonlinear constraints."""
    # Lampinen ([5]) test problem 3

    def f(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        fun = (x[1]**2 + x[2]**2 + x[1]*x[2] - 14*x[1] - 16*x[2] +
               (x[3]-10)**2 + 4*(x[4]-5)**2 + (x[5]-3)**2 + 2*(x[6]-1)**2 +
               5*x[7]**2 + 7*(x[8]-11)**2 + 2*(x[9]-10)**2 +
               (x[10] - 7)**2 + 45
               )
        return fun  # maximize

    A = np.zeros((4, 11))
    A[1, [1, 2, 7, 8]] = -4, -5, 3, -9
    A[2, [1, 2, 7, 8]] = -10, 8, 17, -2
    A[3, [1, 2, 9, 10]] = 8, -2, -5, 2
    A = A[1:, 1:]  # drop the 1-indexing padding
    b = np.array([-105, 0, -12])

    def c1(x):
        x = np.hstack(([0], x))  # 1-indexed to match reference
        return [3*x[1] - 6*x[2] - 12*(x[9]-8)**2 + 7*x[10],
                -3*(x[1]-2)**2 - 4*(x[2]-3)**2 - 2*x[3]**2 + 7*x[4] + 120,
                -x[1]**2 - 2*(x[2]-2)**2 + 2*x[1]*x[2] - 14*x[5] + 6*x[6],
                -5*x[1]**2 - 8*x[2] - (x[3]-6)**2 + 2*x[4] + 40,
                -0.5*(x[1]-8)**2 - 2*(x[2]-4)**2 - 3*x[5]**2 + x[6] + 30]

    L = LinearConstraint(A, b, np.inf)
    N = NonlinearConstraint(c1, 0, np.inf)
    bounds = [(-10, 10)]*10
    constraints = (L, N)

    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        res = differential_evolution(f, bounds, seed=1234,
                                     constraints=constraints, popsize=3)

    x_opt = (2.171996, 2.363683, 8.773926, 5.095984, 0.9906548,
             1.430574, 1.321644, 9.828726, 8.280092, 8.375927)
    f_opt = 24.3062091

    assert_allclose(f(x_opt), f_opt, atol=1e-5)
    assert_allclose(res.x, x_opt, atol=1e-6)
    assert_allclose(res.fun, f_opt, atol=1e-5)
    assert res.success
    assert_(np.all(A @ res.x >= b))
    assert_(np.all(np.array(c1(res.x)) >= 0))
    assert_(np.all(res.x >= np.array(bounds)[:, 0]))
    assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1209
+
1210
+ @pytest.mark.fail_slow(5)
1211
+ def test_L4(self):
1212
+ # Lampinen ([5]) test problem 4
1213
+ def f(x):
1214
+ return np.sum(x[:3])
1215
+
1216
+ A = np.zeros((4, 9))
1217
+ A[1, [4, 6]] = 0.0025, 0.0025
1218
+ A[2, [5, 7, 4]] = 0.0025, 0.0025, -0.0025
1219
+ A[3, [8, 5]] = 0.01, -0.01
1220
+ A = A[1:, 1:]
1221
+ b = np.array([1, 1, 1])
1222
+
1223
+ def c1(x):
1224
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1225
+ return [x[1]*x[6] - 833.33252*x[4] - 100*x[1] + 83333.333,
1226
+ x[2]*x[7] - 1250*x[5] - x[2]*x[4] + 1250*x[4],
1227
+ x[3]*x[8] - 1250000 - x[3]*x[5] + 2500*x[5]]
1228
+
1229
+ L = LinearConstraint(A, -np.inf, 1)
1230
+ N = NonlinearConstraint(c1, 0, np.inf)
1231
+
1232
+ bounds = [(100, 10000)] + [(1000, 10000)]*2 + [(10, 1000)]*5
1233
+ constraints = (L, N)
1234
+
1235
+ with suppress_warnings() as sup:
1236
+ sup.filter(UserWarning)
1237
+ res = differential_evolution(
1238
+ f, bounds, strategy='best1bin', seed=1234,
1239
+ constraints=constraints, popsize=3, tol=0.05
1240
+ )
1241
+
1242
+ f_opt = 7049.248
1243
+
1244
+ x_opt = [579.306692, 1359.97063, 5109.9707, 182.0177, 295.601172,
1245
+ 217.9823, 286.416528, 395.601172]
1246
+
1247
+ assert_allclose(f(x_opt), f_opt, atol=0.001)
1248
+ assert_allclose(res.fun, f_opt, atol=0.001)
1249
+
1250
+ # use higher tol here for 32-bit Windows, see gh-11693
1251
+ if (platform.system() == 'Windows' and np.dtype(np.intp).itemsize < 8):
1252
+ assert_allclose(res.x, x_opt, rtol=2.4e-6, atol=0.0035)
1253
+ else:
1254
+ # tolerance determined from macOS + MKL failure, see gh-12701
1255
+ assert_allclose(res.x, x_opt, rtol=5e-6, atol=0.0024)
1256
+
1257
+ assert res.success
1258
+ assert_(np.all(A @ res.x <= b))
1259
+ assert_(np.all(np.array(c1(res.x)) >= 0))
1260
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1261
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1262
+
1263
+ @pytest.mark.fail_slow(5)
1264
+ def test_L5(self):
1265
+ # Lampinen ([5]) test problem 5
1266
+
1267
+ def f(x):
1268
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1269
+ fun = (np.sin(2*np.pi*x[1])**3*np.sin(2*np.pi*x[2]) /
1270
+ (x[1]**3*(x[1]+x[2])))
1271
+ return -fun # maximize
1272
+
1273
+ def c1(x):
1274
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1275
+ return [x[1]**2 - x[2] + 1,
1276
+ 1 - x[1] + (x[2]-4)**2]
1277
+
1278
+ N = NonlinearConstraint(c1, -np.inf, 0)
1279
+ bounds = [(0, 10)]*2
1280
+ constraints = (N)
1281
+
1282
+ res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
1283
+ constraints=constraints)
1284
+
1285
+ x_opt = (1.22797135, 4.24537337)
1286
+ f_opt = -0.095825
1287
+ assert_allclose(f(x_opt), f_opt, atol=2e-5)
1288
+ assert_allclose(res.fun, f_opt, atol=1e-4)
1289
+ assert res.success
1290
+ assert_(np.all(np.array(c1(res.x)) <= 0))
1291
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1292
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1293
+
1294
+ @pytest.mark.fail_slow(5)
1295
+ def test_L6(self):
1296
+ # Lampinen ([5]) test problem 6
1297
+ def f(x):
1298
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1299
+ fun = (x[1]-10)**3 + (x[2] - 20)**3
1300
+ return fun
1301
+
1302
+ def c1(x):
1303
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1304
+ return [(x[1]-5)**2 + (x[2] - 5)**2 - 100,
1305
+ -(x[1]-6)**2 - (x[2] - 5)**2 + 82.81]
1306
+
1307
+ N = NonlinearConstraint(c1, 0, np.inf)
1308
+ bounds = [(13, 100), (0, 100)]
1309
+ constraints = (N)
1310
+ res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
1311
+ constraints=constraints, tol=1e-7)
1312
+ x_opt = (14.095, 0.84296)
1313
+ f_opt = -6961.814744
1314
+
1315
+ assert_allclose(f(x_opt), f_opt, atol=1e-6)
1316
+ assert_allclose(res.fun, f_opt, atol=0.001)
1317
+ assert_allclose(res.x, x_opt, atol=1e-4)
1318
+ assert res.success
1319
+ assert_(np.all(np.array(c1(res.x)) >= 0))
1320
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1321
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1322
+
1323
+ def test_L7(self):
1324
+ # Lampinen ([5]) test problem 7
1325
+ def f(x):
1326
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1327
+ fun = (5.3578547*x[3]**2 + 0.8356891*x[1]*x[5] +
1328
+ 37.293239*x[1] - 40792.141)
1329
+ return fun
1330
+
1331
+ def c1(x):
1332
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1333
+ return [
1334
+ 85.334407 + 0.0056858*x[2]*x[5] + 0.0006262*x[1]*x[4] -
1335
+ 0.0022053*x[3]*x[5],
1336
+
1337
+ 80.51249 + 0.0071317*x[2]*x[5] + 0.0029955*x[1]*x[2] +
1338
+ 0.0021813*x[3]**2,
1339
+
1340
+ 9.300961 + 0.0047026*x[3]*x[5] + 0.0012547*x[1]*x[3] +
1341
+ 0.0019085*x[3]*x[4]
1342
+ ]
1343
+
1344
+ N = NonlinearConstraint(c1, [0, 90, 20], [92, 110, 25])
1345
+
1346
+ bounds = [(78, 102), (33, 45)] + [(27, 45)]*3
1347
+ constraints = (N)
1348
+
1349
+ res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
1350
+ constraints=constraints)
1351
+
1352
+ # using our best solution, rather than Lampinen/Koziel. Koziel solution
1353
+ # doesn't satisfy constraints, Lampinen f_opt just plain wrong.
1354
+ x_opt = [78.00000686, 33.00000362, 29.99526064, 44.99999971,
1355
+ 36.77579979]
1356
+
1357
+ f_opt = -30665.537578
1358
+
1359
+ assert_allclose(f(x_opt), f_opt)
1360
+ assert_allclose(res.x, x_opt, atol=1e-3)
1361
+ assert_allclose(res.fun, f_opt, atol=1e-3)
1362
+
1363
+ assert res.success
1364
+ assert_(np.all(np.array(c1(res.x)) >= np.array([0, 90, 20])))
1365
+ assert_(np.all(np.array(c1(res.x)) <= np.array([92, 110, 25])))
1366
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1367
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1368
+
1369
+ @pytest.mark.xslow
1370
+ @pytest.mark.xfail(platform.machine() == 'ppc64le',
1371
+ reason="fails on ppc64le")
1372
+ def test_L8(self):
1373
+ def f(x):
1374
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1375
+ fun = 3*x[1] + 0.000001*x[1]**3 + 2*x[2] + 0.000002/3*x[2]**3
1376
+ return fun
1377
+
1378
+ A = np.zeros((3, 5))
1379
+ A[1, [4, 3]] = 1, -1
1380
+ A[2, [3, 4]] = 1, -1
1381
+ A = A[1:, 1:]
1382
+ b = np.array([-.55, -.55])
1383
+
1384
+ def c1(x):
1385
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1386
+ return [
1387
+ 1000*np.sin(-x[3]-0.25) + 1000*np.sin(-x[4]-0.25) +
1388
+ 894.8 - x[1],
1389
+ 1000*np.sin(x[3]-0.25) + 1000*np.sin(x[3]-x[4]-0.25) +
1390
+ 894.8 - x[2],
1391
+ 1000*np.sin(x[4]-0.25) + 1000*np.sin(x[4]-x[3]-0.25) +
1392
+ 1294.8
1393
+ ]
1394
+ L = LinearConstraint(A, b, np.inf)
1395
+ N = NonlinearConstraint(c1, np.full(3, -0.001), np.full(3, 0.001))
1396
+
1397
+ bounds = [(0, 1200)]*2+[(-.55, .55)]*2
1398
+ constraints = (L, N)
1399
+
1400
+ with suppress_warnings() as sup:
1401
+ sup.filter(UserWarning)
1402
+ # original Lampinen test was with rand1bin, but that takes a
1403
+ # huge amount of CPU time. Changing strategy to best1bin speeds
1404
+ # things up a lot
1405
+ res = differential_evolution(f, bounds, strategy='best1bin',
1406
+ seed=1234, constraints=constraints,
1407
+ maxiter=5000)
1408
+
1409
+ x_opt = (679.9453, 1026.067, 0.1188764, -0.3962336)
1410
+ f_opt = 5126.4981
1411
+
1412
+ assert_allclose(f(x_opt), f_opt, atol=1e-3)
1413
+ assert_allclose(res.x[:2], x_opt[:2], atol=2e-3)
1414
+ assert_allclose(res.x[2:], x_opt[2:], atol=2e-3)
1415
+ assert_allclose(res.fun, f_opt, atol=2e-2)
1416
+ assert res.success
1417
+ assert_(np.all(A@res.x >= b))
1418
+ assert_(np.all(np.array(c1(res.x)) >= -0.001))
1419
+ assert_(np.all(np.array(c1(res.x)) <= 0.001))
1420
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1421
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1422
+
1423
+ @pytest.mark.fail_slow(5)
1424
+ def test_L9(self):
1425
+ # Lampinen ([5]) test problem 9
1426
+
1427
+ def f(x):
1428
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1429
+ return x[1]**2 + (x[2]-1)**2
1430
+
1431
+ def c1(x):
1432
+ x = np.hstack(([0], x)) # 1-indexed to match reference
1433
+ return [x[2] - x[1]**2]
1434
+
1435
+ N = NonlinearConstraint(c1, [-.001], [0.001])
1436
+
1437
+ bounds = [(-1, 1)]*2
1438
+ constraints = (N)
1439
+ res = differential_evolution(f, bounds, strategy='rand1bin', seed=1234,
1440
+ constraints=constraints)
1441
+
1442
+ x_opt = [np.sqrt(2)/2, 0.5]
1443
+ f_opt = 0.75
1444
+
1445
+ assert_allclose(f(x_opt), f_opt)
1446
+ assert_allclose(np.abs(res.x), x_opt, atol=1e-3)
1447
+ assert_allclose(res.fun, f_opt, atol=1e-3)
1448
+ assert res.success
1449
+ assert_(np.all(np.array(c1(res.x)) >= -0.001))
1450
+ assert_(np.all(np.array(c1(res.x)) <= 0.001))
1451
+ assert_(np.all(res.x >= np.array(bounds)[:, 0]))
1452
+ assert_(np.all(res.x <= np.array(bounds)[:, 1]))
1453
+
1454
+ @pytest.mark.fail_slow(5)
1455
+ def test_integrality(self):
1456
+ # test fitting discrete distribution to data
1457
+ rng = np.random.default_rng(6519843218105)
1458
+ dist = stats.nbinom
1459
+ shapes = (5, 0.5)
1460
+ x = dist.rvs(*shapes, size=10000, random_state=rng)
1461
+
1462
+ def func(p, *args):
1463
+ dist, x = args
1464
+ # negative log-likelihood function
1465
+ ll = -np.log(dist.pmf(x, *p)).sum(axis=-1)
1466
+ if np.isnan(ll): # occurs when x is outside of support
1467
+ ll = np.inf # we don't want that
1468
+ return ll
1469
+
1470
+ integrality = [True, False]
1471
+ bounds = [(1, 18), (0, 0.95)]
1472
+
1473
+ res = differential_evolution(func, bounds, args=(dist, x),
1474
+ integrality=integrality, polish=False,
1475
+ seed=rng)
1476
+ # tolerance has to be fairly relaxed for the second parameter
1477
+ # because we're fitting a distribution to random variates.
1478
+ assert res.x[0] == 5
1479
+ assert_allclose(res.x, shapes, rtol=0.025)
1480
+
1481
+ # check that we can still use integrality constraints with polishing
1482
+ res2 = differential_evolution(func, bounds, args=(dist, x),
1483
+ integrality=integrality, polish=True,
1484
+ seed=rng)
1485
+
1486
+ def func2(p, *args):
1487
+ n, dist, x = args
1488
+ return func(np.array([n, p[0]]), dist, x)
1489
+
1490
+ # compare the DE derived solution to an LBFGSB solution (that doesn't
1491
+ # have to find the integral values). Note we're setting x0 to be the
1492
+ # output from the first DE result, thereby making the polishing step
1493
+ # and this minimisation pretty much equivalent.
1494
+ LBFGSB = minimize(func2, res2.x[1], args=(5, dist, x),
1495
+ bounds=[(0, 0.95)])
1496
+ assert_allclose(res2.x[1], LBFGSB.x)
1497
+ assert res2.fun <= res.fun
1498
+
1499
+ def test_integrality_limits(self):
1500
+ def f(x):
1501
+ return x
1502
+
1503
+ integrality = [True, False, True]
1504
+ bounds = [(0.2, 1.1), (0.9, 2.2), (3.3, 4.9)]
1505
+
1506
+ # no integrality constraints
1507
+ solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
1508
+ integrality=False)
1509
+ assert_allclose(solver.limits[0], [0.2, 0.9, 3.3])
1510
+ assert_allclose(solver.limits[1], [1.1, 2.2, 4.9])
1511
+
1512
+ # with integrality constraints
1513
+ solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
1514
+ integrality=integrality)
1515
+ assert_allclose(solver.limits[0], [0.5, 0.9, 3.5])
1516
+ assert_allclose(solver.limits[1], [1.5, 2.2, 4.5])
1517
+ assert_equal(solver.integrality, [True, False, True])
1518
+ assert solver.polish is False
1519
+
1520
+ bounds = [(-1.2, -0.9), (0.9, 2.2), (-10.3, 4.1)]
1521
+ solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
1522
+ integrality=integrality)
1523
+ assert_allclose(solver.limits[0], [-1.5, 0.9, -10.5])
1524
+ assert_allclose(solver.limits[1], [-0.5, 2.2, 4.5])
1525
+
1526
+ # A lower bound of -1.2 is converted to
1527
+ # np.nextafter(np.ceil(-1.2) - 0.5, np.inf)
1528
+ # with a similar process to the upper bound. Check that the
1529
+ # conversions work
1530
+ assert_allclose(np.round(solver.limits[0]), [-1.0, 1.0, -10.0])
1531
+ assert_allclose(np.round(solver.limits[1]), [-1.0, 2.0, 4.0])
1532
+
1533
+ bounds = [(-10.2, -8.1), (0.9, 2.2), (-10.9, -9.9999)]
1534
+ solver = DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
1535
+ integrality=integrality)
1536
+ assert_allclose(solver.limits[0], [-10.5, 0.9, -10.5])
1537
+ assert_allclose(solver.limits[1], [-8.5, 2.2, -9.5])
1538
+
1539
+ bounds = [(-10.2, -10.1), (0.9, 2.2), (-10.9, -9.9999)]
1540
+ with pytest.raises(ValueError, match='One of the integrality'):
1541
+ DifferentialEvolutionSolver(f, bounds=bounds, polish=False,
1542
+ integrality=integrality)
1543
+
1544
+ @pytest.mark.fail_slow(5)
1545
+ def test_vectorized(self):
1546
+ def quadratic(x):
1547
+ return np.sum(x**2)
1548
+
1549
+ def quadratic_vec(x):
1550
+ return np.sum(x**2, axis=0)
1551
+
1552
+ # A vectorized function needs to accept (len(x), S) and return (S,)
1553
+ with pytest.raises(RuntimeError, match='The vectorized function'):
1554
+ differential_evolution(quadratic, self.bounds,
1555
+ vectorized=True, updating='deferred')
1556
+
1557
+ # vectorized overrides the updating keyword, check for warning
1558
+ with warns(UserWarning, match="differential_evolution: the 'vector"):
1559
+ differential_evolution(quadratic_vec, self.bounds,
1560
+ vectorized=True)
1561
+
1562
+ # vectorized defers to the workers keyword, check for warning
1563
+ with warns(UserWarning, match="differential_evolution: the 'workers"):
1564
+ differential_evolution(quadratic_vec, self.bounds,
1565
+ vectorized=True, workers=map,
1566
+ updating='deferred')
1567
+
1568
+ ncalls = [0]
1569
+
1570
+ def rosen_vec(x):
1571
+ ncalls[0] += 1
1572
+ return rosen(x)
1573
+
1574
+ bounds = [(0, 10), (0, 10)]
1575
+ res1 = differential_evolution(rosen, bounds, updating='deferred',
1576
+ seed=1)
1577
+ res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
1578
+ updating='deferred', seed=1)
1579
+
1580
+ # the two minimisation runs should be functionally equivalent
1581
+ assert_allclose(res1.x, res2.x)
1582
+ assert ncalls[0] == res2.nfev
1583
+ assert res1.nit == res2.nit
1584
+
1585
+ def test_vectorized_constraints(self):
1586
+ def constr_f(x):
1587
+ return np.array([x[0] + x[1]])
1588
+
1589
+ def constr_f2(x):
1590
+ return np.array([x[0]**2 + x[1], x[0] - x[1]])
1591
+
1592
+ nlc1 = NonlinearConstraint(constr_f, -np.inf, 1.9)
1593
+ nlc2 = NonlinearConstraint(constr_f2, (0.9, 0.5), (2.0, 2.0))
1594
+
1595
+ def rosen_vec(x):
1596
+ # accept an (len(x0), S) array, returning a (S,) array
1597
+ v = 100 * (x[1:] - x[:-1]**2.0)**2.0
1598
+ v += (1 - x[:-1])**2.0
1599
+ return np.squeeze(v)
1600
+
1601
+ bounds = [(0, 10), (0, 10)]
1602
+
1603
+ res1 = differential_evolution(rosen, bounds, updating='deferred',
1604
+ seed=1, constraints=[nlc1, nlc2],
1605
+ polish=False)
1606
+ res2 = differential_evolution(rosen_vec, bounds, vectorized=True,
1607
+ updating='deferred', seed=1,
1608
+ constraints=[nlc1, nlc2],
1609
+ polish=False)
1610
+ # the two minimisation runs should be functionally equivalent
1611
+ assert_allclose(res1.x, res2.x)
1612
+
1613
+ def test_constraint_violation_error_message(self):
1614
+
1615
+ def func(x):
1616
+ return np.cos(x[0]) + np.sin(x[1])
1617
+
1618
+ # Intentionally infeasible constraints.
1619
+ c0 = NonlinearConstraint(lambda x: x[1] - (x[0]-1)**2, 0, np.inf)
1620
+ c1 = NonlinearConstraint(lambda x: x[1] + x[0]**2, -np.inf, 0)
1621
+
1622
+ result = differential_evolution(func,
1623
+ bounds=[(-1, 2), (-1, 1)],
1624
+ constraints=[c0, c1],
1625
+ maxiter=10,
1626
+ polish=False,
1627
+ seed=864197532)
1628
+ assert result.success is False
1629
+ # The numerical value in the error message might be sensitive to
1630
+ # changes in the implementation. It can be updated if the code is
1631
+ # changed. The essential part of the test is that there is a number
1632
+ # after the '=', so if necessary, the text could be reduced to, say,
1633
+ # "MAXCV = 0.".
1634
+ assert "MAXCV = 0.4" in result.message
1635
+
1636
+ @pytest.mark.fail_slow(10) # fail-slow exception by request - see gh-20806
1637
+ def test_strategy_fn(self):
1638
+ # examines ability to customize strategy by mimicking one of the
1639
+ # in-built strategies
1640
+ parameter_count = 4
1641
+ popsize = 10
1642
+ bounds = [(0, 10.)] * parameter_count
1643
+ total_popsize = parameter_count * popsize
1644
+ mutation = 0.8
1645
+ recombination = 0.7
1646
+
1647
+ calls = [0]
1648
+ def custom_strategy_fn(candidate, population, rng=None):
1649
+ calls[0] += 1
1650
+ trial = np.copy(population[candidate])
1651
+ fill_point = rng.choice(parameter_count)
1652
+
1653
+ pool = np.arange(total_popsize)
1654
+ rng.shuffle(pool)
1655
+ idxs = pool[:2 + 1]
1656
+ idxs = idxs[idxs != candidate][:2]
1657
+
1658
+ r0, r1 = idxs[:2]
1659
+
1660
+ bprime = (population[0] + mutation *
1661
+ (population[r0] - population[r1]))
1662
+
1663
+ crossovers = rng.uniform(size=parameter_count)
1664
+ crossovers = crossovers < recombination
1665
+ crossovers[fill_point] = True
1666
+ trial = np.where(crossovers, bprime, trial)
1667
+ return trial
1668
+
1669
+ solver = DifferentialEvolutionSolver(
1670
+ rosen,
1671
+ bounds,
1672
+ popsize=popsize,
1673
+ recombination=recombination,
1674
+ mutation=mutation,
1675
+ maxiter=2,
1676
+ strategy=custom_strategy_fn,
1677
+ seed=10,
1678
+ polish=False
1679
+ )
1680
+ assert solver.strategy is custom_strategy_fn
1681
+ solver.solve()
1682
+ assert calls[0] > 0
1683
+
1684
+ # check custom strategy works with updating='deferred'
1685
+ res = differential_evolution(
1686
+ rosen, bounds, strategy=custom_strategy_fn, updating='deferred'
1687
+ )
1688
+ assert res.success
1689
+
1690
+ def custom_strategy_fn(candidate, population, rng=None):
1691
+ return np.array([1.0, 2.0])
1692
+
1693
+ with pytest.raises(RuntimeError, match="strategy*"):
1694
+ differential_evolution(
1695
+ rosen,
1696
+ bounds,
1697
+ strategy=custom_strategy_fn
1698
+ )
1699
+
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__dual_annealing.py ADDED
@@ -0,0 +1,406 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dual annealing unit tests implementation.
2
+ # Copyright (c) 2018 Sylvain Gubian <sylvain.gubian@pmi.com>,
3
+ # Yang Xiang <yang.xiang@pmi.com>
4
+ # Author: Sylvain Gubian, PMP S.A.
5
+ """
6
+ Unit tests for the dual annealing global optimizer
7
+ """
8
+ from scipy.optimize import dual_annealing, Bounds
9
+
10
+ from scipy.optimize._dual_annealing import EnergyState
11
+ from scipy.optimize._dual_annealing import LocalSearchWrapper
12
+ from scipy.optimize._dual_annealing import ObjectiveFunWrapper
13
+ from scipy.optimize._dual_annealing import StrategyChain
14
+ from scipy.optimize._dual_annealing import VisitingDistribution
15
+ from scipy.optimize import rosen, rosen_der
16
+ import pytest
17
+ import numpy as np
18
+ from numpy.testing import assert_equal, assert_allclose, assert_array_less
19
+ from pytest import raises as assert_raises
20
+ from scipy._lib._util import check_random_state
21
+
22
+
23
+ class TestDualAnnealing:
24
+
25
+ def setup_method(self):
26
+ # A function that returns always infinity for initialization tests
27
+ self.weirdfunc = lambda x: np.inf
28
+ # 2-D bounds for testing function
29
+ self.ld_bounds = [(-5.12, 5.12)] * 2
30
+ # 4-D bounds for testing function
31
+ self.hd_bounds = self.ld_bounds * 4
32
+ # Number of values to be generated for testing visit function
33
+ self.nbtestvalues = 5000
34
+ self.high_temperature = 5230
35
+ self.low_temperature = 0.1
36
+ self.qv = 2.62
37
+ self.seed = 1234
38
+ self.rs = check_random_state(self.seed)
39
+ self.nb_fun_call = 0
40
+ self.ngev = 0
41
+
42
+ def callback(self, x, f, context):
43
+ # For testing callback mechanism. Should stop for e <= 1 as
44
+ # the callback function returns True
45
+ if f <= 1.0:
46
+ return True
47
+
48
+ def func(self, x, args=()):
49
+ # Using Rastrigin function for performing tests
50
+ if args:
51
+ shift = args
52
+ else:
53
+ shift = 0
54
+ y = np.sum((x - shift) ** 2 - 10 * np.cos(2 * np.pi * (
55
+ x - shift))) + 10 * np.size(x) + shift
56
+ self.nb_fun_call += 1
57
+ return y
58
+
59
+ def rosen_der_wrapper(self, x, args=()):
60
+ self.ngev += 1
61
+ return rosen_der(x, *args)
62
+
63
+ # FIXME: there are some discontinuities in behaviour as a function of `qv`,
64
+ # this needs investigating - see gh-12384
65
+ @pytest.mark.parametrize('qv', [1.1, 1.41, 2, 2.62, 2.9])
66
+ def test_visiting_stepping(self, qv):
67
+ lu = list(zip(*self.ld_bounds))
68
+ lower = np.array(lu[0])
69
+ upper = np.array(lu[1])
70
+ dim = lower.size
71
+ vd = VisitingDistribution(lower, upper, qv, self.rs)
72
+ values = np.zeros(dim)
73
+ x_step_low = vd.visiting(values, 0, self.high_temperature)
74
+ # Make sure that only the first component is changed
75
+ assert_equal(np.not_equal(x_step_low, 0), True)
76
+ values = np.zeros(dim)
77
+ x_step_high = vd.visiting(values, dim, self.high_temperature)
78
+ # Make sure that component other than at dim has changed
79
+ assert_equal(np.not_equal(x_step_high[0], 0), True)
80
+
81
+ @pytest.mark.parametrize('qv', [2.25, 2.62, 2.9])
82
+ def test_visiting_dist_high_temperature(self, qv):
83
+ lu = list(zip(*self.ld_bounds))
84
+ lower = np.array(lu[0])
85
+ upper = np.array(lu[1])
86
+ vd = VisitingDistribution(lower, upper, qv, self.rs)
87
+ # values = np.zeros(self.nbtestvalues)
88
+ # for i in np.arange(self.nbtestvalues):
89
+ # values[i] = vd.visit_fn(self.high_temperature)
90
+ values = vd.visit_fn(self.high_temperature, self.nbtestvalues)
91
+
92
+ # Visiting distribution is a distorted version of Cauchy-Lorentz
93
+ # distribution, and as no 1st and higher moments (no mean defined,
94
+ # no variance defined).
95
+ # Check that big tails values are generated
96
+ assert_array_less(np.min(values), 1e-10)
97
+ assert_array_less(1e+10, np.max(values))
98
+
99
+ def test_reset(self):
100
+ owf = ObjectiveFunWrapper(self.weirdfunc)
101
+ lu = list(zip(*self.ld_bounds))
102
+ lower = np.array(lu[0])
103
+ upper = np.array(lu[1])
104
+ es = EnergyState(lower, upper)
105
+ assert_raises(ValueError, es.reset, owf, check_random_state(None))
106
+
107
+ def test_low_dim(self):
108
+ ret = dual_annealing(
109
+ self.func, self.ld_bounds, seed=self.seed)
110
+ assert_allclose(ret.fun, 0., atol=1e-12)
111
+ assert ret.success
112
+
113
+ @pytest.mark.fail_slow(5)
114
+ def test_high_dim(self):
115
+ ret = dual_annealing(self.func, self.hd_bounds, seed=self.seed)
116
+ assert_allclose(ret.fun, 0., atol=1e-12)
117
+ assert ret.success
118
+
119
+ def test_low_dim_no_ls(self):
120
+ ret = dual_annealing(self.func, self.ld_bounds,
121
+ no_local_search=True, seed=self.seed)
122
+ assert_allclose(ret.fun, 0., atol=1e-4)
123
+
124
+ @pytest.mark.fail_slow(5)
125
+ def test_high_dim_no_ls(self):
126
+ ret = dual_annealing(self.func, self.hd_bounds,
127
+ no_local_search=True, seed=self.seed)
128
+ assert_allclose(ret.fun, 0., atol=1e-4)
129
+
130
+ def test_nb_fun_call(self):
131
+ ret = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
132
+ assert_equal(self.nb_fun_call, ret.nfev)
133
+
134
+ def test_nb_fun_call_no_ls(self):
135
+ ret = dual_annealing(self.func, self.ld_bounds,
136
+ no_local_search=True, seed=self.seed)
137
+ assert_equal(self.nb_fun_call, ret.nfev)
138
+
139
+ def test_max_reinit(self):
140
+ assert_raises(ValueError, dual_annealing, self.weirdfunc,
141
+ self.ld_bounds)
142
+
143
+ @pytest.mark.fail_slow(5)
144
+ def test_reproduce(self):
145
+ res1 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
146
+ res2 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
147
+ res3 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
148
+ # If we have reproducible results, x components found has to
149
+ # be exactly the same, which is not the case with no seeding
150
+ assert_equal(res1.x, res2.x)
151
+ assert_equal(res1.x, res3.x)
152
+
153
+ def test_rand_gen(self):
154
+ # check that np.random.Generator can be used (numpy >= 1.17)
155
+ # obtain a np.random.Generator object
156
+ rng = np.random.default_rng(1)
157
+
158
+ res1 = dual_annealing(self.func, self.ld_bounds, seed=rng)
159
+ # seed again
160
+ rng = np.random.default_rng(1)
161
+ res2 = dual_annealing(self.func, self.ld_bounds, seed=rng)
162
+ # If we have reproducible results, x components found has to
163
+ # be exactly the same, which is not the case with no seeding
164
+ assert_equal(res1.x, res2.x)
165
+
166
+ def test_bounds_integrity(self):
167
+ wrong_bounds = [(-5.12, 5.12), (1, 0), (5.12, 5.12)]
168
+ assert_raises(ValueError, dual_annealing, self.func,
169
+ wrong_bounds)
170
+
171
+ def test_bound_validity(self):
172
+ invalid_bounds = [(-5, 5), (-np.inf, 0), (-5, 5)]
173
+ assert_raises(ValueError, dual_annealing, self.func,
174
+ invalid_bounds)
175
+ invalid_bounds = [(-5, 5), (0, np.inf), (-5, 5)]
176
+ assert_raises(ValueError, dual_annealing, self.func,
177
+ invalid_bounds)
178
+ invalid_bounds = [(-5, 5), (0, np.nan), (-5, 5)]
179
+ assert_raises(ValueError, dual_annealing, self.func,
180
+ invalid_bounds)
181
+
182
+ def test_deprecated_local_search_options_bounds(self):
183
+ def func(x):
184
+ return np.sum((x - 5) * (x - 1))
185
+ bounds = list(zip([-6, -5], [6, 5]))
186
+ # Test bounds can be passed (see gh-10831)
187
+
188
+ with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
189
+ dual_annealing(
190
+ func,
191
+ bounds=bounds,
192
+ minimizer_kwargs={"method": "CG", "bounds": bounds})
193
+
194
+ def test_minimizer_kwargs_bounds(self):
195
+ def func(x):
196
+ return np.sum((x - 5) * (x - 1))
197
+ bounds = list(zip([-6, -5], [6, 5]))
198
+ # Test bounds can be passed (see gh-10831)
199
+ dual_annealing(
200
+ func,
201
+ bounds=bounds,
202
+ minimizer_kwargs={"method": "SLSQP", "bounds": bounds})
203
+
204
+ with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
205
+ dual_annealing(
206
+ func,
207
+ bounds=bounds,
208
+ minimizer_kwargs={"method": "CG", "bounds": bounds})
209
+
210
+ def test_max_fun_ls(self):
211
+ ret = dual_annealing(self.func, self.ld_bounds, maxfun=100,
212
+ seed=self.seed)
213
+
214
+ ls_max_iter = min(max(
215
+ len(self.ld_bounds) * LocalSearchWrapper.LS_MAXITER_RATIO,
216
+ LocalSearchWrapper.LS_MAXITER_MIN),
217
+ LocalSearchWrapper.LS_MAXITER_MAX)
218
+ assert ret.nfev <= 100 + ls_max_iter
219
+ assert not ret.success
220
+
221
+ def test_max_fun_no_ls(self):
222
+ ret = dual_annealing(self.func, self.ld_bounds,
223
+ no_local_search=True, maxfun=500, seed=self.seed)
224
+ assert ret.nfev <= 500
225
+ assert not ret.success
226
+
227
+ def test_maxiter(self):
228
+ ret = dual_annealing(self.func, self.ld_bounds, maxiter=700,
229
+ seed=self.seed)
230
+ assert ret.nit <= 700
231
+
232
+ # Testing that args are passed correctly for dual_annealing
233
+ def test_fun_args_ls(self):
234
+ ret = dual_annealing(self.func, self.ld_bounds,
235
+ args=((3.14159,)), seed=self.seed)
236
+ assert_allclose(ret.fun, 3.14159, atol=1e-6)
237
+
238
+ # Testing that args are passed correctly for pure simulated annealing
239
+ def test_fun_args_no_ls(self):
240
+ ret = dual_annealing(self.func, self.ld_bounds,
241
+ args=((3.14159, )), no_local_search=True,
242
+ seed=self.seed)
243
+ assert_allclose(ret.fun, 3.14159, atol=1e-4)
244
+
245
+ def test_callback_stop(self):
246
+ # Testing that callback make the algorithm stop for
247
+ # fun value <= 1.0 (see callback method)
248
+ ret = dual_annealing(self.func, self.ld_bounds,
249
+ callback=self.callback, seed=self.seed)
250
+ assert ret.fun <= 1.0
251
+ assert 'stop early' in ret.message[0]
252
+ assert not ret.success
253
+
254
+ @pytest.mark.parametrize('method, atol', [
255
+ ('Nelder-Mead', 2e-5),
256
+ ('COBYLA', 1e-5),
257
+ ('COBYQA', 1e-8),
258
+ ('Powell', 1e-8),
259
+ ('CG', 1e-8),
260
+ ('BFGS', 1e-8),
261
+ ('TNC', 1e-8),
262
+ ('SLSQP', 2e-7),
263
+ ])
264
+ def test_multi_ls_minimizer(self, method, atol):
265
+ ret = dual_annealing(self.func, self.ld_bounds,
266
+ minimizer_kwargs=dict(method=method),
267
+ seed=self.seed)
268
+ assert_allclose(ret.fun, 0., atol=atol)
269
+
270
+ def test_wrong_restart_temp(self):
271
+ assert_raises(ValueError, dual_annealing, self.func,
272
+ self.ld_bounds, restart_temp_ratio=1)
273
+ assert_raises(ValueError, dual_annealing, self.func,
274
+ self.ld_bounds, restart_temp_ratio=0)
275
+
276
+ def test_gradient_gnev(self):
277
+ minimizer_opts = {
278
+ 'jac': self.rosen_der_wrapper,
279
+ }
280
+ ret = dual_annealing(rosen, self.ld_bounds,
281
+ minimizer_kwargs=minimizer_opts,
282
+ seed=self.seed)
283
+ assert ret.njev == self.ngev
284
+
285
+ @pytest.mark.fail_slow(5)
286
+ def test_from_docstring(self):
287
+ def func(x):
288
+ return np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
289
+ lw = [-5.12] * 10
290
+ up = [5.12] * 10
291
+ ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
292
+ assert_allclose(ret.x,
293
+ [-4.26437714e-09, -3.91699361e-09, -1.86149218e-09,
294
+ -3.97165720e-09, -6.29151648e-09, -6.53145322e-09,
295
+ -3.93616815e-09, -6.55623025e-09, -6.05775280e-09,
296
+ -5.00668935e-09], atol=4e-8)
297
+ assert_allclose(ret.fun, 0.000000, atol=5e-13)
298
+
299
+ @pytest.mark.parametrize('new_e, temp_step, accepted, accept_rate', [
300
+ (0, 100, 1000, 1.0097587941791923),
301
+ (0, 2, 1000, 1.2599210498948732),
302
+ (10, 100, 878, 0.8786035869128718),
303
+ (10, 60, 695, 0.6812920690579612),
304
+ (2, 100, 990, 0.9897404249173424),
305
+ ])
306
+ def test_accept_reject_probabilistic(
307
+ self, new_e, temp_step, accepted, accept_rate):
308
+ # Test accepts unconditionally with e < current_energy and
309
+ # probabilistically with e > current_energy
310
+
311
+ rs = check_random_state(123)
312
+
313
+ count_accepted = 0
314
+ iterations = 1000
315
+
316
+ accept_param = -5
317
+ current_energy = 1
318
+ for _ in range(iterations):
319
+ energy_state = EnergyState(lower=None, upper=None)
320
+ # Set energy state with current_energy, any location.
321
+ energy_state.update_current(current_energy, [0])
322
+
323
+ chain = StrategyChain(
324
+ accept_param, None, None, None, rs, energy_state)
325
+ # Normally this is set in run()
326
+ chain.temperature_step = temp_step
327
+
328
+ # Check if update is accepted.
329
+ chain.accept_reject(j=1, e=new_e, x_visit=[2])
330
+ if energy_state.current_energy == new_e:
331
+ count_accepted += 1
332
+
333
+ assert count_accepted == accepted
334
+
335
+ # Check accept rate
336
+ pqv = 1 - (1 - accept_param) * (new_e - current_energy) / temp_step
337
+ rate = 0 if pqv <= 0 else np.exp(np.log(pqv) / (1 - accept_param))
338
+
339
+ assert_allclose(rate, accept_rate)
340
+
341
+ @pytest.mark.fail_slow(5)
342
+ def test_bounds_class(self):
343
+ # test that result does not depend on the bounds type
344
+ def func(x):
345
+ f = np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
346
+ return f
347
+ lw = [-5.12] * 5
348
+ up = [5.12] * 5
349
+
350
+ # Unbounded global minimum is all zeros. Most bounds below will force
351
+ # a DV away from unbounded minimum and be active at solution.
352
+ up[0] = -2.0
353
+ up[1] = -1.0
354
+ lw[3] = 1.0
355
+ lw[4] = 2.0
356
+
357
+ # run optimizations
358
+ bounds = Bounds(lw, up)
359
+ ret_bounds_class = dual_annealing(func, bounds=bounds, seed=1234)
360
+
361
+ bounds_old = list(zip(lw, up))
362
+ ret_bounds_list = dual_annealing(func, bounds=bounds_old, seed=1234)
363
+
364
+ # test that found minima, function evaluations and iterations match
365
+ assert_allclose(ret_bounds_class.x, ret_bounds_list.x, atol=1e-8)
366
+ assert_allclose(ret_bounds_class.x, np.arange(-2, 3), atol=1e-7)
367
+ assert_allclose(ret_bounds_list.fun, ret_bounds_class.fun, atol=1e-9)
368
+ assert ret_bounds_list.nfev == ret_bounds_class.nfev
369
+
370
+ @pytest.mark.fail_slow(5)
371
+ def test_callable_jac_hess_with_args_gh11052(self):
372
+ # dual_annealing used to fail when `jac` was callable and `args` were
373
+ # used; check that this is resolved. Example is from gh-11052.
374
+
375
+ # extended to hess as part of closing gh20614
376
+ rng = np.random.default_rng(94253637693657847462)
377
+ def f(x, power):
378
+ return np.sum(np.exp(x ** power))
379
+
380
+ def jac(x, power):
381
+ return np.exp(x ** power) * power * x ** (power - 1)
382
+
383
+ def hess(x, power):
384
+ # calculated using WolframAlpha as d^2/dx^2 e^(x^p)
385
+ return np.diag(
386
+ power * np.exp(x ** power) * x ** (power - 2) *
387
+ (power * x ** power + power - 1)
388
+ )
389
+
390
+ def hessp(x, p, power):
391
+ return hess(x, power) @ p
392
+
393
+ res1 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng,
394
+ minimizer_kwargs=dict(method='L-BFGS-B'))
395
+ res2 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng,
396
+ minimizer_kwargs=dict(method='L-BFGS-B',
397
+ jac=jac))
398
+ res3 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng,
399
+ minimizer_kwargs=dict(method='newton-cg',
400
+ jac=jac, hess=hess))
401
+ res4 = dual_annealing(f, args=(2, ), bounds=[[0, 1], [0, 1]], seed=rng,
402
+ minimizer_kwargs=dict(method='newton-cg',
403
+ jac=jac, hessp=hessp))
404
+ assert_allclose(res1.fun, res2.fun, rtol=1e-6)
405
+ assert_allclose(res3.fun, res2.fun, rtol=1e-6)
406
+ assert_allclose(res4.fun, res2.fun, rtol=1e-6)
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__linprog_clean_inputs.py ADDED
@@ -0,0 +1,310 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unit tests for ``_clean_inputs``, the input validation/cleaning stage of linprog.
3
+ """
4
+ import numpy as np
5
+ from numpy.testing import assert_, assert_allclose, assert_equal
6
+ from pytest import raises as assert_raises
7
+ from scipy.optimize._linprog_util import _clean_inputs, _LPProblem
8
+ from scipy._lib._util import VisibleDeprecationWarning
9
+ from copy import deepcopy
10
+ from datetime import date
11
+
12
+
13
def test_aliasing():
    """
    Check that `_clean_inputs` has no side effects on its input: every
    attribute of the `_LPProblem` (`c`, `A_ub`, `b_ub`, `A_eq`, `b_eq`,
    `bounds`) must be left untouched.
    """
    lp = _LPProblem(
        c=1,
        A_ub=[[1]],
        b_ub=[1],
        A_eq=[[1]],
        b_eq=[1],
        bounds=(-np.inf, np.inf)
    )
    lp_copy = deepcopy(lp)

    _clean_inputs(lp)

    # compare every field against the pristine deep copy
    for field in ('c', 'A_ub', 'b_ub', 'A_eq', 'b_eq', 'bounds'):
        assert_(getattr(lp, field) == getattr(lp_copy, field),
                f"{field} modified by _clean_inputs")
37
+
38
+
39
def test_aliasing2():
    """
    Same intent as `test_aliasing`, but with array-valued attributes.
    """
    lp = _LPProblem(
        c=np.array([1, 1]),
        A_ub=np.array([[1, 1], [2, 2]]),
        b_ub=np.array([[1], [1]]),
        A_eq=np.array([[1, 1]]),
        b_eq=np.array([1]),
        bounds=[(-np.inf, np.inf), (None, 1)]
    )
    lp_copy = deepcopy(lp)

    _clean_inputs(lp)

    # arrays compare element-wise; bounds is a plain Python list
    for field in ('c', 'A_ub', 'b_ub', 'A_eq', 'b_eq'):
        assert_allclose(getattr(lp, field), getattr(lp_copy, field),
                        err_msg=f"{field} modified by _clean_inputs")
    assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs")
61
+
62
+
63
def test_missing_inputs():
    """A constraint matrix without its RHS vector (or vice versa) is invalid."""
    c = [1, 2]
    A_ub = np.array([[1, 1], [2, 2]])
    b_ub = np.array([1, 1])
    A_eq = np.array([[1, 1], [2, 2]])
    b_eq = np.array([1, 1])

    # no problem at all / missing objective -> TypeError
    assert_raises(TypeError, _clean_inputs)
    assert_raises(TypeError, _clean_inputs, _LPProblem(c=None))

    # half-specified constraint pairs -> ValueError
    incomplete_problems = [
        _LPProblem(c=c, A_ub=A_ub),
        _LPProblem(c=c, A_ub=A_ub, b_ub=None),
        _LPProblem(c=c, b_ub=b_ub),
        _LPProblem(c=c, A_ub=None, b_ub=b_ub),
        _LPProblem(c=c, A_eq=A_eq),
        _LPProblem(c=c, A_eq=A_eq, b_eq=None),
        _LPProblem(c=c, b_eq=b_eq),
        _LPProblem(c=c, A_eq=None, b_eq=b_eq),
    ]
    for lp in incomplete_problems:
        assert_raises(ValueError, _clean_inputs, lp)
80
+
81
+
82
def test_too_many_dimensions():
    """2-D cost vectors, 3-D matrices and 2-D RHS vectors must be rejected."""
    cb = [1, 2, 3, 4]
    A = np.random.rand(4, 4)
    bad2D = [[1, 2], [3, 4]]
    bad3D = np.random.rand(4, 4, 4)
    over_dimensioned = [
        _LPProblem(c=bad2D, A_ub=A, b_ub=cb),
        _LPProblem(c=cb, A_ub=bad3D, b_ub=cb),
        _LPProblem(c=cb, A_ub=A, b_ub=bad2D),
        _LPProblem(c=cb, A_eq=bad3D, b_eq=cb),
        _LPProblem(c=cb, A_eq=A, b_eq=bad2D),
    ]
    for lp in over_dimensioned:
        assert_raises(ValueError, _clean_inputs, lp)
92
+
93
+
94
def test_too_few_dimensions():
    """A 1-D array where a 2-D constraint matrix is required must be rejected."""
    bad = np.random.rand(4, 4).ravel()
    cb = np.random.rand(4)
    for kwargs in (dict(A_ub=bad, b_ub=cb), dict(A_eq=bad, b_eq=cb)):
        assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, **kwargs))
99
+
100
+
101
+ def test_inconsistent_dimensions():
102
+ m = 2
103
+ n = 4
104
+ c = [1, 2, 3, 4]
105
+
106
+ Agood = np.random.rand(m, n)
107
+ Abad = np.random.rand(m, n + 1)
108
+ bgood = np.random.rand(m)
109
+ bbad = np.random.rand(m + 1)
110
+ boundsbad = [(0, 1)] * (n + 1)
111
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Abad, b_ub=bgood))
112
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Agood, b_ub=bbad))
113
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Abad, b_eq=bgood))
114
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Agood, b_eq=bbad))
115
+ assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, bounds=boundsbad))
116
+ with np.testing.suppress_warnings() as sup:
117
+ sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged")
118
+ assert_raises(ValueError, _clean_inputs,
119
+ _LPProblem(c=c, bounds=[[1, 2], [2, 3], [3, 4], [4, 5, 6]]))
120
+
121
+
122
def test_type_errors():
    """Non-numeric data raises TypeError; malformed bounds raise ValueError."""
    lp = _LPProblem(
        c=[1, 2],
        A_ub=np.array([[1, 1], [2, 2]]),
        b_ub=np.array([1, 1]),
        A_eq=np.array([[1, 1], [2, 2]]),
        b_eq=np.array([1, 1]),
        bounds=[(0, 1)]
    )
    bad = "hello"

    # a string in place of any numeric field -> TypeError
    for field in ('c', 'A_ub', 'b_ub', 'A_eq', 'b_eq'):
        assert_raises(TypeError, _clean_inputs, lp._replace(**{field: bad}))

    # structurally-broken bounds specifications -> ValueError
    for bounds in (bad, "hi", ["hi"], [("hi")], [(1, "")],
                   [(1, 2), (1, "")], [[[1, 2]]]):
        assert_raises(ValueError, _clean_inputs, lp._replace(bounds=bounds))

    # a non-numeric scalar type inside bounds -> TypeError
    assert_raises(TypeError, _clean_inputs,
                  lp._replace(bounds=[(1, date(2020, 2, 29))]))
148
+
149
+
150
def test_non_finite_errors():
    """None, inf and nan are rejected in the objective and constraint data."""
    lp = _LPProblem(
        c=[1, 2],
        A_ub=np.array([[1, 1], [2, 2]]),
        b_ub=np.array([1, 1]),
        A_eq=np.array([[1, 1], [2, 2]]),
        b_eq=np.array([1, 1]),
        bounds=[(0, 1)]
    )
    # non-finite entries in the objective
    for c in ([0, None], [np.inf, 0], [0, -np.inf], [np.nan, 0]):
        assert_raises(ValueError, _clean_inputs, lp._replace(c=c))

    # non-finite entries in the constraint arrays
    assert_raises(ValueError, _clean_inputs, lp._replace(A_ub=[[1, 2], [None, 1]]))
    assert_raises(ValueError, _clean_inputs, lp._replace(b_ub=[np.inf, 1]))
    assert_raises(ValueError, _clean_inputs, lp._replace(A_eq=[[1, 2], [1, -np.inf]]))
    assert_raises(ValueError, _clean_inputs, lp._replace(b_eq=[1, np.nan]))
168
+
169
+
170
def test__clean_inputs1():
    """List inputs are converted to arrays of the expected shapes and the
    default bounds (0, inf) are filled in when `bounds` is None."""
    lp = _LPProblem(
        c=[1, 2],
        A_ub=[[1, 1], [2, 2]],
        b_ub=[1, 1],
        A_eq=[[1, 1], [2, 2]],
        b_eq=[1, 1],
        bounds=None
    )

    cleaned = _clean_inputs(lp)

    # values preserved ...
    for field in ('c', 'A_ub', 'b_ub', 'A_eq', 'b_eq'):
        assert_allclose(getattr(cleaned, field), np.array(getattr(lp, field)))
    assert_equal(cleaned.bounds, [(0, np.inf)] * 2)

    # ... and shapes normalized
    assert_(cleaned.c.shape == (2,), "")
    assert_(cleaned.A_ub.shape == (2, 2), "")
    assert_(cleaned.b_ub.shape == (2,), "")
    assert_(cleaned.A_eq.shape == (2, 2), "")
    assert_(cleaned.b_eq.shape == (2,), "")
194
+
195
+
196
def test__clean_inputs2():
    """Scalar inputs are promoted to length-1 / 1x1 arrays."""
    lp = _LPProblem(
        c=1,
        A_ub=[[1]],
        b_ub=1,
        A_eq=[[1]],
        b_eq=1,
        bounds=(0, 1)
    )

    cleaned = _clean_inputs(lp)

    for field in ('c', 'A_ub', 'b_ub', 'A_eq', 'b_eq'):
        assert_allclose(getattr(cleaned, field), np.array(getattr(lp, field)))
    assert_equal(cleaned.bounds, [(0, 1)])

    assert_(cleaned.c.shape == (1,), "")
    assert_(cleaned.A_ub.shape == (1, 1), "")
    assert_(cleaned.b_ub.shape == (1,), "")
    assert_(cleaned.A_eq.shape == (1, 1), "")
    assert_(cleaned.b_eq.shape == (1,), "")
220
+
221
+
222
def test__clean_inputs3():
    """Row/column vectors are flattened to 1-D and a single bound pair is
    broadcast to all variables."""
    lp = _LPProblem(
        c=[[1, 2]],
        A_ub=np.random.rand(2, 2),
        b_ub=[[1], [2]],
        A_eq=np.random.rand(2, 2),
        b_eq=[[1], [2]],
        bounds=[(0, 1)]
    )

    cleaned = _clean_inputs(lp)

    expected = np.array([1, 2])
    assert_allclose(cleaned.c, expected)
    assert_allclose(cleaned.b_ub, expected)
    assert_allclose(cleaned.b_eq, expected)
    assert_equal(cleaned.bounds, [(0, 1)] * 2)

    for field in ('c', 'b_ub', 'b_eq'):
        assert_(getattr(cleaned, field).shape == (2,), "")
242
+
243
+
244
+ def test_bad_bounds():
245
+ lp = _LPProblem(c=[1, 2])
246
+
247
+ assert_raises(ValueError, _clean_inputs, lp._replace(bounds=(1, 2, 2)))
248
+ assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 2)]))
249
+ with np.testing.suppress_warnings() as sup:
250
+ sup.filter(VisibleDeprecationWarning, "Creating an ndarray from ragged")
251
+ assert_raises(ValueError, _clean_inputs,
252
+ lp._replace(bounds=[(1, 2), (1, 2, 2)]))
253
+ assert_raises(ValueError, _clean_inputs,
254
+ lp._replace(bounds=[(1, 2), (1, 2), (1, 2)]))
255
+
256
+ lp = _LPProblem(c=[1, 2, 3, 4])
257
+
258
+ assert_raises(ValueError, _clean_inputs,
259
+ lp._replace(bounds=[(1, 2, 3, 4), (1, 2, 3, 4)]))
260
+
261
+
262
def test_good_bounds():
    """Valid `bounds` specs are normalized to one (lb, ub) pair per variable."""
    lp = _LPProblem(c=[1, 2])

    # None / empty specs fall back to the default (0, inf)
    for bounds in (None, [], [[]]):
        cleaned = _clean_inputs(lp._replace(bounds=bounds))
        assert_equal(cleaned.bounds, [(0, np.inf)] * 2)

    # a single pair (bare tuple or in a list) is broadcast to all variables
    for bounds in ((1, 2), [(1, 2)]):
        cleaned = _clean_inputs(lp._replace(bounds=bounds))
        assert_equal(cleaned.bounds, [(1, 2)] * 2)

    # None inside a pair means unbounded on that side
    cleaned = _clean_inputs(lp._replace(bounds=[(1, None)]))
    assert_equal(cleaned.bounds, [(1, np.inf)] * 2)

    cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)]))
    assert_equal(cleaned.bounds, [(-np.inf, 1)] * 2)

    cleaned = _clean_inputs(lp._replace(bounds=[(None, None), (-np.inf, None)]))
    assert_equal(cleaned.bounds, [(-np.inf, np.inf)] * 2)

    # same normalizations for a 4-variable problem
    lp = _LPProblem(c=[1, 2, 3, 4])

    cleaned = _clean_inputs(lp)  # lp.bounds is None by default
    assert_equal(cleaned.bounds, [(0, np.inf)] * 4)

    for bounds in ((1, 2), [(1, 2)]):
        cleaned = _clean_inputs(lp._replace(bounds=bounds))
        assert_equal(cleaned.bounds, [(1, 2)] * 4)

    cleaned = _clean_inputs(lp._replace(bounds=[(1, None)]))
    assert_equal(cleaned.bounds, [(1, np.inf)] * 4)

    cleaned = _clean_inputs(lp._replace(bounds=[(None, 1)]))
    assert_equal(cleaned.bounds, [(-np.inf, 1)] * 4)

    cleaned = _clean_inputs(lp._replace(bounds=[(None, None),
                                                (-np.inf, None),
                                                (None, np.inf),
                                                (-np.inf, np.inf)]))
    assert_equal(cleaned.bounds, [(-np.inf, np.inf)] * 4)
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__numdiff.py ADDED
@@ -0,0 +1,815 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from itertools import product
3
+
4
+ import numpy as np
5
+ from numpy.testing import assert_allclose, assert_equal, assert_
6
+ from pytest import raises as assert_raises
7
+
8
+ from scipy.sparse import csr_matrix, csc_matrix, lil_matrix
9
+
10
+ from scipy.optimize._numdiff import (
11
+ _adjust_scheme_to_bounds, approx_derivative, check_derivative,
12
+ group_columns, _eps_for_method, _compute_absolute_step)
13
+
14
+
15
def test_group_columns():
    """Column grouping must be correct for every matrix container type and
    must be deterministic when no order is supplied."""
    structure = [
        [1, 1, 0, 0, 0, 0],
        [1, 1, 1, 0, 0, 0],
        [0, 1, 1, 1, 0, 0],
        [0, 0, 1, 1, 1, 0],
        [0, 0, 0, 1, 1, 1],
        [0, 0, 0, 0, 1, 1],
        [0, 0, 0, 0, 0, 0]
    ]
    for transform in [np.asarray, csr_matrix, csc_matrix, lil_matrix]:
        A = transform(structure)
        for order, expected in [
                (np.arange(6), np.array([0, 1, 2, 0, 1, 2])),
                ([1, 2, 4, 3, 5, 0], np.array([2, 0, 1, 2, 0, 1]))]:
            assert_equal(group_columns(A, order), expected)

        # Test repeatability.
        assert_equal(group_columns(A), group_columns(A))
41
+
42
+
43
def test_correct_fp_eps():
    """The relative step must be derived from the machine epsilon of the
    narrower of the two floating-point types involved."""
    def expected_steps(dtype):
        EPS = np.finfo(dtype).eps
        return {"2-point": EPS**0.5, "3-point": EPS**(1/3), "cs": EPS**0.5}

    steps64 = expected_steps(np.float64)
    for method in ['2-point', '3-point', 'cs']:
        assert_allclose(_eps_for_method(np.float64, np.float64, method),
                        steps64[method])
        assert_allclose(_eps_for_method(np.complex128, np.complex128, method),
                        steps64[method])

    # a float32 on either side widens the step to float32 resolution
    steps32 = expected_steps(np.float32)
    for method in ['2-point', '3-point', 'cs']:
        for x0_dtype, f0_dtype in [(np.float64, np.float32),
                                   (np.float32, np.float64),
                                   (np.float32, np.float32)]:
            assert_allclose(_eps_for_method(x0_dtype, f0_dtype, method),
                            steps32[method])
77
+
78
+
79
class TestAdjustSchemeToBounds:
    """Tests for `_adjust_scheme_to_bounds`: steps must be shrunk or flipped
    so that all evaluation points stay inside the bounds."""

    def test_no_bounds(self):
        # with infinite bounds the requested scheme is returned unchanged
        x0 = np.zeros(3)
        h = np.full(3, 1e-2)
        no_lb = np.full_like(x0, -np.inf)
        no_ub = np.full_like(x0, np.inf)

        for order in (1, 2):
            h_adj, one_sided = _adjust_scheme_to_bounds(
                x0, h, order, '1-sided', no_lb, no_ub)
            assert_allclose(h_adj, h)
            assert_(np.all(one_sided))

            h_adj, one_sided = _adjust_scheme_to_bounds(
                x0, h, order, '2-sided', no_lb, no_ub)
            assert_allclose(h_adj, h)
            assert_(np.all(~one_sided))

    def test_with_bound(self):
        x0 = np.array([0.0, 0.85, -0.85])
        lb = -np.ones(3)
        ub = np.ones(3)
        h = np.array([1, 1, -1]) * 1e-1

        # order-1 one-sided steps already fit inside the bounds
        h_adj, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
        assert_allclose(h_adj, h)

        # order-2 one-sided steps must flip direction near a bound
        h_adj, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
        assert_allclose(h_adj, np.array([1, -1, 1]) * 1e-1)

        h_adj, one_sided = _adjust_scheme_to_bounds(x0, h, 1, '2-sided', lb, ub)
        assert_allclose(h_adj, np.abs(h))
        assert_(np.all(~one_sided))

        # order-2 two-sided: points near a bound fall back to one-sided
        h_adj, one_sided = _adjust_scheme_to_bounds(x0, h, 2, '2-sided', lb, ub)
        assert_allclose(h_adj, np.array([1, -1, 1]) * 1e-1)
        assert_equal(one_sided, np.array([False, True, True]))

    def test_tight_bounds(self):
        # the interval is narrower than the requested step, so the step
        # must be shrunk to fit
        lb = np.array([-0.03, -0.03])
        ub = np.array([0.05, 0.05])
        x0 = np.array([0.0, 0.03])
        h = np.array([-0.1, -0.1])

        h_adj, _ = _adjust_scheme_to_bounds(x0, h, 1, '1-sided', lb, ub)
        assert_allclose(h_adj, np.array([0.05, -0.06]))

        h_adj, _ = _adjust_scheme_to_bounds(x0, h, 2, '1-sided', lb, ub)
        assert_allclose(h_adj, np.array([0.025, -0.03]))

        h_adj, one_sided = _adjust_scheme_to_bounds(x0, h, 1, '2-sided', lb, ub)
        assert_allclose(h_adj, np.array([0.03, -0.03]))
        assert_equal(one_sided, np.array([False, True]))

        h_adj, one_sided = _adjust_scheme_to_bounds(x0, h, 2, '2-sided', lb, ub)
        assert_allclose(h_adj, np.array([0.015, -0.015]))
        assert_equal(one_sided, np.array([False, True]))
151
+
152
+
153
+ class TestApproxDerivativesDense:
154
+ def fun_scalar_scalar(self, x):
155
+ return np.sinh(x)
156
+
157
+ def jac_scalar_scalar(self, x):
158
+ return np.cosh(x)
159
+
160
+ def fun_scalar_vector(self, x):
161
+ return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])])
162
+
163
+ def jac_scalar_vector(self, x):
164
+ return np.array(
165
+ [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1)
166
+
167
+ def fun_vector_scalar(self, x):
168
+ return np.sin(x[0] * x[1]) * np.log(x[0])
169
+
170
+ def wrong_dimensions_fun(self, x):
171
+ return np.array([x**2, np.tan(x), np.exp(x)])
172
+
173
+ def jac_vector_scalar(self, x):
174
+ return np.array([
175
+ x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) +
176
+ np.sin(x[0] * x[1]) / x[0],
177
+ x[0] * np.cos(x[0] * x[1]) * np.log(x[0])
178
+ ])
179
+
180
+ def fun_vector_vector(self, x):
181
+ return np.array([
182
+ x[0] * np.sin(x[1]),
183
+ x[1] * np.cos(x[0]),
184
+ x[0] ** 3 * x[1] ** -0.5
185
+ ])
186
+
187
+ def jac_vector_vector(self, x):
188
+ return np.array([
189
+ [np.sin(x[1]), x[0] * np.cos(x[1])],
190
+ [-x[1] * np.sin(x[0]), np.cos(x[0])],
191
+ [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
192
+ ])
193
+
194
+ def fun_parametrized(self, x, c0, c1=1.0):
195
+ return np.array([np.exp(c0 * x[0]), np.exp(c1 * x[1])])
196
+
197
+ def jac_parametrized(self, x, c0, c1=0.1):
198
+ return np.array([
199
+ [c0 * np.exp(c0 * x[0]), 0],
200
+ [0, c1 * np.exp(c1 * x[1])]
201
+ ])
202
+
203
+ def fun_with_nan(self, x):
204
+ return x if np.abs(x) <= 1e-8 else np.nan
205
+
206
+ def jac_with_nan(self, x):
207
+ return 1.0 if np.abs(x) <= 1e-8 else np.nan
208
+
209
+ def fun_zero_jacobian(self, x):
210
+ return np.array([x[0] * x[1], np.cos(x[0] * x[1])])
211
+
212
+ def jac_zero_jacobian(self, x):
213
+ return np.array([
214
+ [x[1], x[0]],
215
+ [-x[1] * np.sin(x[0] * x[1]), -x[0] * np.sin(x[0] * x[1])]
216
+ ])
217
+
218
+ def jac_non_numpy(self, x):
219
+ # x can be a scalar or an array [val].
220
+ # Cast to true scalar before handing over to math.exp
221
+ xp = np.asarray(x).item()
222
+ return math.exp(xp)
223
+
224
+ def test_scalar_scalar(self):
225
+ x0 = 1.0
226
+ jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
227
+ method='2-point')
228
+ jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0)
229
+ jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
230
+ method='cs')
231
+ jac_true = self.jac_scalar_scalar(x0)
232
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
233
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
234
+ assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
235
+
236
+ def test_scalar_scalar_abs_step(self):
237
+ # can approx_derivative use abs_step?
238
+ x0 = 1.0
239
+ jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
240
+ method='2-point', abs_step=1.49e-8)
241
+ jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0,
242
+ abs_step=1.49e-8)
243
+ jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
244
+ method='cs', abs_step=1.49e-8)
245
+ jac_true = self.jac_scalar_scalar(x0)
246
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
247
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
248
+ assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
249
+
250
+ def test_scalar_vector(self):
251
+ x0 = 0.5
252
+ jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
253
+ method='2-point')
254
+ jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0)
255
+ jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
256
+ method='cs')
257
+ jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
258
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
259
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
260
+ assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
261
+
262
+ def test_vector_scalar(self):
263
+ x0 = np.array([100.0, -0.5])
264
+ jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
265
+ method='2-point')
266
+ jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0)
267
+ jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
268
+ method='cs')
269
+ jac_true = self.jac_vector_scalar(x0)
270
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
271
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-7)
272
+ assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
273
+
274
+ def test_vector_scalar_abs_step(self):
275
+ # can approx_derivative use abs_step?
276
+ x0 = np.array([100.0, -0.5])
277
+ jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
278
+ method='2-point', abs_step=1.49e-8)
279
+ jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
280
+ abs_step=1.49e-8, rel_step=np.inf)
281
+ jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
282
+ method='cs', abs_step=1.49e-8)
283
+ jac_true = self.jac_vector_scalar(x0)
284
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
285
+ assert_allclose(jac_diff_3, jac_true, rtol=3e-9)
286
+ assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
287
+
288
+ def test_vector_vector(self):
289
+ x0 = np.array([-100.0, 0.2])
290
+ jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
291
+ method='2-point')
292
+ jac_diff_3 = approx_derivative(self.fun_vector_vector, x0)
293
+ jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
294
+ method='cs')
295
+ jac_true = self.jac_vector_vector(x0)
296
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-5)
297
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-6)
298
+ assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
299
+
300
+ def test_wrong_dimensions(self):
301
+ x0 = 1.0
302
+ assert_raises(RuntimeError, approx_derivative,
303
+ self.wrong_dimensions_fun, x0)
304
+ f0 = self.wrong_dimensions_fun(np.atleast_1d(x0))
305
+ assert_raises(ValueError, approx_derivative,
306
+ self.wrong_dimensions_fun, x0, f0=f0)
307
+
308
+ def test_custom_rel_step(self):
309
+ x0 = np.array([-0.1, 0.1])
310
+ jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
311
+ method='2-point', rel_step=1e-4)
312
+ jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
313
+ rel_step=1e-4)
314
+ jac_true = self.jac_vector_vector(x0)
315
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-2)
316
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-4)
317
+
318
+ def test_options(self):
319
+ x0 = np.array([1.0, 1.0])
320
+ c0 = -1.0
321
+ c1 = 1.0
322
+ lb = 0.0
323
+ ub = 2.0
324
+ f0 = self.fun_parametrized(x0, c0, c1=c1)
325
+ rel_step = np.array([-1e-6, 1e-7])
326
+ jac_true = self.jac_parametrized(x0, c0, c1)
327
+ jac_diff_2 = approx_derivative(
328
+ self.fun_parametrized, x0, method='2-point', rel_step=rel_step,
329
+ f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
330
+ jac_diff_3 = approx_derivative(
331
+ self.fun_parametrized, x0, rel_step=rel_step,
332
+ f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
333
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
334
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
335
+
336
+ def test_with_bounds_2_point(self):
337
+ lb = -np.ones(2)
338
+ ub = np.ones(2)
339
+
340
+ x0 = np.array([-2.0, 0.2])
341
+ assert_raises(ValueError, approx_derivative,
342
+ self.fun_vector_vector, x0, bounds=(lb, ub))
343
+
344
+ x0 = np.array([-1.0, 1.0])
345
+ jac_diff = approx_derivative(self.fun_vector_vector, x0,
346
+ method='2-point', bounds=(lb, ub))
347
+ jac_true = self.jac_vector_vector(x0)
348
+ assert_allclose(jac_diff, jac_true, rtol=1e-6)
349
+
350
+ def test_with_bounds_3_point(self):
351
+ lb = np.array([1.0, 1.0])
352
+ ub = np.array([2.0, 2.0])
353
+
354
+ x0 = np.array([1.0, 2.0])
355
+ jac_true = self.jac_vector_vector(x0)
356
+
357
+ jac_diff = approx_derivative(self.fun_vector_vector, x0)
358
+ assert_allclose(jac_diff, jac_true, rtol=1e-9)
359
+
360
+ jac_diff = approx_derivative(self.fun_vector_vector, x0,
361
+ bounds=(lb, np.inf))
362
+ assert_allclose(jac_diff, jac_true, rtol=1e-9)
363
+
364
+ jac_diff = approx_derivative(self.fun_vector_vector, x0,
365
+ bounds=(-np.inf, ub))
366
+ assert_allclose(jac_diff, jac_true, rtol=1e-9)
367
+
368
+ jac_diff = approx_derivative(self.fun_vector_vector, x0,
369
+ bounds=(lb, ub))
370
+ assert_allclose(jac_diff, jac_true, rtol=1e-9)
371
+
372
+ def test_tight_bounds(self):
373
+ x0 = np.array([10.0, 10.0])
374
+ lb = x0 - 3e-9
375
+ ub = x0 + 2e-9
376
+ jac_true = self.jac_vector_vector(x0)
377
+ jac_diff = approx_derivative(
378
+ self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub))
379
+ assert_allclose(jac_diff, jac_true, rtol=1e-6)
380
+ jac_diff = approx_derivative(
381
+ self.fun_vector_vector, x0, method='2-point',
382
+ rel_step=1e-6, bounds=(lb, ub))
383
+ assert_allclose(jac_diff, jac_true, rtol=1e-6)
384
+
385
+ jac_diff = approx_derivative(
386
+ self.fun_vector_vector, x0, bounds=(lb, ub))
387
+ assert_allclose(jac_diff, jac_true, rtol=1e-6)
388
+ jac_diff = approx_derivative(
389
+ self.fun_vector_vector, x0, rel_step=1e-6, bounds=(lb, ub))
390
+ assert_allclose(jac_true, jac_diff, rtol=1e-6)
391
+
392
+ def test_bound_switches(self):
393
+ lb = -1e-8
394
+ ub = 1e-8
395
+ x0 = 0.0
396
+ jac_true = self.jac_with_nan(x0)
397
+ jac_diff_2 = approx_derivative(
398
+ self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
399
+ bounds=(lb, ub))
400
+ jac_diff_3 = approx_derivative(
401
+ self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
402
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
403
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
404
+
405
+ x0 = 1e-8
406
+ jac_true = self.jac_with_nan(x0)
407
+ jac_diff_2 = approx_derivative(
408
+ self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
409
+ bounds=(lb, ub))
410
+ jac_diff_3 = approx_derivative(
411
+ self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
412
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
413
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
414
+
415
+ def test_non_numpy(self):
416
+ x0 = 1.0
417
+ jac_true = self.jac_non_numpy(x0)
418
+ jac_diff_2 = approx_derivative(self.jac_non_numpy, x0,
419
+ method='2-point')
420
+ jac_diff_3 = approx_derivative(self.jac_non_numpy, x0)
421
+ assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
422
+ assert_allclose(jac_diff_3, jac_true, rtol=1e-8)
423
+
424
+ # math.exp cannot handle complex arguments, hence this raises
425
+ assert_raises(TypeError, approx_derivative, self.jac_non_numpy, x0,
426
+ **dict(method='cs'))
427
+
428
+ def test_fp(self):
429
+ # checks that approx_derivative works for FP size other than 64.
430
+ # Example is derived from the minimal working example in gh12991.
431
+ np.random.seed(1)
432
+
433
+ def func(p, x):
434
+ return p[0] + p[1] * x
435
+
436
+ def err(p, x, y):
437
+ return func(p, x) - y
438
+
439
+ x = np.linspace(0, 1, 100, dtype=np.float64)
440
+ y = np.random.random(100).astype(np.float64)
441
+ p0 = np.array([-1.0, -1.0])
442
+
443
+ jac_fp64 = approx_derivative(err, p0, method='2-point', args=(x, y))
444
+
445
+ # parameter vector is float32, func output is float64
446
+ jac_fp = approx_derivative(err, p0.astype(np.float32),
447
+ method='2-point', args=(x, y))
448
+ assert err(p0, x, y).dtype == np.float64
449
+ assert_allclose(jac_fp, jac_fp64, atol=1e-3)
450
+
451
+ # parameter vector is float64, func output is float32
452
+ def err_fp32(p):
453
+ assert p.dtype == np.float32
454
+ return err(p, x, y).astype(np.float32)
455
+
456
+ jac_fp = approx_derivative(err_fp32, p0.astype(np.float32),
457
+ method='2-point')
458
+ assert_allclose(jac_fp, jac_fp64, atol=1e-3)
459
+
460
+ # check upper bound of error on the derivative for 2-point
461
+ def f(x):
462
+ return np.sin(x)
463
+ def g(x):
464
+ return np.cos(x)
465
+ def hess(x):
466
+ return -np.sin(x)
467
+
468
+ def calc_atol(h, x0, f, hess, EPS):
469
+ # truncation error
470
+ t0 = h / 2 * max(np.abs(hess(x0)), np.abs(hess(x0 + h)))
471
+ # roundoff error. There may be a divisor (>1) missing from
472
+ # the following line, so this contribution is possibly
473
+ # overestimated
474
+ t1 = EPS / h * max(np.abs(f(x0)), np.abs(f(x0 + h)))
475
+ return t0 + t1
476
+
477
+ for dtype in [np.float16, np.float32, np.float64]:
478
+ EPS = np.finfo(dtype).eps
479
+ x0 = np.array(1.0).astype(dtype)
480
+ h = _compute_absolute_step(None, x0, f(x0), '2-point')
481
+ atol = calc_atol(h, x0, f, hess, EPS)
482
+ err = approx_derivative(f, x0, method='2-point',
483
+ abs_step=h) - g(x0)
484
+ assert abs(err) < atol
485
+
486
+ def test_check_derivative(self):
487
+ x0 = np.array([-10.0, 10])
488
+ accuracy = check_derivative(self.fun_vector_vector,
489
+ self.jac_vector_vector, x0)
490
+ assert_(accuracy < 1e-9)
491
+ accuracy = check_derivative(self.fun_vector_vector,
492
+ self.jac_vector_vector, x0)
493
+ assert_(accuracy < 1e-6)
494
+
495
+ x0 = np.array([0.0, 0.0])
496
+ accuracy = check_derivative(self.fun_zero_jacobian,
497
+ self.jac_zero_jacobian, x0)
498
+ assert_(accuracy == 0)
499
+ accuracy = check_derivative(self.fun_zero_jacobian,
500
+ self.jac_zero_jacobian, x0)
501
+ assert_(accuracy == 0)
502
+
503
+
504
+ class TestApproxDerivativeSparse:
505
+ # Example from Numerical Optimization 2nd edition, p. 198.
506
+ def setup_method(self):
507
+ np.random.seed(0)
508
+ self.n = 50
509
+ self.lb = -0.1 * (1 + np.arange(self.n))
510
+ self.ub = 0.1 * (1 + np.arange(self.n))
511
+ self.x0 = np.empty(self.n)
512
+ self.x0[::2] = (1 - 1e-7) * self.lb[::2]
513
+ self.x0[1::2] = (1 - 1e-7) * self.ub[1::2]
514
+
515
+ self.J_true = self.jac(self.x0)
516
+
517
+ def fun(self, x):
518
+ e = x[1:]**3 - x[:-1]**2
519
+ return np.hstack((0, 3 * e)) + np.hstack((2 * e, 0))
520
+
521
+ def jac(self, x):
522
+ n = x.size
523
+ J = np.zeros((n, n))
524
+ J[0, 0] = -4 * x[0]
525
+ J[0, 1] = 6 * x[1]**2
526
+ for i in range(1, n - 1):
527
+ J[i, i - 1] = -6 * x[i-1]
528
+ J[i, i] = 9 * x[i]**2 - 4 * x[i]
529
+ J[i, i + 1] = 6 * x[i+1]**2
530
+ J[-1, -1] = 9 * x[-1]**2
531
+ J[-1, -2] = -6 * x[-2]
532
+
533
+ return J
534
+
535
+ def structure(self, n):
536
+ A = np.zeros((n, n), dtype=int)
537
+ A[0, 0] = 1
538
+ A[0, 1] = 1
539
+ for i in range(1, n - 1):
540
+ A[i, i - 1: i + 2] = 1
541
+ A[-1, -1] = 1
542
+ A[-1, -2] = 1
543
+
544
+ return A
545
+
546
+ def test_all(self):
547
+ A = self.structure(self.n)
548
+ order = np.arange(self.n)
549
+ groups_1 = group_columns(A, order)
550
+ np.random.shuffle(order)
551
+ groups_2 = group_columns(A, order)
552
+
553
+ for method, groups, l, u in product(
554
+ ['2-point', '3-point', 'cs'], [groups_1, groups_2],
555
+ [-np.inf, self.lb], [np.inf, self.ub]):
556
+ J = approx_derivative(self.fun, self.x0, method=method,
557
+ bounds=(l, u), sparsity=(A, groups))
558
+ assert_(isinstance(J, csr_matrix))
559
+ assert_allclose(J.toarray(), self.J_true, rtol=1e-6)
560
+
561
+ rel_step = np.full_like(self.x0, 1e-8)
562
+ rel_step[::2] *= -1
563
+ J = approx_derivative(self.fun, self.x0, method=method,
564
+ rel_step=rel_step, sparsity=(A, groups))
565
+ assert_allclose(J.toarray(), self.J_true, rtol=1e-5)
566
+
567
+ def test_no_precomputed_groups(self):
568
+ A = self.structure(self.n)
569
+ J = approx_derivative(self.fun, self.x0, sparsity=A)
570
+ assert_allclose(J.toarray(), self.J_true, rtol=1e-6)
571
+
572
+ def test_equivalence(self):
573
+ structure = np.ones((self.n, self.n), dtype=int)
574
+ groups = np.arange(self.n)
575
+ for method in ['2-point', '3-point', 'cs']:
576
+ J_dense = approx_derivative(self.fun, self.x0, method=method)
577
+ J_sparse = approx_derivative(
578
+ self.fun, self.x0, sparsity=(structure, groups), method=method)
579
+ assert_allclose(J_dense, J_sparse.toarray(),
580
+ rtol=5e-16, atol=7e-15)
581
+
582
+ def test_check_derivative(self):
583
+ def jac(x):
584
+ return csr_matrix(self.jac(x))
585
+
586
+ accuracy = check_derivative(self.fun, jac, self.x0,
587
+ bounds=(self.lb, self.ub))
588
+ assert_(accuracy < 1e-9)
589
+
590
+ accuracy = check_derivative(self.fun, jac, self.x0,
591
+ bounds=(self.lb, self.ub))
592
+ assert_(accuracy < 1e-9)
593
+
594
+
595
+ class TestApproxDerivativeLinearOperator:
596
+
597
+ def fun_scalar_scalar(self, x):
598
+ return np.sinh(x)
599
+
600
+ def jac_scalar_scalar(self, x):
601
+ return np.cosh(x)
602
+
603
+ def fun_scalar_vector(self, x):
604
+ return np.array([x[0]**2, np.tan(x[0]), np.exp(x[0])])
605
+
606
+ def jac_scalar_vector(self, x):
607
+ return np.array(
608
+ [2 * x[0], np.cos(x[0]) ** -2, np.exp(x[0])]).reshape(-1, 1)
609
+
610
+ def fun_vector_scalar(self, x):
611
+ return np.sin(x[0] * x[1]) * np.log(x[0])
612
+
613
+ def jac_vector_scalar(self, x):
614
+ return np.array([
615
+ x[1] * np.cos(x[0] * x[1]) * np.log(x[0]) +
616
+ np.sin(x[0] * x[1]) / x[0],
617
+ x[0] * np.cos(x[0] * x[1]) * np.log(x[0])
618
+ ])
619
+
620
+ def fun_vector_vector(self, x):
621
+ return np.array([
622
+ x[0] * np.sin(x[1]),
623
+ x[1] * np.cos(x[0]),
624
+ x[0] ** 3 * x[1] ** -0.5
625
+ ])
626
+
627
+ def jac_vector_vector(self, x):
628
+ return np.array([
629
+ [np.sin(x[1]), x[0] * np.cos(x[1])],
630
+ [-x[1] * np.sin(x[0]), np.cos(x[0])],
631
+ [3 * x[0] ** 2 * x[1] ** -0.5, -0.5 * x[0] ** 3 * x[1] ** -1.5]
632
+ ])
633
+
634
+ def test_scalar_scalar(self):
635
+ x0 = 1.0
636
+ jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
637
+ method='2-point',
638
+ as_linear_operator=True)
639
+ jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0,
640
+ as_linear_operator=True)
641
+ jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
642
+ method='cs',
643
+ as_linear_operator=True)
644
+ jac_true = self.jac_scalar_scalar(x0)
645
+ np.random.seed(1)
646
+ for i in range(10):
647
+ p = np.random.uniform(-10, 10, size=(1,))
648
+ assert_allclose(jac_diff_2.dot(p), jac_true*p,
649
+ rtol=1e-5)
650
+ assert_allclose(jac_diff_3.dot(p), jac_true*p,
651
+ rtol=5e-6)
652
+ assert_allclose(jac_diff_4.dot(p), jac_true*p,
653
+ rtol=5e-6)
654
+
655
+ def test_scalar_vector(self):
656
+ x0 = 0.5
657
+ jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
658
+ method='2-point',
659
+ as_linear_operator=True)
660
+ jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0,
661
+ as_linear_operator=True)
662
+ jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
663
+ method='cs',
664
+ as_linear_operator=True)
665
+ jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
666
+ np.random.seed(1)
667
+ for i in range(10):
668
+ p = np.random.uniform(-10, 10, size=(1,))
669
+ assert_allclose(jac_diff_2.dot(p), jac_true.dot(p),
670
+ rtol=1e-5)
671
+ assert_allclose(jac_diff_3.dot(p), jac_true.dot(p),
672
+ rtol=5e-6)
673
+ assert_allclose(jac_diff_4.dot(p), jac_true.dot(p),
674
+ rtol=5e-6)
675
+
676
+ def test_vector_scalar(self):
677
+ x0 = np.array([100.0, -0.5])
678
+ jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
679
+ method='2-point',
680
+ as_linear_operator=True)
681
+ jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
682
+ as_linear_operator=True)
683
+ jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
684
+ method='cs',
685
+ as_linear_operator=True)
686
+ jac_true = self.jac_vector_scalar(x0)
687
+ np.random.seed(1)
688
+ for i in range(10):
689
+ p = np.random.uniform(-10, 10, size=x0.shape)
690
+ assert_allclose(jac_diff_2.dot(p), np.atleast_1d(jac_true.dot(p)),
691
+ rtol=1e-5)
692
+ assert_allclose(jac_diff_3.dot(p), np.atleast_1d(jac_true.dot(p)),
693
+ rtol=5e-6)
694
+ assert_allclose(jac_diff_4.dot(p), np.atleast_1d(jac_true.dot(p)),
695
+ rtol=1e-7)
696
+
697
+ def test_vector_vector(self):
698
+ x0 = np.array([-100.0, 0.2])
699
+ jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
700
+ method='2-point',
701
+ as_linear_operator=True)
702
+ jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
703
+ as_linear_operator=True)
704
+ jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
705
+ method='cs',
706
+ as_linear_operator=True)
707
+ jac_true = self.jac_vector_vector(x0)
708
+ np.random.seed(1)
709
+ for i in range(10):
710
+ p = np.random.uniform(-10, 10, size=x0.shape)
711
+ assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5)
712
+ assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=1e-6)
713
+ assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=1e-7)
714
+
715
+ def test_exception(self):
716
+ x0 = np.array([-100.0, 0.2])
717
+ assert_raises(ValueError, approx_derivative,
718
+ self.fun_vector_vector, x0,
719
+ method='2-point', bounds=(1, np.inf))
720
+
721
+
722
+ def test_absolute_step_sign():
723
+ # test for gh12487
724
+ # if an absolute step is specified for 2-point differences make sure that
725
+ # the side corresponds to the step. i.e. if step is positive then forward
726
+ # differences should be used, if step is negative then backwards
727
+ # differences should be used.
728
+
729
+ # function has double discontinuity at x = [-1, -1]
730
+ # first component is \/, second component is /\
731
+ def f(x):
732
+ return -np.abs(x[0] + 1) + np.abs(x[1] + 1)
733
+
734
+ # check that the forward difference is used
735
+ grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=1e-8)
736
+ assert_allclose(grad, [-1.0, 1.0])
737
+
738
+ # check that the backwards difference is used
739
+ grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=-1e-8)
740
+ assert_allclose(grad, [1.0, -1.0])
741
+
742
+ # check that the forwards difference is used with a step for both
743
+ # parameters
744
+ grad = approx_derivative(
745
+ f, [-1, -1], method='2-point', abs_step=[1e-8, 1e-8]
746
+ )
747
+ assert_allclose(grad, [-1.0, 1.0])
748
+
749
+ # check that we can mix forward/backwards steps.
750
+ grad = approx_derivative(
751
+ f, [-1, -1], method='2-point', abs_step=[1e-8, -1e-8]
752
+ )
753
+ assert_allclose(grad, [-1.0, -1.0])
754
+ grad = approx_derivative(
755
+ f, [-1, -1], method='2-point', abs_step=[-1e-8, 1e-8]
756
+ )
757
+ assert_allclose(grad, [1.0, 1.0])
758
+
759
+ # the forward step should reverse to a backwards step if it runs into a
760
+ # bound
761
+ # This is kind of tested in TestAdjustSchemeToBounds, but only for a lower level
762
+ # function.
763
+ grad = approx_derivative(
764
+ f, [-1, -1], method='2-point', abs_step=1e-8,
765
+ bounds=(-np.inf, -1)
766
+ )
767
+ assert_allclose(grad, [1.0, -1.0])
768
+
769
+ grad = approx_derivative(
770
+ f, [-1, -1], method='2-point', abs_step=-1e-8, bounds=(-1, np.inf)
771
+ )
772
+ assert_allclose(grad, [-1.0, 1.0])
773
+
774
+
775
+ def test__compute_absolute_step():
776
+ # tests calculation of absolute step from rel_step
777
+ methods = ['2-point', '3-point', 'cs']
778
+
779
+ x0 = np.array([1e-5, 0, 1, 1e5])
780
+
781
+ EPS = np.finfo(np.float64).eps
782
+ relative_step = {
783
+ "2-point": EPS**0.5,
784
+ "3-point": EPS**(1/3),
785
+ "cs": EPS**0.5
786
+ }
787
+ f0 = np.array(1.0)
788
+
789
+ for method in methods:
790
+ rel_step = relative_step[method]
791
+ correct_step = np.array([rel_step,
792
+ rel_step * 1.,
793
+ rel_step * 1.,
794
+ rel_step * np.abs(x0[3])])
795
+
796
+ abs_step = _compute_absolute_step(None, x0, f0, method)
797
+ assert_allclose(abs_step, correct_step)
798
+
799
+ sign_x0 = (-x0 >= 0).astype(float) * 2 - 1
800
+ abs_step = _compute_absolute_step(None, -x0, f0, method)
801
+ assert_allclose(abs_step, sign_x0 * correct_step)
802
+
803
+ # if a relative step is provided it should be used
804
+ rel_step = np.array([0.1, 1, 10, 100])
805
+ correct_step = np.array([rel_step[0] * x0[0],
806
+ relative_step['2-point'],
807
+ rel_step[2] * 1.,
808
+ rel_step[3] * np.abs(x0[3])])
809
+
810
+ abs_step = _compute_absolute_step(rel_step, x0, f0, '2-point')
811
+ assert_allclose(abs_step, correct_step)
812
+
813
+ sign_x0 = (-x0 >= 0).astype(float) * 2 - 1
814
+ abs_step = _compute_absolute_step(rel_step, -x0, f0, '2-point')
815
+ assert_allclose(abs_step, sign_x0 * correct_step)
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__remove_redundancy.py ADDED
@@ -0,0 +1,228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unit test for Linear Programming via Simplex Algorithm.
3
+ """
4
+
5
+ # TODO: add tests for:
6
+ # https://github.com/scipy/scipy/issues/5400
7
+ # https://github.com/scipy/scipy/issues/6690
8
+
9
+ import numpy as np
10
+ from numpy.testing import (
11
+ assert_,
12
+ assert_allclose,
13
+ assert_equal)
14
+
15
+ from .test_linprog import magic_square
16
+ from scipy.optimize._remove_redundancy import _remove_redundancy_svd
17
+ from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_dense
18
+ from scipy.optimize._remove_redundancy import _remove_redundancy_pivot_sparse
19
+ from scipy.optimize._remove_redundancy import _remove_redundancy_id
20
+
21
+ from scipy.sparse import csc_matrix
22
+
23
+
24
+ def setup_module():
25
+ np.random.seed(2017)
26
+
27
+
28
+ def redundancy_removed(A, B):
29
+ """Checks whether a matrix contains only independent rows of another"""
30
+ for rowA in A:
31
+ # `rowA in B` is not a reliable check
32
+ for rowB in B:
33
+ if np.all(rowA == rowB):
34
+ break
35
+ else:
36
+ return False
37
+ return A.shape[0] == np.linalg.matrix_rank(A) == np.linalg.matrix_rank(B)
38
+
39
+
40
+ class RRCommonTests:
41
+ def test_no_redundancy(self):
42
+ m, n = 10, 10
43
+ A0 = np.random.rand(m, n)
44
+ b0 = np.random.rand(m)
45
+ A1, b1, status, message = self.rr(A0, b0)
46
+ assert_allclose(A0, A1)
47
+ assert_allclose(b0, b1)
48
+ assert_equal(status, 0)
49
+
50
+ def test_infeasible_zero_row(self):
51
+ A = np.eye(3)
52
+ A[1, :] = 0
53
+ b = np.random.rand(3)
54
+ A1, b1, status, message = self.rr(A, b)
55
+ assert_equal(status, 2)
56
+
57
+ def test_remove_zero_row(self):
58
+ A = np.eye(3)
59
+ A[1, :] = 0
60
+ b = np.random.rand(3)
61
+ b[1] = 0
62
+ A1, b1, status, message = self.rr(A, b)
63
+ assert_equal(status, 0)
64
+ assert_allclose(A1, A[[0, 2], :])
65
+ assert_allclose(b1, b[[0, 2]])
66
+
67
+ def test_infeasible_m_gt_n(self):
68
+ m, n = 20, 10
69
+ A0 = np.random.rand(m, n)
70
+ b0 = np.random.rand(m)
71
+ A1, b1, status, message = self.rr(A0, b0)
72
+ assert_equal(status, 2)
73
+
74
+ def test_infeasible_m_eq_n(self):
75
+ m, n = 10, 10
76
+ A0 = np.random.rand(m, n)
77
+ b0 = np.random.rand(m)
78
+ A0[-1, :] = 2 * A0[-2, :]
79
+ A1, b1, status, message = self.rr(A0, b0)
80
+ assert_equal(status, 2)
81
+
82
+ def test_infeasible_m_lt_n(self):
83
+ m, n = 9, 10
84
+ A0 = np.random.rand(m, n)
85
+ b0 = np.random.rand(m)
86
+ A0[-1, :] = np.arange(m - 1).dot(A0[:-1])
87
+ A1, b1, status, message = self.rr(A0, b0)
88
+ assert_equal(status, 2)
89
+
90
+ def test_m_gt_n(self):
91
+ np.random.seed(2032)
92
+ m, n = 20, 10
93
+ A0 = np.random.rand(m, n)
94
+ b0 = np.random.rand(m)
95
+ x = np.linalg.solve(A0[:n, :], b0[:n])
96
+ b0[n:] = A0[n:, :].dot(x)
97
+ A1, b1, status, message = self.rr(A0, b0)
98
+ assert_equal(status, 0)
99
+ assert_equal(A1.shape[0], n)
100
+ assert_equal(np.linalg.matrix_rank(A1), n)
101
+
102
+ def test_m_gt_n_rank_deficient(self):
103
+ m, n = 20, 10
104
+ A0 = np.zeros((m, n))
105
+ A0[:, 0] = 1
106
+ b0 = np.ones(m)
107
+ A1, b1, status, message = self.rr(A0, b0)
108
+ assert_equal(status, 0)
109
+ assert_allclose(A1, A0[0:1, :])
110
+ assert_allclose(b1, b0[0])
111
+
112
+ def test_m_lt_n_rank_deficient(self):
113
+ m, n = 9, 10
114
+ A0 = np.random.rand(m, n)
115
+ b0 = np.random.rand(m)
116
+ A0[-1, :] = np.arange(m - 1).dot(A0[:-1])
117
+ b0[-1] = np.arange(m - 1).dot(b0[:-1])
118
+ A1, b1, status, message = self.rr(A0, b0)
119
+ assert_equal(status, 0)
120
+ assert_equal(A1.shape[0], 8)
121
+ assert_equal(np.linalg.matrix_rank(A1), 8)
122
+
123
+ def test_dense1(self):
124
+ A = np.ones((6, 6))
125
+ A[0, :3] = 0
126
+ A[1, 3:] = 0
127
+ A[3:, ::2] = -1
128
+ A[3, :2] = 0
129
+ A[4, 2:] = 0
130
+ b = np.zeros(A.shape[0])
131
+
132
+ A1, b1, status, message = self.rr(A, b)
133
+ assert_(redundancy_removed(A1, A))
134
+ assert_equal(status, 0)
135
+
136
+ def test_dense2(self):
137
+ A = np.eye(6)
138
+ A[-2, -1] = 1
139
+ A[-1, :] = 1
140
+ b = np.zeros(A.shape[0])
141
+ A1, b1, status, message = self.rr(A, b)
142
+ assert_(redundancy_removed(A1, A))
143
+ assert_equal(status, 0)
144
+
145
+ def test_dense3(self):
146
+ A = np.eye(6)
147
+ A[-2, -1] = 1
148
+ A[-1, :] = 1
149
+ b = np.random.rand(A.shape[0])
150
+ b[-1] = np.sum(b[:-1])
151
+ A1, b1, status, message = self.rr(A, b)
152
+ assert_(redundancy_removed(A1, A))
153
+ assert_equal(status, 0)
154
+
155
+ def test_m_gt_n_sparse(self):
156
+ np.random.seed(2013)
157
+ m, n = 20, 5
158
+ p = 0.1
159
+ A = np.random.rand(m, n)
160
+ A[np.random.rand(m, n) > p] = 0
161
+ rank = np.linalg.matrix_rank(A)
162
+ b = np.zeros(A.shape[0])
163
+ A1, b1, status, message = self.rr(A, b)
164
+ assert_equal(status, 0)
165
+ assert_equal(A1.shape[0], rank)
166
+ assert_equal(np.linalg.matrix_rank(A1), rank)
167
+
168
+ def test_m_lt_n_sparse(self):
169
+ np.random.seed(2017)
170
+ m, n = 20, 50
171
+ p = 0.05
172
+ A = np.random.rand(m, n)
173
+ A[np.random.rand(m, n) > p] = 0
174
+ rank = np.linalg.matrix_rank(A)
175
+ b = np.zeros(A.shape[0])
176
+ A1, b1, status, message = self.rr(A, b)
177
+ assert_equal(status, 0)
178
+ assert_equal(A1.shape[0], rank)
179
+ assert_equal(np.linalg.matrix_rank(A1), rank)
180
+
181
+ def test_m_eq_n_sparse(self):
182
+ np.random.seed(2017)
183
+ m, n = 100, 100
184
+ p = 0.01
185
+ A = np.random.rand(m, n)
186
+ A[np.random.rand(m, n) > p] = 0
187
+ rank = np.linalg.matrix_rank(A)
188
+ b = np.zeros(A.shape[0])
189
+ A1, b1, status, message = self.rr(A, b)
190
+ assert_equal(status, 0)
191
+ assert_equal(A1.shape[0], rank)
192
+ assert_equal(np.linalg.matrix_rank(A1), rank)
193
+
194
+ def test_magic_square(self):
195
+ A, b, c, numbers, _ = magic_square(3)
196
+ A1, b1, status, message = self.rr(A, b)
197
+ assert_equal(status, 0)
198
+ assert_equal(A1.shape[0], 23)
199
+ assert_equal(np.linalg.matrix_rank(A1), 23)
200
+
201
+ def test_magic_square2(self):
202
+ A, b, c, numbers, _ = magic_square(4)
203
+ A1, b1, status, message = self.rr(A, b)
204
+ assert_equal(status, 0)
205
+ assert_equal(A1.shape[0], 39)
206
+ assert_equal(np.linalg.matrix_rank(A1), 39)
207
+
208
+
209
+ class TestRRSVD(RRCommonTests):
210
+ def rr(self, A, b):
211
+ return _remove_redundancy_svd(A, b)
212
+
213
+
214
+ class TestRRPivotDense(RRCommonTests):
215
+ def rr(self, A, b):
216
+ return _remove_redundancy_pivot_dense(A, b)
217
+
218
+
219
+ class TestRRID(RRCommonTests):
220
+ def rr(self, A, b):
221
+ return _remove_redundancy_id(A, b)
222
+
223
+
224
+ class TestRRPivotSparse(RRCommonTests):
225
+ def rr(self, A, b):
226
+ rr_res = _remove_redundancy_pivot_sparse(csc_matrix(A), b)
227
+ A1, b1, status, message = rr_res
228
+ return A1.toarray(), b1, status, message
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__root.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unit tests for optimization routines from _root.py.
3
+ """
4
+ from numpy.testing import assert_, assert_equal
5
+ import pytest
6
+ from pytest import raises as assert_raises, warns as assert_warns
7
+ import numpy as np
8
+
9
+ from scipy.optimize import root
10
+
11
+
12
+ class TestRoot:
13
+ def test_tol_parameter(self):
14
+ # Check that the minimize() tol= argument does something
15
+ def func(z):
16
+ x, y = z
17
+ return np.array([x**3 - 1, y**3 - 1])
18
+
19
+ def dfunc(z):
20
+ x, y = z
21
+ return np.array([[3*x**2, 0], [0, 3*y**2]])
22
+
23
+ for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
24
+ 'diagbroyden', 'krylov']:
25
+ if method in ('linearmixing', 'excitingmixing'):
26
+ # doesn't converge
27
+ continue
28
+
29
+ if method in ('hybr', 'lm'):
30
+ jac = dfunc
31
+ else:
32
+ jac = None
33
+
34
+ sol1 = root(func, [1.1,1.1], jac=jac, tol=1e-4, method=method)
35
+ sol2 = root(func, [1.1,1.1], jac=jac, tol=0.5, method=method)
36
+ msg = f"{method}: {func(sol1.x)} vs. {func(sol2.x)}"
37
+ assert_(sol1.success, msg)
38
+ assert_(sol2.success, msg)
39
+ assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(),
40
+ msg)
41
+
42
+ def test_tol_norm(self):
43
+
44
+ def norm(x):
45
+ return abs(x[0])
46
+
47
+ for method in ['excitingmixing',
48
+ 'diagbroyden',
49
+ 'linearmixing',
50
+ 'anderson',
51
+ 'broyden1',
52
+ 'broyden2',
53
+ 'krylov']:
54
+
55
+ root(np.zeros_like, np.zeros(2), method=method,
56
+ options={"tol_norm": norm})
57
+
58
+ def test_minimize_scalar_coerce_args_param(self):
59
+ # github issue #3503
60
+ def func(z, f=1):
61
+ x, y = z
62
+ return np.array([x**3 - 1, y**3 - f])
63
+ root(func, [1.1, 1.1], args=1.5)
64
+
65
+ def test_f_size(self):
66
+ # gh8320
67
+ # check that decreasing the size of the returned array raises an error
68
+ # and doesn't segfault
69
+ class fun:
70
+ def __init__(self):
71
+ self.count = 0
72
+
73
+ def __call__(self, x):
74
+ self.count += 1
75
+
76
+ if not (self.count % 5):
77
+ ret = x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0
78
+ else:
79
+ ret = ([x[0] + 0.5 * (x[0] - x[1]) ** 3 - 1.0,
80
+ 0.5 * (x[1] - x[0]) ** 3 + x[1]])
81
+
82
+ return ret
83
+
84
+ F = fun()
85
+ with assert_raises(ValueError):
86
+ root(F, [0.1, 0.0], method='lm')
87
+
88
+ def test_gh_10370(self):
89
+ # gh-10370 reported that passing both `args` and `jac` to `root` with
90
+ # `method='krylov'` caused a failure. Ensure that this is fixed whether
91
+ # the gradient is passed via `jac` or as a second output of `fun`.
92
+ def fun(x, ignored):
93
+ return [3*x[0] - 0.25*x[1]**2 + 10, 0.1*x[0]**2 + 5*x[1] - 2]
94
+
95
+ def grad(x, ignored):
96
+ return [[3, 0.5 * x[1]], [0.2 * x[0], 5]]
97
+
98
+ def fun_grad(x, ignored):
99
+ return fun(x, ignored), grad(x, ignored)
100
+
101
+ x0 = np.zeros(2)
102
+
103
+ ref = root(fun, x0, args=(1,), method='krylov')
104
+ message = 'Method krylov does not use the jacobian'
105
+ with assert_warns(RuntimeWarning, match=message):
106
+ res1 = root(fun, x0, args=(1,), method='krylov', jac=grad)
107
+ with assert_warns(RuntimeWarning, match=message):
108
+ res2 = root(fun_grad, x0, args=(1,), method='krylov', jac=True)
109
+
110
+ assert_equal(res1.x, ref.x)
111
+ assert_equal(res2.x, ref.x)
112
+ assert res1.success is res2.success is ref.success is True
113
+
114
+ @pytest.mark.parametrize("method", ["hybr", "lm", "broyden1", "broyden2",
115
+ "anderson", "linearmixing",
116
+ "diagbroyden", "excitingmixing",
117
+ "krylov", "df-sane"])
118
+ def test_method_in_result(self, method):
119
+ def func(x):
120
+ return x - 1
121
+
122
+ res = root(func, x0=[1], method=method)
123
+ assert res.method == method
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test__shgo.py ADDED
@@ -0,0 +1,1155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import sys
3
+
4
+ import numpy as np
5
+ import time
6
+ from multiprocessing import Pool
7
+ from numpy.testing import assert_allclose, IS_PYPY
8
+ import pytest
9
+ from pytest import raises as assert_raises, warns
10
+ from scipy.optimize import (shgo, Bounds, minimize_scalar, minimize, rosen,
11
+ rosen_der, rosen_hess, NonlinearConstraint)
12
+ from scipy.optimize._constraints import new_constraint_to_old
13
+ from scipy.optimize._shgo import SHGO
14
+
15
+
16
+ class StructTestFunction:
17
+ def __init__(self, bounds, expected_x, expected_fun=None,
18
+ expected_xl=None, expected_funl=None):
19
+ self.bounds = bounds
20
+ self.expected_x = expected_x
21
+ self.expected_fun = expected_fun
22
+ self.expected_xl = expected_xl
23
+ self.expected_funl = expected_funl
24
+
25
+
26
+ def wrap_constraints(g):
27
+ cons = []
28
+ if g is not None:
29
+ if not isinstance(g, (tuple, list)):
30
+ g = (g,)
31
+ else:
32
+ pass
33
+ for g in g:
34
+ cons.append({'type': 'ineq',
35
+ 'fun': g})
36
+ cons = tuple(cons)
37
+ else:
38
+ cons = None
39
+ return cons
40
+
41
+
42
+ class StructTest1(StructTestFunction):
43
+ def f(self, x):
44
+ return x[0] ** 2 + x[1] ** 2
45
+
46
+ def g(x):
47
+ return -(np.sum(x, axis=0) - 6.0)
48
+
49
+ cons = wrap_constraints(g)
50
+
51
+
52
+ test1_1 = StructTest1(bounds=[(-1, 6), (-1, 6)],
53
+ expected_x=[0, 0])
54
+ test1_2 = StructTest1(bounds=[(0, 1), (0, 1)],
55
+ expected_x=[0, 0])
56
+ test1_3 = StructTest1(bounds=[(None, None), (None, None)],
57
+ expected_x=[0, 0])
58
+
59
+
60
+ class StructTest2(StructTestFunction):
61
+ """
62
+ Scalar function with several minima to test all minimiser retrievals
63
+ """
64
+
65
+ def f(self, x):
66
+ return (x - 30) * np.sin(x)
67
+
68
+ def g(x):
69
+ return 58 - np.sum(x, axis=0)
70
+
71
+ cons = wrap_constraints(g)
72
+
73
+
74
+ test2_1 = StructTest2(bounds=[(0, 60)],
75
+ expected_x=[1.53567906],
76
+ expected_fun=-28.44677132,
77
+ # Important: test that funl return is in the correct
78
+ # order
79
+ expected_xl=np.array([[1.53567906],
80
+ [55.01782167],
81
+ [7.80894889],
82
+ [48.74797493],
83
+ [14.07445705],
84
+ [42.4913859],
85
+ [20.31743841],
86
+ [36.28607535],
87
+ [26.43039605],
88
+ [30.76371366]]),
89
+
90
+ expected_funl=np.array([-28.44677132, -24.99785984,
91
+ -22.16855376, -18.72136195,
92
+ -15.89423937, -12.45154942,
93
+ -9.63133158, -6.20801301,
94
+ -3.43727232, -0.46353338])
95
+ )
96
+
97
+ test2_2 = StructTest2(bounds=[(0, 4.5)],
98
+ expected_x=[1.53567906],
99
+ expected_fun=[-28.44677132],
100
+ expected_xl=np.array([[1.53567906]]),
101
+ expected_funl=np.array([-28.44677132])
102
+ )
103
+
104
+
105
+ class StructTest3(StructTestFunction):
106
+ """
107
+ Hock and Schittkowski 18 problem (HS18). Hoch and Schittkowski (1981)
108
+ http://www.ai7.uni-bayreuth.de/test_problem_coll.pdf
109
+ Minimize: f = 0.01 * (x_1)**2 + (x_2)**2
110
+
111
+ Subject to: x_1 * x_2 - 25.0 >= 0,
112
+ (x_1)**2 + (x_2)**2 - 25.0 >= 0,
113
+ 2 <= x_1 <= 50,
114
+ 0 <= x_2 <= 50.
115
+
116
+ Approx. Answer:
117
+ f([(250)**0.5 , (2.5)**0.5]) = 5.0
118
+
119
+
120
+ """
121
+
122
+ # amended to test vectorisation of constraints
123
+ def f(self, x):
124
+ return 0.01 * (x[0]) ** 2 + (x[1]) ** 2
125
+
126
+ def g1(x):
127
+ return x[0] * x[1] - 25.0
128
+
129
+ def g2(x):
130
+ return x[0] ** 2 + x[1] ** 2 - 25.0
131
+
132
+ # g = (g1, g2)
133
+ # cons = wrap_constraints(g)
134
+
135
+ def g(x):
136
+ return x[0] * x[1] - 25.0, x[0] ** 2 + x[1] ** 2 - 25.0
137
+
138
+ # this checks that shgo can be sent new-style constraints
139
+ __nlc = NonlinearConstraint(g, 0, np.inf)
140
+ cons = (__nlc,)
141
+
142
+ test3_1 = StructTest3(bounds=[(2, 50), (0, 50)],
143
+ expected_x=[250 ** 0.5, 2.5 ** 0.5],
144
+ expected_fun=5.0
145
+ )
146
+
147
+
148
+ class StructTest4(StructTestFunction):
149
+ """
150
+ Hock and Schittkowski 11 problem (HS11). Hoch and Schittkowski (1981)
151
+
152
+ NOTE: Did not find in original reference to HS collection, refer to
153
+ Henderson (2015) problem 7 instead. 02.03.2016
154
+ """
155
+
156
+ def f(self, x):
157
+ return ((x[0] - 10) ** 2 + 5 * (x[1] - 12) ** 2 + x[2] ** 4
158
+ + 3 * (x[3] - 11) ** 2 + 10 * x[4] ** 6 + 7 * x[5] ** 2 + x[
159
+ 6] ** 4
160
+ - 4 * x[5] * x[6] - 10 * x[5] - 8 * x[6]
161
+ )
162
+
163
+ def g1(x):
164
+ return -(2 * x[0] ** 2 + 3 * x[1] ** 4 + x[2] + 4 * x[3] ** 2
165
+ + 5 * x[4] - 127)
166
+
167
+ def g2(x):
168
+ return -(7 * x[0] + 3 * x[1] + 10 * x[2] ** 2 + x[3] - x[4] - 282.0)
169
+
170
+ def g3(x):
171
+ return -(23 * x[0] + x[1] ** 2 + 6 * x[5] ** 2 - 8 * x[6] - 196)
172
+
173
+ def g4(x):
174
+ return -(4 * x[0] ** 2 + x[1] ** 2 - 3 * x[0] * x[1] + 2 * x[2] ** 2
175
+ + 5 * x[5] - 11 * x[6])
176
+
177
+ g = (g1, g2, g3, g4)
178
+
179
+ cons = wrap_constraints(g)
180
+
181
+
182
+ test4_1 = StructTest4(bounds=[(-10, 10), ] * 7,
183
+ expected_x=[2.330499, 1.951372, -0.4775414,
184
+ 4.365726, -0.6244870, 1.038131, 1.594227],
185
+ expected_fun=680.6300573
186
+ )
187
+
188
+
189
+ class StructTest5(StructTestFunction):
190
+ def f(self, x):
191
+ return (
192
+ -(x[1] + 47.0)*np.sin(np.sqrt(abs(x[0]/2.0 + (x[1] + 47.0))))
193
+ - x[0]*np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0))))
194
+ )
195
+
196
+ g = None
197
+ cons = wrap_constraints(g)
198
+
199
+
200
+ test5_1 = StructTest5(bounds=[(-512, 512), (-512, 512)],
201
+ expected_fun=[-959.64066272085051],
202
+ expected_x=[512., 404.23180542])
203
+
204
+
205
+ class StructTestLJ(StructTestFunction):
206
+ """
207
+ LennardJones objective function. Used to test symmetry constraints
208
+ settings.
209
+ """
210
+
211
+ def f(self, x, *args):
212
+ print(f'x = {x}')
213
+ self.N = args[0]
214
+ k = int(self.N / 3)
215
+ s = 0.0
216
+
217
+ for i in range(k - 1):
218
+ for j in range(i + 1, k):
219
+ a = 3 * i
220
+ b = 3 * j
221
+ xd = x[a] - x[b]
222
+ yd = x[a + 1] - x[b + 1]
223
+ zd = x[a + 2] - x[b + 2]
224
+ ed = xd * xd + yd * yd + zd * zd
225
+ ud = ed * ed * ed
226
+ if ed > 0.0:
227
+ s += (1.0 / ud - 2.0) / ud
228
+
229
+ return s
230
+
231
+ g = None
232
+ cons = wrap_constraints(g)
233
+
234
+
235
+ N = 6
236
+ boundsLJ = list(zip([-4.0] * 6, [4.0] * 6))
237
+
238
+ testLJ = StructTestLJ(bounds=boundsLJ,
239
+ expected_fun=[-1.0],
240
+ expected_x=None,
241
+ # expected_x=[-2.71247337e-08,
242
+ # -2.71247337e-08,
243
+ # -2.50000222e+00,
244
+ # -2.71247337e-08,
245
+ # -2.71247337e-08,
246
+ # -1.50000222e+00]
247
+ )
248
+
249
+
250
+ class StructTestS(StructTestFunction):
251
+ def f(self, x):
252
+ return ((x[0] - 0.5) ** 2 + (x[1] - 0.5) ** 2
253
+ + (x[2] - 0.5) ** 2 + (x[3] - 0.5) ** 2)
254
+
255
+ g = None
256
+ cons = wrap_constraints(g)
257
+
258
+
259
+ test_s = StructTestS(bounds=[(0, 2.0), ] * 4,
260
+ expected_fun=0.0,
261
+ expected_x=np.ones(4) - 0.5
262
+ )
263
+
264
+
265
+ class StructTestTable(StructTestFunction):
266
+ def f(self, x):
267
+ if x[0] == 3.0 and x[1] == 3.0:
268
+ return 50
269
+ else:
270
+ return 100
271
+
272
+ g = None
273
+ cons = wrap_constraints(g)
274
+
275
+
276
+ test_table = StructTestTable(bounds=[(-10, 10), (-10, 10)],
277
+ expected_fun=[50],
278
+ expected_x=[3.0, 3.0])
279
+
280
+
281
+ class StructTestInfeasible(StructTestFunction):
282
+ """
283
+ Test function with no feasible domain.
284
+ """
285
+
286
+ def f(self, x, *args):
287
+ return x[0] ** 2 + x[1] ** 2
288
+
289
+ def g1(x):
290
+ return x[0] + x[1] - 1
291
+
292
+ def g2(x):
293
+ return -(x[0] + x[1] - 1)
294
+
295
+ def g3(x):
296
+ return -x[0] + x[1] - 1
297
+
298
+ def g4(x):
299
+ return -(-x[0] + x[1] - 1)
300
+
301
+ g = (g1, g2, g3, g4)
302
+ cons = wrap_constraints(g)
303
+
304
+
305
+ test_infeasible = StructTestInfeasible(bounds=[(2, 50), (-1, 1)],
306
+ expected_fun=None,
307
+ expected_x=None
308
+ )
309
+
310
+
311
+ @pytest.mark.skip("Not a test")
312
+ def run_test(test, args=(), test_atol=1e-5, n=100, iters=None,
313
+ callback=None, minimizer_kwargs=None, options=None,
314
+ sampling_method='sobol', workers=1):
315
+ res = shgo(test.f, test.bounds, args=args, constraints=test.cons,
316
+ n=n, iters=iters, callback=callback,
317
+ minimizer_kwargs=minimizer_kwargs, options=options,
318
+ sampling_method=sampling_method, workers=workers)
319
+
320
+ print(f'res = {res}')
321
+ logging.info(f'res = {res}')
322
+ if test.expected_x is not None:
323
+ np.testing.assert_allclose(res.x, test.expected_x,
324
+ rtol=test_atol,
325
+ atol=test_atol)
326
+
327
+ # (Optional tests)
328
+ if test.expected_fun is not None:
329
+ np.testing.assert_allclose(res.fun,
330
+ test.expected_fun,
331
+ atol=test_atol)
332
+
333
+ if test.expected_xl is not None:
334
+ np.testing.assert_allclose(res.xl,
335
+ test.expected_xl,
336
+ atol=test_atol)
337
+
338
+ if test.expected_funl is not None:
339
+ np.testing.assert_allclose(res.funl,
340
+ test.expected_funl,
341
+ atol=test_atol)
342
+ return
343
+
344
+
345
+ # Base test functions:
346
+ class TestShgoSobolTestFunctions:
347
+ """
348
+ Global optimisation tests with Sobol sampling:
349
+ """
350
+
351
+ # Sobol algorithm
352
+ def test_f1_1_sobol(self):
353
+ """Multivariate test function 1:
354
+ x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]"""
355
+ run_test(test1_1)
356
+
357
+ def test_f1_2_sobol(self):
358
+ """Multivariate test function 1:
359
+ x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]"""
360
+ run_test(test1_2)
361
+
362
+ def test_f1_3_sobol(self):
363
+ """Multivariate test function 1:
364
+ x[0]**2 + x[1]**2 with bounds=[(None, None),(None, None)]"""
365
+ options = {'disp': True}
366
+ run_test(test1_3, options=options)
367
+
368
+ def test_f2_1_sobol(self):
369
+ """Univariate test function on
370
+ f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
371
+ run_test(test2_1)
372
+
373
+ def test_f2_2_sobol(self):
374
+ """Univariate test function on
375
+ f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
376
+ run_test(test2_2)
377
+
378
+ def test_f3_sobol(self):
379
+ """NLP: Hock and Schittkowski problem 18"""
380
+ run_test(test3_1)
381
+
382
+ @pytest.mark.slow
383
+ def test_f4_sobol(self):
384
+ """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)"""
385
+ options = {'infty_constraints': False}
386
+ # run_test(test4_1, n=990, options=options)
387
+ run_test(test4_1, n=990 * 2, options=options)
388
+
389
+ def test_f5_1_sobol(self):
390
+ """NLP: Eggholder, multimodal"""
391
+ # run_test(test5_1, n=30)
392
+ run_test(test5_1, n=60)
393
+
394
+ def test_f5_2_sobol(self):
395
+ """NLP: Eggholder, multimodal"""
396
+ # run_test(test5_1, n=60, iters=5)
397
+ run_test(test5_1, n=60, iters=5)
398
+
399
+ # def test_t911(self):
400
+ # """1D tabletop function"""
401
+ # run_test(test11_1)
402
+
403
+
404
+ class TestShgoSimplicialTestFunctions:
405
+ """
406
+ Global optimisation tests with Simplicial sampling:
407
+ """
408
+
409
+ def test_f1_1_simplicial(self):
410
+ """Multivariate test function 1:
411
+ x[0]**2 + x[1]**2 with bounds=[(-1, 6), (-1, 6)]"""
412
+ run_test(test1_1, n=1, sampling_method='simplicial')
413
+
414
+ def test_f1_2_simplicial(self):
415
+ """Multivariate test function 1:
416
+ x[0]**2 + x[1]**2 with bounds=[(0, 1), (0, 1)]"""
417
+ run_test(test1_2, n=1, sampling_method='simplicial')
418
+
419
+ def test_f1_3_simplicial(self):
420
+ """Multivariate test function 1: x[0]**2 + x[1]**2
421
+ with bounds=[(None, None),(None, None)]"""
422
+ run_test(test1_3, n=5, sampling_method='simplicial')
423
+
424
+ def test_f2_1_simplicial(self):
425
+ """Univariate test function on
426
+ f(x) = (x - 30) * sin(x) with bounds=[(0, 60)]"""
427
+ options = {'minimize_every_iter': False}
428
+ run_test(test2_1, n=200, iters=7, options=options,
429
+ sampling_method='simplicial')
430
+
431
+ def test_f2_2_simplicial(self):
432
+ """Univariate test function on
433
+ f(x) = (x - 30) * sin(x) bounds=[(0, 4.5)]"""
434
+ run_test(test2_2, n=1, sampling_method='simplicial')
435
+
436
+ def test_f3_simplicial(self):
437
+ """NLP: Hock and Schittkowski problem 18"""
438
+ run_test(test3_1, n=1, sampling_method='simplicial')
439
+
440
+ @pytest.mark.slow
441
+ def test_f4_simplicial(self):
442
+ """NLP: (High dimensional) Hock and Schittkowski 11 problem (HS11)"""
443
+ run_test(test4_1, n=1, sampling_method='simplicial')
444
+
445
+ def test_lj_symmetry_old(self):
446
+ """LJ: Symmetry-constrained test function"""
447
+ options = {'symmetry': True,
448
+ 'disp': True}
449
+ args = (6,) # Number of atoms
450
+ run_test(testLJ, args=args, n=300,
451
+ options=options, iters=1,
452
+ sampling_method='simplicial')
453
+
454
+ def test_f5_1_lj_symmetry(self):
455
+ """LJ: Symmetry constrained test function"""
456
+ options = {'symmetry': [0, ] * 6,
457
+ 'disp': True}
458
+ args = (6,) # No. of atoms
459
+
460
+ run_test(testLJ, args=args, n=300,
461
+ options=options, iters=1,
462
+ sampling_method='simplicial')
463
+
464
+ def test_f5_2_cons_symmetry(self):
465
+ """Symmetry constrained test function"""
466
+ options = {'symmetry': [0, 0],
467
+ 'disp': True}
468
+
469
+ run_test(test1_1, n=200,
470
+ options=options, iters=1,
471
+ sampling_method='simplicial')
472
+
473
+ @pytest.mark.fail_slow(5)
474
+ def test_f5_3_cons_symmetry(self):
475
+ """Assymmetrically constrained test function"""
476
+ options = {'symmetry': [0, 0, 0, 3],
477
+ 'disp': True}
478
+
479
+ run_test(test_s, n=10000,
480
+ options=options,
481
+ iters=1,
482
+ sampling_method='simplicial')
483
+
484
+ @pytest.mark.skip("Not a test")
485
+ def test_f0_min_variance(self):
486
+ """Return a minimum on a perfectly symmetric problem, based on
487
+ gh10429"""
488
+ avg = 0.5 # Given average value of x
489
+ cons = {'type': 'eq', 'fun': lambda x: np.mean(x) - avg}
490
+
491
+ # Minimize the variance of x under the given constraint
492
+ res = shgo(np.var, bounds=6 * [(0, 1)], constraints=cons)
493
+ assert res.success
494
+ assert_allclose(res.fun, 0, atol=1e-15)
495
+ assert_allclose(res.x, 0.5)
496
+
497
+ @pytest.mark.skip("Not a test")
498
+ def test_f0_min_variance_1D(self):
499
+ """Return a minimum on a perfectly symmetric 1D problem, based on
500
+ gh10538"""
501
+
502
+ def fun(x):
503
+ return x * (x - 1.0) * (x - 0.5)
504
+
505
+ bounds = [(0, 1)]
506
+ res = shgo(fun, bounds=bounds)
507
+ ref = minimize_scalar(fun, bounds=bounds[0])
508
+ assert res.success
509
+ assert_allclose(res.fun, ref.fun)
510
+ assert_allclose(res.x, ref.x, rtol=1e-6)
511
+
512
+ # Argument test functions
513
+ class TestShgoArguments:
514
+ def test_1_1_simpl_iter(self):
515
+ """Iterative simplicial sampling on TestFunction 1 (multivariate)"""
516
+ run_test(test1_2, n=None, iters=2, sampling_method='simplicial')
517
+
518
+ def test_1_2_simpl_iter(self):
519
+ """Iterative simplicial on TestFunction 2 (univariate)"""
520
+ options = {'minimize_every_iter': False}
521
+ run_test(test2_1, n=None, iters=9, options=options,
522
+ sampling_method='simplicial')
523
+
524
+ def test_2_1_sobol_iter(self):
525
+ """Iterative Sobol sampling on TestFunction 1 (multivariate)"""
526
+ run_test(test1_2, n=None, iters=1, sampling_method='sobol')
527
+
528
+ def test_2_2_sobol_iter(self):
529
+ """Iterative Sobol sampling on TestFunction 2 (univariate)"""
530
+ res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
531
+ n=None, iters=1, sampling_method='sobol')
532
+
533
+ np.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, atol=1e-5)
534
+ np.testing.assert_allclose(res.fun, test2_1.expected_fun, atol=1e-5)
535
+
536
+ def test_3_1_disp_simplicial(self):
537
+ """Iterative sampling on TestFunction 1 and 2 (multi and univariate)
538
+ """
539
+
540
+ def callback_func(x):
541
+ print("Local minimization callback test")
542
+
543
+ for test in [test1_1, test2_1]:
544
+ shgo(test.f, test.bounds, iters=1,
545
+ sampling_method='simplicial',
546
+ callback=callback_func, options={'disp': True})
547
+ shgo(test.f, test.bounds, n=1, sampling_method='simplicial',
548
+ callback=callback_func, options={'disp': True})
549
+
550
+ def test_3_2_disp_sobol(self):
551
+ """Iterative sampling on TestFunction 1 and 2 (multi and univariate)"""
552
+
553
+ def callback_func(x):
554
+ print("Local minimization callback test")
555
+
556
+ for test in [test1_1, test2_1]:
557
+ shgo(test.f, test.bounds, iters=1, sampling_method='sobol',
558
+ callback=callback_func, options={'disp': True})
559
+
560
+ shgo(test.f, test.bounds, n=1, sampling_method='simplicial',
561
+ callback=callback_func, options={'disp': True})
562
+
563
+ def test_args_gh14589(self):
564
+ """Using `args` used to cause `shgo` to fail; see #14589, #15986,
565
+ #16506"""
566
+ res = shgo(func=lambda x, y, z: x * z + y, bounds=[(0, 3)], args=(1, 2)
567
+ )
568
+ ref = shgo(func=lambda x: 2 * x + 1, bounds=[(0, 3)])
569
+ assert_allclose(res.fun, ref.fun)
570
+ assert_allclose(res.x, ref.x)
571
+
572
+ @pytest.mark.slow
573
+ def test_4_1_known_f_min(self):
574
+ """Test known function minima stopping criteria"""
575
+ # Specify known function value
576
+ options = {'f_min': test4_1.expected_fun,
577
+ 'f_tol': 1e-6,
578
+ 'minimize_every_iter': True}
579
+ # TODO: Make default n higher for faster tests
580
+ run_test(test4_1, n=None, test_atol=1e-5, options=options,
581
+ sampling_method='simplicial')
582
+
583
+ @pytest.mark.slow
584
+ def test_4_2_known_f_min(self):
585
+ """Test Global mode limiting local evaluations"""
586
+ options = { # Specify known function value
587
+ 'f_min': test4_1.expected_fun,
588
+ 'f_tol': 1e-6,
589
+ # Specify number of local iterations to perform
590
+ 'minimize_every_iter': True,
591
+ 'local_iter': 1}
592
+
593
+ run_test(test4_1, n=None, test_atol=1e-5, options=options,
594
+ sampling_method='simplicial')
595
+
596
+ def test_4_4_known_f_min(self):
597
+ """Test Global mode limiting local evaluations for 1D funcs"""
598
+ options = { # Specify known function value
599
+ 'f_min': test2_1.expected_fun,
600
+ 'f_tol': 1e-6,
601
+ # Specify number of local iterations to perform+
602
+ 'minimize_every_iter': True,
603
+ 'local_iter': 1,
604
+ 'infty_constraints': False}
605
+
606
+ res = shgo(test2_1.f, test2_1.bounds, constraints=test2_1.cons,
607
+ n=None, iters=None, options=options,
608
+ sampling_method='sobol')
609
+ np.testing.assert_allclose(res.x, test2_1.expected_x, rtol=1e-5, atol=1e-5)
610
+
611
+ def test_5_1_simplicial_argless(self):
612
+ """Test Default simplicial sampling settings on TestFunction 1"""
613
+ res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons)
614
+ np.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, atol=1e-5)
615
+
616
+ def test_5_2_sobol_argless(self):
617
+ """Test Default sobol sampling settings on TestFunction 1"""
618
+ res = shgo(test1_1.f, test1_1.bounds, constraints=test1_1.cons,
619
+ sampling_method='sobol')
620
+ np.testing.assert_allclose(res.x, test1_1.expected_x, rtol=1e-5, atol=1e-5)
621
+
622
+ def test_6_1_simplicial_max_iter(self):
623
+ """Test that maximum iteration option works on TestFunction 3"""
624
+ options = {'max_iter': 2}
625
+ res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
626
+ options=options, sampling_method='simplicial')
627
+ np.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, atol=1e-5)
628
+ np.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
629
+
630
+ def test_6_2_simplicial_min_iter(self):
631
+ """Test that maximum iteration option works on TestFunction 3"""
632
+ options = {'min_iter': 2}
633
+ res = shgo(test3_1.f, test3_1.bounds, constraints=test3_1.cons,
634
+ options=options, sampling_method='simplicial')
635
+ np.testing.assert_allclose(res.x, test3_1.expected_x, rtol=1e-5, atol=1e-5)
636
+ np.testing.assert_allclose(res.fun, test3_1.expected_fun, atol=1e-5)
637
+
638
+ def test_7_1_minkwargs(self):
639
+ """Test the minimizer_kwargs arguments for solvers with constraints"""
640
+ # Test solvers
641
+ for solver in ['COBYLA', 'COBYQA', 'SLSQP']:
642
+ # Note that passing global constraints to SLSQP is tested in other
643
+ # unittests which run test4_1 normally
644
+ minimizer_kwargs = {'method': solver,
645
+ 'constraints': test3_1.cons}
646
+ run_test(test3_1, n=100, test_atol=1e-3,
647
+ minimizer_kwargs=minimizer_kwargs,
648
+ sampling_method='sobol')
649
+
650
+ def test_7_2_minkwargs(self):
651
+ """Test the minimizer_kwargs default inits"""
652
+ minimizer_kwargs = {'ftol': 1e-5}
653
+ options = {'disp': True} # For coverage purposes
654
+ SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0],
655
+ minimizer_kwargs=minimizer_kwargs, options=options)
656
+
657
+ def test_7_3_minkwargs(self):
658
+ """Test minimizer_kwargs arguments for solvers without constraints"""
659
+ for solver in ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'Newton-CG',
660
+ 'L-BFGS-B', 'TNC', 'dogleg', 'trust-ncg', 'trust-exact',
661
+ 'trust-krylov']:
662
+ def jac(x):
663
+ return np.array([2 * x[0], 2 * x[1]]).T
664
+
665
+ def hess(x):
666
+ return np.array([[2, 0], [0, 2]])
667
+
668
+ minimizer_kwargs = {'method': solver,
669
+ 'jac': jac,
670
+ 'hess': hess}
671
+ logging.info(f"Solver = {solver}")
672
+ logging.info("=" * 100)
673
+ run_test(test1_1, n=100, test_atol=1e-3,
674
+ minimizer_kwargs=minimizer_kwargs,
675
+ sampling_method='sobol')
676
+
677
+ def test_8_homology_group_diff(self):
678
+ options = {'minhgrd': 1,
679
+ 'minimize_every_iter': True}
680
+
681
+ run_test(test1_1, n=None, iters=None, options=options,
682
+ sampling_method='simplicial')
683
+
684
+ def test_9_cons_g(self):
685
+ """Test single function constraint passing"""
686
+ SHGO(test3_1.f, test3_1.bounds, constraints=test3_1.cons[0])
687
+
688
+ @pytest.mark.xfail(IS_PYPY and sys.platform == 'win32',
689
+ reason="Failing and fix in PyPy not planned (see gh-18632)")
690
+ def test_10_finite_time(self):
691
+ """Test single function constraint passing"""
692
+ options = {'maxtime': 1e-15}
693
+
694
+ def f(x):
695
+ time.sleep(1e-14)
696
+ return 0.0
697
+
698
+ res = shgo(f, test1_1.bounds, iters=5, options=options)
699
+ # Assert that only 1 rather than 5 requested iterations ran:
700
+ assert res.nit == 1
701
+
702
+ def test_11_f_min_0(self):
703
+ """Test to cover the case where f_lowest == 0"""
704
+ options = {'f_min': 0.0,
705
+ 'disp': True}
706
+ res = shgo(test1_2.f, test1_2.bounds, n=10, iters=None,
707
+ options=options, sampling_method='sobol')
708
+ np.testing.assert_equal(0, res.x[0])
709
+ np.testing.assert_equal(0, res.x[1])
710
+
711
+ # @nottest
712
+ @pytest.mark.skip(reason="no way of currently testing this")
713
+ def test_12_sobol_inf_cons(self):
714
+ """Test to cover the case where f_lowest == 0"""
715
+ # TODO: This test doesn't cover anything new, it is unknown what the
716
+ # original test was intended for as it was never complete. Delete or
717
+ # replace in the future.
718
+ options = {'maxtime': 1e-15,
719
+ 'f_min': 0.0}
720
+ res = shgo(test1_2.f, test1_2.bounds, n=1, iters=None,
721
+ options=options, sampling_method='sobol')
722
+ np.testing.assert_equal(0.0, res.fun)
723
+
724
+ def test_13_high_sobol(self):
725
+ """Test init of high-dimensional sobol sequences"""
726
+
727
+ def f(x):
728
+ return 0
729
+
730
+ bounds = [(None, None), ] * 41
731
+ SHGOc = SHGO(f, bounds, sampling_method='sobol')
732
+ # SHGOc.sobol_points(2, 50)
733
+ SHGOc.sampling_function(2, 50)
734
+
735
+ def test_14_local_iter(self):
736
+ """Test limited local iterations for a pseudo-global mode"""
737
+ options = {'local_iter': 4}
738
+ run_test(test5_1, n=60, options=options)
739
+
740
+ def test_15_min_every_iter(self):
741
+ """Test minimize every iter options and cover function cache"""
742
+ options = {'minimize_every_iter': True}
743
+ run_test(test1_1, n=1, iters=7, options=options,
744
+ sampling_method='sobol')
745
+
746
+ def test_16_disp_bounds_minimizer(self, capsys):
747
+ """Test disp=True with minimizers that do not support bounds """
748
+ options = {'disp': True}
749
+ minimizer_kwargs = {'method': 'nelder-mead'}
750
+ run_test(test1_2, sampling_method='simplicial',
751
+ options=options, minimizer_kwargs=minimizer_kwargs)
752
+
753
+ def test_17_custom_sampling(self):
754
+ """Test the functionality to add custom sampling methods to shgo"""
755
+
756
+ def sample(n, d):
757
+ return np.random.uniform(size=(n, d))
758
+
759
+ run_test(test1_1, n=30, sampling_method=sample)
760
+
761
+ def test_18_bounds_class(self):
762
+ # test that new and old bounds yield same result
763
+ def f(x):
764
+ return np.square(x).sum()
765
+
766
+ lb = [-6., 1., -5.]
767
+ ub = [-1., 3., 5.]
768
+ bounds_old = list(zip(lb, ub))
769
+ bounds_new = Bounds(lb, ub)
770
+
771
+ res_old_bounds = shgo(f, bounds_old)
772
+ res_new_bounds = shgo(f, bounds_new)
773
+
774
+ assert res_new_bounds.nfev == res_old_bounds.nfev
775
+ assert res_new_bounds.message == res_old_bounds.message
776
+ assert res_new_bounds.success == res_old_bounds.success
777
+ x_opt = np.array([-1., 1., 0.])
778
+ np.testing.assert_allclose(res_new_bounds.x, x_opt)
779
+ np.testing.assert_allclose(res_new_bounds.x, res_old_bounds.x)
780
+
781
+ @pytest.mark.fail_slow(5)
782
+ def test_19_parallelization(self):
783
+ """Test the functionality to add custom sampling methods to shgo"""
784
+
785
+ with Pool(2) as p:
786
+ run_test(test1_1, n=30, workers=p.map) # Constrained
787
+ run_test(test1_1, n=30, workers=map) # Constrained
788
+ with Pool(2) as p:
789
+ run_test(test_s, n=30, workers=p.map) # Unconstrained
790
+ run_test(test_s, n=30, workers=map) # Unconstrained
791
+
792
+ def test_20_constrained_args(self):
793
+ """Test that constraints can be passed to arguments"""
794
+
795
+ def eggholder(x):
796
+ return (
797
+ -(x[1] + 47.0)*np.sin(np.sqrt(abs(x[0] / 2.0 + (x[1] + 47.0))))
798
+ - x[0]*np.sin(np.sqrt(abs(x[0] - (x[1] + 47.0))))
799
+ )
800
+
801
+ def f(x): # (cattle-feed)
802
+ return 24.55 * x[0] + 26.75 * x[1] + 39 * x[2] + 40.50 * x[3]
803
+
804
+ bounds = [(0, 1.0), ] * 4
805
+
806
+ def g1_modified(x, i):
807
+ return i * 2.3 * x[0] + i * 5.6 * x[1] + 11.1 * x[2] + 1.3 * x[
808
+ 3] - 5 # >=0
809
+
810
+ def g2(x):
811
+ return (
812
+ 12*x[0] + 11.9*x[1] + 41.8*x[2] + 52.1*x[3] - 21
813
+ - 1.645*np.sqrt(
814
+ 0.28*x[0]**2 + 0.19*x[1]**2 + 20.5*x[2]**2 + 0.62*x[3]**2
815
+ )
816
+ ) # >=0
817
+
818
+ def h1(x):
819
+ return x[0] + x[1] + x[2] + x[3] - 1 # == 0
820
+
821
+ cons = ({'type': 'ineq', 'fun': g1_modified, "args": (0,)},
822
+ {'type': 'ineq', 'fun': g2},
823
+ {'type': 'eq', 'fun': h1})
824
+
825
+ shgo(f, bounds, n=300, iters=1, constraints=cons)
826
+ # using constrain with arguments AND sampling method sobol
827
+ shgo(f, bounds, n=300, iters=1, constraints=cons,
828
+ sampling_method='sobol')
829
+
830
+ def test_21_1_jac_true(self):
831
+ """Test that shgo can handle objective functions that return the
832
+ gradient alongside the objective value. Fixes gh-13547"""
833
+ # previous
834
+ def func(x):
835
+ return np.sum(np.power(x, 2)), 2 * x
836
+
837
+ shgo(
838
+ func,
839
+ bounds=[[-1, 1], [1, 2]],
840
+ n=100, iters=5,
841
+ sampling_method="sobol",
842
+ minimizer_kwargs={'method': 'SLSQP', 'jac': True}
843
+ )
844
+
845
+ # new
846
+ def func(x):
847
+ return np.sum(x ** 2), 2 * x
848
+
849
+ bounds = [[-1, 1], [1, 2], [-1, 1], [1, 2], [0, 3]]
850
+
851
+ res = shgo(func, bounds=bounds, sampling_method="sobol",
852
+ minimizer_kwargs={'method': 'SLSQP', 'jac': True})
853
+ ref = minimize(func, x0=[1, 1, 1, 1, 1], bounds=bounds,
854
+ jac=True)
855
+ assert res.success
856
+ assert_allclose(res.fun, ref.fun)
857
+ assert_allclose(res.x, ref.x, atol=1e-15)
858
+
859
+ @pytest.mark.parametrize('derivative', ['jac', 'hess', 'hessp'])
860
+ def test_21_2_derivative_options(self, derivative):
861
+ """shgo used to raise an error when passing `options` with 'jac'
862
+ # see gh-12963. check that this is resolved
863
+ """
864
+
865
+ def objective(x):
866
+ return 3 * x[0] * x[0] + 2 * x[0] + 5
867
+
868
+ def gradient(x):
869
+ return 6 * x[0] + 2
870
+
871
+ def hess(x):
872
+ return 6
873
+
874
+ def hessp(x, p):
875
+ return 6 * p
876
+
877
+ derivative_funcs = {'jac': gradient, 'hess': hess, 'hessp': hessp}
878
+ options = {derivative: derivative_funcs[derivative]}
879
+ minimizer_kwargs = {'method': 'trust-constr'}
880
+
881
+ bounds = [(-100, 100)]
882
+ res = shgo(objective, bounds, minimizer_kwargs=minimizer_kwargs,
883
+ options=options)
884
+ ref = minimize(objective, x0=[0], bounds=bounds, **minimizer_kwargs,
885
+ **options)
886
+
887
+ assert res.success
888
+ np.testing.assert_allclose(res.fun, ref.fun)
889
+ np.testing.assert_allclose(res.x, ref.x)
890
+
891
+ def test_21_3_hess_options_rosen(self):
892
+ """Ensure the Hessian gets passed correctly to the local minimizer
893
+ routine. Previous report gh-14533.
894
+ """
895
+ bounds = [(0, 1.6), (0, 1.6), (0, 1.4), (0, 1.4), (0, 1.4)]
896
+ options = {'jac': rosen_der, 'hess': rosen_hess}
897
+ minimizer_kwargs = {'method': 'Newton-CG'}
898
+ res = shgo(rosen, bounds, minimizer_kwargs=minimizer_kwargs,
899
+ options=options)
900
+ ref = minimize(rosen, np.zeros(5), method='Newton-CG',
901
+ **options)
902
+ assert res.success
903
+ assert_allclose(res.fun, ref.fun)
904
+ assert_allclose(res.x, ref.x, atol=1e-15)
905
+
906
+ def test_21_arg_tuple_sobol(self):
907
+ """shgo used to raise an error when passing `args` with Sobol sampling
908
+ # see gh-12114. check that this is resolved"""
909
+
910
+ def fun(x, k):
911
+ return x[0] ** k
912
+
913
+ constraints = ({'type': 'ineq', 'fun': lambda x: x[0] - 1})
914
+
915
+ bounds = [(0, 10)]
916
+ res = shgo(fun, bounds, args=(1,), constraints=constraints,
917
+ sampling_method='sobol')
918
+ ref = minimize(fun, np.zeros(1), bounds=bounds, args=(1,),
919
+ constraints=constraints)
920
+ assert res.success
921
+ assert_allclose(res.fun, ref.fun)
922
+ assert_allclose(res.x, ref.x)
923
+
924
+
925
+ # Failure test functions
926
+ class TestShgoFailures:
927
+ def test_1_maxiter(self):
928
+ """Test failure on insufficient iterations"""
929
+ options = {'maxiter': 2}
930
+ res = shgo(test4_1.f, test4_1.bounds, n=2, iters=None,
931
+ options=options, sampling_method='sobol')
932
+
933
+ np.testing.assert_equal(False, res.success)
934
+ # np.testing.assert_equal(4, res.nfev)
935
+ np.testing.assert_equal(4, res.tnev)
936
+
937
+ def test_2_sampling(self):
938
+ """Rejection of unknown sampling method"""
939
+ assert_raises(ValueError, shgo, test1_1.f, test1_1.bounds,
940
+ sampling_method='not_Sobol')
941
+
942
+ def test_3_1_no_min_pool_sobol(self):
943
+ """Check that the routine stops when no minimiser is found
944
+ after maximum specified function evaluations"""
945
+ options = {'maxfev': 10,
946
+ # 'maxev': 10,
947
+ 'disp': True}
948
+ res = shgo(test_table.f, test_table.bounds, n=3, options=options,
949
+ sampling_method='sobol')
950
+ np.testing.assert_equal(False, res.success)
951
+ # np.testing.assert_equal(9, res.nfev)
952
+ np.testing.assert_equal(12, res.nfev)
953
+
954
+ def test_3_2_no_min_pool_simplicial(self):
955
+ """Check that the routine stops when no minimiser is found
956
+ after maximum specified sampling evaluations"""
957
+ options = {'maxev': 10,
958
+ 'disp': True}
959
+ res = shgo(test_table.f, test_table.bounds, n=3, options=options,
960
+ sampling_method='simplicial')
961
+ np.testing.assert_equal(False, res.success)
962
+
963
+ def test_4_1_bound_err(self):
964
+ """Specified bounds ub > lb"""
965
+ bounds = [(6, 3), (3, 5)]
966
+ assert_raises(ValueError, shgo, test1_1.f, bounds)
967
+
968
+ def test_4_2_bound_err(self):
969
+ """Specified bounds are of the form (lb, ub)"""
970
+ bounds = [(3, 5, 5), (3, 5)]
971
+ assert_raises(ValueError, shgo, test1_1.f, bounds)
972
+
973
+ def test_5_1_1_infeasible_sobol(self):
974
+ """Ensures the algorithm terminates on infeasible problems
975
+ after maxev is exceeded. Use infty constraints option"""
976
+ options = {'maxev': 100,
977
+ 'disp': True}
978
+
979
+ res = shgo(test_infeasible.f, test_infeasible.bounds,
980
+ constraints=test_infeasible.cons, n=100, options=options,
981
+ sampling_method='sobol')
982
+
983
+ np.testing.assert_equal(False, res.success)
984
+
985
+ def test_5_1_2_infeasible_sobol(self):
986
+ """Ensures the algorithm terminates on infeasible problems
987
+ after maxev is exceeded. Do not use infty constraints option"""
988
+ options = {'maxev': 100,
989
+ 'disp': True,
990
+ 'infty_constraints': False}
991
+
992
+ res = shgo(test_infeasible.f, test_infeasible.bounds,
993
+ constraints=test_infeasible.cons, n=100, options=options,
994
+ sampling_method='sobol')
995
+
996
+ np.testing.assert_equal(False, res.success)
997
+
998
+ def test_5_2_infeasible_simplicial(self):
999
+ """Ensures the algorithm terminates on infeasible problems
1000
+ after maxev is exceeded."""
1001
+ options = {'maxev': 1000,
1002
+ 'disp': False}
1003
+
1004
+ res = shgo(test_infeasible.f, test_infeasible.bounds,
1005
+ constraints=test_infeasible.cons, n=100, options=options,
1006
+ sampling_method='simplicial')
1007
+
1008
+ np.testing.assert_equal(False, res.success)
1009
+
1010
+ def test_6_1_lower_known_f_min(self):
1011
+ """Test Global mode limiting local evaluations with f* too high"""
1012
+ options = { # Specify known function value
1013
+ 'f_min': test2_1.expected_fun + 2.0,
1014
+ 'f_tol': 1e-6,
1015
+ # Specify number of local iterations to perform+
1016
+ 'minimize_every_iter': True,
1017
+ 'local_iter': 1,
1018
+ 'infty_constraints': False}
1019
+ args = (test2_1.f, test2_1.bounds)
1020
+ kwargs = {'constraints': test2_1.cons,
1021
+ 'n': None,
1022
+ 'iters': None,
1023
+ 'options': options,
1024
+ 'sampling_method': 'sobol'
1025
+ }
1026
+ warns(UserWarning, shgo, *args, **kwargs)
1027
+
1028
+ def test(self):
1029
+ from scipy.optimize import rosen, shgo
1030
+ bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
1031
+
1032
+ def fun(x):
1033
+ fun.nfev += 1
1034
+ return rosen(x)
1035
+
1036
+ fun.nfev = 0
1037
+
1038
+ result = shgo(fun, bounds)
1039
+ print(result.x, result.fun, fun.nfev) # 50
1040
+
1041
+
1042
+ # Returns
1043
+ class TestShgoReturns:
1044
+ def test_1_nfev_simplicial(self):
1045
+ bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
1046
+
1047
+ def fun(x):
1048
+ fun.nfev += 1
1049
+ return rosen(x)
1050
+
1051
+ fun.nfev = 0
1052
+
1053
+ result = shgo(fun, bounds)
1054
+ np.testing.assert_equal(fun.nfev, result.nfev)
1055
+
1056
+ def test_1_nfev_sobol(self):
1057
+ bounds = [(0, 2), (0, 2), (0, 2), (0, 2), (0, 2)]
1058
+
1059
+ def fun(x):
1060
+ fun.nfev += 1
1061
+ return rosen(x)
1062
+
1063
+ fun.nfev = 0
1064
+
1065
+ result = shgo(fun, bounds, sampling_method='sobol')
1066
+ np.testing.assert_equal(fun.nfev, result.nfev)
1067
+
1068
+
1069
+ def test_vector_constraint():
1070
+ # gh15514
1071
+ def quad(x):
1072
+ x = np.asarray(x)
1073
+ return [np.sum(x ** 2)]
1074
+
1075
+ nlc = NonlinearConstraint(quad, [2.2], [3])
1076
+ oldc = new_constraint_to_old(nlc, np.array([1.0, 1.0]))
1077
+
1078
+ res = shgo(rosen, [(0, 10), (0, 10)], constraints=oldc, sampling_method='sobol')
1079
+ assert np.all(np.sum((res.x)**2) >= 2.2)
1080
+ assert np.all(np.sum((res.x) ** 2) <= 3.0)
1081
+ assert res.success
1082
+
1083
+
1084
+ @pytest.mark.filterwarnings("ignore:delta_grad")
1085
+ def test_trust_constr():
1086
+ def quad(x):
1087
+ x = np.asarray(x)
1088
+ return [np.sum(x ** 2)]
1089
+
1090
+ nlc = NonlinearConstraint(quad, [2.6], [3])
1091
+ minimizer_kwargs = {'method': 'trust-constr'}
1092
+ # note that we don't supply the constraints in minimizer_kwargs,
1093
+ # so if the final result obeys the constraints we know that shgo
1094
+ # passed them on to 'trust-constr'
1095
+ res = shgo(
1096
+ rosen,
1097
+ [(0, 10), (0, 10)],
1098
+ constraints=nlc,
1099
+ sampling_method='sobol',
1100
+ minimizer_kwargs=minimizer_kwargs
1101
+ )
1102
+ assert np.all(np.sum((res.x)**2) >= 2.6)
1103
+ assert np.all(np.sum((res.x) ** 2) <= 3.0)
1104
+ assert res.success
1105
+
1106
+
1107
+ def test_equality_constraints():
1108
+ # gh16260
1109
+ bounds = [(0.9, 4.0)] * 2 # Constrain probabilities to 0 and 1.
1110
+
1111
+ def faulty(x):
1112
+ return x[0] + x[1]
1113
+
1114
+ nlc = NonlinearConstraint(faulty, 3.9, 3.9)
1115
+ res = shgo(rosen, bounds=bounds, constraints=nlc)
1116
+ assert_allclose(np.sum(res.x), 3.9)
1117
+
1118
+ def faulty(x):
1119
+ return x[0] + x[1] - 3.9
1120
+
1121
+ constraints = {'type': 'eq', 'fun': faulty}
1122
+ res = shgo(rosen, bounds=bounds, constraints=constraints)
1123
+ assert_allclose(np.sum(res.x), 3.9)
1124
+
1125
+ bounds = [(0, 1.0)] * 4
1126
+ # sum of variable should equal 1.
1127
+ def faulty(x):
1128
+ return x[0] + x[1] + x[2] + x[3] - 1
1129
+
1130
+ # options = {'minimize_every_iter': True, 'local_iter':10}
1131
+ constraints = {'type': 'eq', 'fun': faulty}
1132
+ res = shgo(
1133
+ lambda x: - np.prod(x),
1134
+ bounds=bounds,
1135
+ constraints=constraints,
1136
+ sampling_method='sobol'
1137
+ )
1138
+ assert_allclose(np.sum(res.x), 1.0)
1139
+
1140
+ def test_gh16971():
1141
+ def cons(x):
1142
+ return np.sum(x**2) - 0
1143
+
1144
+ c = {'fun': cons, 'type': 'ineq'}
1145
+ minimizer_kwargs = {
1146
+ 'method': 'COBYLA',
1147
+ 'options': {'rhobeg': 5, 'tol': 5e-1, 'catol': 0.05}
1148
+ }
1149
+
1150
+ s = SHGO(
1151
+ rosen, [(0, 10)]*2, constraints=c, minimizer_kwargs=minimizer_kwargs
1152
+ )
1153
+
1154
+ assert s.minimizer_kwargs['method'].lower() == 'cobyla'
1155
+ assert s.minimizer_kwargs['options']['catol'] == 0.05
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_bracket.py ADDED
@@ -0,0 +1,793 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import numpy as np
4
+ from numpy.testing import assert_array_less, assert_allclose, assert_equal
5
+
6
+ from scipy.optimize._bracket import _bracket_root, _bracket_minimum, _ELIMITS
7
+ import scipy._lib._elementwise_iterative_method as eim
8
+ from scipy import stats
9
+
10
+ class TestBracketRoot:
11
+ @pytest.mark.parametrize("seed", (615655101, 3141866013, 238075752))
12
+ @pytest.mark.parametrize("use_xmin", (False, True))
13
+ @pytest.mark.parametrize("other_side", (False, True))
14
+ @pytest.mark.parametrize("fix_one_side", (False, True))
15
+ def test_nfev_expected(self, seed, use_xmin, other_side, fix_one_side):
16
+ # Property-based test to confirm that _bracket_root is behaving as
17
+ # expected. The basic case is when root < a < b.
18
+ # The number of times bracket expands (per side) can be found by
19
+ # setting the expression for the left endpoint of the bracket to the
20
+ # root of f (x=0), solving for i, and rounding up. The corresponding
21
+ # lower and upper ends of the bracket are found by plugging this back
22
+ # into the expression for the ends of the bracket.
23
+ # `other_side=True` is the case that a < b < root
24
+ # Special cases like a < root < b are tested separately
25
+
26
+ rng = np.random.default_rng(seed)
27
+ xl0, d, factor = rng.random(size=3) * [1e5, 10, 5]
28
+ factor = 1 + factor # factor must be greater than 1
29
+ xr0 = xl0 + d # xr0 must be greater than a in basic case
30
+
31
+ def f(x):
32
+ f.count += 1
33
+ return x # root is 0
34
+
35
+ if use_xmin:
36
+ xmin = -rng.random()
37
+ n = np.ceil(np.log(-(xl0 - xmin) / xmin) / np.log(factor))
38
+ l, u = xmin + (xl0 - xmin)*factor**-n, xmin + (xl0 - xmin)*factor**-(n - 1)
39
+ kwargs = dict(xl0=xl0, xr0=xr0, factor=factor, xmin=xmin)
40
+ else:
41
+ n = np.ceil(np.log(xr0/d) / np.log(factor))
42
+ l, u = xr0 - d*factor**n, xr0 - d*factor**(n-1)
43
+ kwargs = dict(xl0=xl0, xr0=xr0, factor=factor)
44
+
45
+ if other_side:
46
+ kwargs['xl0'], kwargs['xr0'] = -kwargs['xr0'], -kwargs['xl0']
47
+ l, u = -u, -l
48
+ if 'xmin' in kwargs:
49
+ kwargs['xmax'] = -kwargs.pop('xmin')
50
+
51
+ if fix_one_side:
52
+ if other_side:
53
+ kwargs['xmin'] = -xr0
54
+ else:
55
+ kwargs['xmax'] = xr0
56
+
57
+ f.count = 0
58
+ res = _bracket_root(f, **kwargs)
59
+
60
+ # Compare reported number of function evaluations `nfev` against
61
+ # reported `nit`, actual function call count `f.count`, and theoretical
62
+ # number of expansions `n`.
63
+ # When both sides are free, these get multiplied by 2 because function
64
+ # is evaluated on the left and the right each iteration.
65
+ # When one side is fixed, however, we add one: on the right side, the
66
+ # function gets evaluated once at b.
67
+ # Add 1 to `n` and `res.nit` because function evaluations occur at
68
+ # iterations *0*, 1, ..., `n`. Subtract 1 from `f.count` because
69
+ # function is called separately for left and right in iteration 0.
70
+ if not fix_one_side:
71
+ assert res.nfev == 2*(res.nit+1) == 2*(f.count-1) == 2*(n + 1)
72
+ else:
73
+ assert res.nfev == (res.nit+1)+1 == (f.count-1)+1 == (n+1)+1
74
+
75
+ # Compare reported bracket to theoretical bracket and reported function
76
+ # values to function evaluated at bracket.
77
+ bracket = np.asarray([res.xl, res.xr])
78
+ assert_allclose(bracket, (l, u))
79
+ f_bracket = np.asarray([res.fl, res.fr])
80
+ assert_allclose(f_bracket, f(bracket))
81
+
82
+ # Check that bracket is valid and that status and success are correct
83
+ assert res.xr > res.xl
84
+ signs = np.sign(f_bracket)
85
+ assert signs[0] == -signs[1]
86
+ assert res.status == 0
87
+ assert res.success
88
+
89
+ def f(self, q, p):
90
+ return stats.norm.cdf(q) - p
91
+
92
+ @pytest.mark.parametrize('p', [0.6, np.linspace(0.05, 0.95, 10)])
93
+ @pytest.mark.parametrize('xmin', [-5, None])
94
+ @pytest.mark.parametrize('xmax', [5, None])
95
+ @pytest.mark.parametrize('factor', [1.2, 2])
96
+ def test_basic(self, p, xmin, xmax, factor):
97
+ # Test basic functionality to bracket root (distribution PPF)
98
+ res = _bracket_root(self.f, -0.01, 0.01, xmin=xmin, xmax=xmax,
99
+ factor=factor, args=(p,))
100
+ assert_equal(-np.sign(res.fl), np.sign(res.fr))
101
+
102
+ @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
103
+ def test_vectorization(self, shape):
104
+ # Test for correct functionality, output shapes, and dtypes for various
105
+ # input shapes.
106
+ p = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
107
+ args = (p,)
108
+ maxiter = 10
109
+
110
+ @np.vectorize
111
+ def bracket_root_single(xl0, xr0, xmin, xmax, factor, p):
112
+ return _bracket_root(self.f, xl0, xr0, xmin=xmin, xmax=xmax,
113
+ factor=factor, args=(p,),
114
+ maxiter=maxiter)
115
+
116
+ def f(*args, **kwargs):
117
+ f.f_evals += 1
118
+ return self.f(*args, **kwargs)
119
+ f.f_evals = 0
120
+
121
+ rng = np.random.default_rng(2348234)
122
+ xl0 = -rng.random(size=shape)
123
+ xr0 = rng.random(size=shape)
124
+ xmin, xmax = 1e3*xl0, 1e3*xr0
125
+ if shape: # make some elements un
126
+ i = rng.random(size=shape) > 0.5
127
+ xmin[i], xmax[i] = -np.inf, np.inf
128
+ factor = rng.random(size=shape) + 1.5
129
+ res = _bracket_root(f, xl0, xr0, xmin=xmin, xmax=xmax, factor=factor,
130
+ args=args, maxiter=maxiter)
131
+ refs = bracket_root_single(xl0, xr0, xmin, xmax, factor, p).ravel()
132
+
133
+ attrs = ['xl', 'xr', 'fl', 'fr', 'success', 'nfev', 'nit']
134
+ for attr in attrs:
135
+ ref_attr = [getattr(ref, attr) for ref in refs]
136
+ res_attr = getattr(res, attr)
137
+ assert_allclose(res_attr.ravel(), ref_attr)
138
+ assert_equal(res_attr.shape, shape)
139
+
140
+ assert np.issubdtype(res.success.dtype, np.bool_)
141
+ if shape:
142
+ assert np.all(res.success[1:-1])
143
+ assert np.issubdtype(res.status.dtype, np.integer)
144
+ assert np.issubdtype(res.nfev.dtype, np.integer)
145
+ assert np.issubdtype(res.nit.dtype, np.integer)
146
+ assert_equal(np.max(res.nit), f.f_evals - 2)
147
+ assert_array_less(res.xl, res.xr)
148
+ assert_allclose(res.fl, self.f(res.xl, *args))
149
+ assert_allclose(res.fr, self.f(res.xr, *args))
150
+
151
+ def test_flags(self):
152
+ # Test cases that should produce different status flags; show that all
153
+ # can be produced simultaneously.
154
+ def f(xs, js):
155
+ funcs = [lambda x: x - 1.5,
156
+ lambda x: x - 1000,
157
+ lambda x: x - 1000,
158
+ lambda x: np.nan,
159
+ lambda x: x]
160
+
161
+ return [funcs[j](x) for x, j in zip(xs, js)]
162
+
163
+ args = (np.arange(5, dtype=np.int64),)
164
+ res = _bracket_root(f,
165
+ xl0=[-1, -1, -1, -1, 4],
166
+ xr0=[1, 1, 1, 1, -4],
167
+ xmin=[-np.inf, -1, -np.inf, -np.inf, 6],
168
+ xmax=[np.inf, 1, np.inf, np.inf, 2],
169
+ args=args, maxiter=3)
170
+
171
+ ref_flags = np.array([eim._ECONVERGED,
172
+ _ELIMITS,
173
+ eim._ECONVERR,
174
+ eim._EVALUEERR,
175
+ eim._EINPUTERR])
176
+
177
+ assert_equal(res.status, ref_flags)
178
+
179
+ @pytest.mark.parametrize("root", (0.622, [0.622, 0.623]))
180
+ @pytest.mark.parametrize('xmin', [-5, None])
181
+ @pytest.mark.parametrize('xmax', [5, None])
182
+ @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
183
+ def test_dtype(self, root, xmin, xmax, dtype):
184
+ # Test that dtypes are preserved
185
+
186
+ xmin = xmin if xmin is None else dtype(xmin)
187
+ xmax = xmax if xmax is None else dtype(xmax)
188
+ root = dtype(root)
189
+ def f(x, root):
190
+ return ((x - root) ** 3).astype(dtype)
191
+
192
+ bracket = np.asarray([-0.01, 0.01], dtype=dtype)
193
+ res = _bracket_root(f, *bracket, xmin=xmin, xmax=xmax, args=(root,))
194
+ assert np.all(res.success)
195
+ assert res.xl.dtype == res.xr.dtype == dtype
196
+ assert res.fl.dtype == res.fr.dtype == dtype
197
+
198
+ def test_input_validation(self):
199
+ # Test input validation for appropriate error messages
200
+
201
+ message = '`func` must be callable.'
202
+ with pytest.raises(ValueError, match=message):
203
+ _bracket_root(None, -4, 4)
204
+
205
+ message = '...must be numeric and real.'
206
+ with pytest.raises(ValueError, match=message):
207
+ _bracket_root(lambda x: x, -4+1j, 4)
208
+ with pytest.raises(ValueError, match=message):
209
+ _bracket_root(lambda x: x, -4, 'hello')
210
+ with pytest.raises(ValueError, match=message):
211
+ _bracket_root(lambda x: x, -4, 4, xmin=np)
212
+ with pytest.raises(ValueError, match=message):
213
+ _bracket_root(lambda x: x, -4, 4, xmax=object())
214
+ with pytest.raises(ValueError, match=message):
215
+ _bracket_root(lambda x: x, -4, 4, factor=sum)
216
+
217
+ message = "All elements of `factor` must be greater than 1."
218
+ with pytest.raises(ValueError, match=message):
219
+ _bracket_root(lambda x: x, -4, 4, factor=0.5)
220
+
221
+ message = "shape mismatch: objects cannot be broadcast"
222
+ # raised by `np.broadcast, but the traceback is readable IMO
223
+ with pytest.raises(ValueError, match=message):
224
+ _bracket_root(lambda x: x, [-2, -3], [3, 4, 5])
225
+ # Consider making this give a more readable error message
226
+ # with pytest.raises(ValueError, match=message):
227
+ # _bracket_root(lambda x: [x[0], x[1], x[1]], [-3, -3], [5, 5])
228
+
229
+ message = '`maxiter` must be a non-negative integer.'
230
+ with pytest.raises(ValueError, match=message):
231
+ _bracket_root(lambda x: x, -4, 4, maxiter=1.5)
232
+ with pytest.raises(ValueError, match=message):
233
+ _bracket_root(lambda x: x, -4, 4, maxiter=-1)
234
+
235
+ def test_special_cases(self):
236
+ # Test edge cases and other special cases
237
+
238
+ # Test that integers are not passed to `f`
239
+ # (otherwise this would overflow)
240
+ def f(x):
241
+ assert np.issubdtype(x.dtype, np.floating)
242
+ return x ** 99 - 1
243
+
244
+ res = _bracket_root(f, -7, 5)
245
+ assert res.success
246
+
247
+ # Test maxiter = 0. Should do nothing to bracket.
248
+ def f(x):
249
+ return x - 10
250
+
251
+ bracket = (-3, 5)
252
+ res = _bracket_root(f, *bracket, maxiter=0)
253
+ assert res.xl, res.xr == bracket
254
+ assert res.nit == 0
255
+ assert res.nfev == 2
256
+ assert res.status == -2
257
+
258
+ # Test scalar `args` (not in tuple)
259
+ def f(x, c):
260
+ return c*x - 1
261
+
262
+ res = _bracket_root(f, -1, 1, args=3)
263
+ assert res.success
264
+ assert_allclose(res.fl, f(res.xl, 3))
265
+
266
+ # Test other edge cases
267
+
268
+ def f(x):
269
+ f.count += 1
270
+ return x
271
+
272
+ # 1. root lies within guess of bracket
273
+ f.count = 0
274
+ _bracket_root(f, -10, 20)
275
+ assert_equal(f.count, 2)
276
+
277
+ # 2. bracket endpoint hits root exactly
278
+ f.count = 0
279
+ res = _bracket_root(f, 5, 10, factor=2)
280
+ bracket = (res.xl, res.xr)
281
+ assert_equal(res.nfev, 4)
282
+ assert_allclose(bracket, (0, 5), atol=1e-15)
283
+
284
+ # 3. bracket limit hits root exactly
285
+ with np.errstate(over='ignore'):
286
+ res = _bracket_root(f, 5, 10, xmin=0)
287
+ bracket = (res.xl, res.xr)
288
+ assert_allclose(bracket[0], 0, atol=1e-15)
289
+ with np.errstate(over='ignore'):
290
+ res = _bracket_root(f, -10, -5, xmax=0)
291
+ bracket = (res.xl, res.xr)
292
+ assert_allclose(bracket[1], 0, atol=1e-15)
293
+
294
+ # 4. bracket not within min, max
295
+ with np.errstate(over='ignore'):
296
+ res = _bracket_root(f, 5, 10, xmin=1)
297
+ assert not res.success
298
+
299
+
300
+ class TestBracketMinimum:
301
+ def init_f(self):
302
+ def f(x, a, b):
303
+ f.count += 1
304
+ return (x - a)**2 + b
305
+ f.count = 0
306
+ return f
307
+
308
+ def assert_valid_bracket(self, result):
309
+ assert np.all(
310
+ (result.xl < result.xm) & (result.xm < result.xr)
311
+ )
312
+ assert np.all(
313
+ (result.fl >= result.fm) & (result.fr > result.fm)
314
+ | (result.fl > result.fm) & (result.fr > result.fm)
315
+ )
316
+
317
+ def get_kwargs(
318
+ self, *, xl0=None, xr0=None, factor=None, xmin=None, xmax=None, args=()
319
+ ):
320
+ names = ("xl0", "xr0", "xmin", "xmax", "factor", "args")
321
+ return {
322
+ name: val for name, val in zip(names, (xl0, xr0, xmin, xmax, factor, args))
323
+ if isinstance(val, np.ndarray) or np.isscalar(val)
324
+ or val not in [None, ()]
325
+ }
326
+
327
+ @pytest.mark.parametrize(
328
+ "seed",
329
+ (
330
+ 307448016549685229886351382450158984917,
331
+ 11650702770735516532954347931959000479,
332
+ 113767103358505514764278732330028568336,
333
+ )
334
+ )
335
+ @pytest.mark.parametrize("use_xmin", (False, True))
336
+ @pytest.mark.parametrize("other_side", (False, True))
337
+ def test_nfev_expected(self, seed, use_xmin, other_side):
338
+ rng = np.random.default_rng(seed)
339
+ args = (0, 0) # f(x) = x^2 with minimum at 0
340
+ # xl0, xm0, xr0 are chosen such that the initial bracket is to
341
+ # the right of the minimum, and the bracket will expand
342
+ # downhill towards zero.
343
+ xl0, d1, d2, factor = rng.random(size=4) * [1e5, 10, 10, 5]
344
+ xm0 = xl0 + d1
345
+ xr0 = xm0 + d2
346
+ # Factor should be greater than one.
347
+ factor += 1
348
+
349
+ if use_xmin:
350
+ xmin = -rng.random() * 5
351
+ n = int(np.ceil(np.log(-(xl0 - xmin) / xmin) / np.log(factor)))
352
+ lower = xmin + (xl0 - xmin)*factor**-n
353
+ middle = xmin + (xl0 - xmin)*factor**-(n-1)
354
+ upper = xmin + (xl0 - xmin)*factor**-(n-2) if n > 1 else xm0
355
+ # It may be the case the lower is below the minimum, but we still
356
+ # don't have a valid bracket.
357
+ if middle**2 > lower**2:
358
+ n += 1
359
+ lower, middle, upper = (
360
+ xmin + (xl0 - xmin)*factor**-n, lower, middle
361
+ )
362
+ else:
363
+ xmin = None
364
+ n = int(np.ceil(np.log(xl0 / d1) / np.log(factor)))
365
+ lower = xl0 - d1*factor**n
366
+ middle = xl0 - d1*factor**(n-1) if n > 1 else xl0
367
+ upper = xl0 - d1*factor**(n-2) if n > 1 else xm0
368
+ # It may be the case the lower is below the minimum, but we still
369
+ # don't have a valid bracket.
370
+ if middle**2 > lower**2:
371
+ n += 1
372
+ lower, middle, upper = (
373
+ xl0 - d1*factor**n, lower, middle
374
+ )
375
+ f = self.init_f()
376
+
377
+ xmax = None
378
+ if other_side:
379
+ xl0, xm0, xr0 = -xr0, -xm0, -xl0
380
+ xmin, xmax = None, -xmin if xmin is not None else None
381
+ lower, middle, upper = -upper, -middle, -lower
382
+
383
+ kwargs = self.get_kwargs(
384
+ xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, factor=factor, args=args
385
+ )
386
+ result = _bracket_minimum(f, xm0, **kwargs)
387
+
388
+ # Check that `nfev` and `nit` have the correct relationship
389
+ assert result.nfev == result.nit + 3
390
+ # Check that `nfev` reports the correct number of function evaluations.
391
+ assert result.nfev == f.count
392
+ # Check that the number of iterations matches the theoretical value.
393
+ assert result.nit == n
394
+
395
+ # Compare reported bracket to theoretical bracket and reported function
396
+ # values to function evaluated at bracket.
397
+ bracket = np.asarray([result.xl, result.xm, result.xr])
398
+ assert_allclose(bracket, (lower, middle, upper))
399
+ f_bracket = np.asarray([result.fl, result.fm, result.fr])
400
+ assert_allclose(f_bracket, f(bracket, *args))
401
+
402
+ self.assert_valid_bracket(result)
403
+ assert result.status == 0
404
+ assert result.success
405
+
406
+ def test_flags(self):
407
+ # Test cases that should produce different status flags; show that all
408
+ # can be produced simultaneously
409
+ def f(xs, js):
410
+ funcs = [lambda x: (x - 1.5)**2,
411
+ lambda x: x,
412
+ lambda x: x,
413
+ lambda x: np.nan,
414
+ lambda x: x**2]
415
+
416
+ return [funcs[j](x) for x, j in zip(xs, js)]
417
+
418
+ args = (np.arange(5, dtype=np.int64),)
419
+ xl0 = [-1.0, -1.0, -1.0, -1.0, 6.0]
420
+ xm0 = [0.0, 0.0, 0.0, 0.0, 4.0]
421
+ xr0 = [1.0, 1.0, 1.0, 1.0, 2.0]
422
+ xmin=[-np.inf, -1.0, -np.inf, -np.inf, 8.0]
423
+
424
+ result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, xmin=xmin,
425
+ args=args, maxiter=3)
426
+
427
+ reference_flags = np.array([eim._ECONVERGED, _ELIMITS,
428
+ eim._ECONVERR, eim._EVALUEERR,
429
+ eim._EINPUTERR])
430
+ assert_equal(result.status, reference_flags)
431
+
432
+ @pytest.mark.parametrize("minimum", (0.622, [0.622, 0.623]))
433
+ @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
434
+ @pytest.mark.parametrize("xmin", [-5, None])
435
+ @pytest.mark.parametrize("xmax", [5, None])
436
+ def test_dtypes(self, minimum, xmin, xmax, dtype):
437
+ xmin = xmin if xmin is None else dtype(xmin)
438
+ xmax = xmax if xmax is None else dtype(xmax)
439
+ minimum = dtype(minimum)
440
+
441
+ def f(x, minimum):
442
+ return ((x - minimum)**2).astype(dtype)
443
+
444
+ xl0, xm0, xr0 = np.array([-0.01, 0.0, 0.01], dtype=dtype)
445
+ result = _bracket_minimum(
446
+ f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax, args=(minimum, )
447
+ )
448
+ assert np.all(result.success)
449
+ assert result.xl.dtype == result.xm.dtype == result.xr.dtype == dtype
450
+ assert result.fl.dtype == result.fm.dtype == result.fr.dtype == dtype
451
+
452
+ def test_input_validation(self):
453
+ # Test input validation for appropriate error messages
454
+
455
+ message = '`func` must be callable.'
456
+ with pytest.raises(ValueError, match=message):
457
+ _bracket_minimum(None, -4, xl0=4)
458
+
459
+ message = '...must be numeric and real.'
460
+ with pytest.raises(ValueError, match=message):
461
+ _bracket_minimum(lambda x: x**2, 4+1j)
462
+ with pytest.raises(ValueError, match=message):
463
+ _bracket_minimum(lambda x: x**2, -4, xl0='hello')
464
+ with pytest.raises(ValueError, match=message):
465
+ _bracket_minimum(lambda x: x**2, -4, xmin=np)
466
+ with pytest.raises(ValueError, match=message):
467
+ _bracket_minimum(lambda x: x**2, -4, xmax=object())
468
+ with pytest.raises(ValueError, match=message):
469
+ _bracket_minimum(lambda x: x**2, -4, factor=sum)
470
+
471
+ message = "All elements of `factor` must be greater than 1."
472
+ with pytest.raises(ValueError, match=message):
473
+ _bracket_minimum(lambda x: x, -4, factor=0.5)
474
+
475
+ message = "shape mismatch: objects cannot be broadcast"
476
+ # raised by `np.broadcast, but the traceback is readable IMO
477
+ with pytest.raises(ValueError, match=message):
478
+ _bracket_minimum(lambda x: x**2, [-2, -3], xl0=[-3, -4, -5])
479
+
480
+ message = '`maxiter` must be a non-negative integer.'
481
+ with pytest.raises(ValueError, match=message):
482
+ _bracket_minimum(lambda x: x**2, -4, xr0=4, maxiter=1.5)
483
+ with pytest.raises(ValueError, match=message):
484
+ _bracket_minimum(lambda x: x**2, -4, xr0=4, maxiter=-1)
485
+
486
+ @pytest.mark.parametrize("xl0", [0.0, None])
487
+ @pytest.mark.parametrize("xm0", (0.05, 0.1, 0.15))
488
+ @pytest.mark.parametrize("xr0", (0.2, 0.4, 0.6, None))
489
+ # Minimum is ``a`` for each tuple ``(a, b)`` below. Tests cases where minimum
490
+ # is within, or at varying disances to the left or right of the initial
491
+ # bracket.
492
+ @pytest.mark.parametrize(
493
+ "args",
494
+ (
495
+ (1.2, 0), (-0.5, 0), (0.1, 0), (0.2, 0), (3.6, 0), (21.4, 0),
496
+ (121.6, 0), (5764.1, 0), (-6.4, 0), (-12.9, 0), (-146.2, 0)
497
+ )
498
+ )
499
+ def test_scalar_no_limits(self, xl0, xm0, xr0, args):
500
+ f = self.init_f()
501
+ kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, args=args)
502
+ result = _bracket_minimum(f, xm0, **kwargs)
503
+ self.assert_valid_bracket(result)
504
+ assert result.status == 0
505
+ assert result.success
506
+ assert result.nfev == f.count
507
+
508
+ @pytest.mark.parametrize(
509
+ # xmin is set at 0.0 in all cases.
510
+ "xl0,xm0,xr0,xmin",
511
+ (
512
+ # Initial bracket at varying distances from the xmin.
513
+ (0.5, 0.75, 1.0, 0.0),
514
+ (1.0, 2.5, 4.0, 0.0),
515
+ (2.0, 4.0, 6.0, 0.0),
516
+ (12.0, 16.0, 20.0, 0.0),
517
+ # Test default initial left endpoint selection. It should not
518
+ # be below xmin.
519
+ (None, 0.75, 1.0, 0.0),
520
+ (None, 2.5, 4.0, 0.0),
521
+ (None, 4.0, 6.0, 0.0),
522
+ (None, 16.0, 20.0, 0.0),
523
+ )
524
+ )
525
+ @pytest.mark.parametrize(
526
+ "args", (
527
+ (0.0, 0.0), # Minimum is directly at xmin.
528
+ (1e-300, 0.0), # Minimum is extremely close to xmin.
529
+ (1e-20, 0.0), # Minimum is very close to xmin.
530
+ # Minimum at varying distances from xmin.
531
+ (0.1, 0.0),
532
+ (0.2, 0.0),
533
+ (0.4, 0.0)
534
+ )
535
+ )
536
+ def test_scalar_with_limit_left(self, xl0, xm0, xr0, xmin, args):
537
+ f = self.init_f()
538
+ kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmin=xmin, args=args)
539
+ result = _bracket_minimum(f, xm0, **kwargs)
540
+ self.assert_valid_bracket(result)
541
+ assert result.status == 0
542
+ assert result.success
543
+ assert result.nfev == f.count
544
+
545
+ @pytest.mark.parametrize(
546
+ #xmax is set to 1.0 in all cases.
547
+ "xl0,xm0,xr0,xmax",
548
+ (
549
+ # Bracket at varying distances from xmax.
550
+ (0.2, 0.3, 0.4, 1.0),
551
+ (0.05, 0.075, 0.1, 1.0),
552
+ (-0.2, -0.1, 0.0, 1.0),
553
+ (-21.2, -17.7, -14.2, 1.0),
554
+ # Test default right endpoint selection. It should not exceed xmax.
555
+ (0.2, 0.3, None, 1.0),
556
+ (0.05, 0.075, None, 1.0),
557
+ (-0.2, -0.1, None, 1.0),
558
+ (-21.2, -17.7, None, 1.0),
559
+ )
560
+ )
561
+ @pytest.mark.parametrize(
562
+ "args", (
563
+ (0.9999999999999999, 0.0), # Minimum very close to xmax.
564
+ # Minimum at varying distances from xmax.
565
+ (0.9, 0.0),
566
+ (0.7, 0.0),
567
+ (0.5, 0.0)
568
+ )
569
+ )
570
+ def test_scalar_with_limit_right(self, xl0, xm0, xr0, xmax, args):
571
+ f = self.init_f()
572
+ kwargs = self.get_kwargs(xl0=xl0, xr0=xr0, xmax=xmax, args=args)
573
+ result = _bracket_minimum(f, xm0, **kwargs)
574
+ self.assert_valid_bracket(result)
575
+ assert result.status == 0
576
+ assert result.success
577
+ assert result.nfev == f.count
578
+
579
+ @pytest.mark.parametrize(
580
+ "xl0,xm0,xr0,xmin,xmax,args",
581
+ (
582
+ ( # Case 1:
583
+ # Initial bracket.
584
+ 0.2,
585
+ 0.3,
586
+ 0.4,
587
+ # Function slopes down to the right from the bracket to a minimum
588
+ # at 1.0. xmax is also at 1.0
589
+ None,
590
+ 1.0,
591
+ (1.0, 0.0)
592
+ ),
593
+ ( # Case 2:
594
+ # Initial bracket.
595
+ 1.4,
596
+ 1.95,
597
+ 2.5,
598
+ # Function slopes down to the left from the bracket to a minimum at
599
+ # 0.3 with xmin set to 0.3.
600
+ 0.3,
601
+ None,
602
+ (0.3, 0.0)
603
+ ),
604
+ (
605
+ # Case 3:
606
+ # Initial bracket.
607
+ 2.6,
608
+ 3.25,
609
+ 3.9,
610
+ # Function slopes down and to the right to a minimum at 99.4 with xmax
611
+ # at 99.4. Tests case where minimum is at xmax relatively further from
612
+ # the bracket.
613
+ None,
614
+ 99.4,
615
+ (99.4, 0)
616
+ ),
617
+ (
618
+ # Case 4:
619
+ # Initial bracket.
620
+ 4,
621
+ 4.5,
622
+ 5,
623
+ # Function slopes down and to the left away from the bracket with a
624
+ # minimum at -26.3 with xmin set to -26.3. Tests case where minimum is
625
+ # at xmin relatively far from the bracket.
626
+ -26.3,
627
+ None,
628
+ (-26.3, 0)
629
+ ),
630
+ (
631
+ # Case 5:
632
+ # Similar to Case 1 above, but tests default values of xl0 and xr0.
633
+ None,
634
+ 0.3,
635
+ None,
636
+ None,
637
+ 1.0,
638
+ (1.0, 0.0)
639
+ ),
640
+ ( # Case 6:
641
+ # Similar to Case 2 above, but tests default values of xl0 and xr0.
642
+ None,
643
+ 1.95,
644
+ None,
645
+ 0.3,
646
+ None,
647
+ (0.3, 0.0)
648
+ ),
649
+ (
650
+ # Case 7:
651
+ # Similar to Case 3 above, but tests default values of xl0 and xr0.
652
+ None,
653
+ 3.25,
654
+ None,
655
+ None,
656
+ 99.4,
657
+ (99.4, 0)
658
+ ),
659
+ (
660
+ # Case 8:
661
+ # Similar to Case 4 above, but tests default values of xl0 and xr0.
662
+ None,
663
+ 4.5,
664
+ None,
665
+ -26.3,
666
+ None,
667
+ (-26.3, 0)
668
+ ),
669
+ )
670
+ )
671
+ def test_minimum_at_boundary_point(self, xl0, xm0, xr0, xmin, xmax, args):
672
+ f = self.init_f()
673
+ kwargs = self.get_kwargs(xr0=xr0, xmin=xmin, xmax=xmax, args=args)
674
+ result = _bracket_minimum(f, xm0, **kwargs)
675
+ assert result.status == -1
676
+ assert args[0] in (result.xl, result.xr)
677
+ assert result.nfev == f.count
678
+
679
+ @pytest.mark.parametrize('shape', [tuple(), (12, ), (3, 4), (3, 2, 2)])
680
+ def test_vectorization(self, shape):
681
+ # Test for correct functionality, output shapes, and dtypes for
682
+ # various input shapes.
683
+ a = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
684
+ args = (a, 0.0)
685
+ maxiter = 10
686
+
687
+ @np.vectorize
688
+ def bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a):
689
+ return _bracket_minimum(self.init_f(), xm0, xl0=xl0, xr0=xr0, xmin=xmin,
690
+ xmax=xmax, factor=factor, maxiter=maxiter,
691
+ args=(a, 0.0))
692
+
693
+ f = self.init_f()
694
+
695
+ rng = np.random.default_rng(2348234)
696
+ xl0 = -rng.random(size=shape)
697
+ xr0 = rng.random(size=shape)
698
+ xm0 = xl0 + rng.random(size=shape) * (xr0 - xl0)
699
+ xmin, xmax = 1e3*xl0, 1e3*xr0
700
+ if shape: # make some elements un
701
+ i = rng.random(size=shape) > 0.5
702
+ xmin[i], xmax[i] = -np.inf, np.inf
703
+ factor = rng.random(size=shape) + 1.5
704
+ res = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, xmin=xmin, xmax=xmax,
705
+ factor=factor, args=args, maxiter=maxiter)
706
+ refs = bracket_minimum_single(xm0, xl0, xr0, xmin, xmax, factor, a).ravel()
707
+
708
+ attrs = ['xl', 'xm', 'xr', 'fl', 'fm', 'fr', 'success', 'nfev', 'nit']
709
+ for attr in attrs:
710
+ ref_attr = [getattr(ref, attr) for ref in refs]
711
+ res_attr = getattr(res, attr)
712
+ assert_allclose(res_attr.ravel(), ref_attr)
713
+ assert_equal(res_attr.shape, shape)
714
+
715
+ assert np.issubdtype(res.success.dtype, np.bool_)
716
+ if shape:
717
+ assert np.all(res.success[1:-1])
718
+ assert np.issubdtype(res.status.dtype, np.integer)
719
+ assert np.issubdtype(res.nfev.dtype, np.integer)
720
+ assert np.issubdtype(res.nit.dtype, np.integer)
721
+ assert_equal(np.max(res.nit), f.count - 3)
722
+ self.assert_valid_bracket(res)
723
+ assert_allclose(res.fl, f(res.xl, *args))
724
+ assert_allclose(res.fm, f(res.xm, *args))
725
+ assert_allclose(res.fr, f(res.xr, *args))
726
+
727
+ def test_special_cases(self):
728
+ # Test edge cases and other special cases.
729
+
730
+ # Test that integers are not passed to `f`
731
+ # (otherwise this would overflow)
732
+ def f(x):
733
+ assert np.issubdtype(x.dtype, np.floating)
734
+ return x ** 98 - 1
735
+
736
+ result = _bracket_minimum(f, -7, xr0=5)
737
+ assert result.success
738
+
739
+ # Test maxiter = 0. Should do nothing to bracket.
740
+ def f(x):
741
+ return x**2 - 10
742
+
743
+ xl0, xm0, xr0 = -3, -1, 2
744
+ result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, maxiter=0)
745
+ assert_equal([result.xl, result.xm, result.xr], [xl0, xm0, xr0])
746
+
747
+ # Test scalar `args` (not in tuple)
748
+ def f(x, c):
749
+ return c*x**2 - 1
750
+
751
+ result = _bracket_minimum(f, -1, args=3)
752
+ assert result.success
753
+ assert_allclose(result.fl, f(result.xl, 3))
754
+
755
+ # Initial bracket is valid.
756
+ f = self.init_f()
757
+ xl0, xm0, xr0 = [-1.0, -0.2, 1.0]
758
+ args = (0, 0)
759
+ result = _bracket_minimum(f, xm0, xl0=xl0, xr0=xr0, args=args)
760
+ assert f.count == 3
761
+
762
+ assert_equal(
763
+ [result.xl, result.xm, result.xr],
764
+ [xl0, xm0, xr0],
765
+ )
766
+ assert_equal(
767
+ [result.fl, result.fm, result.fr],
768
+ [f(xl0, *args), f(xm0, *args), f(xr0, *args)],
769
+ )
770
+
771
+ def test_gh_20562_left(self):
772
+ # Regression test for https://github.com/scipy/scipy/issues/20562
773
+ # minimum of f in [xmin, xmax] is at xmin.
774
+ xmin, xmax = 0.21933608, 1.39713606
775
+
776
+ def f(x):
777
+ log_a, log_b = np.log([xmin, xmax])
778
+ return -((log_b - log_a)*x)**-1
779
+
780
+ result = _bracket_minimum(f, 0.5535723499480897, xmin=xmin, xmax=xmax)
781
+ assert xmin == result.xl
782
+
783
+ def test_gh_20562_right(self):
784
+ # Regression test for https://github.com/scipy/scipy/issues/20562
785
+ # minimum of f in [xmin, xmax] is at xmax.
786
+ xmin, xmax = -1.39713606, -0.21933608,
787
+
788
+ def f(x):
789
+ log_a, log_b = np.log([-xmax, -xmin])
790
+ return ((log_b - log_a)*x)**-1
791
+
792
+ result = _bracket_minimum(f, -0.5535723499480897, xmin=xmin, xmax=xmax)
793
+ assert xmax == result.xr
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_chandrupatla.py ADDED
@@ -0,0 +1,906 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ import numpy as np
3
+ from numpy.testing import assert_allclose, assert_equal, assert_array_less
4
+
5
+ from scipy import stats, special
6
+ import scipy._lib._elementwise_iterative_method as eim
7
+ from scipy.conftest import array_api_compatible
8
+ from scipy._lib._array_api import (array_namespace, xp_assert_close, xp_assert_equal,
9
+ xp_assert_less, xp_minimum, is_numpy, is_cupy)
10
+
11
+ from scipy.optimize._chandrupatla import (_chandrupatla_minimize,
12
+ _chandrupatla as _chandrupatla_root)
13
+ from scipy.optimize._tstutils import _CHANDRUPATLA_TESTS
14
+
15
+ from itertools import permutations
16
+ from .test_zeros import TestScalarRootFinders
17
+
18
+ def f1(x):
19
+ return 100*(1 - x**3.)**2 + (1-x**2.) + 2*(1-x)**2.
20
+
21
+
22
+ def f2(x):
23
+ return 5 + (x - 2.)**6
24
+
25
+
26
+ def f3(x):
27
+ return np.exp(x) - 5*x
28
+
29
+
30
+ def f4(x):
31
+ return x**5. - 5*x**3. - 20.*x + 5.
32
+
33
+
34
+ def f5(x):
35
+ return 8*x**3 - 2*x**2 - 7*x + 3
36
+
37
+
38
+ def _bracket_minimum(func, x1, x2):
39
+ phi = 1.61803398875
40
+ maxiter = 100
41
+ f1 = func(x1)
42
+ f2 = func(x2)
43
+ step = x2 - x1
44
+ x1, x2, f1, f2, step = ((x2, x1, f2, f1, -step) if f2 > f1
45
+ else (x1, x2, f1, f2, step))
46
+
47
+ for i in range(maxiter):
48
+ step *= phi
49
+ x3 = x2 + step
50
+ f3 = func(x3)
51
+ if f3 < f2:
52
+ x1, x2, f1, f2 = x2, x3, f2, f3
53
+ else:
54
+ break
55
+ return x1, x2, x3, f1, f2, f3
56
+
57
+
58
# Test cases for TestChandrupatlaMinimize.test_nit_expected. Each tuple is
# (func, x1, nit): the objective function, the left end of the initial
# interval fed to `_bracket_minimum`, and the iteration count reported in
# Chandrupatla's original paper for that starting point.
cases = [
    (f1, -1, 11),
    (f1, -2, 13),
    (f1, -4, 13),
    (f1, -8, 15),
    (f1, -16, 16),
    (f1, -32, 19),
    (f1, -64, 20),
    (f1, -128, 21),
    (f1, -256, 21),
    (f1, -512, 19),
    (f1, -1024, 24),
    (f2, -1, 8),
    (f2, -2, 6),
    (f2, -4, 6),
    (f2, -8, 7),
    (f2, -16, 8),
    (f2, -32, 8),
    (f2, -64, 9),
    (f2, -128, 11),
    (f2, -256, 13),
    (f2, -512, 12),
    (f2, -1024, 13),
    (f3, -1, 11),
    (f3, -2, 11),
    (f3, -4, 11),
    (f3, -8, 10),
    (f3, -16, 14),
    (f3, -32, 12),
    (f3, -64, 15),
    (f3, -128, 18),
    (f3, -256, 18),
    (f3, -512, 19),
    (f3, -1024, 19),
    (f4, -0.05, 9),
    (f4, -0.10, 11),
    (f4, -0.15, 11),
    (f4, -0.20, 11),
    (f4, -0.25, 11),
    (f4, -0.30, 9),
    (f4, -0.35, 9),
    (f4, -0.40, 9),
    (f4, -0.45, 10),
    (f4, -0.50, 10),
    (f4, -0.55, 10),
    (f5, -0.05, 6),
    (f5, -0.10, 7),
    (f5, -0.15, 8),
    (f5, -0.20, 10),
    (f5, -0.25, 9),
    (f5, -0.30, 8),
    (f5, -0.35, 7),
    (f5, -0.40, 7),
    (f5, -0.45, 9),
    (f5, -0.50, 9),
    (f5, -0.55, 8)
]
115
+
116
+
117
class TestChandrupatlaMinimize:
    """Tests of `_chandrupatla_minimize`, the elementwise bracketed scalar
    minimizer."""

    def f(self, x, loc):
        # Negated normal PDF shifted by `loc`; unique minimizer at x = loc.
        dist = stats.norm()
        return -dist.pdf(x - loc)

    @pytest.mark.parametrize('loc', [0.6, np.linspace(-1.05, 1.05, 10)])
    def test_basic(self, loc):
        # Find mode of normal distribution. Compare mode against location
        # parameter and value of pdf at mode against expected pdf.
        res = _chandrupatla_minimize(self.f, -5, 0, 5, args=(loc,))
        ref = loc
        np.testing.assert_allclose(res.x, ref, rtol=1e-6)
        np.testing.assert_allclose(res.fun, -stats.norm.pdf(0), atol=0, rtol=0)
        assert res.x.shape == np.shape(ref)

    @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
    def test_vectorization(self, shape):
        # Test for correct functionality, output shapes, and dtypes for various
        # input shapes.
        loc = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
        args = (loc,)

        # Elementwise reference: run the minimizer separately on each scalar.
        @np.vectorize
        def chandrupatla_single(loc_single):
            return _chandrupatla_minimize(self.f, -5, 0, 5, args=(loc_single,))

        # Wrapper that counts how many times the callable is invoked.
        def f(*args, **kwargs):
            f.f_evals += 1
            return self.f(*args, **kwargs)
        f.f_evals = 0

        res = _chandrupatla_minimize(f, -5, 0, 5, args=args)
        refs = chandrupatla_single(loc).ravel()

        ref_x = [ref.x for ref in refs]
        assert_allclose(res.x.ravel(), ref_x)
        assert_equal(res.x.shape, shape)

        ref_fun = [ref.fun for ref in refs]
        assert_allclose(res.fun.ravel(), ref_fun)
        assert_equal(res.fun.shape, shape)
        assert_equal(res.fun, self.f(res.x, *args))

        ref_success = [ref.success for ref in refs]
        assert_equal(res.success.ravel(), ref_success)
        assert_equal(res.success.shape, shape)
        assert np.issubdtype(res.success.dtype, np.bool_)

        ref_flag = [ref.status for ref in refs]
        assert_equal(res.status.ravel(), ref_flag)
        assert_equal(res.status.shape, shape)
        assert np.issubdtype(res.status.dtype, np.integer)

        ref_nfev = [ref.nfev for ref in refs]
        assert_equal(res.nfev.ravel(), ref_nfev)
        assert_equal(np.max(res.nfev), f.f_evals)
        assert_equal(res.nfev.shape, res.fun.shape)
        assert np.issubdtype(res.nfev.dtype, np.integer)

        ref_nit = [ref.nit for ref in refs]
        assert_equal(res.nit.ravel(), ref_nit)
        # the first three evaluations set up the bracket, so nit = nfev - 3
        assert_equal(np.max(res.nit), f.f_evals-3)
        assert_equal(res.nit.shape, res.fun.shape)
        assert np.issubdtype(res.nit.dtype, np.integer)

        ref_xl = [ref.xl for ref in refs]
        assert_allclose(res.xl.ravel(), ref_xl)
        assert_equal(res.xl.shape, shape)

        ref_xm = [ref.xm for ref in refs]
        assert_allclose(res.xm.ravel(), ref_xm)
        assert_equal(res.xm.shape, shape)

        ref_xr = [ref.xr for ref in refs]
        assert_allclose(res.xr.ravel(), ref_xr)
        assert_equal(res.xr.shape, shape)

        ref_fl = [ref.fl for ref in refs]
        assert_allclose(res.fl.ravel(), ref_fl)
        assert_equal(res.fl.shape, shape)
        assert_allclose(res.fl, self.f(res.xl, *args))

        ref_fm = [ref.fm for ref in refs]
        assert_allclose(res.fm.ravel(), ref_fm)
        assert_equal(res.fm.shape, shape)
        assert_allclose(res.fm, self.f(res.xm, *args))

        ref_fr = [ref.fr for ref in refs]
        assert_allclose(res.fr.ravel(), ref_fr)
        assert_equal(res.fr.shape, shape)
        assert_allclose(res.fr, self.f(res.xr, *args))

    def test_flags(self):
        # Test cases that should produce different status flags; show that all
        # can be produced simultaneously.
        def f(xs, js):
            funcs = [lambda x: (x - 2.5) ** 2,
                     lambda x: x - 10,
                     lambda x: (x - 2.5) ** 4,
                     lambda x: np.nan]

            return [funcs[j](x) for x, j in zip(xs, js)]

        args = (np.arange(4, dtype=np.int64),)

        res = _chandrupatla_minimize(f, [0]*4, [2]*4, [np.pi]*4, args=args,
                                     maxiter=10)

        ref_flags = np.array([eim._ECONVERGED,
                              eim._ESIGNERR,
                              eim._ECONVERR,
                              eim._EVALUEERR])
        assert_equal(res.status, ref_flags)

    def test_convergence(self):
        # Test that the convergence tolerances behave as expected
        rng = np.random.default_rng(2585255913088665241)
        p = rng.random(size=3)
        bracket = (-5, 0, 5)
        args = (p,)
        # start with every tolerance zeroed, then enable one at a time
        kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0)

        kwargs = kwargs0.copy()
        kwargs['xatol'] = 1e-3
        res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        j1 = abs(res1.xr - res1.xl)
        assert_array_less(j1, 4*kwargs['xatol'])
        kwargs['xatol'] = 1e-6
        res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        j2 = abs(res2.xr - res2.xl)
        assert_array_less(j2, 4*kwargs['xatol'])
        assert_array_less(j2, j1)

        kwargs = kwargs0.copy()
        kwargs['xrtol'] = 1e-3
        res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        j1 = abs(res1.xr - res1.xl)
        assert_array_less(j1, 4*kwargs['xrtol']*abs(res1.x))
        kwargs['xrtol'] = 1e-6
        res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        j2 = abs(res2.xr - res2.xl)
        assert_array_less(j2, 4*kwargs['xrtol']*abs(res2.x))
        assert_array_less(j2, j1)

        kwargs = kwargs0.copy()
        kwargs['fatol'] = 1e-3
        res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        # second central difference of f over the final bracket
        h1 = abs(res1.fl - 2 * res1.fm + res1.fr)
        assert_array_less(h1, 2*kwargs['fatol'])
        kwargs['fatol'] = 1e-6
        res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        h2 = abs(res2.fl - 2 * res2.fm + res2.fr)
        assert_array_less(h2, 2*kwargs['fatol'])
        assert_array_less(h2, h1)

        kwargs = kwargs0.copy()
        kwargs['frtol'] = 1e-3
        res1 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        h1 = abs(res1.fl - 2 * res1.fm + res1.fr)
        assert_array_less(h1, 2*kwargs['frtol']*abs(res1.fun))
        kwargs['frtol'] = 1e-6
        res2 = _chandrupatla_minimize(self.f, *bracket, **kwargs)
        h2 = abs(res2.fl - 2 * res2.fm + res2.fr)
        assert_array_less(h2, 2*kwargs['frtol']*abs(res2.fun))
        assert_array_less(h2, h1)

    def test_maxiter_callback(self):
        # Test behavior of `maxiter` parameter and `callback` interface
        loc = 0.612814
        bracket = (-5, 0, 5)
        maxiter = 5

        res = _chandrupatla_minimize(self.f, *bracket, args=(loc,),
                                     maxiter=maxiter)
        assert not np.any(res.success)
        assert np.all(res.nfev == maxiter+3)
        assert np.all(res.nit == maxiter)

        def callback(res):
            callback.iter += 1
            callback.res = res
            assert hasattr(res, 'x')
            if callback.iter == 0:
                # callback is called once with initial bracket
                assert (res.xl, res.xm, res.xr) == bracket
            else:
                # exactly one side of the bracket changes per iteration
                changed_xr = (res.xl == callback.xl) & (res.xr != callback.xr)
                changed_xl = (res.xl != callback.xl) & (res.xr == callback.xr)
                assert np.all(changed_xr | changed_xl)

            callback.xl = res.xl
            callback.xr = res.xr
            assert res.status == eim._EINPROGRESS
            assert_equal(self.f(res.xl, loc), res.fl)
            assert_equal(self.f(res.xm, loc), res.fm)
            assert_equal(self.f(res.xr, loc), res.fr)
            assert_equal(self.f(res.x, loc), res.fun)
            if callback.iter == maxiter:
                raise StopIteration

        callback.xl = np.nan
        callback.xr = np.nan
        callback.iter = -1  # callback called once before first iteration
        callback.res = None

        res2 = _chandrupatla_minimize(self.f, *bracket, args=(loc,),
                                      callback=callback)

        # terminating with callback is identical to terminating due to maxiter
        # (except for `status`)
        for key in res.keys():
            if key == 'status':
                assert res[key] == eim._ECONVERR
                assert callback.res[key] == eim._EINPROGRESS
                assert res2[key] == eim._ECALLBACK
            else:
                assert res2[key] == callback.res[key] == res[key]

    @pytest.mark.parametrize('case', cases)
    def test_nit_expected(self, case):
        # Test that `_chandrupatla` implements Chandrupatla's algorithm:
        # in all 55 test cases, the number of iterations performed
        # matches the number reported in the original paper.
        func, x1, nit = case

        # Find bracket using the algorithm in the paper
        step = 0.2
        x2 = x1 + step
        x1, x2, x3, f1, f2, f3 = _bracket_minimum(func, x1, x2)

        # Use tolerances from original paper
        xatol = 0.0001
        fatol = 0.000001
        xrtol = 1e-16
        frtol = 1e-16

        res = _chandrupatla_minimize(func, x1, x2, x3, xatol=xatol,
                                     fatol=fatol, xrtol=xrtol, frtol=frtol)
        assert_equal(res.nit, nit)

    @pytest.mark.parametrize("loc", (0.65, [0.65, 0.7]))
    @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
    def test_dtype(self, loc, dtype):
        # Test that dtypes are preserved

        loc = dtype(loc)

        def f(x, loc):
            assert x.dtype == dtype
            return ((x - loc) ** 2).astype(dtype)

        res = _chandrupatla_minimize(f, dtype(-3), dtype(1), dtype(5),
                                     args=(loc,))
        assert res.x.dtype == dtype
        assert_allclose(res.x, loc, rtol=np.sqrt(np.finfo(dtype).eps))

    def test_input_validation(self):
        # Test input validation for appropriate error messages

        message = '`func` must be callable.'
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(None, -4, 0, 4)

        message = 'Abscissae and function output must be real numbers.'
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, -4+1j, 0, 4)

        message = "shape mismatch: objects cannot be broadcast"
        # raised by `np.broadcast, but the traceback is readable IMO
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, [-2, -3], [0, 0], [3, 4, 5])

        message = "The shape of the array returned by `func` must be the same"
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: [x[0], x[1], x[1]], [-3, -3],
                                   [0, 0], [5, 5])

        message = 'Tolerances must be non-negative scalars.'
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, -4, 0, 4, xatol=-1)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, -4, 0, 4, xrtol=np.nan)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, -4, 0, 4, fatol='ekki')
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, -4, 0, 4, frtol=np.nan)

        message = '`maxiter` must be a non-negative integer.'
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, -4, 0, 4, maxiter=1.5)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, -4, 0, 4, maxiter=-1)

        message = '`callback` must be callable.'
        with pytest.raises(ValueError, match=message):
            _chandrupatla_minimize(lambda x: x, -4, 0, 4, callback='shrubbery')

    def test_bracket_order(self):
        # Confirm that order of points in bracket doesn't matter
        loc = np.linspace(-1, 1, 6)[:, np.newaxis]
        brackets = np.array(list(permutations([-5, 0, 5]))).T
        res = _chandrupatla_minimize(self.f, *brackets, args=(loc,))
        assert np.all(np.isclose(res.x, loc) | (res.fun == self.f(loc, loc)))
        ref = res.x[:, 0]  # all columns should be the same
        assert_allclose(*np.broadcast_arrays(res.x.T, ref), rtol=1e-15)

    def test_special_cases(self):
        # Test edge cases and other special cases

        # Test that integers are not passed to `f`
        # (otherwise this would overflow)
        def f(x):
            assert np.issubdtype(x.dtype, np.floating)
            return (x-1) ** 100

        with np.errstate(invalid='ignore'):
            res = _chandrupatla_minimize(f, -7, 0, 8, fatol=0, frtol=0)
        assert res.success
        assert_allclose(res.x, 1, rtol=1e-3)
        assert_equal(res.fun, 0)

        # Test that if all elements of bracket equal minimizer, algorithm
        # reports convergence
        def f(x):
            return (x-1)**2

        res = _chandrupatla_minimize(f, 1, 1, 1)
        assert res.success
        assert_equal(res.x, 1)

        # Test maxiter = 0. Should do nothing to bracket.
        def f(x):
            return (x-1)**2

        bracket = (-3, 1.1, 5)
        res = _chandrupatla_minimize(f, *bracket, maxiter=0)
        # NOTE(review): the second expression below is parsed as the assert
        # *message*, so only `res.xl` is actually checked; likely intended
        # `assert (res.xl, res.xm, res.xr) == bracket` -- confirm.
        assert res.xl, res.xr == bracket
        assert res.nit == 0
        assert res.nfev == 3
        assert res.status == -2
        assert res.x == 1.1  # best so far

        # Test scalar `args` (not in tuple)
        def f(x, c):
            return (x-c)**2 - 1

        res = _chandrupatla_minimize(f, -1, 0, 1, args=1/3)
        assert_allclose(res.x, 1/3)

        # Test zero tolerances
        # TODO: fatol/frtol = 0?
        def f(x):
            return -np.sin(x)

        res = _chandrupatla_minimize(f, 0, 1, np.pi, xatol=0, xrtol=0,
                                     fatol=0, frtol=0)
        assert res.success
        # found a minimum exactly (according to floating point arithmetic)
        assert res.xl < res.xm < res.xr
        assert f(res.xl) == f(res.xm) == f(res.xr)
478
+
479
+
480
@array_api_compatible
@pytest.mark.usefixtures("skip_xp_backends")
@pytest.mark.skip_xp_backends('array_api_strict', 'jax.numpy',
                              reasons=['Currently uses fancy indexing assignment.',
                                       'JAX arrays do not support item assignment.'])
class TestChandrupatla(TestScalarRootFinders):
    """Array-API-compatible tests of `_chandrupatla` (root finding)."""

    def f(self, q, p):
        # Standard normal CDF minus p; root is the normal quantile of p.
        return special.ndtr(q) - p

    @pytest.mark.parametrize('p', [0.6, np.linspace(-0.05, 1.05, 10)])
    def test_basic(self, p, xp):
        # Invert distribution CDF and compare against distribution `ppf`
        a, b = xp.asarray(-5.), xp.asarray(5.)
        res = _chandrupatla_root(self.f, a, b, args=(xp.asarray(p),))
        ref = xp.asarray(stats.norm().ppf(p), dtype=xp.asarray(p).dtype)
        xp_assert_close(res.x, ref)

    @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
    def test_vectorization(self, shape, xp):
        # Test for correct functionality, output shapes, and dtypes for various
        # input shapes.
        p = (np.linspace(-0.05, 1.05, 12).reshape(shape) if shape
             else np.float64(0.6))
        p_xp = xp.asarray(p)
        args_xp = (p_xp,)
        dtype = p_xp.dtype
        xp_test = array_namespace(p_xp)  # need xp.bool

        # Elementwise NumPy reference: solve each scalar problem separately.
        @np.vectorize
        def chandrupatla_single(p):
            return _chandrupatla_root(self.f, -5, 5, args=(p,))

        # Wrapper that counts how many times the callable is invoked.
        def f(*args, **kwargs):
            f.f_evals += 1
            return self.f(*args, **kwargs)
        f.f_evals = 0

        res = _chandrupatla_root(f, xp.asarray(-5.), xp.asarray(5.), args=args_xp)
        refs = chandrupatla_single(p).ravel()

        ref_x = [ref.x for ref in refs]
        ref_x = xp.reshape(xp.asarray(ref_x, dtype=dtype), shape)
        xp_assert_close(res.x, ref_x)

        ref_fun = [ref.fun for ref in refs]
        ref_fun = xp.reshape(xp.asarray(ref_fun, dtype=dtype), shape)
        xp_assert_close(res.fun, ref_fun, atol=1e-15)
        xp_assert_equal(res.fun, self.f(res.x, *args_xp))

        ref_success = [bool(ref.success) for ref in refs]
        ref_success = xp.reshape(xp.asarray(ref_success, dtype=xp_test.bool), shape)
        xp_assert_equal(res.success, ref_success)

        ref_flag = [ref.status for ref in refs]
        ref_flag = xp.reshape(xp.asarray(ref_flag, dtype=xp.int32), shape)
        xp_assert_equal(res.status, ref_flag)

        ref_nfev = [ref.nfev for ref in refs]
        ref_nfev = xp.reshape(xp.asarray(ref_nfev, dtype=xp.int32), shape)
        if is_numpy(xp):
            xp_assert_equal(res.nfev, ref_nfev)
            assert xp.max(res.nfev) == f.f_evals
        else:  # different backend may lead to different nfev
            assert res.nfev.shape == shape
            assert res.nfev.dtype == xp.int32

        ref_nit = [ref.nit for ref in refs]
        ref_nit = xp.reshape(xp.asarray(ref_nit, dtype=xp.int32), shape)
        if is_numpy(xp):
            xp_assert_equal(res.nit, ref_nit)
            # two evaluations set up the bracket, so nit = nfev - 2
            assert xp.max(res.nit) == f.f_evals-2
        else:
            assert res.nit.shape == shape
            assert res.nit.dtype == xp.int32

        ref_xl = [ref.xl for ref in refs]
        ref_xl = xp.reshape(xp.asarray(ref_xl, dtype=dtype), shape)
        xp_assert_close(res.xl, ref_xl)

        ref_xr = [ref.xr for ref in refs]
        ref_xr = xp.reshape(xp.asarray(ref_xr, dtype=dtype), shape)
        xp_assert_close(res.xr, ref_xr)

        xp_assert_less(res.xl, res.xr)
        finite = xp.isfinite(res.x)
        assert xp.all((res.x[finite] == res.xl[finite])
                      | (res.x[finite] == res.xr[finite]))

        # PyTorch and CuPy don't solve to the same accuracy as NumPy - that's OK.
        atol = 1e-15 if is_numpy(xp) else 1e-9

        ref_fl = [ref.fl for ref in refs]
        ref_fl = xp.reshape(xp.asarray(ref_fl, dtype=dtype), shape)
        xp_assert_close(res.fl, ref_fl, atol=atol)
        xp_assert_equal(res.fl, self.f(res.xl, *args_xp))

        ref_fr = [ref.fr for ref in refs]
        ref_fr = xp.reshape(xp.asarray(ref_fr, dtype=dtype), shape)
        xp_assert_close(res.fr, ref_fr, atol=atol)
        xp_assert_equal(res.fr, self.f(res.xr, *args_xp))

        assert xp.all(xp.abs(res.fun[finite]) ==
                      xp_minimum(xp.abs(res.fl[finite]),
                                 xp.abs(res.fr[finite])))

    def test_flags(self, xp):
        # Test cases that should produce different status flags; show that all
        # can be produced simultaneously.
        def f(xs, js):
            # Note that full_like and int(j) shouldn't really be required. CuPy
            # is just really picky here, so I'm making it a special case to
            # make sure the other backends work when the user is less careful.
            assert js.dtype == xp.int64
            if is_cupy(xp):
                funcs = [lambda x: x - 2.5,
                         lambda x: x - 10,
                         lambda x: (x - 0.1)**3,
                         lambda x: xp.full_like(x, xp.nan)]
                return [funcs[int(j)](x) for x, j in zip(xs, js)]

            funcs = [lambda x: x - 2.5,
                     lambda x: x - 10,
                     lambda x: (x - 0.1) ** 3,
                     lambda x: xp.nan]
            return [funcs[j](x) for x, j in zip(xs, js)]

        args = (xp.arange(4, dtype=xp.int64),)
        a, b = xp.asarray([0.]*4), xp.asarray([xp.pi]*4)
        res = _chandrupatla_root(f, a, b, args=args, maxiter=2)

        ref_flags = xp.asarray([eim._ECONVERGED,
                                eim._ESIGNERR,
                                eim._ECONVERR,
                                eim._EVALUEERR], dtype=xp.int32)
        xp_assert_equal(res.status, ref_flags)

    def test_convergence(self, xp):
        # Test that the convergence tolerances behave as expected
        rng = np.random.default_rng(2585255913088665241)
        p = xp.asarray(rng.random(size=3))
        bracket = (-xp.asarray(5.), xp.asarray(5.))
        args = (p,)
        # start with every tolerance zeroed, then enable one at a time
        kwargs0 = dict(args=args, xatol=0, xrtol=0, fatol=0, frtol=0)

        kwargs = kwargs0.copy()
        kwargs['xatol'] = 1e-3
        res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
        xp_assert_less(res1.xr - res1.xl, xp.full_like(p, 1e-3))
        kwargs['xatol'] = 1e-6
        res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
        xp_assert_less(res2.xr - res2.xl, xp.full_like(p, 1e-6))
        xp_assert_less(res2.xr - res2.xl, res1.xr - res1.xl)

        kwargs = kwargs0.copy()
        kwargs['xrtol'] = 1e-3
        res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
        xp_assert_less(res1.xr - res1.xl, 1e-3 * xp.abs(res1.x))
        kwargs['xrtol'] = 1e-6
        res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
        xp_assert_less(res2.xr - res2.xl, 1e-6 * xp.abs(res2.x))
        xp_assert_less(res2.xr - res2.xl, res1.xr - res1.xl)

        kwargs = kwargs0.copy()
        kwargs['fatol'] = 1e-3
        res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
        xp_assert_less(xp.abs(res1.fun), xp.full_like(p, 1e-3))
        kwargs['fatol'] = 1e-6
        res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
        xp_assert_less(xp.abs(res2.fun), xp.full_like(p, 1e-6))
        xp_assert_less(xp.abs(res2.fun), xp.abs(res1.fun))

        kwargs = kwargs0.copy()
        kwargs['frtol'] = 1e-3
        x1, x2 = bracket
        # frtol is relative to the smaller |f| at the bracket endpoints
        f0 = xp_minimum(xp.abs(self.f(x1, *args)), xp.abs(self.f(x2, *args)))
        res1 = _chandrupatla_root(self.f, *bracket, **kwargs)
        xp_assert_less(xp.abs(res1.fun), 1e-3*f0)
        kwargs['frtol'] = 1e-6
        res2 = _chandrupatla_root(self.f, *bracket, **kwargs)
        xp_assert_less(xp.abs(res2.fun), 1e-6*f0)
        xp_assert_less(xp.abs(res2.fun), xp.abs(res1.fun))

    def test_maxiter_callback(self, xp):
        # Test behavior of `maxiter` parameter and `callback` interface
        p = xp.asarray(0.612814)
        bracket = (xp.asarray(-5.), xp.asarray(5.))
        maxiter = 5

        def f(q, p):
            res = special.ndtr(q) - p
            f.x = q
            f.fun = res
            return res
        f.x = None
        f.fun = None

        res = _chandrupatla_root(f, *bracket, args=(p,), maxiter=maxiter)
        assert not xp.any(res.success)
        assert xp.all(res.nfev == maxiter+2)
        assert xp.all(res.nit == maxiter)

        def callback(res):
            callback.iter += 1
            callback.res = res
            assert hasattr(res, 'x')
            if callback.iter == 0:
                # callback is called once with initial bracket
                assert (res.xl, res.xr) == bracket
            else:
                # exactly one side of the bracket changes per iteration
                changed = (((res.xl == callback.xl) & (res.xr != callback.xr))
                           | ((res.xl != callback.xl) & (res.xr == callback.xr)))
                assert xp.all(changed)

            callback.xl = res.xl
            callback.xr = res.xr
            assert res.status == eim._EINPROGRESS
            xp_assert_equal(self.f(res.xl, p), res.fl)
            xp_assert_equal(self.f(res.xr, p), res.fr)
            xp_assert_equal(self.f(res.x, p), res.fun)
            if callback.iter == maxiter:
                raise StopIteration
        callback.iter = -1  # callback called once before first iteration
        callback.res = None
        callback.xl = None
        callback.xr = None

        res2 = _chandrupatla_root(f, *bracket, args=(p,), callback=callback)

        # terminating with callback is identical to terminating due to maxiter
        # (except for `status`)
        for key in res.keys():
            if key == 'status':
                xp_assert_equal(res[key], xp.asarray(eim._ECONVERR, dtype=xp.int32))
                xp_assert_equal(res2[key], xp.asarray(eim._ECALLBACK, dtype=xp.int32))
            elif key.startswith('_'):
                continue
            else:
                xp_assert_equal(res2[key], res[key])

    @pytest.mark.parametrize('case', _CHANDRUPATLA_TESTS)
    def test_nit_expected(self, case, xp):
        # Test that `_chandrupatla` implements Chandrupatla's algorithm:
        # in all 40 test cases, the number of iterations performed
        # matches the number reported in the original paper.
        f, bracket, root, nfeval, id = case
        # Chandrupatla's criterion is equivalent to
        # abs(x2-x1) < 4*abs(xmin)*xrtol + xatol, but we use the more standard
        # abs(x2-x1) < abs(xmin)*xrtol + xatol. Therefore, set xrtol to 4x
        # that used by Chandrupatla in tests.
        bracket = (xp.asarray(bracket[0], dtype=xp.float64),
                   xp.asarray(bracket[1], dtype=xp.float64))
        root = xp.asarray(root, dtype=xp.float64)

        res = _chandrupatla_root(f, *bracket, xrtol=4e-10, xatol=1e-5)
        xp_assert_close(res.fun, xp.asarray(f(root), dtype=xp.float64),
                        rtol=1e-8, atol=2e-3)
        xp_assert_equal(res.nfev, xp.asarray(nfeval, dtype=xp.int32))

    @pytest.mark.parametrize("root", (0.622, [0.622, 0.623]))
    @pytest.mark.parametrize("dtype", ('float16', 'float32', 'float64'))
    def test_dtype(self, root, dtype, xp):
        # Test that dtypes are preserved
        not_numpy = not is_numpy(xp)
        if not_numpy and dtype == 'float16':
            pytest.skip("`float16` dtype only supported for NumPy arrays.")

        dtype = getattr(xp, dtype, None)
        if dtype is None:
            pytest.skip(f"{xp} does not support {dtype}")

        def f(x, root):
            res = (x - root) ** 3.
            if is_numpy(xp):  # NumPy does not preserve dtype
                return xp.asarray(res, dtype=dtype)
            return res

        a, b = xp.asarray(-3, dtype=dtype), xp.asarray(3, dtype=dtype)
        root = xp.asarray(root, dtype=dtype)
        res = _chandrupatla_root(f, a, b, args=(root,), xatol=1e-3)
        try:
            xp_assert_close(res.x, root, atol=1e-3)
        except AssertionError:
            assert res.x.dtype == dtype
            xp.all(res.fun == 0)

    def test_input_validation(self, xp):
        # Test input validation for appropriate error messages

        def func(x):
            return x

        message = '`func` must be callable.'
        with pytest.raises(ValueError, match=message):
            bracket = xp.asarray(-4), xp.asarray(4)
            _chandrupatla_root(None, *bracket)

        message = 'Abscissae and function output must be real numbers.'
        with pytest.raises(ValueError, match=message):
            bracket = xp.asarray(-4+1j), xp.asarray(4)
            _chandrupatla_root(func, *bracket)

        # raised by `np.broadcast, but the traceback is readable IMO
        message = "...not be broadcast..."  # all messages include this part
        with pytest.raises((ValueError, RuntimeError), match=message):
            bracket = xp.asarray([-2, -3]), xp.asarray([3, 4, 5])
            _chandrupatla_root(func, *bracket)

        message = "The shape of the array returned by `func`..."
        with pytest.raises(ValueError, match=message):
            bracket = xp.asarray([-3, -3]), xp.asarray([5, 5])
            _chandrupatla_root(lambda x: [x[0], x[1], x[1]], *bracket)

        message = 'Tolerances must be non-negative scalars.'
        bracket = xp.asarray(-4), xp.asarray(4)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_root(func, *bracket, xatol=-1)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_root(func, *bracket, xrtol=xp.nan)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_root(func, *bracket, fatol='ekki')
        with pytest.raises(ValueError, match=message):
            _chandrupatla_root(func, *bracket, frtol=xp.nan)

        message = '`maxiter` must be a non-negative integer.'
        with pytest.raises(ValueError, match=message):
            _chandrupatla_root(func, *bracket, maxiter=1.5)
        with pytest.raises(ValueError, match=message):
            _chandrupatla_root(func, *bracket, maxiter=-1)

        message = '`callback` must be callable.'
        with pytest.raises(ValueError, match=message):
            _chandrupatla_root(func, *bracket, callback='shrubbery')

    def test_special_cases(self, xp):
        # Test edge cases and other special cases

        # Test infinite function values
        def f(x):
            return 1 / x + 1 - 1 / (-x + 1)

        a, b = xp.asarray([0.1, 0., 0., 0.1]), xp.asarray([0.9, 1.0, 0.9, 1.0])

        with np.errstate(divide='ignore', invalid='ignore'):
            res = _chandrupatla_root(f, a, b)

        assert xp.all(res.success)
        xp_assert_close(res.x[1:], xp.full((3,), res.x[0]))

        # Test that integers are not passed to `f`
        # (otherwise this would overflow)
        xp_test = array_namespace(a)  # need isdtype
        def f(x):
            assert xp_test.isdtype(x.dtype, "real floating")
            # this would overflow if x were an xp integer dtype
            return x ** 31 - 1

        # note that all inputs are integer type; result is automatically default float
        res = _chandrupatla_root(f, xp.asarray(-7), xp.asarray(5))
        assert res.success
        xp_assert_close(res.x, xp.asarray(1.))

        # Test that if both ends of bracket equal root, algorithm reports
        # convergence.
        def f(x, root):
            return x**2 - root

        root = xp.asarray([0, 1])
        res = _chandrupatla_root(f, xp.asarray(1), xp.asarray(1), args=(root,))
        xp_assert_equal(res.success, xp.asarray([False, True]))
        xp_assert_equal(res.x, xp.asarray([np.nan, 1.]))

        def f(x):
            return 1/x

        with np.errstate(invalid='ignore'):
            inf = xp.asarray(xp.inf)
            res = _chandrupatla_root(f, inf, inf)
        assert res.success
        xp_assert_equal(res.x, xp.asarray(np.inf))

        # Test maxiter = 0. Should do nothing to bracket.
        def f(x):
            return x**3 - 1

        a, b = xp.asarray(-3.), xp.asarray(5.)
        res = _chandrupatla_root(f, a, b, maxiter=0)
        xp_assert_equal(res.success, xp.asarray(False))
        xp_assert_equal(res.status, xp.asarray(-2, dtype=xp.int32))
        xp_assert_equal(res.nit, xp.asarray(0, dtype=xp.int32))
        xp_assert_equal(res.nfev, xp.asarray(2, dtype=xp.int32))
        xp_assert_equal(res.xl, a)
        xp_assert_equal(res.xr, b)
        # The `x` attribute is the one with the smaller function value
        xp_assert_equal(res.x, a)
        # Reverse bracket; check that this is still true
        res = _chandrupatla_root(f, -b, -a, maxiter=0)
        xp_assert_equal(res.x, -a)

        # Test maxiter = 1
        res = _chandrupatla_root(f, a, b, maxiter=1)
        xp_assert_equal(res.success, xp.asarray(True))
        xp_assert_equal(res.status, xp.asarray(0, dtype=xp.int32))
        xp_assert_equal(res.nit, xp.asarray(1, dtype=xp.int32))
        xp_assert_equal(res.nfev, xp.asarray(3, dtype=xp.int32))
        xp_assert_close(res.x, xp.asarray(1.))

        # Test scalar `args` (not in tuple)
        def f(x, c):
            return c*x - 1

        res = _chandrupatla_root(f, xp.asarray(-1), xp.asarray(1), args=xp.asarray(3))
        xp_assert_close(res.x, xp.asarray(1/3))

        # # TODO: Test zero tolerance
        # # ~~What's going on here - why are iterations repeated?~~
        # # tl goes to zero when xatol=xrtol=0. When function is nearly linear,
        # # this causes convergence issues.
        # def f(x):
        #     return np.cos(x)
        #
        # res = _chandrupatla_root(f, 0, np.pi, xatol=0, xrtol=0)
        # assert res.nit < 100
        # xp = np.nextafter(res.x, np.inf)
        # xm = np.nextafter(res.x, -np.inf)
        # assert np.abs(res.fun) < np.abs(f(xp))
        # assert np.abs(res.fun) < np.abs(f(xm))
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_cobyqa.py ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+ from numpy.testing import assert_allclose, assert_equal
4
+
5
+ from scipy.optimize import (
6
+ Bounds,
7
+ LinearConstraint,
8
+ NonlinearConstraint,
9
+ OptimizeResult,
10
+ minimize,
11
+ )
12
+
13
+
14
class TestCOBYQA:
    """Tests of ``minimize`` with ``method='cobyqa'``.

    All tests minimize ``x0**2 + c*|x1|**3`` subject to the circle
    constraint ``x0**2 + x1**2 == 25`` (see :meth:`fun` and :meth:`con`),
    starting near the known optimum so the solver converges within the
    shared evaluation budget of 100.
    """

    def setup_method(self):
        # Common starting point (close to the constrained optimum) and a
        # function-evaluation budget shared by all tests.
        self.x0 = [4.95, 0.66]
        self.options = {'maxfev': 100}

    @staticmethod
    def fun(x, c=1.0):
        """Objective ``x0**2 + c*|x1|**3``; `c` exercises extra args."""
        return x[0]**2 + c * abs(x[1])**3

    @staticmethod
    def con(x):
        """Equality-constraint residual: zero on the circle of radius 5."""
        return x[0]**2 + x[1]**2 - 25.0

    def test_minimize_simple(self):
        # Old-style callbacks receive the iterate `x`; new-style callbacks
        # receive an OptimizeResult. Both count their invocations so we can
        # check the callback fires once per function evaluation.
        class Callback:
            def __init__(self):
                self.n_calls = 0

            def __call__(self, x):
                assert isinstance(x, np.ndarray)
                self.n_calls += 1

        class CallbackNewSyntax:
            def __init__(self):
                self.n_calls = 0

            def __call__(self, intermediate_result):
                assert isinstance(intermediate_result, OptimizeResult)
                self.n_calls += 1

        callback = Callback()
        callback_new_syntax = CallbackNewSyntax()

        # Minimize with method='cobyqa'.
        constraints = NonlinearConstraint(self.con, 0.0, 0.0)
        sol = minimize(
            self.fun,
            self.x0,
            method='cobyqa',
            constraints=constraints,
            callback=callback,
            options=self.options,
        )
        sol_new = minimize(
            self.fun,
            self.x0,
            method='cobyqa',
            constraints=constraints,
            callback=callback_new_syntax,
            options=self.options,
        )
        # Analytic constrained optimum of fun subject to con == 0.
        solution = [np.sqrt(25.0 - 4.0 / 9.0), 2.0 / 3.0]
        assert_allclose(sol.x, solution, atol=1e-4)
        assert sol.success, sol.message
        assert sol.maxcv < 1e-8, sol
        assert sol.nfev <= 100, sol
        assert sol.fun < self.fun(solution) + 1e-3, sol
        assert sol.nfev == callback.n_calls, \
            "Callback is not called exactly once for every function eval."
        # Both callback syntaxes must leave the solve itself unchanged.
        assert_equal(sol.x, sol_new.x)
        assert sol_new.success, sol_new.message
        assert sol.fun == sol_new.fun
        assert sol.maxcv == sol_new.maxcv
        assert sol.nfev == sol_new.nfev
        assert sol.nit == sol_new.nit
        assert sol_new.nfev == callback_new_syntax.n_calls, \
            "Callback is not called exactly once for every function eval."

    def test_minimize_bounds(self):
        # Wrapper objective that additionally verifies every trial point
        # stays inside the bounds.
        def fun_check_bounds(x):
            assert np.all(bounds.lb <= x) and np.all(x <= bounds.ub)
            return self.fun(x)

        # Case where the bounds are not active at the solution.
        bounds = Bounds([4.5, 0.6], [5.0, 0.7])
        constraints = NonlinearConstraint(self.con, 0.0, 0.0)
        sol = minimize(
            fun_check_bounds,
            self.x0,
            method='cobyqa',
            bounds=bounds,
            constraints=constraints,
            options=self.options,
        )
        solution = [np.sqrt(25.0 - 4.0 / 9.0), 2.0 / 3.0]
        assert_allclose(sol.x, solution, atol=1e-4)
        assert sol.success, sol.message
        assert sol.maxcv < 1e-8, sol
        assert np.all(bounds.lb <= sol.x) and np.all(sol.x <= bounds.ub), sol
        assert sol.nfev <= 100, sol
        assert sol.fun < self.fun(solution) + 1e-3, sol

        # Case where the bounds are active at the solution.
        # These bounds exclude the feasible circle, so the solve must fail
        # with a nonzero constraint violation while still respecting bounds.
        bounds = Bounds([5.0, 0.6], [5.5, 0.65])
        sol = minimize(
            fun_check_bounds,
            self.x0,
            method='cobyqa',
            bounds=bounds,
            constraints=constraints,
            options=self.options,
        )
        assert not sol.success, sol.message
        assert sol.maxcv > 0.35, sol
        assert np.all(bounds.lb <= sol.x) and np.all(sol.x <= bounds.ub), sol
        assert sol.nfev <= 100, sol

    def test_minimize_linear_constraints(self):
        # Equality constraint x0 + x1 == 1 expressed as a LinearConstraint.
        constraints = LinearConstraint([1.0, 1.0], 1.0, 1.0)
        sol = minimize(
            self.fun,
            self.x0,
            method='cobyqa',
            constraints=constraints,
            options=self.options,
        )
        # Analytic optimum on the line x0 + x1 == 1.
        solution = [(4 - np.sqrt(7)) / 3, (np.sqrt(7) - 1) / 3]
        assert_allclose(sol.x, solution, atol=1e-4)
        assert sol.success, sol.message
        assert sol.maxcv < 1e-8, sol
        assert sol.nfev <= 100, sol
        assert sol.fun < self.fun(solution) + 1e-3, sol

    def test_minimize_args(self):
        # Extra objective argument c=2.0 scales the |x1|**3 term, moving the
        # constrained optimum accordingly.
        constraints = NonlinearConstraint(self.con, 0.0, 0.0)
        sol = minimize(
            self.fun,
            self.x0,
            args=(2.0,),
            method='cobyqa',
            constraints=constraints,
            options=self.options,
        )
        solution = [np.sqrt(25.0 - 4.0 / 36.0), 2.0 / 6.0]
        assert_allclose(sol.x, solution, atol=1e-4)
        assert sol.success, sol.message
        assert sol.maxcv < 1e-8, sol
        assert sol.nfev <= 100, sol
        assert sol.fun < self.fun(solution, 2.0) + 1e-3, sol

    def test_minimize_array(self):
        # Objective returning the scalar wrapped in an array of `dim`
        # singleton dimensions (shape (), (1,), (1, 1)).
        def fun_array(x, dim):
            f = np.array(self.fun(x))
            return np.reshape(f, (1,) * dim)

        # The argument fun can return an array with a single element.
        bounds = Bounds([4.5, 0.6], [5.0, 0.7])
        constraints = NonlinearConstraint(self.con, 0.0, 0.0)
        sol = minimize(
            self.fun,
            self.x0,
            method='cobyqa',
            bounds=bounds,
            constraints=constraints,
            options=self.options,
        )
        for dim in [0, 1, 2]:
            sol_array = minimize(
                fun_array,
                self.x0,
                args=(dim,),
                method='cobyqa',
                bounds=bounds,
                constraints=constraints,
                options=self.options,
            )
            # Single-element array returns must solve identically to the
            # plain-scalar objective.
            assert_equal(sol.x, sol_array.x)
            assert sol_array.success, sol_array.message
            assert sol.fun == sol_array.fun
            assert sol.maxcv == sol_array.maxcv
            assert sol.nfev == sol_array.nfev
            assert sol.nit == sol_array.nit

        # The argument fun cannot return an array with more than one element.
        with pytest.raises(TypeError):
            minimize(
                lambda x: np.array([self.fun(x), self.fun(x)]),
                self.x0,
                method='cobyqa',
                bounds=bounds,
                constraints=constraints,
                options=self.options,
            )

    def test_minimize_maxfev(self):
        # A budget of 2 evaluations cannot reach the optimum; the solver
        # must report failure while honoring the budget.
        constraints = NonlinearConstraint(self.con, 0.0, 0.0)
        options = {'maxfev': 2}
        sol = minimize(
            self.fun,
            self.x0,
            method='cobyqa',
            constraints=constraints,
            options=options,
        )
        assert not sol.success, sol.message
        assert sol.nfev <= 2, sol

    def test_minimize_maxiter(self):
        # Same idea as test_minimize_maxfev, but limiting iterations.
        constraints = NonlinearConstraint(self.con, 0.0, 0.0)
        options = {'maxiter': 2}
        sol = minimize(
            self.fun,
            self.x0,
            method='cobyqa',
            constraints=constraints,
            options=options,
        )
        assert not sol.success, sol.message
        assert sol.nit <= 2, sol

    def test_minimize_f_target(self):
        # Run once without a target to get a reference objective value,
        # then check that setting f_target to it stops at least as early.
        constraints = NonlinearConstraint(self.con, 0.0, 0.0)
        sol_ref = minimize(
            self.fun,
            self.x0,
            method='cobyqa',
            constraints=constraints,
            options=self.options,
        )
        options = dict(self.options)
        options['f_target'] = sol_ref.fun
        sol = minimize(
            self.fun,
            self.x0,
            method='cobyqa',
            constraints=constraints,
            options=options,
        )
        assert sol.success, sol.message
        assert sol.maxcv < 1e-8, sol
        assert sol.nfev <= sol_ref.nfev, sol
        assert sol.fun <= sol_ref.fun, sol
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_constraints.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ import numpy as np
3
+ from numpy.testing import TestCase, assert_array_equal
4
+ import scipy.sparse as sps
5
+ from scipy.optimize._constraints import (
6
+ Bounds, LinearConstraint, NonlinearConstraint, PreparedConstraint,
7
+ new_bounds_to_old, old_bound_to_new, strict_bounds)
8
+
9
+
10
class TestStrictBounds(TestCase):
    """Tests for ``strict_bounds``.

    ``strict_bounds(lb, ub, enforce_feasibility, m)`` broadcasts the bounds
    to length ``m`` and widens every component whose feasibility is *not*
    enforced to ``(-inf, inf)``, keeping the original limits elsewhere.
    """

    def test_scalarvalue_unique_enforce_feasibility(self):
        n_vars = 3
        lower, upper = 2, 4

        # A single False flag relaxes every component.
        out_lb, out_ub = strict_bounds(lower, upper, False, n_vars)
        assert_array_equal(out_lb, [-np.inf, -np.inf, -np.inf])
        assert_array_equal(out_ub, [np.inf, np.inf, np.inf])

        # A single True flag keeps (and broadcasts) the scalar limits.
        out_lb, out_ub = strict_bounds(lower, upper, True, n_vars)
        assert_array_equal(out_lb, [2, 2, 2])
        assert_array_equal(out_ub, [4, 4, 4])

    def test_vectorvalue_unique_enforce_feasibility(self):
        n_vars = 3
        lower = [1, 2, 3]
        upper = [4, 5, 6]

        # Not enforced: all components relaxed to infinity.
        out_lb, out_ub = strict_bounds(lower, upper, False, n_vars)
        assert_array_equal(out_lb, [-np.inf, -np.inf, -np.inf])
        assert_array_equal(out_ub, [np.inf, np.inf, np.inf])

        # Enforced: the vector bounds pass through untouched.
        out_lb, out_ub = strict_bounds(lower, upper, True, n_vars)
        assert_array_equal(out_lb, [1, 2, 3])
        assert_array_equal(out_ub, [4, 5, 6])

    def test_scalarvalue_vector_enforce_feasibility(self):
        # Per-component flags with scalar limits: only the enforced middle
        # component keeps the (broadcast) scalar bounds.
        out_lb, out_ub = strict_bounds(2, 4, [False, True, False], 3)
        assert_array_equal(out_lb, [-np.inf, 2, -np.inf])
        assert_array_equal(out_ub, [np.inf, 4, np.inf])

    def test_vectorvalue_vector_enforce_feasibility(self):
        # Per-component flags with vector limits: relaxed components become
        # infinite, enforced ones keep their original values.
        out_lb, out_ub = strict_bounds([1, 2, 3], [4, 6, np.inf],
                                       [True, False, True], 3)
        assert_array_equal(out_lb, [1, -np.inf, 3])
        assert_array_equal(out_ub, [4, np.inf, np.inf])
68
+
69
+
70
def test_prepare_constraint_infeasible_x0():
    """PreparedConstraint must reject an x0 that violates a constraint
    whose feasibility is enforced, for all three constraint kinds
    (Bounds, LinearConstraint, NonlinearConstraint); without enforcement
    the same x0 is accepted and `violation` reports the infeasibility."""
    lb = np.array([0, 20, 30])
    ub = np.array([0.5, np.inf, 70])
    x0 = np.array([1, 2, 3])
    # x0 breaks every component; with keep_feasible set, preparation fails.
    enforce_feasibility = np.array([False, True, True], dtype=bool)
    bounds = Bounds(lb, ub, enforce_feasibility)
    pytest.raises(ValueError, PreparedConstraint, bounds, x0)

    # Without enforcement the infeasible x0 is accepted.
    pc = PreparedConstraint(Bounds(lb, ub), [1, 2, 3])
    assert (pc.violation([1, 2, 3]) > 0).any()
    assert (pc.violation([0.25, 21, 31]) == 0).all()

    x0 = np.array([1, 2, 3, 4])
    A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
    enforce_feasibility = np.array([True, True, True], dtype=bool)
    linear = LinearConstraint(A, -np.inf, 0, enforce_feasibility)
    pytest.raises(ValueError, PreparedConstraint, linear, x0)

    pc = PreparedConstraint(LinearConstraint(A, -np.inf, 0),
                            [1, 2, 3, 4])
    assert (pc.violation([1, 2, 3, 4]) > 0).any()
    assert (pc.violation([-10, 2, -10, 4]) == 0).all()

    # Nonlinear constraint equivalent to the linear one above (fun = A @ x).
    def fun(x):
        return A.dot(x)

    def jac(x):
        return A

    def hess(x, v):
        return sps.csr_matrix((4, 4))

    nonlinear = NonlinearConstraint(fun, -np.inf, 0, jac, hess,
                                    enforce_feasibility)
    pytest.raises(ValueError, PreparedConstraint, nonlinear, x0)

    # Preparing at a feasible point succeeds; violation behaves as above.
    pc = PreparedConstraint(nonlinear, [-10, 2, -10, 4])
    assert (pc.violation([1, 2, 3, 4]) > 0).any()
    assert (pc.violation([-10, 2, -10, 4]) == 0).all()
109
+
110
+
111
def test_violation():
    """PreparedConstraint.violation returns, per constraint component, how
    far the value lies outside its [lb, ub] interval (zero if inside)."""
    def cons_f(x):
        return np.array([x[0] ** 2 + x[1], x[0] ** 2 - x[1]])

    nlc = NonlinearConstraint(cons_f, [-1, -0.8500], [2, 2])
    pc = PreparedConstraint(nlc, [0.5, 1])

    # Feasible point: exactly zero violation on both components.
    assert_array_equal(pc.violation([0.5, 1]), [0., 0.])

    # Infeasible points: violation equals the excess beyond the bound.
    cases = [
        ([0.5, 1.2], [0., 0.1]),
        ([1.2, 1.2], [0.64, 0]),
        ([0.1, -1.2], [0.19, 0]),
        ([0.1, 2], [0.01, 1.14]),
    ]
    for point, expected in cases:
        np.testing.assert_almost_equal(pc.violation(point), expected)
127
+
128
+
129
def test_new_bounds_to_old():
    """new_bounds_to_old converts (lb, ub) arrays into the legacy list of
    (min, max) pairs, mapping infinities to None and broadcasting scalars."""
    lb = np.array([-np.inf, 2, 3])
    ub = np.array([3, np.inf, 10])

    cases = [
        # (lower, upper) -> expected legacy pair list
        ((lb, ub), [(None, 3), (2, None), (3, 10)]),
        ((-1, ub), [(-1, 3), (-1, None), (-1, 10)]),          # scalar lb
        ((-np.inf, ub), [(None, 3), (None, None), (None, 10)]),  # no lb
        ((lb, 20), [(None, 20), (2, 20), (3, 20)]),           # scalar ub
        ((lb, np.inf), [(None, None), (2, None), (3, None)]),  # no ub
        ((1, 2), [(1, 2), (1, 2), (1, 2)]),                   # both scalar
        ((-np.inf, np.inf), [(None, None), (None, None), (None, None)]),
    ]
    for (lower, upper), expected in cases:
        assert_array_equal(new_bounds_to_old(lower, upper, 3), expected)
153
+
154
+
155
def test_old_bounds_to_new():
    """old_bound_to_new converts legacy (min, max) pairs into (lb, ub)
    arrays, mapping None to the appropriate signed infinity."""
    lb, ub = old_bound_to_new(([1, 2], (None, 3), (-1, None)))
    assert_array_equal(lb, np.array([1, -np.inf, -1]))
    assert_array_equal(ub, np.array([2, 3, np.inf]))

    # Single-element array entries are accepted alongside plain numbers.
    lb, ub = old_bound_to_new([(-np.inf, np.inf),
                              (np.array([1]), np.array([1]))])
    assert_array_equal(lb, [-np.inf, 1])
    assert_array_equal(ub, [np.inf, 1])
169
+
170
+
171
class TestBounds:
    """Tests for the public `Bounds` constraint container."""

    def test_repr(self):
        # repr must round-trip through eval for scalar, mixed, and
        # array-valued bounds (with and without keep_feasible).
        # so that eval works
        from numpy import array, inf  # noqa: F401
        for args in (
            (-1.0, 5.0),
            (-1.0, np.inf, True),
            (np.array([1.0, -np.inf]), np.array([2.0, np.inf])),
            (np.array([1.0, -np.inf]), np.array([2.0, np.inf]),
             np.array([True, False])),
        ):
            bounds = Bounds(*args)
            bounds2 = eval(repr(Bounds(*args)))
            assert_array_equal(bounds.lb, bounds2.lb)
            assert_array_equal(bounds.ub, bounds2.ub)
            assert_array_equal(bounds.keep_feasible, bounds2.keep_feasible)

    def test_array(self):
        # gh13501: list inputs must be converted to ndarrays on construction.
        b = Bounds(lb=[0.0, 0.0], ub=[1.0, 1.0])
        assert isinstance(b.lb, np.ndarray)
        assert isinstance(b.ub, np.ndarray)

    def test_defaults(self):
        # Bounds() defaults to the unbounded interval (-inf, inf).
        b1 = Bounds()
        b2 = Bounds(np.asarray(-np.inf), np.asarray(np.inf))
        assert b1.lb == b2.lb
        assert b1.ub == b2.ub

    def test_input_validation(self):
        # Sparse inputs and non-broadcastable shapes are rejected with
        # specific error messages.
        message = "Lower and upper bounds must be dense arrays."
        with pytest.raises(ValueError, match=message):
            Bounds(sps.coo_array([1, 2]), [1, 2])
        with pytest.raises(ValueError, match=message):
            Bounds([1, 2], sps.coo_array([1, 2]))

        message = "`keep_feasible` must be a dense array."
        with pytest.raises(ValueError, match=message):
            Bounds([1, 2], [1, 2], keep_feasible=sps.coo_array([True, True]))

        message = "`lb`, `ub`, and `keep_feasible` must be broadcastable."
        with pytest.raises(ValueError, match=message):
            Bounds([1, 2], [1, 2, 3])

    def test_residual(self):
        # residual(x) returns (x - lb, ub - x) elementwise.
        bounds = Bounds(-2, 4)
        x0 = [-1, 2]
        np.testing.assert_allclose(bounds.residual(x0), ([1, 4], [5, 2]))
219
+
220
+
221
class TestLinearConstraint:
    """Tests for the public `LinearConstraint` container."""

    def test_defaults(self):
        # Omitting lb/ub defaults to the unbounded interval (-inf, inf).
        A = np.eye(4)
        lc = LinearConstraint(A)
        lc2 = LinearConstraint(A, -np.inf, np.inf)
        assert_array_equal(lc.lb, lc2.lb)
        assert_array_equal(lc.ub, lc2.ub)

    def test_input_validation(self):
        # Shape mismatches, sparse limits, and non-2-D matrices are
        # rejected with specific error messages.
        A = np.eye(4)
        message = "`lb`, `ub`, and `keep_feasible` must be broadcastable"
        with pytest.raises(ValueError, match=message):
            LinearConstraint(A, [1, 2], [1, 2, 3])

        message = "Constraint limits must be dense arrays"
        with pytest.raises(ValueError, match=message):
            LinearConstraint(A, sps.coo_array([1, 2]), [2, 3])
        with pytest.raises(ValueError, match=message):
            LinearConstraint(A, [1, 2], sps.coo_array([2, 3]))

        message = "`keep_feasible` must be a dense array"
        with pytest.raises(ValueError, match=message):
            keep_feasible = sps.coo_array([True, True])
            LinearConstraint(A, [1, 2], [2, 3], keep_feasible=keep_feasible)

        A = np.empty((4, 3, 5))
        message = "`A` must have exactly two dimensions."
        with pytest.raises(ValueError, match=message):
            LinearConstraint(A)

    def test_residual(self):
        # residual(x) returns (A@x - lb, ub - A@x) elementwise.
        A = np.eye(2)
        lc = LinearConstraint(A, -2, 4)
        x0 = [-1, 2]
        np.testing.assert_allclose(lc.residual(x0), ([1, 4], [5, 2]))
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_cython_optimize.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Test Cython optimize zeros API functions: ``bisect``, ``ridder``, ``brenth``,
3
+ and ``brentq`` in `scipy.optimize.cython_optimize`, by finding the roots of a
4
+ 3rd order polynomial given a sequence of constant terms, ``a0``, and fixed 1st,
5
+ 2nd, and 3rd order terms in ``args``.
6
+
7
+ .. math::
8
+
9
+ f(x, a0, args) = ((args[2]*x + args[1])*x + args[0])*x + a0
10
+
11
+ The 3rd order polynomial function is written in Cython and called in a Python
12
+ wrapper named after the zero function. See the private ``_zeros`` Cython module
13
+ in `scipy.optimize.cython_optimze` for more information.
14
+ """
15
+
16
+ import numpy.testing as npt
17
+ from scipy.optimize.cython_optimize import _zeros
18
+
19
+ # CONSTANTS
20
+ # Solve x**3 - A0 = 0 for A0 = [2.0, 2.1, ..., 2.9].
21
+ # The ARGS have 3 elements just to show how this could be done for any cubic
22
+ # polynomial.
23
+ A0 = tuple(-2.0 - x/10.0 for x in range(10)) # constant term
24
+ ARGS = (0.0, 0.0, 1.0) # 1st, 2nd, and 3rd order terms
25
+ XLO, XHI = 0.0, 2.0 # first and second bounds of zeros functions
26
+ # absolute and relative tolerances and max iterations for zeros functions
27
+ XTOL, RTOL, MITR = 0.001, 0.001, 10
28
+ EXPECTED = [(-a0) ** (1.0/3.0) for a0 in A0]
29
+ # = [1.2599210498948732,
30
+ # 1.2805791649874942,
31
+ # 1.300591446851387,
32
+ # 1.3200061217959123,
33
+ # 1.338865900164339,
34
+ # 1.3572088082974532,
35
+ # 1.375068867074141,
36
+ # 1.3924766500838337,
37
+ # 1.4094597464129783,
38
+ # 1.4260431471424087]
39
+
40
+
41
+ # test bisect
42
+ def test_bisect():
43
+ npt.assert_allclose(
44
+ EXPECTED,
45
+ list(
46
+ _zeros.loop_example('bisect', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
47
+ ),
48
+ rtol=RTOL, atol=XTOL
49
+ )
50
+
51
+
52
+ # test ridder
53
+ def test_ridder():
54
+ npt.assert_allclose(
55
+ EXPECTED,
56
+ list(
57
+ _zeros.loop_example('ridder', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
58
+ ),
59
+ rtol=RTOL, atol=XTOL
60
+ )
61
+
62
+
63
+ # test brenth
64
+ def test_brenth():
65
+ npt.assert_allclose(
66
+ EXPECTED,
67
+ list(
68
+ _zeros.loop_example('brenth', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
69
+ ),
70
+ rtol=RTOL, atol=XTOL
71
+ )
72
+
73
+
74
+ # test brentq
75
+ def test_brentq():
76
+ npt.assert_allclose(
77
+ EXPECTED,
78
+ list(
79
+ _zeros.loop_example('brentq', A0, ARGS, XLO, XHI, XTOL, RTOL, MITR)
80
+ ),
81
+ rtol=RTOL, atol=XTOL
82
+ )
83
+
84
+
85
+ # test brentq with full output
86
+ def test_brentq_full_output():
87
+ output = _zeros.full_output_example(
88
+ (A0[0],) + ARGS, XLO, XHI, XTOL, RTOL, MITR)
89
+ npt.assert_allclose(EXPECTED[0], output['root'], rtol=RTOL, atol=XTOL)
90
+ npt.assert_equal(6, output['iterations'])
91
+ npt.assert_equal(7, output['funcalls'])
92
+ npt.assert_equal(0, output['error_num'])
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiable_functions.py ADDED
@@ -0,0 +1,803 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ import platform
3
+ import numpy as np
4
+ from numpy.testing import (TestCase, assert_array_almost_equal,
5
+ assert_array_equal, assert_, assert_allclose,
6
+ assert_equal)
7
+ from scipy._lib._gcutils import assert_deallocated
8
+ from scipy.sparse import csr_matrix
9
+ from scipy.sparse.linalg import LinearOperator
10
+ from scipy.optimize._differentiable_functions import (ScalarFunction,
11
+ VectorFunction,
12
+ LinearVectorFunction,
13
+ IdentityVectorFunction)
14
+ from scipy.optimize import rosen, rosen_der, rosen_hess
15
+ from scipy.optimize._hessian_update_strategy import BFGS
16
+
17
+
18
+ class ExScalarFunction:
19
+
20
+ def __init__(self):
21
+ self.nfev = 0
22
+ self.ngev = 0
23
+ self.nhev = 0
24
+
25
+ def fun(self, x):
26
+ self.nfev += 1
27
+ return 2*(x[0]**2 + x[1]**2 - 1) - x[0]
28
+
29
+ def grad(self, x):
30
+ self.ngev += 1
31
+ return np.array([4*x[0]-1, 4*x[1]])
32
+
33
+ def hess(self, x):
34
+ self.nhev += 1
35
+ return 4*np.eye(2)
36
+
37
+
38
+ class TestScalarFunction(TestCase):
39
+
40
+ def test_finite_difference_grad(self):
41
+ ex = ExScalarFunction()
42
+ nfev = 0
43
+ ngev = 0
44
+
45
+ x0 = [1.0, 0.0]
46
+ analit = ScalarFunction(ex.fun, x0, (), ex.grad,
47
+ ex.hess, None, (-np.inf, np.inf))
48
+ nfev += 1
49
+ ngev += 1
50
+ assert_array_equal(ex.nfev, nfev)
51
+ assert_array_equal(analit.nfev, nfev)
52
+ assert_array_equal(ex.ngev, ngev)
53
+ assert_array_equal(analit.ngev, nfev)
54
+ approx = ScalarFunction(ex.fun, x0, (), '2-point',
55
+ ex.hess, None, (-np.inf, np.inf))
56
+ nfev += 3
57
+ ngev += 1
58
+ assert_array_equal(ex.nfev, nfev)
59
+ assert_array_equal(analit.nfev+approx.nfev, nfev)
60
+ assert_array_equal(analit.ngev+approx.ngev, ngev)
61
+ assert_array_equal(analit.f, approx.f)
62
+ assert_array_almost_equal(analit.g, approx.g)
63
+
64
+ x = [10, 0.3]
65
+ f_analit = analit.fun(x)
66
+ g_analit = analit.grad(x)
67
+ nfev += 1
68
+ ngev += 1
69
+ assert_array_equal(ex.nfev, nfev)
70
+ assert_array_equal(analit.nfev+approx.nfev, nfev)
71
+ assert_array_equal(analit.ngev+approx.ngev, ngev)
72
+ f_approx = approx.fun(x)
73
+ g_approx = approx.grad(x)
74
+ nfev += 3
75
+ ngev += 1
76
+ assert_array_equal(ex.nfev, nfev)
77
+ assert_array_equal(analit.nfev+approx.nfev, nfev)
78
+ assert_array_equal(analit.ngev+approx.ngev, ngev)
79
+ assert_array_almost_equal(f_analit, f_approx)
80
+ assert_array_almost_equal(g_analit, g_approx)
81
+
82
+ x = [2.0, 1.0]
83
+ g_analit = analit.grad(x)
84
+ ngev += 1
85
+ assert_array_equal(ex.nfev, nfev)
86
+ assert_array_equal(analit.nfev+approx.nfev, nfev)
87
+ assert_array_equal(analit.ngev+approx.ngev, ngev)
88
+
89
+ g_approx = approx.grad(x)
90
+ nfev += 3
91
+ ngev += 1
92
+ assert_array_equal(ex.nfev, nfev)
93
+ assert_array_equal(analit.nfev+approx.nfev, nfev)
94
+ assert_array_equal(analit.ngev+approx.ngev, ngev)
95
+ assert_array_almost_equal(g_analit, g_approx)
96
+
97
+ x = [2.5, 0.3]
98
+ f_analit = analit.fun(x)
99
+ g_analit = analit.grad(x)
100
+ nfev += 1
101
+ ngev += 1
102
+ assert_array_equal(ex.nfev, nfev)
103
+ assert_array_equal(analit.nfev+approx.nfev, nfev)
104
+ assert_array_equal(analit.ngev+approx.ngev, ngev)
105
+ f_approx = approx.fun(x)
106
+ g_approx = approx.grad(x)
107
+ nfev += 3
108
+ ngev += 1
109
+ assert_array_equal(ex.nfev, nfev)
110
+ assert_array_equal(analit.nfev+approx.nfev, nfev)
111
+ assert_array_equal(analit.ngev+approx.ngev, ngev)
112
+ assert_array_almost_equal(f_analit, f_approx)
113
+ assert_array_almost_equal(g_analit, g_approx)
114
+
115
+ x = [2, 0.3]
116
+ f_analit = analit.fun(x)
117
+ g_analit = analit.grad(x)
118
+ nfev += 1
119
+ ngev += 1
120
+ assert_array_equal(ex.nfev, nfev)
121
+ assert_array_equal(analit.nfev+approx.nfev, nfev)
122
+ assert_array_equal(analit.ngev+approx.ngev, ngev)
123
+ f_approx = approx.fun(x)
124
+ g_approx = approx.grad(x)
125
+ nfev += 3
126
+ ngev += 1
127
+ assert_array_equal(ex.nfev, nfev)
128
+ assert_array_equal(analit.nfev+approx.nfev, nfev)
129
+ assert_array_equal(analit.ngev+approx.ngev, ngev)
130
+ assert_array_almost_equal(f_analit, f_approx)
131
+ assert_array_almost_equal(g_analit, g_approx)
132
+
133
    def test_fun_and_grad(self):
        """fun_and_grad must return the same (f, g) pair as separate fun
        and grad evaluations, for both analytic and '3-point'
        finite-difference gradients, without extra grad evaluations."""
        ex = ExScalarFunction()

        def fg_allclose(x, y):
            # Compare a (fun, grad) pair elementwise.
            assert_allclose(x[0], y[0])
            assert_allclose(x[1], y[1])

        # with analytic gradient
        x0 = [2.0, 0.3]
        analit = ScalarFunction(ex.fun, x0, (), ex.grad,
                                ex.hess, None, (-np.inf, np.inf))

        fg = ex.fun(x0), ex.grad(x0)
        fg_allclose(analit.fun_and_grad(x0), fg)
        # Re-evaluating at x0 must reuse the cached gradient.
        assert analit.ngev == 1

        x0[1] = 1.
        fg = ex.fun(x0), ex.grad(x0)
        fg_allclose(analit.fun_and_grad(x0), fg)

        # with finite difference gradient
        x0 = [2.0, 0.3]
        sf = ScalarFunction(ex.fun, x0, (), '3-point',
                            ex.hess, None, (-np.inf, np.inf))
        assert sf.ngev == 1
        fg = ex.fun(x0), ex.grad(x0)
        fg_allclose(sf.fun_and_grad(x0), fg)
        assert sf.ngev == 1

        x0[1] = 1.
        fg = ex.fun(x0), ex.grad(x0)
        fg_allclose(sf.fun_and_grad(x0), fg)
+ fg_allclose(sf.fun_and_grad(x0), fg)
165
+
166
    def test_finite_difference_hess_linear_operator(self):
        """A '2-point' finite-difference Hessian is exposed as a
        LinearOperator whose matvec agrees with the analytic Hessian, and
        all nfev/ngev/nhev counters stay in sync with the underlying
        callable.  A finite-difference hess() evaluation costs 4 grad
        evaluations (visible in the ngev += 4 bookkeeping below)."""
        ex = ExScalarFunction()
        nfev = 0
        ngev = 0
        nhev = 0

        # Fully analytic reference: construction evaluates fun, grad, and
        # hess once each.
        x0 = [1.0, 0.0]
        analit = ScalarFunction(ex.fun, x0, (), ex.grad,
                                ex.hess, None, (-np.inf, np.inf))
        nfev += 1
        ngev += 1
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev, nhev)
        # '2-point' Hessian variant: analytic gradient, FD Hessian.
        approx = ScalarFunction(ex.fun, x0, (), ex.grad,
                                '2-point', None, (-np.inf, np.inf))
        assert_(isinstance(approx.H, LinearOperator))
        # Matvec against several probe vectors matches the dense Hessian.
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_equal(analit.f, approx.f)
            assert_array_almost_equal(analit.g, approx.g)
            assert_array_almost_equal(analit.H.dot(v), approx.H.dot(v))
        nfev += 1
        ngev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

        # Hessian-only re-evaluation at a new point.
        x = [2.0, 1.0]
        H_analit = analit.hess(x)
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
        H_approx = approx.hess(x)
        assert_(isinstance(H_approx, LinearOperator))
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
        ngev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

        x = [2.1, 1.2]
        H_analit = analit.hess(x)
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
        H_approx = approx.hess(x)
        assert_(isinstance(H_approx, LinearOperator))
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
        ngev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

        # grad() followed by hess() at the same point.
        x = [2.5, 0.3]
        _ = analit.grad(x)
        H_analit = analit.hess(x)
        ngev += 1
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
        _ = approx.grad(x)
        H_approx = approx.hess(x)
        assert_(isinstance(H_approx, LinearOperator))
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
        ngev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

        x = [5.2, 2.3]
        _ = analit.grad(x)
        H_analit = analit.hess(x)
        ngev += 1
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
        _ = approx.grad(x)
        H_approx = approx.hess(x)
        assert_(isinstance(H_approx, LinearOperator))
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
        ngev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.ngev, ngev)
        assert_array_equal(analit.ngev+approx.ngev, ngev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
+ assert_array_equal(analit.nhev+approx.nhev, nhev)
289
+
290
+ def test_x_storage_overlap(self):
291
+ # Scalar_Function should not store references to arrays, it should
292
+ # store copies - this checks that updating an array in-place causes
293
+ # Scalar_Function.x to be updated.
294
+
295
+ def f(x):
296
+ return np.sum(np.asarray(x) ** 2)
297
+
298
+ x = np.array([1., 2., 3.])
299
+ sf = ScalarFunction(f, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf))
300
+
301
+ assert x is not sf.x
302
+ assert_equal(sf.fun(x), 14.0)
303
+ assert x is not sf.x
304
+
305
+ x[0] = 0.
306
+ f1 = sf.fun(x)
307
+ assert_equal(f1, 13.0)
308
+
309
+ x[0] = 1
310
+ f2 = sf.fun(x)
311
+ assert_equal(f2, 14.0)
312
+ assert x is not sf.x
313
+
314
+ # now test with a HessianUpdate strategy specified
315
+ hess = BFGS()
316
+ x = np.array([1., 2., 3.])
317
+ sf = ScalarFunction(f, x, (), '3-point', hess, None, (-np.inf, np.inf))
318
+
319
+ assert x is not sf.x
320
+ assert_equal(sf.fun(x), 14.0)
321
+ assert x is not sf.x
322
+
323
+ x[0] = 0.
324
+ f1 = sf.fun(x)
325
+ assert_equal(f1, 13.0)
326
+
327
+ x[0] = 1
328
+ f2 = sf.fun(x)
329
+ assert_equal(f2, 14.0)
330
+ assert x is not sf.x
331
+
332
+ # gh13740 x is changed in user function
333
+ def ff(x):
334
+ x *= x # overwrite x
335
+ return np.sum(x)
336
+
337
+ x = np.array([1., 2., 3.])
338
+ sf = ScalarFunction(
339
+ ff, x, (), '3-point', lambda x: x, None, (-np.inf, np.inf)
340
+ )
341
+ assert x is not sf.x
342
+ assert_equal(sf.fun(x), 14.0)
343
+ assert_equal(sf.x, np.array([1., 2., 3.]))
344
+ assert x is not sf.x
345
+
346
+ def test_lowest_x(self):
347
+ # ScalarFunction should remember the lowest func(x) visited.
348
+ x0 = np.array([2, 3, 4])
349
+ sf = ScalarFunction(rosen, x0, (), rosen_der, rosen_hess,
350
+ None, None)
351
+ sf.fun([1, 1, 1])
352
+ sf.fun(x0)
353
+ sf.fun([1.01, 1, 1.0])
354
+ sf.grad([1.01, 1, 1.0])
355
+ assert_equal(sf._lowest_f, 0.0)
356
+ assert_equal(sf._lowest_x, [1.0, 1.0, 1.0])
357
+
358
+ sf = ScalarFunction(rosen, x0, (), '2-point', rosen_hess,
359
+ None, (-np.inf, np.inf))
360
+ sf.fun([1, 1, 1])
361
+ sf.fun(x0)
362
+ sf.fun([1.01, 1, 1.0])
363
+ sf.grad([1.01, 1, 1.0])
364
+ assert_equal(sf._lowest_f, 0.0)
365
+ assert_equal(sf._lowest_x, [1.0, 1.0, 1.0])
366
+
367
+ def test_float_size(self):
368
+ x0 = np.array([2, 3, 4]).astype(np.float32)
369
+
370
+ # check that ScalarFunction/approx_derivative always send the correct
371
+ # float width
372
+ def rosen_(x):
373
+ assert x.dtype == np.float32
374
+ return rosen(x)
375
+
376
+ sf = ScalarFunction(rosen_, x0, (), '2-point', rosen_hess,
377
+ None, (-np.inf, np.inf))
378
+ res = sf.fun(x0)
379
+ assert res.dtype == np.float32
380
+
381
+
382
class ExVectorialFunction:
    """Vector-valued test function with analytic Jacobian and Hessian.

    The counters ``nfev``/``njev``/``nhev`` record how many times each
    callback was invoked; the tests compare them against the wrappers'
    own bookkeeping.
    """

    def __init__(self):
        # evaluation counters inspected by the tests
        self.nfev = 0
        self.njev = 0
        self.nhev = 0

    def fun(self, x):
        """f: R^2 -> R^2, preserving the dtype of ``x``."""
        self.nfev += 1
        component0 = 2*(x[0]**2 + x[1]**2 - 1) - x[0]
        component1 = 4*(x[0]**3 + x[1]**2 - 4) - 3*x[0]
        return np.array([component0, component1], dtype=x.dtype)

    def jac(self, x):
        """Analytic 2x2 Jacobian of :meth:`fun`."""
        self.njev += 1
        row0 = [4*x[0]-1, 4*x[1]]
        row1 = [12*x[0]**2-3, 8*x[1]]
        return np.array([row0, row1], dtype=x.dtype)

    def hess(self, x, v):
        """Hessian of v . f(x) (linear combination weighted by ``v``)."""
        self.nhev += 1
        second = np.array([[24*x[0], 0],
                           [0, 8]])
        return v[0]*4*np.eye(2) + v[1]*second
class TestVectorialFunction(TestCase):
    """Tests for VectorFunction: memoized vector function/Jacobian/Hessian
    evaluation with optional finite-difference approximation.

    Expected evaluation counts are tracked in local ``nfev``/``njev``/``nhev``
    counters and compared after every call against both the raw callable's
    counters (``ex.*``) and the wrappers' counters, so statement order is
    significant throughout.
    """

    def test_finite_difference_jac(self):
        """Compare the analytic Jacobian against a '2-point' FD approximation
        while auditing evaluation counts after every call."""
        ex = ExVectorialFunction()
        nfev = 0
        njev = 0

        x0 = [1.0, 0.0]
        # analytic Jacobian: construction evaluates fun and jac once each
        analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
                                (-np.inf, np.inf), None)
        nfev += 1
        njev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev, njev)
        # FD Jacobian: base point + one perturbation per dimension = 3 calls
        approx = VectorFunction(ex.fun, x0, '2-point', ex.hess, None, None,
                                (-np.inf, np.inf), None)
        nfev += 3
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_equal(analit.f, approx.f)
        assert_array_almost_equal(analit.J, approx.J)

        x = [10, 0.3]
        f_analit = analit.fun(x)
        J_analit = analit.jac(x)
        nfev += 1
        njev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        f_approx = approx.fun(x)
        J_approx = approx.jac(x)
        nfev += 3
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_almost_equal(f_analit, f_approx)
        # looser tolerance: FD error grows with curvature at x[0] = 10
        assert_array_almost_equal(J_analit, J_approx, decimal=4)

        x = [2.0, 1.0]
        # jac() alone must not trigger a fun() evaluation on the analytic path
        J_analit = analit.jac(x)
        njev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        J_approx = approx.jac(x)
        nfev += 3
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_almost_equal(J_analit, J_approx)

        x = [2.5, 0.3]
        f_analit = analit.fun(x)
        J_analit = analit.jac(x)
        nfev += 1
        njev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        f_approx = approx.fun(x)
        J_approx = approx.jac(x)
        nfev += 3
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_almost_equal(f_analit, f_approx)
        assert_array_almost_equal(J_analit, J_approx)

        x = [2, 0.3]
        # integer component in x: wrapper must still hand floats to `fun`
        f_analit = analit.fun(x)
        J_analit = analit.jac(x)
        nfev += 1
        njev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        f_approx = approx.fun(x)
        J_approx = approx.jac(x)
        nfev += 3
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_almost_equal(f_analit, f_approx)
        assert_array_almost_equal(J_analit, J_approx)

    def test_finite_difference_hess_linear_operator(self):
        """Compare the analytic Hessian against a '2-point' FD LinearOperator
        approximation (built from Jacobian differences), auditing counts."""
        ex = ExVectorialFunction()
        nfev = 0
        njev = 0
        nhev = 0

        x0 = [1.0, 0.0]
        v0 = [1.0, 2.0]
        analit = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
                                (-np.inf, np.inf), None)
        nfev += 1
        njev += 1
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev, njev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev, nhev)
        # FD Hessian is exposed lazily as a LinearOperator; each dot product
        # below costs one extra analytic-Jacobian evaluation, hence njev += 4
        # (1 at construction + 3 matvecs) and nfev += 1 for the base point.
        approx = VectorFunction(ex.fun, x0, ex.jac, '2-point', None, None,
                                (-np.inf, np.inf), None)
        assert_(isinstance(approx.H, LinearOperator))
        for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_equal(analit.f, approx.f)
            assert_array_almost_equal(analit.J, approx.J)
            assert_array_almost_equal(analit.H.dot(p), approx.H.dot(p))
        nfev += 1
        njev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

        x = [2.0, 1.0]
        H_analit = analit.hess(x, v0)
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
        H_approx = approx.hess(x, v0)
        assert_(isinstance(H_approx, LinearOperator))
        for p in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_almost_equal(H_analit.dot(p), H_approx.dot(p),
                                      decimal=5)
        njev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

        x = [2.1, 1.2]
        v = [1.0, 1.0]
        H_analit = analit.hess(x, v)
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
        H_approx = approx.hess(x, v)
        assert_(isinstance(H_approx, LinearOperator))
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v))
        njev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

        x = [2.5, 0.3]
        # refresh the Jacobian first so hess() evaluates at the new point
        _ = analit.jac(x)
        H_analit = analit.hess(x, v0)
        njev += 1
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
        _ = approx.jac(x)
        H_approx = approx.hess(x, v0)
        assert_(isinstance(H_approx, LinearOperator))
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4)
        njev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

        x = [5.2, 2.3]
        v = [2.3, 5.2]
        _ = analit.jac(x)
        H_analit = analit.hess(x, v)
        njev += 1
        nhev += 1
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)
        _ = approx.jac(x)
        H_approx = approx.hess(x, v)
        assert_(isinstance(H_approx, LinearOperator))
        for v in ([1.0, 2.0], [3.0, 4.0], [5.0, 2.0]):
            assert_array_almost_equal(H_analit.dot(v), H_approx.dot(v), decimal=4)
        njev += 4
        assert_array_equal(ex.nfev, nfev)
        assert_array_equal(analit.nfev+approx.nfev, nfev)
        assert_array_equal(ex.njev, njev)
        assert_array_equal(analit.njev+approx.njev, njev)
        assert_array_equal(ex.nhev, nhev)
        assert_array_equal(analit.nhev+approx.nhev, nhev)

    def test_x_storage_overlap(self):
        # VectorFunction should not store references to arrays, it should
        # store copies - this checks that updating the caller's array
        # in-place does NOT alias VectorFunction.x.
        ex = ExVectorialFunction()
        x0 = np.array([1.0, 0.0])

        vf = VectorFunction(ex.fun, x0, '3-point', ex.hess, None, None,
                            (-np.inf, np.inf), None)

        assert x0 is not vf.x
        assert_equal(vf.fun(x0), ex.fun(x0))
        assert x0 is not vf.x

        x0[0] = 2.
        assert_equal(vf.fun(x0), ex.fun(x0))
        assert x0 is not vf.x

        x0[0] = 1.
        assert_equal(vf.fun(x0), ex.fun(x0))
        assert x0 is not vf.x

        # now test with a HessianUpdate strategy specified
        hess = BFGS()
        x0 = np.array([1.0, 0.0])
        vf = VectorFunction(ex.fun, x0, '3-point', hess, None, None,
                            (-np.inf, np.inf), None)

        with pytest.warns(UserWarning):
            # filter UserWarning because ExVectorialFunction is linear and
            # a quasi-Newton approximation is used for the Hessian.
            assert x0 is not vf.x
            assert_equal(vf.fun(x0), ex.fun(x0))
            assert x0 is not vf.x

            x0[0] = 2.
            assert_equal(vf.fun(x0), ex.fun(x0))
            assert x0 is not vf.x

            x0[0] = 1.
            assert_equal(vf.fun(x0), ex.fun(x0))
            assert x0 is not vf.x

    def test_float_size(self):
        """fun/jac results must keep the float width of x0 (float32 here)."""
        ex = ExVectorialFunction()
        x0 = np.array([1.0, 0.0]).astype(np.float32)

        vf = VectorFunction(ex.fun, x0, ex.jac, ex.hess, None, None,
                            (-np.inf, np.inf), None)

        res = vf.fun(x0)
        assert res.dtype == np.float32

        res = vf.jac(x0)
        assert res.dtype == np.float32
def test_LinearVectorFunction():
    """LinearVectorFunction wraps f(x) = A @ x.

    ``sparse_jacobian=None`` follows the input matrix type; True/False
    force the stored Jacobian to be sparse/dense respectively.
    """
    A_dense = np.array([
        [-1, 2, 0],
        [0, 4, 2]
    ])
    x0 = np.zeros(3)
    A_sparse = csr_matrix(A_dense)
    x = np.array([1, -1, 0])
    v = np.array([-1, 1])
    Ax = np.array([-3, -4])

    f1 = LinearVectorFunction(A_dense, x0, None)
    f2 = LinearVectorFunction(A_dense, x0, True)

    # (instance, expected sparsity) for every matrix/flag combination
    sparsity_cases = [
        (f1, False),
        (f2, True),
        (LinearVectorFunction(A_dense, x0, False), False),
        (LinearVectorFunction(A_sparse, x0, None), True),
        (LinearVectorFunction(A_sparse, x0, True), True),
        (LinearVectorFunction(A_sparse, x0, False), False),
    ]
    for func, expect_sparse in sparsity_cases:
        if expect_sparse:
            assert_(func.sparse_jacobian)
        else:
            assert_(not func.sparse_jacobian)

    assert_array_equal(f1.fun(x), Ax)
    assert_array_equal(f2.fun(x), Ax)
    assert_array_equal(f1.jac(x), A_dense)
    assert_array_equal(f2.jac(x).toarray(), A_sparse.toarray())
    # the Hessian of a linear map is identically zero
    assert_array_equal(f1.hess(x, v).toarray(), np.zeros((3, 3)))
def test_LinearVectorFunction_memoization():
    """``f`` is recomputed only by fun(); jac() updates x but must leave
    the memoized function value untouched."""
    A = np.array([[-1, 2, 0], [0, 4, 2]])
    x0 = np.array([1, 2, -1])
    lvf = LinearVectorFunction(A, x0, False)

    # construction evaluates f at x0
    assert_array_equal(x0, lvf.x)
    assert_array_equal(A.dot(x0), lvf.f)

    x1 = np.array([-1, 3, 10])
    assert_array_equal(A, lvf.jac(x1))
    assert_array_equal(x1, lvf.x)
    # f still corresponds to x0: jac() alone must not refresh it
    assert_array_equal(A.dot(x0), lvf.f)
    assert_array_equal(A.dot(x1), lvf.fun(x1))
    assert_array_equal(A.dot(x1), lvf.f)
def test_IdentityVectorFunction():
    """IdentityVectorFunction is f(x) = x; its Jacobian defaults to sparse
    storage (sparse_jacobian=None behaves like True)."""
    x0 = np.zeros(3)

    f_default = IdentityVectorFunction(x0, None)
    f_dense = IdentityVectorFunction(x0, False)
    f_sparse = IdentityVectorFunction(x0, True)

    assert_(f_default.sparse_jacobian)
    assert_(not f_dense.sparse_jacobian)
    assert_(f_sparse.sparse_jacobian)

    x = np.array([-1, 2, 1])
    v = np.array([-2, 3, 0])

    assert_array_equal(f_default.fun(x), x)
    assert_array_equal(f_dense.fun(x), x)

    # Jacobian is the identity; sparse variant needs .toarray() to compare
    assert_array_equal(f_default.jac(x).toarray(), np.eye(3))
    assert_array_equal(f_dense.jac(x), np.eye(3))

    # Hessian of a linear map is identically zero
    assert_array_equal(f_default.hess(x, v).toarray(), np.zeros((3, 3)))
@pytest.mark.skipif(
    platform.python_implementation() == "PyPy",
    reason="assert_deallocate not available on PyPy"
)
def test_ScalarFunctionNoReferenceCycle():
    """Regression test for gh-20768.

    A ScalarFunction must be freed as soon as its last reference dies;
    `assert_deallocated` raises if a reference cycle keeps it alive.
    """
    ex = ExScalarFunction()
    x0 = np.zeros(3)
    with assert_deallocated(lambda: ScalarFunction(ex.fun, x0, (), ex.grad,
                            ex.hess, None, (-np.inf, np.inf))):
        pass
@pytest.mark.skipif(
    platform.python_implementation() == "PyPy",
    reason="assert_deallocate not available on PyPy"
)
@pytest.mark.xfail(reason="TODO remove reference cycle from VectorFunction")
def test_VectorFunctionNoReferenceCycle():
    """Regression test for gh-20768.

    VectorFunction currently does hold a reference cycle, hence the xfail:
    the object is not deallocated when the last external reference dies.
    """
    ex = ExVectorialFunction()
    x0 = [1.0, 0.0]
    with assert_deallocated(lambda: VectorFunction(ex.fun, x0, ex.jac,
                            ex.hess, None, None, (-np.inf, np.inf), None)):
        pass
@pytest.mark.skipif(
    platform.python_implementation() == "PyPy",
    reason="assert_deallocate not available on PyPy"
)
def test_LinearVectorFunctionNoReferenceCycle():
    """Regression test for gh-20768.

    LinearVectorFunction (sparse input variant) must not keep itself alive
    through a reference cycle once the last reference is dropped.
    """
    A_dense = np.array([
        [-1, 2, 0],
        [0, 4, 2]
    ])
    x0 = np.zeros(3)
    A_sparse = csr_matrix(A_dense)
    with assert_deallocated(lambda: LinearVectorFunction(A_sparse, x0, None)):
        pass
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_differentiate.py ADDED
@@ -0,0 +1,512 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import numpy as np
4
+ from numpy.testing import assert_array_less, assert_allclose, assert_equal
5
+
6
+ import scipy._lib._elementwise_iterative_method as eim
7
+ from scipy import stats, optimize
8
+ from scipy.optimize._differentiate import (_differentiate as differentiate,
9
+ _jacobian as jacobian, _EERRORINCREASE)
10
+
11
class TestDifferentiate:
    """Tests for the private elementwise numerical differentiator
    ``scipy.optimize._differentiate._differentiate``.

    Several tests assert exact function-evaluation and iteration counts,
    so statement order and keyword choices are significant.
    """

    def f(self, x):
        # Standard normal CDF; its derivative is the PDF, used as reference.
        return stats.norm().cdf(x)

    @pytest.mark.parametrize('x', [0.6, np.linspace(-0.05, 1.05, 10)])
    def test_basic(self, x):
        # Differentiate the normal CDF and compare against the PDF.
        res = differentiate(self.f, x)
        ref = stats.norm().pdf(x)
        np.testing.assert_allclose(res.df, ref)
        # This would be nice, but doesn't always work out. `error` is an
        # estimate, not a bound.
        assert_array_less(abs(res.df - ref), res.error)
        assert res.x.shape == ref.shape

    @pytest.mark.parametrize('case', stats._distr_params.distcont)
    def test_accuracy(self, case):
        """d/dx CDF must match the PDF for every continuous distribution."""
        distname, params = case
        dist = getattr(stats, distname)(*params)
        x = dist.median() + 0.1
        res = differentiate(dist.cdf, x)
        ref = dist.pdf(x)
        assert_allclose(res.df, ref, atol=1e-10)

    @pytest.mark.parametrize('order', [1, 6])
    @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
    def test_vectorization(self, order, shape):
        # Test for correct functionality, output shapes, and dtypes for
        # various input shapes; elementwise results must match the scalar
        # calls exactly.
        x = np.linspace(-0.05, 1.05, 12).reshape(shape) if shape else 0.6
        n = np.size(x)

        @np.vectorize
        def _differentiate_single(x):
            # scalar reference result for each element of `x`
            return differentiate(self.f, x, order=order)

        def f(x, *args, **kwargs):
            # instrumented wrapper counting iterations and evaluations
            f.nit += 1
            f.feval += 1 if (x.size == n or x.ndim <= 1) else x.shape[-1]
            return self.f(x, *args, **kwargs)
        f.nit = -1   # first call happens before iteration 0
        f.feval = 0

        res = differentiate(f, x, order=order)
        refs = _differentiate_single(x).ravel()

        ref_x = [ref.x for ref in refs]
        assert_allclose(res.x.ravel(), ref_x)
        assert_equal(res.x.shape, shape)

        ref_df = [ref.df for ref in refs]
        assert_allclose(res.df.ravel(), ref_df)
        assert_equal(res.df.shape, shape)

        ref_error = [ref.error for ref in refs]
        assert_allclose(res.error.ravel(), ref_error, atol=1e-12)
        assert_equal(res.error.shape, shape)

        ref_success = [ref.success for ref in refs]
        assert_equal(res.success.ravel(), ref_success)
        assert_equal(res.success.shape, shape)
        assert np.issubdtype(res.success.dtype, np.bool_)

        ref_flag = [ref.status for ref in refs]
        assert_equal(res.status.ravel(), ref_flag)
        assert_equal(res.status.shape, shape)
        assert np.issubdtype(res.status.dtype, np.integer)

        ref_nfev = [ref.nfev for ref in refs]
        assert_equal(res.nfev.ravel(), ref_nfev)
        assert_equal(np.max(res.nfev), f.feval)
        assert_equal(res.nfev.shape, res.x.shape)
        assert np.issubdtype(res.nfev.dtype, np.integer)

        ref_nit = [ref.nit for ref in refs]
        assert_equal(res.nit.ravel(), ref_nit)
        assert_equal(np.max(res.nit), f.nit)
        assert_equal(res.nit.shape, res.x.shape)
        assert np.issubdtype(res.nit.dtype, np.integer)

    def test_flags(self):
        # Test cases that should produce different status flags; show that all
        # can be produced simultaneously.
        rng = np.random.default_rng(5651219684984213)
        def f(xs, js):
            f.nit += 1
            funcs = [lambda x: x - 2.5,  # converges
                     lambda x: np.exp(x)*rng.random(),  # error increases
                     lambda x: np.exp(x),  # reaches maxiter due to order=2
                     lambda x: np.full_like(x, np.nan)[()]]  # stops due to NaN
            res = [funcs[j](x) for x, j in zip(xs, js.ravel())]
            return res
        f.nit = 0

        args = (np.arange(4, dtype=np.int64),)
        res = differentiate(f, [1]*4, rtol=1e-14, order=2, args=args)

        ref_flags = np.array([eim._ECONVERGED,
                              _EERRORINCREASE,
                              eim._ECONVERR,
                              eim._EVALUEERR])
        assert_equal(res.status, ref_flags)

    def test_flags_preserve_shape(self):
        # Same test as above but using `preserve_shape` option to simplify.
        rng = np.random.default_rng(5651219684984213)
        def f(x):
            return [x - 2.5,  # converges
                    np.exp(x)*rng.random(),  # error increases
                    np.exp(x),  # reaches maxiter due to order=2
                    np.full_like(x, np.nan)[()]]  # stops due to NaN

        res = differentiate(f, 1, rtol=1e-14, order=2, preserve_shape=True)

        ref_flags = np.array([eim._ECONVERGED,
                              _EERRORINCREASE,
                              eim._ECONVERR,
                              eim._EVALUEERR])
        assert_equal(res.status, ref_flags)

    def test_preserve_shape(self):
        # Test `preserve_shape` option: f may return extra output dimensions.
        def f(x):
            return [x, np.sin(3*x), x+np.sin(10*x), np.sin(20*x)*(x-1)**2]

        x = 0
        ref = [1, 3*np.cos(3*x), 1+10*np.cos(10*x),
               20*np.cos(20*x)*(x-1)**2 + 2*np.sin(20*x)*(x-1)]
        res = differentiate(f, x, preserve_shape=True)
        assert_allclose(res.df, ref)

    def test_convergence(self):
        # Test that the convergence tolerances behave as expected:
        # tighter atol/rtol must yield strictly smaller errors.
        dist = stats.norm()
        x = 1
        f = dist.cdf
        ref = dist.pdf(x)
        kwargs0 = dict(atol=0, rtol=0, order=4)

        kwargs = kwargs0.copy()
        kwargs['atol'] = 1e-3
        res1 = differentiate(f, x, **kwargs)
        assert_array_less(abs(res1.df - ref), 1e-3)
        kwargs['atol'] = 1e-6
        res2 = differentiate(f, x, **kwargs)
        assert_array_less(abs(res2.df - ref), 1e-6)
        assert_array_less(abs(res2.df - ref), abs(res1.df - ref))

        kwargs = kwargs0.copy()
        kwargs['rtol'] = 1e-3
        res1 = differentiate(f, x, **kwargs)
        assert_array_less(abs(res1.df - ref), 1e-3 * np.abs(ref))
        kwargs['rtol'] = 1e-6
        res2 = differentiate(f, x, **kwargs)
        assert_array_less(abs(res2.df - ref), 1e-6 * np.abs(ref))
        assert_array_less(abs(res2.df - ref), abs(res1.df - ref))

    def test_step_parameters(self):
        # Test that step factors have the expected effect on accuracy
        dist = stats.norm()
        x = 1
        f = dist.cdf
        ref = dist.pdf(x)

        res1 = differentiate(f, x, initial_step=0.5, maxiter=1)
        res2 = differentiate(f, x, initial_step=0.05, maxiter=1)
        assert abs(res2.df - ref) < abs(res1.df - ref)

        res1 = differentiate(f, x, step_factor=2, maxiter=1)
        res2 = differentiate(f, x, step_factor=20, maxiter=1)
        assert abs(res2.df - ref) < abs(res1.df - ref)

        # `step_factor` can be less than 1: `initial_step` is the minimum step
        kwargs = dict(order=4, maxiter=1, step_direction=0)
        res = differentiate(f, x, initial_step=0.5, step_factor=0.5, **kwargs)
        ref = differentiate(f, x, initial_step=1, step_factor=2, **kwargs)
        assert_allclose(res.df, ref.df, rtol=5e-15)

        # This is a similar test for one-sided difference
        kwargs = dict(order=2, maxiter=1, step_direction=1)
        res = differentiate(f, x, initial_step=1, step_factor=2, **kwargs)
        ref = differentiate(f, x, initial_step=1/np.sqrt(2), step_factor=0.5,
                            **kwargs)
        assert_allclose(res.df, ref.df, rtol=5e-15)

        kwargs['step_direction'] = -1
        res = differentiate(f, x, initial_step=1, step_factor=2, **kwargs)
        ref = differentiate(f, x, initial_step=1/np.sqrt(2), step_factor=0.5,
                            **kwargs)
        assert_allclose(res.df, ref.df, rtol=5e-15)

    def test_step_direction(self):
        # test that `step_direction` works as expected: one-sided steps must
        # keep all evaluations inside the function's valid domain [0, 2].
        def f(x):
            y = np.exp(x)
            y[(x < 0) + (x > 2)] = np.nan
            return y

        x = np.linspace(0, 2, 10)
        step_direction = np.zeros_like(x)
        step_direction[x < 0.6], step_direction[x > 1.4] = 1, -1
        res = differentiate(f, x, step_direction=step_direction)
        assert_allclose(res.df, np.exp(x))
        assert np.all(res.success)

    def test_vectorized_step_direction_args(self):
        # test that `step_direction` and `args` are vectorized properly
        def f(x, p):
            return x ** p

        def df(x, p):
            return p * x ** (p - 1)

        # three mutually-broadcastable shapes: (4,1,1), (1,3,1), (1,1,2)
        x = np.array([1, 2, 3, 4]).reshape(-1, 1, 1)
        hdir = np.array([-1, 0, 1]).reshape(1, -1, 1)
        p = np.array([2, 3]).reshape(1, 1, -1)
        res = differentiate(f, x, step_direction=hdir, args=(p,))
        ref = np.broadcast_to(df(x, p), res.df.shape)
        assert_allclose(res.df, ref)

    def test_maxiter_callback(self):
        # Test behavior of `maxiter` parameter and `callback` interface
        x = 0.612814
        dist = stats.norm()
        maxiter = 3

        def f(x):
            res = dist.cdf(x)
            return res

        default_order = 8
        res = differentiate(f, x, maxiter=maxiter, rtol=1e-15)
        assert not np.any(res.success)
        # first iteration costs order+1 evaluations, each later one costs 2
        assert np.all(res.nfev == default_order + 1 + (maxiter - 1)*2)
        assert np.all(res.nit == maxiter)

        def callback(res):
            callback.iter += 1
            callback.res = res
            assert hasattr(res, 'x')
            # every iteration must produce a fresh derivative estimate
            assert res.df not in callback.dfs
            callback.dfs.add(res.df)
            assert res.status == eim._EINPROGRESS
            if callback.iter == maxiter:
                raise StopIteration
        callback.iter = -1  # callback called once before first iteration
        callback.res = None
        callback.dfs = set()

        res2 = differentiate(f, x, callback=callback, rtol=1e-15)
        # terminating with callback is identical to terminating due to maxiter
        # (except for `status`)
        for key in res.keys():
            if key == 'status':
                assert res[key] == eim._ECONVERR
                assert callback.res[key] == eim._EINPROGRESS
                assert res2[key] == eim._ECALLBACK
            else:
                assert res2[key] == callback.res[key] == res[key]

    @pytest.mark.parametrize("hdir", (-1, 0, 1))
    @pytest.mark.parametrize("x", (0.65, [0.65, 0.7]))
    @pytest.mark.parametrize("dtype", (np.float16, np.float32, np.float64))
    def test_dtype(self, hdir, x, dtype):
        # Test that dtypes are preserved throughout: inputs, outputs,
        # and intermediate results seen by `f` and `callback`.
        x = np.asarray(x, dtype=dtype)[()]

        def f(x):
            assert x.dtype == dtype
            return np.exp(x)

        def callback(res):
            assert res.x.dtype == dtype
            assert res.df.dtype == dtype
            assert res.error.dtype == dtype

        res = differentiate(f, x, order=4, step_direction=hdir,
                            callback=callback)
        assert res.x.dtype == dtype
        assert res.df.dtype == dtype
        assert res.error.dtype == dtype
        eps = np.finfo(dtype).eps
        assert_allclose(res.df, np.exp(res.x), rtol=np.sqrt(eps))

    def test_input_validation(self):
        # Test input validation for appropriate error messages

        message = '`func` must be callable.'
        with pytest.raises(ValueError, match=message):
            differentiate(None, 1)

        message = 'Abscissae and function output must be real numbers.'
        with pytest.raises(ValueError, match=message):
            differentiate(lambda x: x, -4+1j)

        message = "When `preserve_shape=False`, the shape of the array..."
        with pytest.raises(ValueError, match=message):
            differentiate(lambda x: [1, 2, 3], [-2, -3])

        message = 'Tolerances and step parameters must be non-negative...'
        with pytest.raises(ValueError, match=message):
            differentiate(lambda x: x, 1, atol=-1)
        with pytest.raises(ValueError, match=message):
            differentiate(lambda x: x, 1, rtol='ekki')
        with pytest.raises(ValueError, match=message):
            differentiate(lambda x: x, 1, initial_step=None)
        with pytest.raises(ValueError, match=message):
            differentiate(lambda x: x, 1, step_factor=object())

        message = '`maxiter` must be a positive integer.'
        with pytest.raises(ValueError, match=message):
            differentiate(lambda x: x, 1, maxiter=1.5)
        with pytest.raises(ValueError, match=message):
            differentiate(lambda x: x, 1, maxiter=0)

        message = '`order` must be a positive integer'
        with pytest.raises(ValueError, match=message):
            differentiate(lambda x: x, 1, order=1.5)
        with pytest.raises(ValueError, match=message):
            differentiate(lambda x: x, 1, order=0)

        message = '`preserve_shape` must be True or False.'
        with pytest.raises(ValueError, match=message):
            differentiate(lambda x: x, 1, preserve_shape='herring')

        message = '`callback` must be callable.'
        with pytest.raises(ValueError, match=message):
            differentiate(lambda x: x, 1, callback='shrubbery')

    def test_special_cases(self):
        # Test edge cases and other special cases

        # Test that integers are not passed to `f`
        # (otherwise this would overflow)
        def f(x):
            assert np.issubdtype(x.dtype, np.floating)
            return x ** 99 - 1

        res = differentiate(f, 7, rtol=1e-10)
        assert res.success
        assert_allclose(res.df, 99*7.**98)

        # Test that if success is achieved in the correct number
        # of iterations if function is a polynomial. Ideally, all polynomials
        # of order 0-2 would get exact result with 0 refinement iterations,
        # all polynomials of order 3-4 would be differentiated exactly after
        # 1 iteration, etc. However, it seems that _differentiate needs an
        # extra iteration to detect convergence based on the error estimate.

        for n in range(6):
            x = 1.5
            def f(x):
                return 2*x**n

            ref = 2*n*x**(n-1)

            res = differentiate(f, x, maxiter=1, order=max(1, n))
            assert_allclose(res.df, ref, rtol=1e-15)
            # a single iteration cannot produce an error estimate
            assert_equal(res.error, np.nan)

            res = differentiate(f, x, order=max(1, n))
            assert res.success
            assert res.nit == 2
            assert_allclose(res.df, ref, rtol=1e-15)

        # Test scalar `args` (not in tuple)
        def f(x, c):
            return c*x - 1

        res = differentiate(f, 2, args=3)
        assert_allclose(res.df, 3)

    @pytest.mark.xfail
    @pytest.mark.parametrize("case", (  # function, evaluation point
        (lambda x: (x - 1) ** 3, 1),
        (lambda x: np.where(x > 1, (x - 1) ** 5, (x - 1) ** 3), 1)
    ))
    def test_saddle_gh18811(self, case):
        # With default settings, _differentiate will not always converge when
        # the true derivative is exactly zero. This tests that specifying a
        # (tight) `atol` alleviates the problem. See discussion in gh-18811.
        atol = 1e-16
        res = differentiate(*case, step_direction=[-1, 0, 1], atol=atol)
        assert np.all(res.success)
        assert_allclose(res.df, 0, atol=atol)
class TestJacobian:
    """Tests for the numerical `jacobian` function.

    Example functions and analytical Jacobians are taken from Wikipedia:
    https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant#Examples

    Each example function ``f`` carries two attributes used by
    ``test_examples``: ``f.mn`` is the pair (number of inputs m, number of
    outputs n), and ``f.ref`` is a callable evaluating the analytical
    Jacobian at the same point.
    """

    # f1: R^2 -> R^2
    def f1(z):
        x, y = z
        return [x ** 2 * y, 5 * x + np.sin(y)]

    def df1(z):
        x, y = z
        return [[2 * x * y, x ** 2], [np.full_like(x, 5), np.cos(y)]]

    f1.mn = 2, 2  # type: ignore[attr-defined]
    f1.ref = df1  # type: ignore[attr-defined]

    # f2: polar -> Cartesian coordinates, R^2 -> R^2
    def f2(z):
        r, phi = z
        return [r * np.cos(phi), r * np.sin(phi)]

    def df2(z):
        r, phi = z
        return [[np.cos(phi), -r * np.sin(phi)],
                [np.sin(phi), r * np.cos(phi)]]

    f2.mn = 2, 2  # type: ignore[attr-defined]
    f2.ref = df2  # type: ignore[attr-defined]

    # f3: spherical -> Cartesian coordinates, R^3 -> R^3
    def f3(z):
        r, phi, th = z
        return [r * np.sin(phi) * np.cos(th), r * np.sin(phi) * np.sin(th),
                r * np.cos(phi)]

    def df3(z):
        r, phi, th = z
        return [[np.sin(phi) * np.cos(th), r * np.cos(phi) * np.cos(th),
                 -r * np.sin(phi) * np.sin(th)],
                [np.sin(phi) * np.sin(th), r * np.cos(phi) * np.sin(th),
                 r * np.sin(phi) * np.cos(th)],
                [np.cos(phi), -r * np.sin(phi), np.zeros_like(r)]]

    f3.mn = 3, 3  # type: ignore[attr-defined]
    f3.ref = df3  # type: ignore[attr-defined]

    # f4: R^3 -> R^4 (non-square Jacobian)
    def f4(x):
        x1, x2, x3 = x
        return [x1, 5 * x3, 4 * x2 ** 2 - 2 * x3, x3 * np.sin(x1)]

    def df4(x):
        x1, x2, x3 = x
        one = np.ones_like(x1)
        return [[one, 0 * one, 0 * one],
                [0 * one, 0 * one, 5 * one],
                [0 * one, 8 * x2, -2 * one],
                [x3 * np.cos(x1), 0 * one, np.sin(x1)]]

    f4.mn = 3, 4  # type: ignore[attr-defined]
    f4.ref = df4  # type: ignore[attr-defined]

    # f5: R^3 -> R^3
    def f5(x):
        x1, x2, x3 = x
        return [5 * x2, 4 * x1 ** 2 - 2 * np.sin(x2 * x3), x2 * x3]

    def df5(x):
        x1, x2, x3 = x
        one = np.ones_like(x1)
        return [[0 * one, 5 * one, 0 * one],
                [8 * x1, -2 * x3 * np.cos(x2 * x3), -2 * x2 * np.cos(x2 * x3)],
                [0 * one, x3, x2]]

    f5.mn = 3, 3  # type: ignore[attr-defined]
    f5.ref = df5  # type: ignore[attr-defined]

    # Rosenbrock function (R^5 -> R) with its analytical gradient from
    # scipy.optimize as the reference.
    rosen = optimize.rosen
    rosen.mn = 5, 1  # type: ignore[attr-defined]
    rosen.ref = optimize.rosen_der  # type: ignore[attr-defined]

    @pytest.mark.parametrize('size', [(), (6,), (2, 3)])
    @pytest.mark.parametrize('func', [f1, f2, f3, f4, f5, rosen])
    def test_examples(self, size, func):
        """Numerical Jacobian matches the analytical reference, including
        for batched evaluation points of shape (m,) + size."""
        rng = np.random.default_rng(458912319542)
        m, n = func.mn
        x = rng.random(size=(m,) + size)
        res = jacobian(func, x).df
        ref = func.ref(x)
        np.testing.assert_allclose(res, ref, atol=1e-10)

    def test_iv(self):
        """Input validation raises informative errors."""
        message = "Argument `x` must be at least 1-D."
        with pytest.raises(ValueError, match=message):
            jacobian(np.sin, 1, atol=-1)

        # Confirm that other parameters are being passed to `_derivative`,
        # which raises an appropriate error message.
        x = np.ones(3)
        func = optimize.rosen
        message = 'Tolerances and step parameters must be non-negative scalars.'
        with pytest.raises(ValueError, match=message):
            jacobian(func, x, atol=-1)
        with pytest.raises(ValueError, match=message):
            jacobian(func, x, rtol=-1)
        with pytest.raises(ValueError, match=message):
            jacobian(func, x, initial_step=-1)
        with pytest.raises(ValueError, match=message):
            jacobian(func, x, step_factor=-1)

        message = '`order` must be a positive integer.'
        with pytest.raises(ValueError, match=message):
            jacobian(func, x, order=-1)

        message = '`maxiter` must be a positive integer.'
        with pytest.raises(ValueError, match=message):
            jacobian(func, x, maxiter=-1)
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_hessian_update_strategy.py ADDED
@@ -0,0 +1,292 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from copy import deepcopy
3
+
4
+ import numpy as np
5
+ import pytest
6
+ from numpy.linalg import norm
7
+ from numpy.testing import (TestCase, assert_array_almost_equal,
8
+ assert_array_equal, assert_array_less)
9
+ from scipy.optimize import (BFGS, SR1)
10
+
11
+
12
class Rosenbrock:
    """Rosenbrock test function with analytical gradient and Hessian.

    Represents the optimization problem:
        minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
    """

    def __init__(self, n=2, random_state=0):
        # Reproducible random start point; the known optimum is all ones.
        rng = np.random.RandomState(random_state)
        self.x0 = rng.uniform(-1, 1, n)
        self.x_opt = np.ones(n)

    def fun(self, x):
        """Objective value at ``x``."""
        x = np.asarray(x)
        terms = 100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0
        return np.sum(terms, axis=0)

    def grad(self, x):
        """Analytical gradient at ``x``."""
        x = np.asarray(x)
        mid, left, right = x[1:-1], x[:-2], x[2:]
        g = np.zeros_like(x)
        g[1:-1] = (200 * (mid - left**2) -
                   400 * (right - mid**2) * mid - 2 * (1 - mid))
        g[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
        g[-1] = 200 * (x[-1] - x[-2]**2)
        return g

    def hess(self, x):
        """Analytical (tridiagonal) Hessian at ``x``."""
        x = np.atleast_1d(x)
        # Off-diagonal bands first, then the main diagonal.
        H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)
        d = np.zeros(len(x), dtype=x.dtype)
        d[0] = 1200 * x[0]**2 - 400 * x[1] + 2
        d[-1] = 200
        d[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
        return H + np.diag(d)
51
+
52
+
53
class TestHessianUpdateStrategy(TestCase):
    """Tests for the BFGS and SR1 quasi-Newton Hessian update strategies."""

    def test_hessian_initialization(self):
        """`init_scale` ('auto', scalar, diagonal, full matrix) must set the
        expected initial matrix for both 'hess' and 'inv_hess' modes."""
        ndims = 5
        symmetric_matrix = np.array([[43, 24, 33, 34, 49],
                                     [24, 36, 44, 15, 44],
                                     [33, 44, 37, 1, 30],
                                     [34, 15, 1, 5, 46],
                                     [49, 44, 30, 46, 22]])
        init_scales = (
            ('auto', np.eye(ndims)),
            (2, np.eye(ndims) * 2),
            (np.arange(1, ndims + 1) * np.eye(ndims),
             np.arange(1, ndims + 1) * np.eye(ndims)),
            (symmetric_matrix, symmetric_matrix),)
        for approx_type in ['hess', 'inv_hess']:
            for init_scale, true_matrix in init_scales:
                # large min_{denominator,curvature} makes them skip an update,
                # so we can have our initial matrix
                quasi_newton = (BFGS(init_scale=init_scale,
                                     min_curvature=1e50,
                                     exception_strategy='skip_update'),
                                SR1(init_scale=init_scale,
                                    min_denominator=1e50))

                for qn in quasi_newton:
                    qn.initialize(ndims, approx_type)
                    B = qn.get_matrix()

                    # Before the first update the matrix is the identity.
                    assert_array_equal(B, np.eye(ndims))
                    # don't test the auto init scale
                    if isinstance(init_scale, str) and init_scale == 'auto':
                        continue

                    # The first update materializes init_scale (the huge
                    # thresholds above make the actual update a no-op).
                    qn.update(np.ones(ndims) * 1e-5, np.arange(ndims) + 0.2)
                    B = qn.get_matrix()
                    assert_array_equal(B, true_matrix)

    # For the list of points used below, it is known
    # that no exception occurs during the
    # Hessian update. Hence no update is
    # skipped or damped.

    def test_initialize_catch_illegal(self):
        """Illegal `init_scale` values raise the documented errors."""
        ndims = 3
        # no complex allowed
        inits_msg_errtype = ((complex(3.14),
                              re.escape("float() argument must be a "
                                        "string or a real number, "
                                        "not 'complex'"),
                              TypeError),

                             (np.array([3.2, 2.3, 1.2]).astype(np.complex128),
                              "init_scale contains complex elements, "
                              "must be real.",
                              TypeError),

                             (np.array([[43, 24, 33],
                                        [24, 36, 44, ],
                                        [33, 44, 37, ]]).astype(np.complex128),
                              "init_scale contains complex elements, "
                              "must be real.",
                              TypeError),

                             # not square
                             (np.array([[43, 55, 66]]),
                              re.escape(
                                  "If init_scale is an array, it must have the "
                                  "dimensions of the hess/inv_hess: (3, 3)."
                                  " Got (1, 3)."),
                              ValueError),

                             # not symmetric
                             (np.array([[43, 24, 33],
                                        [24.1, 36, 44, ],
                                        [33, 44, 37, ]]),
                              re.escape("If init_scale is an array, it must be"
                                        " symmetric (passing scipy.linalg.issymmetric)"
                                        " to be an approximation of a hess/inv_hess."),
                              ValueError),
                             )
        for approx_type in ['hess', 'inv_hess']:
            for init_scale, message, errortype in inits_msg_errtype:
                # The error surfaces on the first update, when init_scale
                # is actually consumed.
                quasi_newton = (BFGS(init_scale=init_scale),
                                SR1(init_scale=init_scale))

                for qn in quasi_newton:
                    qn.initialize(ndims, approx_type)
                    with pytest.raises(errortype, match=message):
                        qn.update(np.ones(ndims), np.arange(ndims))

    def test_rosenbrock_with_no_exception(self):
        """Along a recorded minimization path, the 'hess' and 'inv_hess'
        approximations stay mutual inverses and approach the true Hessian."""
        # Define auxiliary problem
        prob = Rosenbrock(n=5)
        # Define iteration points
        x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
                  [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
                  [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
                  [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
                  [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
                  [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
                  [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184],
                  [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563],
                  [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537],
                  [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809],
                  [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541],
                  [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401],
                  [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230],
                  [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960],
                  [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702],
                  [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661],
                  [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276],
                  [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185],
                  [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338],
                  [0.9190793, 0.8486480, 0.7163332, 0.5083780, 0.26107691],
                  [0.9371223, 0.8762177, 0.7653702, 0.5773109, 0.32181041],
                  [0.9554613, 0.9119893, 0.8282687, 0.6776178, 0.43162744],
                  [0.9545744, 0.9099264, 0.8270244, 0.6822220, 0.45237623],
                  [0.9688112, 0.9351710, 0.8730961, 0.7546601, 0.56622448],
                  [0.9743227, 0.9491953, 0.9005150, 0.8086497, 0.64505437],
                  [0.9807345, 0.9638853, 0.9283012, 0.8631675, 0.73812581],
                  [0.9886746, 0.9777760, 0.9558950, 0.9123417, 0.82726553],
                  [0.9899096, 0.9803828, 0.9615592, 0.9255600, 0.85822149],
                  [0.9969510, 0.9935441, 0.9864657, 0.9726775, 0.94358663],
                  [0.9979533, 0.9960274, 0.9921724, 0.9837415, 0.96626288],
                  [0.9995981, 0.9989171, 0.9974178, 0.9949954, 0.99023356],
                  [1.0002640, 1.0005088, 1.0010594, 1.0021161, 1.00386912],
                  [0.9998903, 0.9998459, 0.9997795, 0.9995484, 0.99916305],
                  [1.0000008, 0.9999905, 0.9999481, 0.9998903, 0.99978047],
                  [1.0000004, 0.9999983, 1.0000001, 1.0000031, 1.00000297],
                  [0.9999995, 1.0000003, 1.0000005, 1.0000001, 1.00000032],
                  [0.9999999, 0.9999997, 0.9999994, 0.9999989, 0.99999786],
                  [0.9999999, 0.9999999, 0.9999999, 0.9999999, 0.99999991]]
        # Get iteration points
        grad_list = [prob.grad(x) for x in x_list]
        delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
                   for i in range(len(x_list)-1)]
        delta_grad = [grad_list[i+1]-grad_list[i]
                      for i in range(len(grad_list)-1)]
        # Check curvature condition (guards the test's own assumption that
        # no BFGS update will be skipped/damped on this path).
        for s, y in zip(delta_x, delta_grad):
            if np.dot(s, y) <= 0:
                raise ArithmeticError()
        # Define QuasiNewton update
        for quasi_newton in (BFGS(init_scale=1, min_curvature=1e-4),
                             SR1(init_scale=1)):
            hess = deepcopy(quasi_newton)
            inv_hess = deepcopy(quasi_newton)
            hess.initialize(len(x_list[0]), 'hess')
            inv_hess.initialize(len(x_list[0]), 'inv_hess')
            # Compare the hessian and its inverse
            for s, y in zip(delta_x, delta_grad):
                hess.update(s, y)
                inv_hess.update(s, y)
                B = hess.get_matrix()
                H = inv_hess.get_matrix()
                assert_array_almost_equal(np.linalg.inv(B), H, decimal=10)
            # The final approximation should be within 10% (in norm) of the
            # true Hessian at the last iterate.
            B_true = prob.hess(x_list[len(delta_x)])
            assert_array_less(norm(B - B_true)/norm(B_true), 0.1)

    def test_SR1_skip_update(self):
        """An SR1 update with a too-small denominator must be skipped."""
        # Define auxiliary problem
        prob = Rosenbrock(n=5)
        # Define iteration points
        x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
                  [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
                  [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
                  [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
                  [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
                  [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
                  [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184],
                  [0.2354930, 0.0443711, 0.0173959, 0.0041872, 0.00794563],
                  [0.4168118, 0.1433867, 0.0111714, 0.0126265, -0.00658537],
                  [0.4681972, 0.2153273, 0.0225249, 0.0152704, -0.00463809],
                  [0.6023068, 0.3346815, 0.0731108, 0.0186618, -0.00371541],
                  [0.6415743, 0.3985468, 0.1324422, 0.0214160, -0.00062401],
                  [0.7503690, 0.5447616, 0.2804541, 0.0539851, 0.00242230],
                  [0.7452626, 0.5644594, 0.3324679, 0.0865153, 0.00454960],
                  [0.8059782, 0.6586838, 0.4229577, 0.1452990, 0.00976702],
                  [0.8549542, 0.7226562, 0.4991309, 0.2420093, 0.02772661],
                  [0.8571332, 0.7285741, 0.5279076, 0.2824549, 0.06030276],
                  [0.8835633, 0.7727077, 0.5957984, 0.3411303, 0.09652185],
                  [0.9071558, 0.8299587, 0.6771400, 0.4402896, 0.17469338]]
        # Get iteration points
        grad_list = [prob.grad(x) for x in x_list]
        delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
                   for i in range(len(x_list)-1)]
        delta_grad = [grad_list[i+1]-grad_list[i]
                      for i in range(len(grad_list)-1)]
        hess = SR1(init_scale=1, min_denominator=1e-2)
        hess.initialize(len(x_list[0]), 'hess')
        # Compare the Hessian and its inverse
        for i in range(len(delta_x)-1):
            s = delta_x[i]
            y = delta_grad[i]
            hess.update(s, y)
        # Test skip update: with min_denominator=1e-2 this step is rejected,
        # so the matrix must be unchanged.
        B = np.copy(hess.get_matrix())
        s = delta_x[17]
        y = delta_grad[17]
        hess.update(s, y)
        B_updated = np.copy(hess.get_matrix())
        assert_array_equal(B, B_updated)

    def test_BFGS_skip_update(self):
        """A BFGS update violating `min_curvature` must be skipped."""
        # Define auxiliary problem
        prob = Rosenbrock(n=5)
        # Define iteration points
        x_list = [[0.0976270, 0.4303787, 0.2055267, 0.0897663, -0.15269040],
                  [0.1847239, 0.0505757, 0.2123832, 0.0255081, 0.00083286],
                  [0.2142498, -0.0188480, 0.0503822, 0.0347033, 0.03323606],
                  [0.2071680, -0.0185071, 0.0341337, -0.0139298, 0.02881750],
                  [0.1533055, -0.0322935, 0.0280418, -0.0083592, 0.01503699],
                  [0.1382378, -0.0276671, 0.0266161, -0.0074060, 0.02801610],
                  [0.1651957, -0.0049124, 0.0269665, -0.0040025, 0.02138184]]
        # Get iteration points
        grad_list = [prob.grad(x) for x in x_list]
        delta_x = [np.array(x_list[i+1])-np.array(x_list[i])
                   for i in range(len(x_list)-1)]
        delta_grad = [grad_list[i+1]-grad_list[i]
                      for i in range(len(grad_list)-1)]
        hess = BFGS(init_scale=1, min_curvature=10)
        hess.initialize(len(x_list[0]), 'hess')
        # Compare the Hessian and its inverse
        for i in range(len(delta_x)-1):
            s = delta_x[i]
            y = delta_grad[i]
            hess.update(s, y)
        # Test skip update: min_curvature=10 rejects this step, so the
        # matrix must be unchanged.
        B = np.copy(hess.get_matrix())
        s = delta_x[5]
        y = delta_grad[5]
        hess.update(s, y)
        B_updated = np.copy(hess.get_matrix())
        assert_array_equal(B, B_updated)
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_isotonic_regression.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_allclose, assert_equal
3
+ import pytest
4
+
5
+ from scipy.optimize._pava_pybind import pava
6
+ from scipy.optimize import isotonic_regression
7
+
8
+
9
class TestIsotonicRegression:
    """Tests for `scipy.optimize.isotonic_regression` and the underlying
    PAVA (pool adjacent violators algorithm) implementation."""

    @pytest.mark.parametrize(
        ("y", "w", "msg"),
        [
            ([[0, 1]], None,
             "array has incorrect number of dimensions: 2; expected 1"),
            ([0, 1], [[1, 2]],
             "Input arrays y and w must have one dimension of equal length"),
            ([0, 1], [1],
             "Input arrays y and w must have one dimension of equal length"),
            (1, [1, 2],
             "Input arrays y and w must have one dimension of equal length"),
            ([1, 2], 1,
             "Input arrays y and w must have one dimension of equal length"),
            ([0, 1], [0, 1],
             "Weights w must be strictly positive"),
        ]
    )
    def test_raise_error(self, y, w, msg):
        """Invalid shapes or non-positive weights raise ValueError."""
        with pytest.raises(ValueError, match=msg):
            isotonic_regression(y=y, weights=w)

    def test_simple_pava(self):
        """Low-level `pava` operates in place on y, w and the block array r."""
        # Test case of Busing 2020
        # https://doi.org/10.18637/jss.v102.c01
        y = np.array([8, 4, 8, 2, 2, 0, 8], dtype=np.float64)
        w = np.ones_like(y)
        r = np.full(shape=y.shape[0] + 1, fill_value=-1, dtype=np.intp)
        pava(y, w, r)
        assert_allclose(y, [4, 4, 4, 4, 4, 4, 8])
        # Only first 2 elements of w are changed.
        assert_allclose(w, [6, 1, 1, 1, 1, 1, 1])
        # Only first 3 elements of r are changed.
        assert_allclose(r, [0, 6, 7, -1, -1, -1, -1, -1])

    @pytest.mark.parametrize("y_dtype", [np.float64, np.float32, np.int64, np.int32])
    @pytest.mark.parametrize("w_dtype", [np.float64, np.float32, np.int64, np.int32])
    @pytest.mark.parametrize("w", [None, "ones"])
    def test_simple_isotonic_regression(self, w, w_dtype, y_dtype):
        """Result is float64 for all supported input dtypes; y is untouched."""
        # Test case of Busing 2020
        # https://doi.org/10.18637/jss.v102.c01
        y = np.array([8, 4, 8, 2, 2, 0, 8], dtype=y_dtype)
        if w is not None:
            w = np.ones_like(y, dtype=w_dtype)
        res = isotonic_regression(y, weights=w)
        assert res.x.dtype == np.float64
        assert res.weights.dtype == np.float64
        assert_allclose(res.x, [4, 4, 4, 4, 4, 4, 8])
        assert_allclose(res.weights, [6, 1])
        assert_allclose(res.blocks, [0, 6, 7])
        # Assert that y was not overwritten
        assert_equal(y, np.array([8, 4, 8, 2, 2, 0, 8], dtype=np.float64))

    @pytest.mark.parametrize("increasing", [True, False])
    def test_linspace(self, increasing):
        """Already-monotone input is returned unchanged, one block per point."""
        n = 10
        y = np.linspace(0, 1, n) if increasing else np.linspace(1, 0, n)
        res = isotonic_regression(y, increasing=increasing)
        assert_allclose(res.x, y)
        assert_allclose(res.blocks, np.arange(n + 1))

    def test_weights(self):
        """Weights act like repeated observations."""
        w = np.array([1, 2, 5, 0.5, 0.5, 0.5, 1, 3])
        y = np.array([3, 2, 1, 10, 9, 8, 20, 10])
        res = isotonic_regression(y, weights=w)
        assert_allclose(res.x, [12/8, 12/8, 12/8, 9, 9, 9, 50/4, 50/4])
        assert_allclose(res.weights, [8, 1.5, 4])
        assert_allclose(res.blocks, [0, 3, 6, 8])

        # weights are like repeated observations, we repeat the 3rd element 5
        # times.
        w2 = np.array([1, 2, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 1, 3])
        y2 = np.array([3, 2, 1, 1, 1, 1, 1, 10, 9, 8, 20, 10])
        res2 = isotonic_regression(y2, weights=w2)
        assert_allclose(np.diff(res2.x[0:7]), 0)
        assert_allclose(res2.x[4:], res.x)
        assert_allclose(res2.weights, res.weights)
        assert_allclose(res2.blocks[1:] - 4, res.blocks[1:])

    def test_against_R_monotone(self):
        """Compare against reference values from the R package `monotone`."""
        y = [0, 6, 8, 3, 5, 2, 1, 7, 9, 4]
        res = isotonic_regression(y)
        # R code
        # library(monotone)
        # options(digits=8)
        # monotone(c(0, 6, 8, 3, 5, 2, 1, 7, 9, 4))
        x_R = [
            0, 4.1666667, 4.1666667, 4.1666667, 4.1666667, 4.1666667,
            4.1666667, 6.6666667, 6.6666667, 6.6666667,
        ]
        assert_allclose(res.x, x_R)
        assert_equal(res.blocks, [0, 1, 7, 10])

        n = 100
        y = np.linspace(0, 1, num=n, endpoint=False)
        y = 5 * y + np.sin(10 * y)
        res = isotonic_regression(y)
        # R code
        # library(monotone)
        # n <- 100
        # y <- 5 * ((1:n)-1)/n + sin(10 * ((1:n)-1)/n)
        # options(digits=8)
        # monotone(y)
        x_R = [
            0.00000000, 0.14983342, 0.29866933, 0.44552021, 0.58941834, 0.72942554,
            0.86464247, 0.99421769, 1.11735609, 1.23332691, 1.34147098, 1.44120736,
            1.53203909, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
            1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
            1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
            1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
            1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
            1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100, 1.57081100,
            1.57081100, 1.57081100, 1.57081100, 1.62418532, 1.71654534, 1.81773256,
            1.92723551, 2.04445967, 2.16873336, 2.29931446, 2.43539782, 2.57612334,
            2.72058450, 2.86783750, 3.01691060, 3.16681390, 3.31654920, 3.46511999,
            3.61154136, 3.75484992, 3.89411335, 4.02843976, 4.15698660, 4.27896904,
            4.39366786, 4.50043662, 4.59870810, 4.68799998, 4.76791967, 4.83816823,
            4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130,
            4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130,
            4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130, 4.86564130,
            4.86564130, 4.86564130, 4.86564130, 4.86564130,
        ]
        assert_allclose(res.x, x_R)

        # Test increasing
        assert np.all(np.diff(res.x) >= 0)

        # Test balance property: sum(y) == sum(x)
        assert_allclose(np.sum(res.x), np.sum(y))

        # Reverse order
        res_inv = isotonic_regression(-y, increasing=False)
        assert_allclose(-res_inv.x, res.x)
        assert_equal(res_inv.blocks, res.blocks)

    def test_readonly(self):
        """Read-only input arrays must be accepted."""
        x = np.arange(3, dtype=float)
        w = np.ones(3, dtype=float)

        x.flags.writeable = False
        w.flags.writeable = False

        res = isotonic_regression(x, weights=w)
        assert np.all(np.isfinite(res.x))
        assert np.all(np.isfinite(res.weights))
        assert np.all(np.isfinite(res.blocks))

    def test_non_contiguous_arrays(self):
        """Non-contiguous (strided) input arrays must be accepted."""
        x = np.arange(10, dtype=float)[::3]
        w = np.ones(10, dtype=float)[::3]
        assert not x.flags.c_contiguous
        assert not x.flags.f_contiguous
        assert not w.flags.c_contiguous
        assert not w.flags.f_contiguous

        res = isotonic_regression(x, weights=w)
        assert np.all(np.isfinite(res.x))
        assert np.all(np.isfinite(res.weights))
        assert np.all(np.isfinite(res.blocks))
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_hessinv.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_allclose
3
+ import scipy.linalg
4
+ from scipy.optimize import minimize
5
+
6
+
7
def test_1():
    """Applying L-BFGS-B's hess_inv operator must match its dense form (1-D)."""
    def fun_and_jac(x):
        # f(x) = x**4, with the gradient returned alongside (jac=True).
        return x**4, 4*x**3

    for gtol in [1e-8, 1e-12, 1e-20]:
        for maxcor in range(20, 35):
            result = minimize(fun=fun_and_jac, jac=True, method='L-BFGS-B',
                              x0=20, options={'gtol': gtol, 'maxcor': maxcor})

            applied = result.hess_inv(np.array([1])).reshape(1, 1)
            dense = result.hess_inv.todense()

            assert_allclose(applied, dense)
20
+
21
+
22
def test_2():
    """L-BFGS-B hess_inv agrees column-wise with itself and roughly with BFGS."""
    H0 = [[3, 0], [1, 2]]

    def quad(x):
        # Quadratic objective x^T inv(H0) x.
        return np.dot(x, np.dot(scipy.linalg.inv(H0), x))

    result1 = minimize(fun=quad, method='L-BFGS-B', x0=[10, 20])
    result2 = minimize(fun=quad, method='BFGS', x0=[10, 20])

    dense = result1.hess_inv.todense()
    # Reconstruct the matrix by applying the operator to unit vectors.
    stacked = np.vstack((
        result1.hess_inv(np.array([1, 0])),
        result1.hess_inv(np.array([0, 1]))))

    # A (2, 1) column input must give the same result as a flat vector.
    assert_allclose(
        result1.hess_inv(np.array([1, 0]).reshape(2, 1)).reshape(-1),
        result1.hess_inv(np.array([1, 0])))
    assert_allclose(dense, stacked)
    assert_allclose(dense, result2.hess_inv, rtol=1e-2, atol=0.03)
42
+
43
+
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_lbfgsb_setulb.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.optimize import _lbfgsb, minimize
3
+
4
+
5
def objfun(x):
    """Simplified objective func to test lbfgsb bound violation.

    The function is only defined at four precomputed points. It returns the
    recorded (objective value, gradient) pair for whichever point ``x``
    matches (via ``np.allclose``) and raises ``ValueError`` otherwise.
    """
    # (point, objective value, gradient) triples recorded from the
    # original optimization run; order matters for the matching below.
    samples = (
        ([0.8750000000000278,
          0.7500000000000153,
          0.9499999999999722,
          0.8214285714285992,
          0.6363636363636085],
         5163.647901211178,
         np.array([-0.5934820547965749,
                   1.6251549718258351,
                   -71.99168459202559,
                   5.346636965797545,
                   37.10732723092604])),
        ([1.0, 0.0, 1.0, 0.0, 0.0],
         5149.8181642072905,
         np.array([-0.43295349282641515,
                   1.008607936794592,
                   18.223666726602975,
                   31.927010036981997,
                   -19.667512518739386])),
        ([1.0,
          0.0,
          0.9889733043149325,
          0.0,
          0.026353554421041155],
         5149.379332309634,
         np.array([-0.4699874455100256,
                   0.9466285353668347,
                   -0.016874360242016825,
                   48.44999161133457,
                   5.819631620590712])),
        ([1.0,
          0.0,
          0.9889917442915558,
          0.0,
          0.020341986743231205],
         5149.374490771297,
         np.array([-0.46970678696829116,
                   0.9612719312174818,
                   0.006129809488833699,
                   48.43557729419473,
                   6.005481418498221])),
    )

    for point, f, g in samples:
        if np.allclose(x, point):
            # Copies so callers cannot mutate the recorded values.
            return (np.copy(f), np.copy(g))

    raise ValueError(
        'Simplified objective function not defined '
        'at requested point')
67
+
68
+
69
def test_setulb_floatround():
    """test if setulb() violates bounds

    checks for violation due to floating point rounding error
    """
    # Problem size and standard L-BFGS-B parameters.
    n = 5
    m = 10
    factr = 1e7
    pgtol = 1e-5
    maxls = 20
    iprint = -1
    nbd = np.full((n,), 2)  # nbd=2: variable has both lower and upper bounds
    low_bnd = np.zeros(n, np.float64)
    upper_bnd = np.ones(n, np.float64)

    # Starting point chosen to trigger the rounding issue (matches the
    # first recorded point in `objfun`).
    x0 = np.array(
        [0.8750000000000278,
         0.7500000000000153,
         0.9499999999999722,
         0.8214285714285992,
         0.6363636363636085])
    x = np.copy(x0)

    f = np.array(0.0, np.float64)
    g = np.zeros(n, np.float64)

    fortran_int = _lbfgsb.types.intvar.dtype

    # Workspace arrays sized as required by the L-BFGS-B Fortran routine.
    wa = np.zeros(2*m*n + 5*n + 11*m*m + 8*m, np.float64)
    iwa = np.zeros(3*n, fortran_int)
    task = np.zeros(1, 'S60')
    csave = np.zeros(1, 'S60')
    lsave = np.zeros(4, fortran_int)
    isave = np.zeros(44, fortran_int)
    dsave = np.zeros(29, np.float64)

    task[:] = b'START'

    for n_iter in range(7):  # 7 steps required to reproduce error
        f, g = objfun(x)

        # setulb advances `x` in place; after every call it must still lie
        # within [low_bnd, upper_bnd].
        _lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
                       pgtol, wa, iwa, task, iprint, csave, lsave,
                       isave, dsave, maxls)

        assert (x <= upper_bnd).all() and (x >= low_bnd).all(), (
            "_lbfgsb.setulb() stepped to a point outside of the bounds")
117
+
118
+
119
+ def test_gh_issue18730():
120
+ # issue 18730 reported that l-bfgs-b did not work with objectives
121
+ # returning single precision gradient arrays
122
+ def fun_single_precision(x):
123
+ x = x.astype(np.float32)
124
+ return np.sum(x**2), (2*x)
125
+
126
+ res = minimize(fun_single_precision, x0=np.array([1., 1.]), jac=True,
127
+ method="l-bfgs-b")
128
+ np.testing.assert_allclose(res.fun, 0., atol=1e-15)
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_least_squares.py ADDED
@@ -0,0 +1,874 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from itertools import product
2
+
3
+ import numpy as np
4
+ from numpy.linalg import norm
5
+ from numpy.testing import (assert_, assert_allclose,
6
+ assert_equal, suppress_warnings)
7
+ import pytest
8
+ from pytest import raises as assert_raises
9
+ from scipy.sparse import issparse, lil_matrix
10
+ from scipy.sparse.linalg import aslinearoperator
11
+
12
+ from scipy.optimize import least_squares, Bounds
13
+ from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES
14
+ from scipy.optimize._lsq.common import EPS, make_strictly_feasible, CL_scaling_vector
15
+
16
+
17
def fun_trivial(x, a=0):
    """Scalar residual (x - a)**2 + 5 with minimum value 5 at x = a."""
    shifted = x - a
    return shifted * shifted + 5.0
19
+
20
+
21
def jac_trivial(x, a=0.0):
    """Analytic derivative of ``fun_trivial``: d/dx (x - a)**2 + 5 = 2(x - a)."""
    return 2 * (x - a)
23
+
24
+
25
def fun_2d_trivial(x):
    """Identity residual in 2-D: returns ``[x[0], x[1]]`` as an ndarray."""
    first, second = x[0], x[1]
    return np.array([first, second])
27
+
28
+
29
def jac_2d_trivial(x):
    """Jacobian of the 2-D identity residual: the 2x2 identity matrix."""
    return np.identity(2)
31
+
32
+
33
def fun_rosenbrock(x):
    """Rosenbrock residuals; the sum of squares has its minimum at (1, 1)."""
    r0 = 10 * (x[1] - x[0] ** 2)
    r1 = 1 - x[0]
    return np.array([r0, r1])
35
+
36
+
37
def jac_rosenbrock(x):
    """Analytic 2x2 Jacobian of the Rosenbrock residuals."""
    row0 = [-20 * x[0], 10]
    row1 = [-1, 0]
    return np.array([row0, row1])
42
+
43
+
44
def jac_rosenbrock_bad_dim(x):
    """Deliberately malformed Rosenbrock Jacobian: 3 rows for 2 residuals.

    Used to check that least_squares rejects a Jacobian whose shape is
    inconsistent with the residual vector.
    """
    rows = [
        [-20 * x[0], 10],
        [-1, 0],
        [0.0, 0.0],
    ]
    return np.array(rows)
50
+
51
+
52
def fun_rosenbrock_cropped(x):
    """First Rosenbrock residual only (m < n case for least_squares)."""
    return fun_rosenbrock(x)[0]
54
+
55
+
56
def jac_rosenbrock_cropped(x):
    """First row of the Rosenbrock Jacobian, matching fun_rosenbrock_cropped."""
    return jac_rosenbrock(x)[0]
58
+
59
+
60
# When x is 1-D array, return is 2-D array.
def fun_wrong_dimensions(x):
    """Residual that illegally returns a 2-D array for 1-D input."""
    return np.array([x, x ** 2, x ** 3])
63
+
64
+
65
def jac_wrong_dimensions(x, a=0.0):
    """Jacobian that illegally has 3 dimensions; used for validation tests."""
    return np.atleast_3d(jac_trivial(x, a=a))
67
+
68
+
69
def fun_bvp(x):
    """Residuals of a discretized 2-D boundary value problem.

    `x` is the flattened n-by-n interior grid; the boundary is held at zero.
    The residual is the 5-point Laplacian stencil plus the cubic term x**3.
    """
    n = int(np.sqrt(x.shape[0]))
    padded = np.zeros((n + 2, n + 2))  # zero boundary ring around the grid
    grid = x.reshape((n, n))
    padded[1:-1, 1:-1] = grid
    residual = (padded[:-2, 1:-1] + padded[2:, 1:-1]
                + padded[1:-1, :-2] + padded[1:-1, 2:]
                - 4 * grid + grid ** 3)
    return residual.ravel()
76
+
77
+
78
class BroydenTridiagonal:
    """Broyden tridiagonal test problem with bounds and a sparse Jacobian.

    Parameters
    ----------
    n : int
        Problem dimension.
    mode : {'sparse', 'operator', 'dense'}
        How ``self.jac`` exposes the Jacobian: as a sparse matrix, as a
        LinearOperator, or as a dense ndarray.
    """

    def __init__(self, n=100, mode='sparse'):
        np.random.seed(0)  # deterministic bounds / starting point

        self.n = n

        self.x0 = -np.ones(n)
        self.lb = np.linspace(-2, -1.5, n)
        self.ub = np.linspace(-0.8, 0.0, n)

        # Jitter bounds and starting point, then push x0 strictly inside.
        self.lb += 0.1 * np.random.randn(n)
        self.ub += 0.1 * np.random.randn(n)

        self.x0 += 0.1 * np.random.randn(n)
        self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub)

        if mode == 'sparse':
            # Tridiagonal sparsity pattern: main, sub- and super-diagonal.
            self.sparsity = lil_matrix((n, n), dtype=int)
            diag = np.arange(n)
            self.sparsity[diag, diag] = 1
            lower = np.arange(1, n)
            self.sparsity[lower, lower - 1] = 1
            upper = np.arange(n - 1)
            self.sparsity[upper, upper + 1] = 1

            self.jac = self._jac
        elif mode == 'operator':
            self.jac = lambda x: aslinearoperator(self._jac(x))
        elif mode == 'dense':
            self.sparsity = None
            self.jac = lambda x: self._jac(x).toarray()
        else:
            assert_(False)

    def fun(self, x):
        """Broyden tridiagonal residuals."""
        f = (3 - x) * x + 1
        f[1:] -= x[:-1]
        f[:-1] -= 2 * x[1:]
        return f

    def _jac(self, x):
        """Tridiagonal Jacobian as a lil_matrix."""
        J = lil_matrix((self.n, self.n))
        diag = np.arange(self.n)
        J[diag, diag] = 3 - 2 * x
        lower = np.arange(1, self.n)
        J[lower, lower - 1] = -1
        upper = np.arange(self.n - 1)
        J[upper, upper + 1] = -2
        return J
127
+
128
+
129
class ExponentialFittingProblem:
    """Provide data and function for exponential fitting in the form
    y = a + exp(b * x) + noise."""

    def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1),
                 n_points=11, random_seed=None):
        np.random.seed(random_seed)
        self.m = n_points  # number of data points
        self.n = 2         # number of fitting parameters (a, b)

        self.p0 = np.zeros(2)
        self.x = np.linspace(x_range[0], x_range[1], n_points)

        # Clean model values plus Gaussian noise...
        self.y = a + np.exp(b * self.x)
        self.y += noise * np.random.randn(self.m)

        # ...and a few strongly corrupted points to exercise robust losses.
        outliers = np.random.randint(0, self.m, n_outliers)
        self.y[outliers] += 50 * noise * np.random.rand(n_outliers)

        self.p_opt = np.array([a, b])

    def fun(self, p):
        """Residuals of the exponential model at parameters ``p``."""
        return p[0] + np.exp(p[1] * self.x) - self.y

    def jac(self, p):
        """Analytic m-by-2 Jacobian of the residuals."""
        J = np.empty((self.m, self.n))
        J[:, 0] = 1
        J[:, 1] = self.x * np.exp(p[1] * self.x)
        return J
158
+
159
+
160
def cubic_soft_l1(z):
    """Callable loss for least_squares: rho(z) = 3 * ((1 + z)**(1/3) - 1).

    Returns a (3, z.size) array with the loss value and its first and
    second derivatives, as required by the `loss` callable protocol.
    """
    rho = np.empty((3, z.size))

    shifted = 1 + z
    rho[0] = 3 * (shifted ** (1 / 3) - 1)   # loss value
    rho[1] = shifted ** (-2 / 3)            # first derivative
    rho[2] = -2 / 3 * shifted ** (-5 / 3)   # second derivative

    return rho
169
+
170
+
171
+ LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1]
172
+
173
+
174
class BaseMixin:
    """Tests shared by all solvers; subclasses define ``method``."""

    def test_basic(self):
        # Test that the basic calling sequence works.
        res = least_squares(fun_trivial, 2., method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.fun, fun_trivial(res.x))

    def test_args_kwargs(self):
        # Test that args and kwargs are passed correctly to the functions.
        a = 3.0
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(
                    UserWarning,
                    "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'"
                )
                res = least_squares(fun_trivial, 2.0, jac, args=(a,),
                                    method=self.method)
                res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
                                     method=self.method)

            assert_allclose(res.x, a, rtol=1e-4)
            assert_allclose(res1.x, a, rtol=1e-4)

        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      args=(3, 4,), method=self.method)
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      kwargs={'kaboom': 3}, method=self.method)

    def test_jac_options(self):
        # Every accepted jac spec converges; an unknown one raises.
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(
                    UserWarning,
                    "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'"
                )
                res = least_squares(fun_trivial, 2.0, jac, method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)

        assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
                      method=self.method)

    def test_nfev_options(self):
        for max_nfev in [None, 20]:
            res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
                                method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)

    def test_x_scale_options(self):
        # Valid x_scale values succeed; invalid ones raise ValueError.
        for x_scale in [1.0, np.array([0.5]), 'jac']:
            res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
            assert_allclose(res.x, 0)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale='auto', method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=-1.0, method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=None, method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=1.0+2.0j, method=self.method)

    def test_diff_step(self):
        # res1 and res2 should be equivalent.
        # res2 and res3 should be different.
        res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
                             method=self.method)
        res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
                             method=self.method)
        res3 = least_squares(fun_trivial, 2.0,
                             diff_step=None, method=self.method)
        assert_allclose(res1.x, 0, atol=1e-4)
        assert_allclose(res2.x, 0, atol=1e-4)
        assert_allclose(res3.x, 0, atol=1e-4)
        assert_equal(res1.x, res2.x)
        assert_equal(res1.nfev, res2.nfev)

    def test_incorrect_options_usage(self):
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      method=self.method, options={'no_such_option': 100})
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      method=self.method, options={'max_nfev': 100})

    def test_full_result(self):
        # MINPACK doesn't work very well with factor=100 on this problem,
        # thus using low 'atol'.
        res = least_squares(fun_trivial, 2.0, method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.cost, 12.5)
        assert_allclose(res.fun, 5)
        assert_allclose(res.jac, 0, atol=1e-4)
        assert_allclose(res.grad, 0, atol=1e-2)
        assert_allclose(res.optimality, 0, atol=1e-2)
        assert_equal(res.active_mask, 0)
        if self.method == 'lm':
            assert_(res.nfev < 30)
            assert_(res.njev is None)
        else:
            assert_(res.nfev < 10)
            assert_(res.njev < 10)
        assert_(res.status > 0)
        assert_(res.success)

    def test_full_result_single_fev(self):
        # MINPACK checks the number of nfev after the iteration,
        # so it's hard to tell what he is going to compute.
        if self.method == 'lm':
            return

        res = least_squares(fun_trivial, 2.0, method=self.method,
                            max_nfev=1)
        assert_equal(res.x, np.array([2]))
        assert_equal(res.cost, 40.5)
        assert_equal(res.fun, np.array([9]))
        assert_equal(res.jac, np.array([[4]]))
        assert_equal(res.grad, np.array([36]))
        assert_equal(res.optimality, 36)
        assert_equal(res.active_mask, np.array([0]))
        assert_equal(res.nfev, 1)
        assert_equal(res.njev, 1)
        assert_equal(res.status, 0)
        assert_equal(res.success, 0)

    def test_rosenbrock(self):
        x0 = [-2, 1]
        x_opt = [1, 1]
        for jac, x_scale, tr_solver in product(
                ['2-point', '3-point', 'cs', jac_rosenbrock],
                [1.0, np.array([1.0, 0.2]), 'jac'],
                ['exact', 'lsmr']):
            with suppress_warnings() as sup:
                sup.filter(
                    UserWarning,
                    "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'"
                )
                res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale,
                                    tr_solver=tr_solver, method=self.method)
            assert_allclose(res.x, x_opt)

    def test_rosenbrock_cropped(self):
        x0 = [-2, 1]
        if self.method == 'lm':
            # 'lm' cannot handle m < n problems.
            assert_raises(ValueError, least_squares, fun_rosenbrock_cropped,
                          x0, method='lm')
        else:
            for jac, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock_cropped],
                    [1.0, np.array([1.0, 0.2]), 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(
                    fun_rosenbrock_cropped, x0, jac, x_scale=x_scale,
                    tr_solver=tr_solver, method=self.method)
                assert_allclose(res.cost, 0, atol=1e-14)

    def test_fun_wrong_dimensions(self):
        assert_raises(ValueError, least_squares, fun_wrong_dimensions,
                      2.0, method=self.method)

    def test_jac_wrong_dimensions(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, jac_wrong_dimensions, method=self.method)

    def test_fun_and_jac_inconsistent_dimensions(self):
        x0 = [1, 2]
        assert_raises(ValueError, least_squares, fun_rosenbrock, x0,
                      jac_rosenbrock_bad_dim, method=self.method)

    def test_x0_multidimensional(self):
        x0 = np.ones(4).reshape(2, 2)
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)

    def test_x0_complex_scalar(self):
        x0 = 2.0 + 0.0*1j
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)

    def test_x0_complex_array(self):
        x0 = [1.0, 2.0 + 0.0*1j]
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)

    def test_bvp(self):
        # This test was introduced with fix #5556. It turned out that
        # dogbox solver had a bug with trust-region radius update, which
        # could block its progress and create an infinite loop. And this
        # discrete boundary value problem is the one which triggers it.
        n = 10
        x0 = np.ones(n**2)
        if self.method == 'lm':
            max_nfev = 5000  # To account for Jacobian estimation.
        else:
            max_nfev = 100
        res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method,
                            max_nfev=max_nfev)

        assert_(res.nfev < max_nfev)
        assert_(res.cost < 0.5)

    def test_error_raised_when_all_tolerances_below_eps(self):
        # Test that all 0 tolerances are not allowed.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      method=self.method, ftol=None, xtol=None, gtol=None)

    def test_convergence_with_only_one_tolerance_enabled(self):
        if self.method == 'lm':
            return  # should not do test
        x0 = [-2, 1]
        x_opt = [1, 1]
        for ftol, xtol, gtol in [(1e-8, None, None),
                                 (None, 1e-8, None),
                                 (None, None, 1e-8)]:
            res = least_squares(fun_rosenbrock, x0, jac=jac_rosenbrock,
                                ftol=ftol, gtol=gtol, xtol=xtol,
                                method=self.method)
            assert_allclose(res.x, x_opt)
389
+
390
+
391
class BoundsMixin:
    """Bound-constraint tests shared by the solvers that support bounds."""

    def test_inconsistent(self):
        # lb > ub must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(10.0, 0.0), method=self.method)

    def test_infeasible(self):
        # x0 outside the bounds must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(3., 4), method=self.method)

    def test_wrong_number(self):
        assert_raises(ValueError, least_squares, fun_trivial, 2.,
                      bounds=(1., 2, 3), method=self.method)

    def test_inconsistent_shape(self):
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(1.0, [2.0, 3.0]), method=self.method)
        # 1-D array wont't be broadcasted
        assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0],
                      bounds=([0.0], [3.0, 4.0]), method=self.method)

    def test_in_bounds(self):
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            # Unconstrained minimum lies inside the bounds.
            res = least_squares(fun_trivial, 2.0, jac=jac,
                                bounds=(-1.0, 3.0), method=self.method)
            assert_allclose(res.x, 0.0, atol=1e-4)
            assert_equal(res.active_mask, [0])
            assert_(-1 <= res.x <= 3)
            # Minimum is clipped by the lower bound.
            res = least_squares(fun_trivial, 2.0, jac=jac,
                                bounds=(0.5, 3.0), method=self.method)
            assert_allclose(res.x, 0.5, atol=1e-4)
            assert_equal(res.active_mask, [-1])
            assert_(0.5 <= res.x <= 3)

    def test_bounds_shape(self):
        # Bounds given as a (lb, ub) tuple and as a Bounds instance must
        # behave identically, including scalar broadcasting.
        def get_bounds_direct(lb, ub):
            return lb, ub

        def get_bounds_instances(lb, ub):
            return Bounds(lb, ub)

        for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]:
            for bounds_func in [get_bounds_direct, get_bounds_instances]:
                x0 = [1.0, 1.0]
                res = least_squares(fun_2d_trivial, x0, jac=jac)
                assert_allclose(res.x, [0.0, 0.0])
                res = least_squares(fun_2d_trivial, x0, jac=jac,
                                    bounds=bounds_func(0.5, [2.0, 2.0]),
                                    method=self.method)
                assert_allclose(res.x, [0.5, 0.5])
                res = least_squares(fun_2d_trivial, x0, jac=jac,
                                    bounds=bounds_func([0.3, 0.2], 3.0),
                                    method=self.method)
                assert_allclose(res.x, [0.3, 0.2])
                res = least_squares(
                    fun_2d_trivial, x0, jac=jac,
                    bounds=bounds_func([-1, 0.5], [1.0, 3.0]),
                    method=self.method)
                assert_allclose(res.x, [0.0, 0.5], atol=1e-5)

    def test_bounds_instances(self):
        res = least_squares(fun_trivial, 0.5, bounds=Bounds())
        assert_allclose(res.x, 0.0, atol=1e-4)

        res = least_squares(fun_trivial, 3.0, bounds=Bounds(lb=1.0))
        assert_allclose(res.x, 1.0, atol=1e-4)

        res = least_squares(fun_trivial, 0.5, bounds=Bounds(lb=-1.0, ub=1.0))
        assert_allclose(res.x, 0.0, atol=1e-4)

        res = least_squares(fun_trivial, -3.0, bounds=Bounds(ub=-1.0))
        assert_allclose(res.x, -1.0, atol=1e-4)

        res = least_squares(fun_2d_trivial, [0.5, 0.5],
                            bounds=Bounds(lb=[-1.0, -1.0], ub=1.0))
        assert_allclose(res.x, [0.0, 0.0], atol=1e-5)

        res = least_squares(fun_2d_trivial, [0.5, 0.5],
                            bounds=Bounds(lb=[0.1, 0.1]))
        assert_allclose(res.x, [0.1, 0.1], atol=1e-5)

    @pytest.mark.fail_slow(5)
    def test_rosenbrock_bounds(self):
        x0_1 = np.array([-2.0, 1.0])
        x0_2 = np.array([2.0, 2.0])
        x0_3 = np.array([-2.0, 2.0])
        x0_4 = np.array([0.0, 2.0])
        x0_5 = np.array([-1.2, 1.0])
        problems = [
            (x0_1, ([-np.inf, -1.5], np.inf)),
            (x0_2, ([-np.inf, 1.5], np.inf)),
            (x0_3, ([-np.inf, 1.5], np.inf)),
            (x0_4, ([-np.inf, 1.5], [1.0, np.inf])),
            (x0_2, ([1.0, 1.5], [3.0, 3.0])),
            (x0_5, ([-50.0, 0.0], [0.5, 100]))
        ]
        for x0, bounds in problems:
            for jac, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock],
                    [1.0, [1.0, 0.5], 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(fun_rosenbrock, x0, jac, bounds,
                                    x_scale=x_scale, tr_solver=tr_solver,
                                    method=self.method)
                assert_allclose(res.optimality, 0.0, atol=1e-5)
495
+
496
+
497
class SparseMixin:
    """Sparse-Jacobian tests shared by 'trf' and 'dogbox'."""

    def test_exact_tr_solver(self):
        # tr_solver='exact' is incompatible with sparse Jacobians.
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='exact', method=self.method)
        assert_raises(ValueError, least_squares, p.fun, p.x0,
                      tr_solver='exact', jac_sparsity=p.sparsity,
                      method=self.method)

    def test_equivalence(self):
        # Sparse and dense runs must take identical paths.
        sparse = BroydenTridiagonal(mode='sparse')
        dense = BroydenTridiagonal(mode='dense')
        res_sparse = least_squares(
            sparse.fun, sparse.x0, jac=sparse.jac,
            method=self.method)
        res_dense = least_squares(
            dense.fun, dense.x0, jac=sparse.jac,
            method=self.method)
        assert_equal(res_sparse.nfev, res_dense.nfev)
        assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
        assert_allclose(res_sparse.cost, 0, atol=1e-20)
        assert_allclose(res_dense.cost, 0, atol=1e-20)

    def test_tr_options(self):
        p = BroydenTridiagonal()
        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
                            tr_options={'btol': 1e-10})
        assert_allclose(res.cost, 0, atol=1e-20)

    def test_wrong_parameters(self):
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='best', method=self.method)
        assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='lsmr', tr_options={'tol': 1e-10})

    def test_solver_selection(self):
        sparse = BroydenTridiagonal(mode='sparse')
        dense = BroydenTridiagonal(mode='dense')
        res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
                                   method=self.method)
        res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
                                  method=self.method)
        assert_allclose(res_sparse.cost, 0, atol=1e-20)
        assert_allclose(res_dense.cost, 0, atol=1e-20)
        # The Jacobian type of the result should follow the input type.
        assert_(issparse(res_sparse.jac))
        assert_(isinstance(res_dense.jac, np.ndarray))

    def test_numerical_jac(self):
        p = BroydenTridiagonal()
        for jac in ['2-point', '3-point', 'cs']:
            res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
            res_sparse = least_squares(
                p.fun, p.x0, jac, method=self.method,
                jac_sparsity=p.sparsity)
            assert_equal(res_dense.nfev, res_sparse.nfev)
            assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
            assert_allclose(res_dense.cost, 0, atol=1e-20)
            assert_allclose(res_sparse.cost, 0, atol=1e-20)

    @pytest.mark.fail_slow(5)
    def test_with_bounds(self):
        p = BroydenTridiagonal()
        for jac, jac_sparsity in product(
                [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
            res_1 = least_squares(
                p.fun, p.x0, jac, bounds=(p.lb, np.inf),
                method=self.method, jac_sparsity=jac_sparsity)
            res_2 = least_squares(
                p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
                method=self.method, jac_sparsity=jac_sparsity)
            res_3 = least_squares(
                p.fun, p.x0, jac, bounds=(p.lb, p.ub),
                method=self.method, jac_sparsity=jac_sparsity)
            assert_allclose(res_1.optimality, 0, atol=1e-10)
            assert_allclose(res_2.optimality, 0, atol=1e-10)
            assert_allclose(res_3.optimality, 0, atol=1e-10)

    def test_wrong_jac_sparsity(self):
        p = BroydenTridiagonal()
        sparsity = p.sparsity[:-1]  # wrong number of rows
        assert_raises(ValueError, least_squares, p.fun, p.x0,
                      jac_sparsity=sparsity, method=self.method)

    def test_linear_operator(self):
        p = BroydenTridiagonal(mode='operator')
        res = least_squares(p.fun, p.x0, p.jac, method=self.method)
        assert_allclose(res.cost, 0.0, atol=1e-20)
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method=self.method, tr_solver='exact')

    def test_x_scale_jac_scale(self):
        p = BroydenTridiagonal()
        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
                            x_scale='jac')
        assert_allclose(res.cost, 0.0, atol=1e-20)

        # x_scale='jac' needs explicit Jacobian values, which a
        # LinearOperator cannot provide.
        p = BroydenTridiagonal(mode='operator')
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method=self.method, x_scale='jac')
597
+
598
+
599
class LossFunctionMixin:
    """Robust-loss tests shared by 'trf' and 'dogbox'."""

    def test_options(self):
        for loss in LOSSES:
            res = least_squares(fun_trivial, 2.0, loss=loss,
                                method=self.method)
            assert_allclose(res.x, 0, atol=1e-15)

        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      loss='hinge', method=self.method)

    def test_fun(self):
        # Test that res.fun is actual residuals, and not modified by loss
        # function stuff.
        for loss in LOSSES:
            res = least_squares(fun_trivial, 2.0, loss=loss,
                                method=self.method)
            assert_equal(res.fun, fun_trivial(res.x))

    def test_grad(self):
        # Test that res.grad is true gradient of loss function at the
        # solution. Use max_nfev = 1, to avoid reaching minimum.
        x = np.array([2.0])  # res.x will be this.

        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
                            max_nfev=1, method=self.method)
        assert_equal(res.grad, 2 * x * (x**2 + 5))

        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            max_nfev=1, method=self.method)
        assert_equal(res.grad, 2 * x)

        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad,
                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5)

        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2))

        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4))

        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad,
                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3))

    def test_jac(self):
        # Test that res.jac.T.dot(res.jac) gives Gauss-Newton approximation
        # of Hessian. This approximation is computed by doubly differentiating
        # the cost function and dropping the part containing second derivative
        # of f. For a scalar function it is computed as
        # H = (rho' + 2 * rho'' * f**2) * f'**2, if the expression inside the
        # brackets is less than EPS it is replaced by EPS. Here, we check
        # against the root of H.

        x = 2.0  # res.x will be this.
        f = x**2 + 5  # res.fun will be this.

        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
                            max_nfev=1, method=self.method)
        assert_equal(res.jac, 2 * x)

        # For `huber` loss the Jacobian correction is identically zero
        # in outlier region, in such cases it is modified to be equal EPS**0.5.
        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            max_nfev=1, method=self.method)
        assert_equal(res.jac, 2 * x * EPS**0.5)

        # Now, let's apply `loss_scale` to turn the residual into an inlier.
        # The loss function becomes linear.
        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            f_scale=10, max_nfev=1)
        assert_equal(res.jac, 2 * x)

        # 'soft_l1' always gives a positive scaling.
        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75)

        # For 'cauchy' the correction term turns out to be negative, and it
        # is replaced by EPS**0.5.
        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * EPS**0.5)

        # Now use scaling to turn the residual to inlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            f_scale=10, max_nfev=1, method=self.method)
        fs = f / 10
        assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2))

        # 'arctan' gives an outlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * EPS**0.5)

        # Turn to inlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            f_scale=20.0, max_nfev=1, method=self.method)
        fs = f / 20
        assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4))

        # cubic_soft_l1 will give an outlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
                            max_nfev=1)
        assert_allclose(res.jac, 2 * x * EPS**0.5)

        # Turn to inlier.
        res = least_squares(fun_trivial, x, jac_trivial,
                            loss=cubic_soft_l1, f_scale=6, max_nfev=1)
        fs = f / 6
        assert_allclose(res.jac,
                        2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6))

    def test_robustness(self):
        for noise in [0.1, 1.0]:
            p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)

            for jac in ['2-point', '3-point', 'cs', p.jac]:
                res_lsq = least_squares(p.fun, p.p0, jac=jac,
                                        method=self.method)
                assert_allclose(res_lsq.optimality, 0, atol=1e-2)
                for loss in LOSSES:
                    if loss == 'linear':
                        continue
                    res_robust = least_squares(
                        p.fun, p.p0, jac=jac, loss=loss, f_scale=noise,
                        method=self.method)
                    assert_allclose(res_robust.optimality, 0, atol=1e-2)
                    # A robust loss must land closer to the true parameters
                    # than plain least squares in the presence of outliers.
                    assert_(norm(res_robust.x - p.p_opt) <
                            norm(res_lsq.x - p.p_opt))
733
+
734
+
735
class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
    # Run the full shared suite with the 'dogbox' solver.
    method = 'dogbox'
737
+
738
+
739
class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
    # Run the full shared suite with the 'trf' solver.
    method = 'trf'

    def test_lsmr_regularization(self):
        # The lsmr trust-region solver must converge with regularization
        # both enabled and disabled.
        p = BroydenTridiagonal()
        for regularize in (True, False):
            res = least_squares(p.fun, p.x0, p.jac, method='trf',
                                tr_options={'regularize': regularize})
            assert_allclose(res.cost, 0, atol=1e-20)
748
+
749
+
750
class TestLM(BaseMixin):
    # 'lm' wraps MINPACK and supports only a subset of the features,
    # so it runs BaseMixin plus tests for the unsupported options.
    method = 'lm'

    def test_bounds_not_supported(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, bounds=(-3.0, 3.0), method='lm')

    def test_m_less_n_not_supported(self):
        x0 = [-2, 1]
        assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0,
                      method='lm')

    def test_sparse_not_supported(self):
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method='lm')

    def test_jac_sparsity_not_supported(self):
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      jac_sparsity=[1], method='lm')

    def test_LinearOperator_not_supported(self):
        p = BroydenTridiagonal(mode="operator")
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method='lm')

    def test_loss(self):
        # Only the default 'linear' loss is accepted by 'lm'.
        res = least_squares(fun_trivial, 2.0, loss='linear', method='lm')
        assert_allclose(res.x, 0.0, atol=1e-4)

        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      method='lm', loss='huber')
782
+
783
+
784
def test_basic():
    # The 'method' argument is optional; the default must converge.
    res = least_squares(fun_trivial, 2.0)
    assert_allclose(res.x, 0, atol=1e-10)
788
+
789
+
790
def test_small_tolerances_for_lm():
    # 'lm' rejects tolerances that are effectively disabled (None) when the
    # others are already at machine-precision level.
    cases = [(None, 1e-13, 1e-13),
             (1e-13, None, 1e-13),
             (1e-13, 1e-13, None)]
    for ftol, xtol, gtol in cases:
        assert_raises(ValueError, least_squares, fun_trivial, 2.0, xtol=xtol,
                      ftol=ftol, gtol=gtol, method='lm')
796
+
797
+
798
def test_fp32_gh12991():
    # checks that smaller FP sizes can be used in least_squares
    # this is the minimum working example reported for gh12991
    np.random.seed(1)

    xdata = np.linspace(0, 1, 100).astype("float32")
    ydata = np.random.random(100).astype("float32")

    def model(p, x):
        return p[0] + p[1] * x

    def residuals(p, x, y):
        return model(p, x) - y

    res = least_squares(residuals, [-1.0, -1.0], args=(xdata, ydata))
    # previously the initial jacobian calculated for this would be all 0
    # and the minimize would terminate immediately, with nfev=1, would
    # report a successful minimization (it shouldn't have done), but be
    # unchanged from the initial solution.
    # It was terminating early because the underlying approx_derivative
    # used a step size for FP64 when the working space was FP32.
    assert res.nfev > 2
    assert_allclose(res.x, np.array([0.4082241, 0.15530563]), atol=5e-5)
821
+
822
+
823
def test_gh_18793_and_19351():
    # Regression test combining gh-18793 and gh-19351 (see notes below).
    answer = 1e-12
    initial_guess = 1.1e-12

    def chi2(x):
        return (x - answer)**2

    gtol = 1e-15
    # Pass the tolerance through the variable so the assertion below is
    # guaranteed to check against the value actually used by the solver
    # (previously 1e-15 was hard-coded here a second time).
    res = least_squares(chi2, x0=initial_guess, gtol=gtol, bounds=(0, np.inf))
    # Original motivation: gh-18793
    # if we choose an initial condition that is close to the solution
    # we shouldn't return an answer that is further away from the solution

    # Update: gh-19351
    # However this requirement does not go well with 'trf' algorithm logic.
    # Some regressions were reported after the presumed fix.
    # The returned solution is good as long as it satisfies the convergence
    # conditions.
    # Specifically in this case the scaled gradient will be sufficiently low.

    scaling, _ = CL_scaling_vector(res.x, res.grad,
                                   np.atleast_1d(0), np.atleast_1d(np.inf))
    assert res.status == 1  # Converged by gradient
    assert np.linalg.norm(res.grad * scaling, ord=np.inf) < gtol
847
+
848
+
849
def test_gh_19103():
    # Checks that least_squares trf method selects a strictly feasible point,
    # and thus succeeds instead of failing,
    # when the initial guess is reported exactly at a boundary point.
    # This is a reduced example from gh-19103.

    ydata = np.array([0.] * 66 + [
        1., 0., 0., 0., 0., 0., 1., 1., 0., 0., 1.,
        1., 1., 1., 0., 0., 0., 1., 0., 0., 2., 1.,
        0., 3., 1., 6., 5., 0., 0., 2., 8., 4., 4.,
        6., 9., 7., 2., 7., 8., 2., 13., 9., 8., 11.,
        10., 13., 14., 19., 11., 15., 18., 26., 19., 32., 29.,
        28., 36., 32., 35., 36., 43., 52., 32., 58., 56., 52.,
        67., 53., 72., 88., 77., 95., 94., 84., 86., 101., 107.,
        108., 118., 96., 115., 138., 137.,
    ])
    xdata = np.arange(0, ydata.size) * 0.1

    def exponential_wrapped(params):
        amplitude, rate, origin = params
        return amplitude * np.exp(rate * (xdata - origin)) - ydata

    guess = [0.01, 1., 5.]
    # The first component of `guess` sits exactly on its lower bound.
    bounds = ((0.01, 0, 0), (np.inf, 10, 20.9))
    res = least_squares(exponential_wrapped, guess, method='trf',
                        bounds=bounds)
    assert res.success
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_linear_assignment.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Brian M. Clapper, G. Varoquaux, Lars Buitinck
2
+ # License: BSD
3
+
4
+ from numpy.testing import assert_array_equal
5
+ import pytest
6
+
7
+ import numpy as np
8
+
9
+ from scipy.optimize import linear_sum_assignment
10
+ from scipy.sparse import random
11
+ from scipy.sparse._sputils import matrix
12
+ from scipy.sparse.csgraph import min_weight_full_bipartite_matching
13
+ from scipy.sparse.csgraph.tests.test_matching import (
14
+ linear_sum_assignment_assertions, linear_sum_assignment_test_cases
15
+ )
16
+
17
+
18
+ def test_linear_sum_assignment_input_shape():
19
+ with pytest.raises(ValueError, match="expected a matrix"):
20
+ linear_sum_assignment([1, 2, 3])
21
+
22
+
23
+ def test_linear_sum_assignment_input_object():
24
+ C = [[1, 2, 3], [4, 5, 6]]
25
+ assert_array_equal(linear_sum_assignment(C),
26
+ linear_sum_assignment(np.asarray(C)))
27
+ assert_array_equal(linear_sum_assignment(C),
28
+ linear_sum_assignment(matrix(C)))
29
+
30
+
31
+ def test_linear_sum_assignment_input_bool():
32
+ I = np.identity(3)
33
+ assert_array_equal(linear_sum_assignment(I.astype(np.bool_)),
34
+ linear_sum_assignment(I))
35
+
36
+
37
+ def test_linear_sum_assignment_input_string():
38
+ I = np.identity(3)
39
+ with pytest.raises(TypeError, match="Cannot cast array data"):
40
+ linear_sum_assignment(I.astype(str))
41
+
42
+
43
+ def test_linear_sum_assignment_input_nan():
44
+ I = np.diag([np.nan, 1, 1])
45
+ with pytest.raises(ValueError, match="contains invalid numeric entries"):
46
+ linear_sum_assignment(I)
47
+
48
+
49
+ def test_linear_sum_assignment_input_neginf():
50
+ I = np.diag([1, -np.inf, 1])
51
+ with pytest.raises(ValueError, match="contains invalid numeric entries"):
52
+ linear_sum_assignment(I)
53
+
54
+
55
+ def test_linear_sum_assignment_input_inf():
56
+ I = np.identity(3)
57
+ I[:, 0] = np.inf
58
+ with pytest.raises(ValueError, match="cost matrix is infeasible"):
59
+ linear_sum_assignment(I)
60
+
61
+
62
+ def test_constant_cost_matrix():
63
+ # Fixes #11602
64
+ n = 8
65
+ C = np.ones((n, n))
66
+ row_ind, col_ind = linear_sum_assignment(C)
67
+ assert_array_equal(row_ind, np.arange(n))
68
+ assert_array_equal(col_ind, np.arange(n))
69
+
70
+
71
+ @pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)])
72
+ def test_linear_sum_assignment_trivial_cost(num_rows, num_cols):
73
+ C = np.empty(shape=(num_cols, num_rows))
74
+ row_ind, col_ind = linear_sum_assignment(C)
75
+ assert len(row_ind) == 0
76
+ assert len(col_ind) == 0
77
+
78
+
79
+ @pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases)
80
+ def test_linear_sum_assignment_small_inputs(sign, test_case):
81
+ linear_sum_assignment_assertions(
82
+ linear_sum_assignment, np.array, sign, test_case)
83
+
84
+
85
+ # Tests that combine scipy.optimize.linear_sum_assignment and
86
+ # scipy.sparse.csgraph.min_weight_full_bipartite_matching
87
+ def test_two_methods_give_same_result_on_many_sparse_inputs():
88
+ # As opposed to the test above, here we do not spell out the expected
89
+ # output; only assert that the two methods give the same result.
90
+ # Concretely, the below tests 100 cases of size 100x100, out of which
91
+ # 36 are infeasible.
92
+ np.random.seed(1234)
93
+ for _ in range(100):
94
+ lsa_raises = False
95
+ mwfbm_raises = False
96
+ sparse = random(100, 100, density=0.06,
97
+ data_rvs=lambda size: np.random.randint(1, 100, size))
98
+ # In csgraph, zeros correspond to missing edges, so we explicitly
99
+ # replace those with infinities
100
+ dense = np.full(sparse.shape, np.inf)
101
+ dense[sparse.row, sparse.col] = sparse.data
102
+ sparse = sparse.tocsr()
103
+ try:
104
+ row_ind, col_ind = linear_sum_assignment(dense)
105
+ lsa_cost = dense[row_ind, col_ind].sum()
106
+ except ValueError:
107
+ lsa_raises = True
108
+ try:
109
+ row_ind, col_ind = min_weight_full_bipartite_matching(sparse)
110
+ mwfbm_cost = sparse[row_ind, col_ind].sum()
111
+ except ValueError:
112
+ mwfbm_raises = True
113
+ # Ensure that if one method raises, so does the other one.
114
+ assert lsa_raises == mwfbm_raises
115
+ if not lsa_raises:
116
+ assert lsa_cost == mwfbm_cost
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_linprog.py ADDED
The diff for this file is too large to render. See raw diff
 
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_common.py ADDED
@@ -0,0 +1,297 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy.testing import assert_, assert_allclose, assert_equal
2
+ from pytest import raises as assert_raises
3
+ import numpy as np
4
+
5
+ from scipy.optimize._lsq.common import (
6
+ step_size_to_bound, find_active_constraints, make_strictly_feasible,
7
+ CL_scaling_vector, intersect_trust_region, build_quadratic_1d,
8
+ minimize_quadratic_1d, evaluate_quadratic, reflective_transformation,
9
+ left_multiplied_operator, right_multiplied_operator)
10
+
11
+
12
+ class TestBounds:
13
+ def test_step_size_to_bounds(self):
14
+ lb = np.array([-1.0, 2.5, 10.0])
15
+ ub = np.array([1.0, 5.0, 100.0])
16
+ x = np.array([0.0, 2.5, 12.0])
17
+
18
+ s = np.array([0.1, 0.0, 0.0])
19
+ step, hits = step_size_to_bound(x, s, lb, ub)
20
+ assert_equal(step, 10)
21
+ assert_equal(hits, [1, 0, 0])
22
+
23
+ s = np.array([0.01, 0.05, -1.0])
24
+ step, hits = step_size_to_bound(x, s, lb, ub)
25
+ assert_equal(step, 2)
26
+ assert_equal(hits, [0, 0, -1])
27
+
28
+ s = np.array([10.0, -0.0001, 100.0])
29
+ step, hits = step_size_to_bound(x, s, lb, ub)
30
+ assert_equal(step, np.array(-0))
31
+ assert_equal(hits, [0, -1, 0])
32
+
33
+ s = np.array([1.0, 0.5, -2.0])
34
+ step, hits = step_size_to_bound(x, s, lb, ub)
35
+ assert_equal(step, 1.0)
36
+ assert_equal(hits, [1, 0, -1])
37
+
38
+ s = np.zeros(3)
39
+ step, hits = step_size_to_bound(x, s, lb, ub)
40
+ assert_equal(step, np.inf)
41
+ assert_equal(hits, [0, 0, 0])
42
+
43
+ def test_find_active_constraints(self):
44
+ lb = np.array([0.0, -10.0, 1.0])
45
+ ub = np.array([1.0, 0.0, 100.0])
46
+
47
+ x = np.array([0.5, -5.0, 2.0])
48
+ active = find_active_constraints(x, lb, ub)
49
+ assert_equal(active, [0, 0, 0])
50
+
51
+ x = np.array([0.0, 0.0, 10.0])
52
+ active = find_active_constraints(x, lb, ub)
53
+ assert_equal(active, [-1, 1, 0])
54
+
55
+ active = find_active_constraints(x, lb, ub, rtol=0)
56
+ assert_equal(active, [-1, 1, 0])
57
+
58
+ x = np.array([1e-9, -1e-8, 100 - 1e-9])
59
+ active = find_active_constraints(x, lb, ub)
60
+ assert_equal(active, [0, 0, 1])
61
+
62
+ active = find_active_constraints(x, lb, ub, rtol=1.5e-9)
63
+ assert_equal(active, [-1, 0, 1])
64
+
65
+ lb = np.array([1.0, -np.inf, -np.inf])
66
+ ub = np.array([np.inf, 10.0, np.inf])
67
+
68
+ x = np.ones(3)
69
+ active = find_active_constraints(x, lb, ub)
70
+ assert_equal(active, [-1, 0, 0])
71
+
72
+ # Handles out-of-bound cases.
73
+ x = np.array([0.0, 11.0, 0.0])
74
+ active = find_active_constraints(x, lb, ub)
75
+ assert_equal(active, [-1, 1, 0])
76
+
77
+ active = find_active_constraints(x, lb, ub, rtol=0)
78
+ assert_equal(active, [-1, 1, 0])
79
+
80
+ def test_make_strictly_feasible(self):
81
+ lb = np.array([-0.5, -0.8, 2.0])
82
+ ub = np.array([0.8, 1.0, 3.0])
83
+
84
+ x = np.array([-0.5, 0.0, 2 + 1e-10])
85
+
86
+ x_new = make_strictly_feasible(x, lb, ub, rstep=0)
87
+ assert_(x_new[0] > -0.5)
88
+ assert_equal(x_new[1:], x[1:])
89
+
90
+ x_new = make_strictly_feasible(x, lb, ub, rstep=1e-4)
91
+ assert_equal(x_new, [-0.5 + 1e-4, 0.0, 2 * (1 + 1e-4)])
92
+
93
+ x = np.array([-0.5, -1, 3.1])
94
+ x_new = make_strictly_feasible(x, lb, ub)
95
+ assert_(np.all((x_new >= lb) & (x_new <= ub)))
96
+
97
+ x_new = make_strictly_feasible(x, lb, ub, rstep=0)
98
+ assert_(np.all((x_new >= lb) & (x_new <= ub)))
99
+
100
+ lb = np.array([-1, 100.0])
101
+ ub = np.array([1, 100.0 + 1e-10])
102
+ x = np.array([0, 100.0])
103
+ x_new = make_strictly_feasible(x, lb, ub, rstep=1e-8)
104
+ assert_equal(x_new, [0, 100.0 + 0.5e-10])
105
+
106
+ def test_scaling_vector(self):
107
+ lb = np.array([-np.inf, -5.0, 1.0, -np.inf])
108
+ ub = np.array([1.0, np.inf, 10.0, np.inf])
109
+ x = np.array([0.5, 2.0, 5.0, 0.0])
110
+ g = np.array([1.0, 0.1, -10.0, 0.0])
111
+ v, dv = CL_scaling_vector(x, g, lb, ub)
112
+ assert_equal(v, [1.0, 7.0, 5.0, 1.0])
113
+ assert_equal(dv, [0.0, 1.0, -1.0, 0.0])
114
+
115
+
116
+ class TestQuadraticFunction:
117
+ def setup_method(self):
118
+ self.J = np.array([
119
+ [0.1, 0.2],
120
+ [-1.0, 1.0],
121
+ [0.5, 0.2]])
122
+ self.g = np.array([0.8, -2.0])
123
+ self.diag = np.array([1.0, 2.0])
124
+
125
+ def test_build_quadratic_1d(self):
126
+ s = np.zeros(2)
127
+ a, b = build_quadratic_1d(self.J, self.g, s)
128
+ assert_equal(a, 0)
129
+ assert_equal(b, 0)
130
+
131
+ a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag)
132
+ assert_equal(a, 0)
133
+ assert_equal(b, 0)
134
+
135
+ s = np.array([1.0, -1.0])
136
+ a, b = build_quadratic_1d(self.J, self.g, s)
137
+ assert_equal(a, 2.05)
138
+ assert_equal(b, 2.8)
139
+
140
+ a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag)
141
+ assert_equal(a, 3.55)
142
+ assert_equal(b, 2.8)
143
+
144
+ s0 = np.array([0.5, 0.5])
145
+ a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0)
146
+ assert_equal(a, 3.55)
147
+ assert_allclose(b, 2.39)
148
+ assert_allclose(c, -0.1525)
149
+
150
+ def test_minimize_quadratic_1d(self):
151
+ a = 5
152
+ b = -1
153
+
154
+ t, y = minimize_quadratic_1d(a, b, 1, 2)
155
+ assert_equal(t, 1)
156
+ assert_allclose(y, a * t**2 + b * t, rtol=1e-15)
157
+
158
+ t, y = minimize_quadratic_1d(a, b, -2, -1)
159
+ assert_equal(t, -1)
160
+ assert_allclose(y, a * t**2 + b * t, rtol=1e-15)
161
+
162
+ t, y = minimize_quadratic_1d(a, b, -1, 1)
163
+ assert_equal(t, 0.1)
164
+ assert_allclose(y, a * t**2 + b * t, rtol=1e-15)
165
+
166
+ c = 10
167
+ t, y = minimize_quadratic_1d(a, b, -1, 1, c=c)
168
+ assert_equal(t, 0.1)
169
+ assert_allclose(y, a * t**2 + b * t + c, rtol=1e-15)
170
+
171
+ t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf, c=c)
172
+ assert_equal(t, 0.1)
173
+ assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15)
174
+
175
+ t, y = minimize_quadratic_1d(a, b, 0, np.inf, c=c)
176
+ assert_equal(t, 0.1)
177
+ assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15)
178
+
179
+ t, y = minimize_quadratic_1d(a, b, -np.inf, 0, c=c)
180
+ assert_equal(t, 0)
181
+ assert_allclose(y, a * t ** 2 + b * t + c, rtol=1e-15)
182
+
183
+ a = -1
184
+ b = 0.2
185
+ t, y = minimize_quadratic_1d(a, b, -np.inf, np.inf)
186
+ assert_equal(y, -np.inf)
187
+
188
+ t, y = minimize_quadratic_1d(a, b, 0, np.inf)
189
+ assert_equal(t, np.inf)
190
+ assert_equal(y, -np.inf)
191
+
192
+ t, y = minimize_quadratic_1d(a, b, -np.inf, 0)
193
+ assert_equal(t, -np.inf)
194
+ assert_equal(y, -np.inf)
195
+
196
+ def test_evaluate_quadratic(self):
197
+ s = np.array([1.0, -1.0])
198
+
199
+ value = evaluate_quadratic(self.J, self.g, s)
200
+ assert_equal(value, 4.85)
201
+
202
+ value = evaluate_quadratic(self.J, self.g, s, diag=self.diag)
203
+ assert_equal(value, 6.35)
204
+
205
+ s = np.array([[1.0, -1.0],
206
+ [1.0, 1.0],
207
+ [0.0, 0.0]])
208
+
209
+ values = evaluate_quadratic(self.J, self.g, s)
210
+ assert_allclose(values, [4.85, -0.91, 0.0])
211
+
212
+ values = evaluate_quadratic(self.J, self.g, s, diag=self.diag)
213
+ assert_allclose(values, [6.35, 0.59, 0.0])
214
+
215
+
216
+ class TestTrustRegion:
217
+ def test_intersect(self):
218
+ Delta = 1.0
219
+
220
+ x = np.zeros(3)
221
+ s = np.array([1.0, 0.0, 0.0])
222
+ t_neg, t_pos = intersect_trust_region(x, s, Delta)
223
+ assert_equal(t_neg, -1)
224
+ assert_equal(t_pos, 1)
225
+
226
+ s = np.array([-1.0, 1.0, -1.0])
227
+ t_neg, t_pos = intersect_trust_region(x, s, Delta)
228
+ assert_allclose(t_neg, -3**-0.5)
229
+ assert_allclose(t_pos, 3**-0.5)
230
+
231
+ x = np.array([0.5, -0.5, 0])
232
+ s = np.array([0, 0, 1.0])
233
+ t_neg, t_pos = intersect_trust_region(x, s, Delta)
234
+ assert_allclose(t_neg, -2**-0.5)
235
+ assert_allclose(t_pos, 2**-0.5)
236
+
237
+ x = np.ones(3)
238
+ assert_raises(ValueError, intersect_trust_region, x, s, Delta)
239
+
240
+ x = np.zeros(3)
241
+ s = np.zeros(3)
242
+ assert_raises(ValueError, intersect_trust_region, x, s, Delta)
243
+
244
+
245
+ def test_reflective_transformation():
246
+ lb = np.array([-1, -2], dtype=float)
247
+ ub = np.array([5, 3], dtype=float)
248
+
249
+ y = np.array([0, 0])
250
+ x, g = reflective_transformation(y, lb, ub)
251
+ assert_equal(x, y)
252
+ assert_equal(g, np.ones(2))
253
+
254
+ y = np.array([-4, 4], dtype=float)
255
+
256
+ x, g = reflective_transformation(y, lb, np.array([np.inf, np.inf]))
257
+ assert_equal(x, [2, 4])
258
+ assert_equal(g, [-1, 1])
259
+
260
+ x, g = reflective_transformation(y, np.array([-np.inf, -np.inf]), ub)
261
+ assert_equal(x, [-4, 2])
262
+ assert_equal(g, [1, -1])
263
+
264
+ x, g = reflective_transformation(y, lb, ub)
265
+ assert_equal(x, [2, 2])
266
+ assert_equal(g, [-1, -1])
267
+
268
+ lb = np.array([-np.inf, -2])
269
+ ub = np.array([5, np.inf])
270
+ y = np.array([10, 10], dtype=float)
271
+ x, g = reflective_transformation(y, lb, ub)
272
+ assert_equal(x, [0, 10])
273
+ assert_equal(g, [-1, 1])
274
+
275
+
276
+ def test_linear_operators():
277
+ A = np.arange(6).reshape((3, 2))
278
+
279
+ d_left = np.array([-1, 2, 5])
280
+ DA = np.diag(d_left).dot(A)
281
+ J_left = left_multiplied_operator(A, d_left)
282
+
283
+ d_right = np.array([5, 10])
284
+ AD = A.dot(np.diag(d_right))
285
+ J_right = right_multiplied_operator(A, d_right)
286
+
287
+ x = np.array([-2, 3])
288
+ X = -2 * np.arange(2, 8).reshape((2, 3))
289
+ xt = np.array([0, -2, 15])
290
+
291
+ assert_allclose(DA.dot(x), J_left.matvec(x))
292
+ assert_allclose(DA.dot(X), J_left.matmat(X))
293
+ assert_allclose(DA.T.dot(xt), J_left.rmatvec(xt))
294
+
295
+ assert_allclose(AD.dot(x), J_right.matvec(x))
296
+ assert_allclose(AD.dot(X), J_right.matmat(X))
297
+ assert_allclose(AD.T.dot(xt), J_right.rmatvec(xt))
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_lsq_linear.py ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import numpy as np
4
+ from numpy.linalg import lstsq
5
+ from numpy.testing import assert_allclose, assert_equal, assert_
6
+
7
+ from scipy.sparse import rand, coo_matrix
8
+ from scipy.sparse.linalg import aslinearoperator
9
+ from scipy.optimize import lsq_linear
10
+ from scipy.optimize._minimize import Bounds
11
+
12
+
13
+ A = np.array([
14
+ [0.171, -0.057],
15
+ [-0.049, -0.248],
16
+ [-0.166, 0.054],
17
+ ])
18
+ b = np.array([0.074, 1.014, -0.383])
19
+
20
+
21
+ class BaseMixin:
22
+ def setup_method(self):
23
+ self.rnd = np.random.RandomState(0)
24
+
25
+ def test_dense_no_bounds(self):
26
+ for lsq_solver in self.lsq_solvers:
27
+ res = lsq_linear(A, b, method=self.method, lsq_solver=lsq_solver)
28
+ assert_allclose(res.x, lstsq(A, b, rcond=-1)[0])
29
+ assert_allclose(res.x, res.unbounded_sol[0])
30
+
31
+ def test_dense_bounds(self):
32
+ # Solutions for comparison are taken from MATLAB.
33
+ lb = np.array([-1, -10])
34
+ ub = np.array([1, 0])
35
+ unbounded_sol = lstsq(A, b, rcond=-1)[0]
36
+ for lsq_solver in self.lsq_solvers:
37
+ res = lsq_linear(A, b, (lb, ub), method=self.method,
38
+ lsq_solver=lsq_solver)
39
+ assert_allclose(res.x, lstsq(A, b, rcond=-1)[0])
40
+ assert_allclose(res.unbounded_sol[0], unbounded_sol)
41
+
42
+ lb = np.array([0.0, -np.inf])
43
+ for lsq_solver in self.lsq_solvers:
44
+ res = lsq_linear(A, b, (lb, np.inf), method=self.method,
45
+ lsq_solver=lsq_solver)
46
+ assert_allclose(res.x, np.array([0.0, -4.084174437334673]),
47
+ atol=1e-6)
48
+ assert_allclose(res.unbounded_sol[0], unbounded_sol)
49
+
50
+ lb = np.array([-1, 0])
51
+ for lsq_solver in self.lsq_solvers:
52
+ res = lsq_linear(A, b, (lb, np.inf), method=self.method,
53
+ lsq_solver=lsq_solver)
54
+ assert_allclose(res.x, np.array([0.448427311733504, 0]),
55
+ atol=1e-15)
56
+ assert_allclose(res.unbounded_sol[0], unbounded_sol)
57
+
58
+ ub = np.array([np.inf, -5])
59
+ for lsq_solver in self.lsq_solvers:
60
+ res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
61
+ lsq_solver=lsq_solver)
62
+ assert_allclose(res.x, np.array([-0.105560998682388, -5]))
63
+ assert_allclose(res.unbounded_sol[0], unbounded_sol)
64
+
65
+ ub = np.array([-1, np.inf])
66
+ for lsq_solver in self.lsq_solvers:
67
+ res = lsq_linear(A, b, (-np.inf, ub), method=self.method,
68
+ lsq_solver=lsq_solver)
69
+ assert_allclose(res.x, np.array([-1, -4.181102129483254]))
70
+ assert_allclose(res.unbounded_sol[0], unbounded_sol)
71
+
72
+ lb = np.array([0, -4])
73
+ ub = np.array([1, 0])
74
+ for lsq_solver in self.lsq_solvers:
75
+ res = lsq_linear(A, b, (lb, ub), method=self.method,
76
+ lsq_solver=lsq_solver)
77
+ assert_allclose(res.x, np.array([0.005236663400791, -4]))
78
+ assert_allclose(res.unbounded_sol[0], unbounded_sol)
79
+
80
+ def test_bounds_variants(self):
81
+ x = np.array([1, 3])
82
+ A = self.rnd.uniform(size=(2, 2))
83
+ b = A@x
84
+ lb = np.array([1, 1])
85
+ ub = np.array([2, 2])
86
+ bounds_old = (lb, ub)
87
+ bounds_new = Bounds(lb, ub)
88
+ res_old = lsq_linear(A, b, bounds_old)
89
+ res_new = lsq_linear(A, b, bounds_new)
90
+ assert not np.allclose(res_new.x, res_new.unbounded_sol[0])
91
+ assert_allclose(res_old.x, res_new.x)
92
+
93
+ def test_np_matrix(self):
94
+ # gh-10711
95
+ with np.testing.suppress_warnings() as sup:
96
+ sup.filter(PendingDeprecationWarning)
97
+ A = np.matrix([[20, -4, 0, 2, 3], [10, -2, 1, 0, -1]])
98
+ k = np.array([20, 15])
99
+ lsq_linear(A, k)
100
+
101
+ def test_dense_rank_deficient(self):
102
+ A = np.array([[-0.307, -0.184]])
103
+ b = np.array([0.773])
104
+ lb = [-0.1, -0.1]
105
+ ub = [0.1, 0.1]
106
+ for lsq_solver in self.lsq_solvers:
107
+ res = lsq_linear(A, b, (lb, ub), method=self.method,
108
+ lsq_solver=lsq_solver)
109
+ assert_allclose(res.x, [-0.1, -0.1])
110
+ assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0])
111
+
112
+ A = np.array([
113
+ [0.334, 0.668],
114
+ [-0.516, -1.032],
115
+ [0.192, 0.384],
116
+ ])
117
+ b = np.array([-1.436, 0.135, 0.909])
118
+ lb = [0, -1]
119
+ ub = [1, -0.5]
120
+ for lsq_solver in self.lsq_solvers:
121
+ res = lsq_linear(A, b, (lb, ub), method=self.method,
122
+ lsq_solver=lsq_solver)
123
+ assert_allclose(res.optimality, 0, atol=1e-11)
124
+ assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0])
125
+
126
+ def test_full_result(self):
127
+ lb = np.array([0, -4])
128
+ ub = np.array([1, 0])
129
+ res = lsq_linear(A, b, (lb, ub), method=self.method)
130
+
131
+ assert_allclose(res.x, [0.005236663400791, -4])
132
+ assert_allclose(res.unbounded_sol[0], lstsq(A, b, rcond=-1)[0])
133
+
134
+ r = A.dot(res.x) - b
135
+ assert_allclose(res.cost, 0.5 * np.dot(r, r))
136
+ assert_allclose(res.fun, r)
137
+
138
+ assert_allclose(res.optimality, 0.0, atol=1e-12)
139
+ assert_equal(res.active_mask, [0, -1])
140
+ assert_(res.nit < 15)
141
+ assert_(res.status == 1 or res.status == 3)
142
+ assert_(isinstance(res.message, str))
143
+ assert_(res.success)
144
+
145
+ # This is a test for issue #9982.
146
+ def test_almost_singular(self):
147
+ A = np.array(
148
+ [[0.8854232310355122, 0.0365312146937765, 0.0365312146836789],
149
+ [0.3742460132129041, 0.0130523214078376, 0.0130523214077873],
150
+ [0.9680633871281361, 0.0319366128718639, 0.0319366128718388]])
151
+
152
+ b = np.array(
153
+ [0.0055029366538097, 0.0026677442422208, 0.0066612514782381])
154
+
155
+ result = lsq_linear(A, b, method=self.method)
156
+ assert_(result.cost < 1.1e-8)
157
+
158
+ @pytest.mark.xslow
159
+ def test_large_rank_deficient(self):
160
+ np.random.seed(0)
161
+ n, m = np.sort(np.random.randint(2, 1000, size=2))
162
+ m *= 2 # make m >> n
163
+ A = 1.0 * np.random.randint(-99, 99, size=[m, n])
164
+ b = 1.0 * np.random.randint(-99, 99, size=[m])
165
+ bounds = 1.0 * np.sort(np.random.randint(-99, 99, size=(2, n)), axis=0)
166
+ bounds[1, :] += 1.0 # ensure up > lb
167
+
168
+ # Make the A matrix strongly rank deficient by replicating some columns
169
+ w = np.random.choice(n, n) # Select random columns with duplicates
170
+ A = A[:, w]
171
+
172
+ x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x
173
+ x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x
174
+
175
+ cost_bvls = np.sum((A @ x_bvls - b)**2)
176
+ cost_trf = np.sum((A @ x_trf - b)**2)
177
+
178
+ assert_(abs(cost_bvls - cost_trf) < cost_trf*1e-10)
179
+
180
+ def test_convergence_small_matrix(self):
181
+ A = np.array([[49.0, 41.0, -32.0],
182
+ [-19.0, -32.0, -8.0],
183
+ [-13.0, 10.0, 69.0]])
184
+ b = np.array([-41.0, -90.0, 47.0])
185
+ bounds = np.array([[31.0, -44.0, 26.0],
186
+ [54.0, -32.0, 28.0]])
187
+
188
+ x_bvls = lsq_linear(A, b, bounds=bounds, method='bvls').x
189
+ x_trf = lsq_linear(A, b, bounds=bounds, method='trf').x
190
+
191
+ cost_bvls = np.sum((A @ x_bvls - b)**2)
192
+ cost_trf = np.sum((A @ x_trf - b)**2)
193
+
194
+ assert_(abs(cost_bvls - cost_trf) < cost_trf*1e-10)
195
+
196
+
197
+ class SparseMixin:
198
+ def test_sparse_and_LinearOperator(self):
199
+ m = 5000
200
+ n = 1000
201
+ A = rand(m, n, random_state=0)
202
+ b = self.rnd.randn(m)
203
+ res = lsq_linear(A, b)
204
+ assert_allclose(res.optimality, 0, atol=1e-6)
205
+
206
+ A = aslinearoperator(A)
207
+ res = lsq_linear(A, b)
208
+ assert_allclose(res.optimality, 0, atol=1e-6)
209
+
210
+ @pytest.mark.fail_slow(5)
211
+ def test_sparse_bounds(self):
212
+ m = 5000
213
+ n = 1000
214
+ A = rand(m, n, random_state=0)
215
+ b = self.rnd.randn(m)
216
+ lb = self.rnd.randn(n)
217
+ ub = lb + 1
218
+ res = lsq_linear(A, b, (lb, ub))
219
+ assert_allclose(res.optimality, 0.0, atol=1e-6)
220
+
221
+ res = lsq_linear(A, b, (lb, ub), lsmr_tol=1e-13,
222
+ lsmr_maxiter=1500)
223
+ assert_allclose(res.optimality, 0.0, atol=1e-6)
224
+
225
+ res = lsq_linear(A, b, (lb, ub), lsmr_tol='auto')
226
+ assert_allclose(res.optimality, 0.0, atol=1e-6)
227
+
228
+ def test_sparse_ill_conditioned(self):
229
+ # Sparse matrix with condition number of ~4 million
230
+ data = np.array([1., 1., 1., 1. + 1e-6, 1.])
231
+ row = np.array([0, 0, 1, 2, 2])
232
+ col = np.array([0, 2, 1, 0, 2])
233
+ A = coo_matrix((data, (row, col)), shape=(3, 3))
234
+
235
+ # Get the exact solution
236
+ exact_sol = lsq_linear(A.toarray(), b, lsq_solver='exact')
237
+
238
+ # Default lsmr arguments should not fully converge the solution
239
+ default_lsmr_sol = lsq_linear(A, b, lsq_solver='lsmr')
240
+ with pytest.raises(AssertionError, match=""):
241
+ assert_allclose(exact_sol.x, default_lsmr_sol.x)
242
+
243
+ # By increasing the maximum lsmr iters, it will converge
244
+ conv_lsmr = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=10)
245
+ assert_allclose(exact_sol.x, conv_lsmr.x)
246
+
247
+
248
+ class TestTRF(BaseMixin, SparseMixin):
249
+ method = 'trf'
250
+ lsq_solvers = ['exact', 'lsmr']
251
+
252
+
253
+ class TestBVLS(BaseMixin):
254
+ method = 'bvls'
255
+ lsq_solvers = ['exact']
256
+
257
+
258
+ class TestErrorChecking:
259
+ def test_option_lsmr_tol(self):
260
+ # Should work with a positive float, string equal to 'auto', or None
261
+ _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1e-2)
262
+ _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='auto')
263
+ _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=None)
264
+
265
+ # Should raise error with negative float, strings
266
+ # other than 'auto', and integers
267
+ err_message = "`lsmr_tol` must be None, 'auto', or positive float."
268
+ with pytest.raises(ValueError, match=err_message):
269
+ _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=-0.1)
270
+ with pytest.raises(ValueError, match=err_message):
271
+ _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol='foo')
272
+ with pytest.raises(ValueError, match=err_message):
273
+ _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_tol=1)
274
+
275
+ def test_option_lsmr_maxiter(self):
276
+ # Should work with positive integers or None
277
+ _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=1)
278
+ _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=None)
279
+
280
+ # Should raise error with 0 or negative max iter
281
+ err_message = "`lsmr_maxiter` must be None or positive integer."
282
+ with pytest.raises(ValueError, match=err_message):
283
+ _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=0)
284
+ with pytest.raises(ValueError, match=err_message):
285
+ _ = lsq_linear(A, b, lsq_solver='lsmr', lsmr_maxiter=-1)
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_minimize_constrained.py ADDED
@@ -0,0 +1,828 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+ from scipy.linalg import block_diag
4
+ from scipy.sparse import csc_matrix
5
+ from numpy.testing import (assert_array_almost_equal,
6
+ assert_array_less, assert_, assert_allclose,
7
+ suppress_warnings)
8
+ from scipy.optimize import (NonlinearConstraint,
9
+ LinearConstraint,
10
+ Bounds,
11
+ minimize,
12
+ BFGS,
13
+ SR1,
14
+ rosen)
15
+
16
+
17
+ class Maratos:
18
+ """Problem 15.4 from Nocedal and Wright
19
+
20
+ The following optimization problem:
21
+ minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0]
22
+ Subject to: x[0]**2 + x[1]**2 - 1 = 0
23
+ """
24
+
25
+ def __init__(self, degrees=60, constr_jac=None, constr_hess=None):
26
+ rads = degrees/180*np.pi
27
+ self.x0 = [np.cos(rads), np.sin(rads)]
28
+ self.x_opt = np.array([1.0, 0.0])
29
+ self.constr_jac = constr_jac
30
+ self.constr_hess = constr_hess
31
+ self.bounds = None
32
+
33
+ def fun(self, x):
34
+ return 2*(x[0]**2 + x[1]**2 - 1) - x[0]
35
+
36
+ def grad(self, x):
37
+ return np.array([4*x[0]-1, 4*x[1]])
38
+
39
+ def hess(self, x):
40
+ return 4*np.eye(2)
41
+
42
+ @property
43
+ def constr(self):
44
+ def fun(x):
45
+ return x[0]**2 + x[1]**2
46
+
47
+ if self.constr_jac is None:
48
+ def jac(x):
49
+ return [[2*x[0], 2*x[1]]]
50
+ else:
51
+ jac = self.constr_jac
52
+
53
+ if self.constr_hess is None:
54
+ def hess(x, v):
55
+ return 2*v[0]*np.eye(2)
56
+ else:
57
+ hess = self.constr_hess
58
+
59
+ return NonlinearConstraint(fun, 1, 1, jac, hess)
60
+
61
+
62
+ class MaratosTestArgs:
63
+ """Problem 15.4 from Nocedal and Wright
64
+
65
+ The following optimization problem:
66
+ minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0]
67
+ Subject to: x[0]**2 + x[1]**2 - 1 = 0
68
+ """
69
+
70
+ def __init__(self, a, b, degrees=60, constr_jac=None, constr_hess=None):
71
+ rads = degrees/180*np.pi
72
+ self.x0 = [np.cos(rads), np.sin(rads)]
73
+ self.x_opt = np.array([1.0, 0.0])
74
+ self.constr_jac = constr_jac
75
+ self.constr_hess = constr_hess
76
+ self.a = a
77
+ self.b = b
78
+ self.bounds = None
79
+
80
+ def _test_args(self, a, b):
81
+ if self.a != a or self.b != b:
82
+ raise ValueError()
83
+
84
+ def fun(self, x, a, b):
85
+ self._test_args(a, b)
86
+ return 2*(x[0]**2 + x[1]**2 - 1) - x[0]
87
+
88
+ def grad(self, x, a, b):
89
+ self._test_args(a, b)
90
+ return np.array([4*x[0]-1, 4*x[1]])
91
+
92
+ def hess(self, x, a, b):
93
+ self._test_args(a, b)
94
+ return 4*np.eye(2)
95
+
96
+ @property
97
+ def constr(self):
98
+ def fun(x):
99
+ return x[0]**2 + x[1]**2
100
+
101
+ if self.constr_jac is None:
102
+ def jac(x):
103
+ return [[4*x[0], 4*x[1]]]
104
+ else:
105
+ jac = self.constr_jac
106
+
107
+ if self.constr_hess is None:
108
+ def hess(x, v):
109
+ return 2*v[0]*np.eye(2)
110
+ else:
111
+ hess = self.constr_hess
112
+
113
+ return NonlinearConstraint(fun, 1, 1, jac, hess)
114
+
115
+
116
+ class MaratosGradInFunc:
117
+ """Problem 15.4 from Nocedal and Wright
118
+
119
+ The following optimization problem:
120
+ minimize 2*(x[0]**2 + x[1]**2 - 1) - x[0]
121
+ Subject to: x[0]**2 + x[1]**2 - 1 = 0
122
+ """
123
+
124
+ def __init__(self, degrees=60, constr_jac=None, constr_hess=None):
125
+ rads = degrees/180*np.pi
126
+ self.x0 = [np.cos(rads), np.sin(rads)]
127
+ self.x_opt = np.array([1.0, 0.0])
128
+ self.constr_jac = constr_jac
129
+ self.constr_hess = constr_hess
130
+ self.bounds = None
131
+
132
+ def fun(self, x):
133
+ return (2*(x[0]**2 + x[1]**2 - 1) - x[0],
134
+ np.array([4*x[0]-1, 4*x[1]]))
135
+
136
+ @property
137
+ def grad(self):
138
+ return True
139
+
140
+ def hess(self, x):
141
+ return 4*np.eye(2)
142
+
143
+ @property
144
+ def constr(self):
145
+ def fun(x):
146
+ return x[0]**2 + x[1]**2
147
+
148
+ if self.constr_jac is None:
149
+ def jac(x):
150
+ return [[4*x[0], 4*x[1]]]
151
+ else:
152
+ jac = self.constr_jac
153
+
154
+ if self.constr_hess is None:
155
+ def hess(x, v):
156
+ return 2*v[0]*np.eye(2)
157
+ else:
158
+ hess = self.constr_hess
159
+
160
+ return NonlinearConstraint(fun, 1, 1, jac, hess)
161
+
162
+
163
+ class HyperbolicIneq:
164
+ """Problem 15.1 from Nocedal and Wright
165
+
166
+ The following optimization problem:
167
+ minimize 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2
168
+ Subject to: 1/(x[0] + 1) - x[1] >= 1/4
169
+ x[0] >= 0
170
+ x[1] >= 0
171
+ """
172
+ def __init__(self, constr_jac=None, constr_hess=None):
173
+ self.x0 = [0, 0]
174
+ self.x_opt = [1.952823, 0.088659]
175
+ self.constr_jac = constr_jac
176
+ self.constr_hess = constr_hess
177
+ self.bounds = Bounds(0, np.inf)
178
+
179
+ def fun(self, x):
180
+ return 1/2*(x[0] - 2)**2 + 1/2*(x[1] - 1/2)**2
181
+
182
+ def grad(self, x):
183
+ return [x[0] - 2, x[1] - 1/2]
184
+
185
+ def hess(self, x):
186
+ return np.eye(2)
187
+
188
+ @property
189
+ def constr(self):
190
+ def fun(x):
191
+ return 1/(x[0] + 1) - x[1]
192
+
193
+ if self.constr_jac is None:
194
+ def jac(x):
195
+ return [[-1/(x[0] + 1)**2, -1]]
196
+ else:
197
+ jac = self.constr_jac
198
+
199
+ if self.constr_hess is None:
200
+ def hess(x, v):
201
+ return 2*v[0]*np.array([[1/(x[0] + 1)**3, 0],
202
+ [0, 0]])
203
+ else:
204
+ hess = self.constr_hess
205
+
206
+ return NonlinearConstraint(fun, 0.25, np.inf, jac, hess)
207
+
208
+
209
+ class Rosenbrock:
210
+ """Rosenbrock function.
211
+
212
+ The following optimization problem:
213
+ minimize sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
214
+ """
215
+
216
+ def __init__(self, n=2, random_state=0):
217
+ rng = np.random.RandomState(random_state)
218
+ self.x0 = rng.uniform(-1, 1, n)
219
+ self.x_opt = np.ones(n)
220
+ self.bounds = None
221
+
222
+ def fun(self, x):
223
+ x = np.asarray(x)
224
+ r = np.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
225
+ axis=0)
226
+ return r
227
+
228
+ def grad(self, x):
229
+ x = np.asarray(x)
230
+ xm = x[1:-1]
231
+ xm_m1 = x[:-2]
232
+ xm_p1 = x[2:]
233
+ der = np.zeros_like(x)
234
+ der[1:-1] = (200 * (xm - xm_m1**2) -
235
+ 400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
236
+ der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
237
+ der[-1] = 200 * (x[-1] - x[-2]**2)
238
+ return der
239
+
240
+ def hess(self, x):
241
+ x = np.atleast_1d(x)
242
+ H = np.diag(-400 * x[:-1], 1) - np.diag(400 * x[:-1], -1)
243
+ diagonal = np.zeros(len(x), dtype=x.dtype)
244
+ diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
245
+ diagonal[-1] = 200
246
+ diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
247
+ H = H + np.diag(diagonal)
248
+ return H
249
+
250
+ @property
251
+ def constr(self):
252
+ return ()
253
+
254
+
255
+ class IneqRosenbrock(Rosenbrock):
256
+ """Rosenbrock subject to inequality constraints.
257
+
258
+ The following optimization problem:
259
+ minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
260
+ subject to: x[0] + 2 x[1] <= 1
261
+
262
+ Taken from matlab ``fmincon`` documentation.
263
+ """
264
+ def __init__(self, random_state=0):
265
+ Rosenbrock.__init__(self, 2, random_state)
266
+ self.x0 = [-1, -0.5]
267
+ self.x_opt = [0.5022, 0.2489]
268
+ self.bounds = None
269
+
270
+ @property
271
+ def constr(self):
272
+ A = [[1, 2]]
273
+ b = 1
274
+ return LinearConstraint(A, -np.inf, b)
275
+
276
+
277
+ class BoundedRosenbrock(Rosenbrock):
278
+ """Rosenbrock subject to inequality constraints.
279
+
280
+ The following optimization problem:
281
+ minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
282
+ subject to: -2 <= x[0] <= 0
283
+ 0 <= x[1] <= 2
284
+
285
+ Taken from matlab ``fmincon`` documentation.
286
+ """
287
+ def __init__(self, random_state=0):
288
+ Rosenbrock.__init__(self, 2, random_state)
289
+ self.x0 = [-0.2, 0.2]
290
+ self.x_opt = None
291
+ self.bounds = Bounds([-2, 0], [0, 2])
292
+
293
+
294
+ class EqIneqRosenbrock(Rosenbrock):
295
+ """Rosenbrock subject to equality and inequality constraints.
296
+
297
+ The following optimization problem:
298
+ minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
299
+ subject to: x[0] + 2 x[1] <= 1
300
+ 2 x[0] + x[1] = 1
301
+
302
+ Taken from matlab ``fimincon`` documentation.
303
+ """
304
+ def __init__(self, random_state=0):
305
+ Rosenbrock.__init__(self, 2, random_state)
306
+ self.x0 = [-1, -0.5]
307
+ self.x_opt = [0.41494, 0.17011]
308
+ self.bounds = None
309
+
310
+ @property
311
+ def constr(self):
312
+ A_ineq = [[1, 2]]
313
+ b_ineq = 1
314
+ A_eq = [[2, 1]]
315
+ b_eq = 1
316
+ return (LinearConstraint(A_ineq, -np.inf, b_ineq),
317
+ LinearConstraint(A_eq, b_eq, b_eq))
318
+
319
+
320
+ class Elec:
321
+ """Distribution of electrons on a sphere.
322
+
323
+ Problem no 2 from COPS collection [2]_. Find
324
+ the equilibrium state distribution (of minimal
325
+ potential) of the electrons positioned on a
326
+ conducting sphere.
327
+
328
+ References
329
+ ----------
330
+ .. [1] E. D. Dolan, J. J. Mor\'{e}, and T. S. Munson,
331
+ "Benchmarking optimization software with COPS 3.0.",
332
+ Argonne National Lab., Argonne, IL (US), 2004.
333
+ """
334
+ def __init__(self, n_electrons=200, random_state=0,
335
+ constr_jac=None, constr_hess=None):
336
+ self.n_electrons = n_electrons
337
+ self.rng = np.random.RandomState(random_state)
338
+ # Initial Guess
339
+ phi = self.rng.uniform(0, 2 * np.pi, self.n_electrons)
340
+ theta = self.rng.uniform(-np.pi, np.pi, self.n_electrons)
341
+ x = np.cos(theta) * np.cos(phi)
342
+ y = np.cos(theta) * np.sin(phi)
343
+ z = np.sin(theta)
344
+ self.x0 = np.hstack((x, y, z))
345
+ self.x_opt = None
346
+ self.constr_jac = constr_jac
347
+ self.constr_hess = constr_hess
348
+ self.bounds = None
349
+
350
+ def _get_cordinates(self, x):
351
+ x_coord = x[:self.n_electrons]
352
+ y_coord = x[self.n_electrons:2 * self.n_electrons]
353
+ z_coord = x[2 * self.n_electrons:]
354
+ return x_coord, y_coord, z_coord
355
+
356
+ def _compute_coordinate_deltas(self, x):
357
+ x_coord, y_coord, z_coord = self._get_cordinates(x)
358
+ dx = x_coord[:, None] - x_coord
359
+ dy = y_coord[:, None] - y_coord
360
+ dz = z_coord[:, None] - z_coord
361
+ return dx, dy, dz
362
+
363
+ def fun(self, x):
364
+ dx, dy, dz = self._compute_coordinate_deltas(x)
365
+ with np.errstate(divide='ignore'):
366
+ dm1 = (dx**2 + dy**2 + dz**2) ** -0.5
367
+ dm1[np.diag_indices_from(dm1)] = 0
368
+ return 0.5 * np.sum(dm1)
369
+
370
+ def grad(self, x):
371
+ dx, dy, dz = self._compute_coordinate_deltas(x)
372
+
373
+ with np.errstate(divide='ignore'):
374
+ dm3 = (dx**2 + dy**2 + dz**2) ** -1.5
375
+ dm3[np.diag_indices_from(dm3)] = 0
376
+
377
+ grad_x = -np.sum(dx * dm3, axis=1)
378
+ grad_y = -np.sum(dy * dm3, axis=1)
379
+ grad_z = -np.sum(dz * dm3, axis=1)
380
+
381
+ return np.hstack((grad_x, grad_y, grad_z))
382
+
383
+ def hess(self, x):
384
+ dx, dy, dz = self._compute_coordinate_deltas(x)
385
+ d = (dx**2 + dy**2 + dz**2) ** 0.5
386
+
387
+ with np.errstate(divide='ignore'):
388
+ dm3 = d ** -3
389
+ dm5 = d ** -5
390
+
391
+ i = np.arange(self.n_electrons)
392
+ dm3[i, i] = 0
393
+ dm5[i, i] = 0
394
+
395
+ Hxx = dm3 - 3 * dx**2 * dm5
396
+ Hxx[i, i] = -np.sum(Hxx, axis=1)
397
+
398
+ Hxy = -3 * dx * dy * dm5
399
+ Hxy[i, i] = -np.sum(Hxy, axis=1)
400
+
401
+ Hxz = -3 * dx * dz * dm5
402
+ Hxz[i, i] = -np.sum(Hxz, axis=1)
403
+
404
+ Hyy = dm3 - 3 * dy**2 * dm5
405
+ Hyy[i, i] = -np.sum(Hyy, axis=1)
406
+
407
+ Hyz = -3 * dy * dz * dm5
408
+ Hyz[i, i] = -np.sum(Hyz, axis=1)
409
+
410
+ Hzz = dm3 - 3 * dz**2 * dm5
411
+ Hzz[i, i] = -np.sum(Hzz, axis=1)
412
+
413
+ H = np.vstack((
414
+ np.hstack((Hxx, Hxy, Hxz)),
415
+ np.hstack((Hxy, Hyy, Hyz)),
416
+ np.hstack((Hxz, Hyz, Hzz))
417
+ ))
418
+
419
+ return H
420
+
421
+ @property
422
+ def constr(self):
423
+ def fun(x):
424
+ x_coord, y_coord, z_coord = self._get_cordinates(x)
425
+ return x_coord**2 + y_coord**2 + z_coord**2 - 1
426
+
427
+ if self.constr_jac is None:
428
+ def jac(x):
429
+ x_coord, y_coord, z_coord = self._get_cordinates(x)
430
+ Jx = 2 * np.diag(x_coord)
431
+ Jy = 2 * np.diag(y_coord)
432
+ Jz = 2 * np.diag(z_coord)
433
+ return csc_matrix(np.hstack((Jx, Jy, Jz)))
434
+ else:
435
+ jac = self.constr_jac
436
+
437
+ if self.constr_hess is None:
438
+ def hess(x, v):
439
+ D = 2 * np.diag(v)
440
+ return block_diag(D, D, D)
441
+ else:
442
+ hess = self.constr_hess
443
+
444
+ return NonlinearConstraint(fun, -np.inf, 0, jac, hess)
445
+
446
+
447
+ class TestTrustRegionConstr:
448
+ list_of_problems = [Maratos(),
449
+ Maratos(constr_hess='2-point'),
450
+ Maratos(constr_hess=SR1()),
451
+ Maratos(constr_jac='2-point', constr_hess=SR1()),
452
+ MaratosGradInFunc(),
453
+ HyperbolicIneq(),
454
+ HyperbolicIneq(constr_hess='3-point'),
455
+ HyperbolicIneq(constr_hess=BFGS()),
456
+ HyperbolicIneq(constr_jac='3-point',
457
+ constr_hess=BFGS()),
458
+ Rosenbrock(),
459
+ IneqRosenbrock(),
460
+ EqIneqRosenbrock(),
461
+ BoundedRosenbrock(),
462
+ Elec(n_electrons=2),
463
+ Elec(n_electrons=2, constr_hess='2-point'),
464
+ Elec(n_electrons=2, constr_hess=SR1()),
465
+ Elec(n_electrons=2, constr_jac='3-point',
466
+ constr_hess=SR1())]
467
+
468
+ @pytest.mark.parametrize('prob', list_of_problems)
469
+ @pytest.mark.parametrize('grad', ('prob.grad', '3-point', False))
470
+ @pytest.mark.parametrize('hess', ("prob.hess", '3-point', SR1(),
471
+ BFGS(exception_strategy='damp_update'),
472
+ BFGS(exception_strategy='skip_update')))
473
+ def test_list_of_problems(self, prob, grad, hess):
474
+ grad = prob.grad if grad == "prob.grad" else grad
475
+ hess = prob.hess if hess == "prob.hess" else hess
476
+ # Remove exceptions
477
+ if (grad in {'2-point', '3-point', 'cs', False} and
478
+ hess in {'2-point', '3-point', 'cs'}):
479
+ pytest.skip("Numerical Hessian needs analytical gradient")
480
+ if prob.grad is True and grad in {'3-point', False}:
481
+ pytest.skip("prob.grad incompatible with grad in {'3-point', False}")
482
+ sensitive = (isinstance(prob, BoundedRosenbrock) and grad == '3-point'
483
+ and isinstance(hess, BFGS))
484
+ if sensitive:
485
+ pytest.xfail("Seems sensitive to initial conditions w/ Accelerate")
486
+ with suppress_warnings() as sup:
487
+ sup.filter(UserWarning, "delta_grad == 0.0")
488
+ result = minimize(prob.fun, prob.x0,
489
+ method='trust-constr',
490
+ jac=grad, hess=hess,
491
+ bounds=prob.bounds,
492
+ constraints=prob.constr)
493
+
494
+ if prob.x_opt is not None:
495
+ assert_array_almost_equal(result.x, prob.x_opt,
496
+ decimal=5)
497
+ # gtol
498
+ if result.status == 1:
499
+ assert_array_less(result.optimality, 1e-8)
500
+ # xtol
501
+ if result.status == 2:
502
+ assert_array_less(result.tr_radius, 1e-8)
503
+
504
+ if result.method == "tr_interior_point":
505
+ assert_array_less(result.barrier_parameter, 1e-8)
506
+
507
+ # check for max iter
508
+ message = f"Invalid termination condition: {result.status}."
509
+ assert result.status not in {0, 3}, message
510
+
511
+
512
+ def test_default_jac_and_hess(self):
513
+ def fun(x):
514
+ return (x - 1) ** 2
515
+ bounds = [(-2, 2)]
516
+ res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr')
517
+ assert_array_almost_equal(res.x, 1, decimal=5)
518
+
519
+ def test_default_hess(self):
520
+ def fun(x):
521
+ return (x - 1) ** 2
522
+ bounds = [(-2, 2)]
523
+ res = minimize(fun, x0=[-1.5], bounds=bounds, method='trust-constr',
524
+ jac='2-point')
525
+ assert_array_almost_equal(res.x, 1, decimal=5)
526
+
527
+ def test_no_constraints(self):
528
+ prob = Rosenbrock()
529
+ result = minimize(prob.fun, prob.x0,
530
+ method='trust-constr',
531
+ jac=prob.grad, hess=prob.hess)
532
+ result1 = minimize(prob.fun, prob.x0,
533
+ method='L-BFGS-B',
534
+ jac='2-point')
535
+
536
+ result2 = minimize(prob.fun, prob.x0,
537
+ method='L-BFGS-B',
538
+ jac='3-point')
539
+ assert_array_almost_equal(result.x, prob.x_opt, decimal=5)
540
+ assert_array_almost_equal(result1.x, prob.x_opt, decimal=5)
541
+ assert_array_almost_equal(result2.x, prob.x_opt, decimal=5)
542
+
543
+ def test_hessp(self):
544
+ prob = Maratos()
545
+
546
+ def hessp(x, p):
547
+ H = prob.hess(x)
548
+ return H.dot(p)
549
+
550
+ result = minimize(prob.fun, prob.x0,
551
+ method='trust-constr',
552
+ jac=prob.grad, hessp=hessp,
553
+ bounds=prob.bounds,
554
+ constraints=prob.constr)
555
+
556
+ if prob.x_opt is not None:
557
+ assert_array_almost_equal(result.x, prob.x_opt, decimal=2)
558
+
559
+ # gtol
560
+ if result.status == 1:
561
+ assert_array_less(result.optimality, 1e-8)
562
+ # xtol
563
+ if result.status == 2:
564
+ assert_array_less(result.tr_radius, 1e-8)
565
+
566
+ if result.method == "tr_interior_point":
567
+ assert_array_less(result.barrier_parameter, 1e-8)
568
+ # max iter
569
+ if result.status in (0, 3):
570
+ raise RuntimeError("Invalid termination condition.")
571
+
572
+ def test_args(self):
573
+ prob = MaratosTestArgs("a", 234)
574
+
575
+ result = minimize(prob.fun, prob.x0, ("a", 234),
576
+ method='trust-constr',
577
+ jac=prob.grad, hess=prob.hess,
578
+ bounds=prob.bounds,
579
+ constraints=prob.constr)
580
+
581
+ if prob.x_opt is not None:
582
+ assert_array_almost_equal(result.x, prob.x_opt, decimal=2)
583
+
584
+ # gtol
585
+ if result.status == 1:
586
+ assert_array_less(result.optimality, 1e-8)
587
+ # xtol
588
+ if result.status == 2:
589
+ assert_array_less(result.tr_radius, 1e-8)
590
+ if result.method == "tr_interior_point":
591
+ assert_array_less(result.barrier_parameter, 1e-8)
592
+ # max iter
593
+ if result.status in (0, 3):
594
+ raise RuntimeError("Invalid termination condition.")
595
+
596
+ def test_raise_exception(self):
597
+ prob = Maratos()
598
+ message = "Whenever the gradient is estimated via finite-differences"
599
+ with pytest.raises(ValueError, match=message):
600
+ minimize(prob.fun, prob.x0, method='trust-constr', jac='2-point',
601
+ hess='2-point', constraints=prob.constr)
602
+
603
+ def test_issue_9044(self):
604
+ # https://github.com/scipy/scipy/issues/9044
605
+ # Test the returned `OptimizeResult` contains keys consistent with
606
+ # other solvers.
607
+
608
+ def callback(x, info):
609
+ assert_('nit' in info)
610
+ assert_('niter' in info)
611
+
612
+ result = minimize(lambda x: x**2, [0], jac=lambda x: 2*x,
613
+ hess=lambda x: 2, callback=callback,
614
+ method='trust-constr')
615
+ assert_(result.get('success'))
616
+ assert_(result.get('nit', -1) == 1)
617
+
618
+ # Also check existence of the 'niter' attribute, for backward
619
+ # compatibility
620
+ assert_(result.get('niter', -1) == 1)
621
+
622
+ def test_issue_15093(self):
623
+ # scipy docs define bounds as inclusive, so it shouldn't be
624
+ # an issue to set x0 on the bounds even if keep_feasible is
625
+ # True. Previously, trust-constr would treat bounds as
626
+ # exclusive.
627
+
628
+ x0 = np.array([0., 0.5])
629
+
630
+ def obj(x):
631
+ x1 = x[0]
632
+ x2 = x[1]
633
+ return x1 ** 2 + x2 ** 2
634
+
635
+ bounds = Bounds(np.array([0., 0.]), np.array([1., 1.]),
636
+ keep_feasible=True)
637
+
638
+ with suppress_warnings() as sup:
639
+ sup.filter(UserWarning, "delta_grad == 0.0")
640
+ result = minimize(
641
+ method='trust-constr',
642
+ fun=obj,
643
+ x0=x0,
644
+ bounds=bounds)
645
+
646
+ assert result['success']
647
+
648
+ class TestEmptyConstraint:
649
+ """
650
+ Here we minimize x^2+y^2 subject to x^2-y^2>1.
651
+ The actual minimum is at (0, 0) which fails the constraint.
652
+ Therefore we will find a minimum on the boundary at (+/-1, 0).
653
+
654
+ When minimizing on the boundary, optimize uses a set of
655
+ constraints that removes the constraint that sets that
656
+ boundary. In our case, there's only one constraint, so
657
+ the result is an empty constraint.
658
+
659
+ This tests that the empty constraint works.
660
+ """
661
+ def test_empty_constraint(self):
662
+
663
+ def function(x):
664
+ return x[0]**2 + x[1]**2
665
+
666
+ def functionjacobian(x):
667
+ return np.array([2.*x[0], 2.*x[1]])
668
+
669
+ def functionhvp(x, v):
670
+ return 2.*v
671
+
672
+ def constraint(x):
673
+ return np.array([x[0]**2 - x[1]**2])
674
+
675
+ def constraintjacobian(x):
676
+ return np.array([[2*x[0], -2*x[1]]])
677
+
678
+ def constraintlcoh(x, v):
679
+ return np.array([[2., 0.], [0., -2.]]) * v[0]
680
+
681
+ constraint = NonlinearConstraint(constraint, 1., np.inf,
682
+ constraintjacobian, constraintlcoh)
683
+
684
+ startpoint = [1., 2.]
685
+
686
+ bounds = Bounds([-np.inf, -np.inf], [np.inf, np.inf])
687
+
688
+ result = minimize(
689
+ function,
690
+ startpoint,
691
+ method='trust-constr',
692
+ jac=functionjacobian,
693
+ hessp=functionhvp,
694
+ constraints=[constraint],
695
+ bounds=bounds,
696
+ )
697
+
698
+ assert_array_almost_equal(abs(result.x), np.array([1, 0]), decimal=4)
699
+
700
+
701
+ def test_bug_11886():
702
+ def opt(x):
703
+ return x[0]**2+x[1]**2
704
+
705
+ with np.testing.suppress_warnings() as sup:
706
+ sup.filter(PendingDeprecationWarning)
707
+ A = np.matrix(np.diag([1, 1]))
708
+ lin_cons = LinearConstraint(A, -1, np.inf)
709
+ # just checking that there are no errors
710
+ minimize(opt, 2*[1], constraints = lin_cons)
711
+
712
+
713
+ # Remove xfail when gh-11649 is resolved
714
+ @pytest.mark.xfail(reason="Known bug in trust-constr; see gh-11649.",
715
+ strict=True)
716
+ def test_gh11649():
717
+ bnds = Bounds(lb=[-1, -1], ub=[1, 1], keep_feasible=True)
718
+
719
+ def assert_inbounds(x):
720
+ assert np.all(x >= bnds.lb)
721
+ assert np.all(x <= bnds.ub)
722
+
723
+ def obj(x):
724
+ assert_inbounds(x)
725
+ return np.exp(x[0])*(4*x[0]**2 + 2*x[1]**2 + 4*x[0]*x[1] + 2*x[1] + 1)
726
+
727
+ def nce(x):
728
+ assert_inbounds(x)
729
+ return x[0]**2 + x[1]
730
+
731
+ def nci(x):
732
+ assert_inbounds(x)
733
+ return x[0]*x[1]
734
+
735
+ x0 = np.array((0.99, -0.99))
736
+ nlcs = [NonlinearConstraint(nci, -10, np.inf),
737
+ NonlinearConstraint(nce, 1, 1)]
738
+
739
+ res = minimize(fun=obj, x0=x0, method='trust-constr',
740
+ bounds=bnds, constraints=nlcs)
741
+ assert res.success
742
+ assert_inbounds(res.x)
743
+ assert nlcs[0].lb < nlcs[0].fun(res.x) < nlcs[0].ub
744
+ assert_allclose(nce(res.x), nlcs[1].ub)
745
+
746
+ ref = minimize(fun=obj, x0=x0, method='slsqp',
747
+ bounds=bnds, constraints=nlcs)
748
+ assert_allclose(res.fun, ref.fun)
749
+
750
+
751
+ def test_gh20665_too_many_constraints():
752
+ # gh-20665 reports a confusing error message when there are more equality
753
+ # constraints than variables. Check that the error message is improved.
754
+ message = "...more equality constraints than independent variables..."
755
+ with pytest.raises(ValueError, match=message):
756
+ x0 = np.ones((2,))
757
+ A_eq, b_eq = np.arange(6).reshape((3, 2)), np.ones((3,))
758
+ g = NonlinearConstraint(lambda x: A_eq @ x, lb=b_eq, ub=b_eq)
759
+ minimize(rosen, x0, method='trust-constr', constraints=[g])
760
+ # no error with `SVDFactorization`
761
+ with np.testing.suppress_warnings() as sup:
762
+ sup.filter(UserWarning)
763
+ minimize(rosen, x0, method='trust-constr', constraints=[g],
764
+ options={'factorization_method': 'SVDFactorization'})
765
+
766
+
767
+ class TestBoundedNelderMead:
768
+
769
+ @pytest.mark.parametrize('bounds, x_opt',
770
+ [(Bounds(-np.inf, np.inf), Rosenbrock().x_opt),
771
+ (Bounds(-np.inf, -0.8), [-0.8, -0.8]),
772
+ (Bounds(3.0, np.inf), [3.0, 9.0]),
773
+ (Bounds([3.0, 1.0], [4.0, 5.0]), [3., 5.]),
774
+ ])
775
+ def test_rosen_brock_with_bounds(self, bounds, x_opt):
776
+ prob = Rosenbrock()
777
+ with suppress_warnings() as sup:
778
+ sup.filter(UserWarning, "Initial guess is not within "
779
+ "the specified bounds")
780
+ result = minimize(prob.fun, [-10, -10],
781
+ method='Nelder-Mead',
782
+ bounds=bounds)
783
+ assert np.less_equal(bounds.lb, result.x).all()
784
+ assert np.less_equal(result.x, bounds.ub).all()
785
+ assert np.allclose(prob.fun(result.x), result.fun)
786
+ assert np.allclose(result.x, x_opt, atol=1.e-3)
787
+
788
+ def test_equal_all_bounds(self):
789
+ prob = Rosenbrock()
790
+ bounds = Bounds([4.0, 5.0], [4.0, 5.0])
791
+ with suppress_warnings() as sup:
792
+ sup.filter(UserWarning, "Initial guess is not within "
793
+ "the specified bounds")
794
+ result = minimize(prob.fun, [-10, 8],
795
+ method='Nelder-Mead',
796
+ bounds=bounds)
797
+ assert np.allclose(result.x, [4.0, 5.0])
798
+
799
+ def test_equal_one_bounds(self):
800
+ prob = Rosenbrock()
801
+ bounds = Bounds([4.0, 5.0], [4.0, 20.0])
802
+ with suppress_warnings() as sup:
803
+ sup.filter(UserWarning, "Initial guess is not within "
804
+ "the specified bounds")
805
+ result = minimize(prob.fun, [-10, 8],
806
+ method='Nelder-Mead',
807
+ bounds=bounds)
808
+ assert np.allclose(result.x, [4.0, 16.0])
809
+
810
+ def test_invalid_bounds(self):
811
+ prob = Rosenbrock()
812
+ message = 'An upper bound is less than the corresponding lower bound.'
813
+ with pytest.raises(ValueError, match=message):
814
+ bounds = Bounds([-np.inf, 1.0], [4.0, -5.0])
815
+ minimize(prob.fun, [-10, 3],
816
+ method='Nelder-Mead',
817
+ bounds=bounds)
818
+
819
+ @pytest.mark.xfail(reason="Failing on Azure Linux and macOS builds, "
820
+ "see gh-13846")
821
+ def test_outside_bounds_warning(self):
822
+ prob = Rosenbrock()
823
+ message = "Initial guess is not within the specified bounds"
824
+ with pytest.warns(UserWarning, match=message):
825
+ bounds = Bounds([-np.inf, 1.0], [4.0, 5.0])
826
+ minimize(prob.fun, [-10, 8],
827
+ method='Nelder-Mead',
828
+ bounds=bounds)
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_nnls.py ADDED
@@ -0,0 +1,318 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_allclose
3
+ from pytest import raises as assert_raises
4
+ from scipy.optimize import nnls
5
+
6
+
7
+ class TestNNLS:
8
+ def setup_method(self):
9
+ self.rng = np.random.default_rng(1685225766635251)
10
+
11
+ def test_nnls(self):
12
+ a = np.arange(25.0).reshape(-1, 5)
13
+ x = np.arange(5.0)
14
+ y = a @ x
15
+ x, res = nnls(a, y)
16
+ assert res < 1e-7
17
+ assert np.linalg.norm((a @ x) - y) < 1e-7
18
+
19
+ def test_nnls_tall(self):
20
+ a = self.rng.uniform(low=-10, high=10, size=[50, 10])
21
+ x = np.abs(self.rng.uniform(low=-2, high=2, size=[10]))
22
+ x[::2] = 0
23
+ b = a @ x
24
+ xact, rnorm = nnls(a, b, atol=500*np.linalg.norm(a, 1)*np.spacing(1.))
25
+ assert_allclose(xact, x, rtol=0., atol=1e-10)
26
+ assert rnorm < 1e-12
27
+
28
+ def test_nnls_wide(self):
29
+ # If too wide then problem becomes too ill-conditioned ans starts
30
+ # emitting warnings, hence small m, n difference.
31
+ a = self.rng.uniform(low=-10, high=10, size=[100, 120])
32
+ x = np.abs(self.rng.uniform(low=-2, high=2, size=[120]))
33
+ x[::2] = 0
34
+ b = a @ x
35
+ xact, rnorm = nnls(a, b, atol=500*np.linalg.norm(a, 1)*np.spacing(1.))
36
+ assert_allclose(xact, x, rtol=0., atol=1e-10)
37
+ assert rnorm < 1e-12
38
+
39
+ def test_maxiter(self):
40
+ # test that maxiter argument does stop iterations
41
+ a = self.rng.uniform(size=(5, 10))
42
+ b = self.rng.uniform(size=5)
43
+ with assert_raises(RuntimeError):
44
+ nnls(a, b, maxiter=1)
45
+
46
+ def test_nnls_inner_loop_case1(self):
47
+ # See gh-20168
48
+ n = np.array(
49
+ [3, 2, 0, 1, 1, 1, 3, 8, 14, 16, 29, 23, 41, 47, 53, 57, 67, 76,
50
+ 103, 89, 97, 94, 85, 95, 78, 78, 78, 77, 73, 50, 50, 56, 68, 98,
51
+ 95, 112, 134, 145, 158, 172, 213, 234, 222, 215, 216, 216, 206,
52
+ 183, 135, 156, 110, 92, 63, 60, 52, 29, 20, 16, 12, 5, 5, 5, 1, 2,
53
+ 3, 0, 2])
54
+ k = np.array(
55
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
56
+ 0., 0., 0., 0.7205812007860187, 0., 1.4411624015720375,
57
+ 0.7205812007860187, 2.882324803144075, 5.76464960628815,
58
+ 5.76464960628815, 12.249880413362318, 15.132205216506394,
59
+ 20.176273622008523, 27.382085629868712, 48.27894045266326,
60
+ 47.558359251877235, 68.45521407467177, 97.99904330689854,
61
+ 108.0871801179028, 135.46926574777152, 140.51333415327366,
62
+ 184.4687874012208, 171.49832578707245, 205.36564222401535,
63
+ 244.27702706646033, 214.01261663344755, 228.42424064916793,
64
+ 232.02714665309804, 205.36564222401535, 172.9394881886445,
65
+ 191.67459940908097, 162.1307701768542, 153.48379576742198,
66
+ 110.96950492104689, 103.04311171240067, 86.46974409432225,
67
+ 60.528820866025576, 43.234872047161126, 23.779179625938617,
68
+ 24.499760826724636, 17.29394881886445, 11.5292992125763,
69
+ 5.76464960628815, 5.044068405502131, 3.6029060039300935, 0.,
70
+ 2.882324803144075, 0., 0., 0.])
71
+ d = np.array(
72
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
73
+ 0., 0., 0., 0.003889242101538, 0., 0.007606268390096, 0.,
74
+ 0.025457371599973, 0.036952882091577, 0., 0.08518359183449,
75
+ 0.048201126400243, 0.196234990022205, 0.144116240157247,
76
+ 0.171145134062442, 0., 0., 0.269555036538714, 0., 0., 0.,
77
+ 0.010893241091872, 0., 0., 0., 0., 0., 0., 0., 0.,
78
+ 0.048167058272886, 0.011238724891049, 0., 0., 0.055162603456078,
79
+ 0., 0., 0., 0., 0.027753339088588, 0., 0., 0., 0., 0., 0., 0., 0.,
80
+ 0., 0.])
81
+ # The following code sets up a system of equations such that
82
+ # $k_i-p_i*n_i$ is minimized for $p_i$ with weights $n_i$ and
83
+ # monotonicity constraints on $p_i$. This translates to a system of
84
+ # equations of the form $k_i - (d_1 + ... + d_i) * n_i$ and
85
+ # non-negativity constraints on the $d_i$. If $n_i$ is zero the
86
+ # system is modified such that $d_i - d_{i+1}$ is then minimized.
87
+ N = len(n)
88
+ A = np.diag(n) @ np.tril(np.ones((N, N)))
89
+ w = n ** 0.5
90
+
91
+ nz = (n == 0).nonzero()[0]
92
+ A[nz, nz] = 1
93
+ A[nz, np.minimum(nz + 1, N - 1)] = -1
94
+ w[nz] = 1
95
+ k[nz] = 0
96
+ W = np.diag(w)
97
+
98
+ # Small perturbations can already make the infinite loop go away (just
99
+ # uncomment the next line)
100
+ k = k + 1e-10 * np.random.normal(size=N)
101
+ dact, _ = nnls(W @ A, W @ k)
102
+ assert_allclose(dact, d, rtol=0., atol=1e-10)
103
+
104
+ def test_nnls_inner_loop_case2(self):
105
+ # See gh-20168
106
+ n = np.array(
107
+ [1, 0, 1, 2, 2, 2, 3, 3, 5, 4, 14, 14, 19, 26, 36, 42, 36, 64, 64,
108
+ 64, 81, 85, 85, 95, 95, 95, 75, 76, 69, 81, 62, 59, 68, 64, 71, 67,
109
+ 74, 78, 118, 135, 153, 159, 210, 195, 218, 243, 236, 215, 196, 175,
110
+ 185, 149, 144, 103, 104, 75, 56, 40, 32, 26, 17, 9, 12, 8, 2, 1, 1,
111
+ 1])
112
+ k = np.array(
113
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
114
+ 0., 0., 0., 0., 0., 0.7064355064917867, 0., 0., 2.11930651947536,
115
+ 0.7064355064917867, 0., 3.5321775324589333, 7.064355064917867,
116
+ 11.302968103868587, 16.95445215580288, 20.486629688261814,
117
+ 20.486629688261814, 37.44108184406469, 55.808405012851146,
118
+ 78.41434122058831, 103.13958394780086, 105.965325973768,
119
+ 125.74552015553803, 149.057891869767, 176.60887662294667,
120
+ 197.09550631120848, 211.930651947536, 204.86629688261814,
121
+ 233.8301526487814, 221.1143135319292, 195.6826352982249,
122
+ 197.80194181770025, 191.4440222592742, 187.91184472681525,
123
+ 144.11284332432447, 131.39700420747232, 116.5618585711448,
124
+ 93.24948685691584, 89.01087381796512, 53.68909849337579,
125
+ 45.211872415474346, 31.083162285638615, 24.72524272721253,
126
+ 16.95445215580288, 9.890097090885014, 9.890097090885014,
127
+ 2.8257420259671466, 2.8257420259671466, 1.4128710129835733,
128
+ 0.7064355064917867, 1.4128710129835733])
129
+ d = np.array(
130
+ [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
131
+ 0., 0., 0., 0., 0., 0.0021916146355674473, 0., 0.,
132
+ 0.011252740799789484, 0., 0., 0.037746623295934395,
133
+ 0.03602328132946222, 0.09509167709829734, 0.10505765870204821,
134
+ 0.01391037014274718, 0.0188296228752321, 0.20723559202324254,
135
+ 0.3056220879462608, 0.13304643490426477, 0., 0., 0., 0., 0., 0.,
136
+ 0., 0., 0., 0., 0., 0.043185876949706214, 0.0037266261379722554,
137
+ 0., 0., 0., 0., 0., 0.094797899357143, 0., 0., 0., 0., 0., 0., 0.,
138
+ 0., 0.23450935613672663, 0., 0., 0.07064355064917871])
139
+ # The following code sets up a system of equations such that
140
+ # $k_i-p_i*n_i$ is minimized for $p_i$ with weights $n_i$ and
141
+ # monotonicity constraints on $p_i$. This translates to a system of
142
+ # equations of the form $k_i - (d_1 + ... + d_i) * n_i$ and
143
+ # non-negativity constraints on the $d_i$. If $n_i$ is zero the
144
+ # system is modified such that $d_i - d_{i+1}$ is then minimized.
145
+ N = len(n)
146
+ A = np.diag(n) @ np.tril(np.ones((N, N)))
147
+ w = n ** 0.5
148
+
149
+ nz = (n == 0).nonzero()[0]
150
+ A[nz, nz] = 1
151
+ A[nz, np.minimum(nz + 1, N - 1)] = -1
152
+ w[nz] = 1
153
+ k[nz] = 0
154
+ W = np.diag(w)
155
+
156
+ dact, _ = nnls(W @ A, W @ k, atol=1e-7)
157
+
158
+ p = np.cumsum(dact)
159
+ assert np.all(dact >= 0)
160
+ assert np.linalg.norm(k - n * p, ord=np.inf) < 28
161
+ assert_allclose(dact, d, rtol=0., atol=1e-10)
162
+
163
+ def test_nnls_gh20302(self):
164
+ # See gh-20302
165
+ A = np.array(
166
+ [0.33408569134321575, 0.11136189711440525, 0.049140798007949286,
167
+ 0.03712063237146841, 0.055680948557202625, 0.16642814595936478,
168
+ 0.11095209730624318, 0.09791993030943345, 0.14793612974165757,
169
+ 0.44380838922497273, 0.11099502671044059, 0.11099502671044059,
170
+ 0.14693672599330593, 0.3329850801313218, 1.498432860590948,
171
+ 0.0832374225132955, 0.11098323001772734, 0.19589481249472837,
172
+ 0.5919105600945457, 3.5514633605672747, 0.06658716751427037,
173
+ 0.11097861252378394, 0.24485832778293645, 0.9248217710315328,
174
+ 6.936163282736496, 0.05547609388181014, 0.11095218776362029,
175
+ 0.29376003042571264, 1.3314262531634435, 11.982836278470993,
176
+ 0.047506113282944136, 0.11084759766020298, 0.3423969672933396,
177
+ 1.8105107617833156, 19.010362998724812, 0.041507335004505576,
178
+ 0.11068622667868154, 0.39074115283013344, 2.361306169145206,
179
+ 28.335674029742474, 0.03682846280947718, 0.11048538842843154,
180
+ 0.4387861797121048, 2.9831054875676517, 40.2719240821633,
181
+ 0.03311278164362387, 0.11037593881207958, 0.4870572300443105,
182
+ 3.6791979604026523, 55.187969406039784, 0.030079304092299915,
183
+ 0.11029078167176636, 0.5353496017200152, 4.448394860761242,
184
+ 73.3985152025605, 0.02545939709595835, 0.11032405408248619,
185
+ 0.6328767609778363, 6.214921713313388, 121.19097340961108,
186
+ 0.022080881724881523, 0.11040440862440762, 0.7307742886903428,
187
+ 8.28033064683057, 186.30743955368786, 0.020715838214945492,
188
+ 0.1104844704797093, 0.7800578384588346, 9.42800814760186,
189
+ 226.27219554244465, 0.01843179728340054, 0.11059078370040323,
190
+ 0.8784095015912599, 11.94380463964355, 322.48272527037585,
191
+ 0.015812787653789077, 0.11068951357652354, 1.0257259848595766,
192
+ 16.27135849574896, 512.5477926160922, 0.014438550529330062,
193
+ 0.11069555405819713, 1.1234754801775881, 19.519316032262093,
194
+ 673.4164031130423, 0.012760770585072577, 0.110593345070629,
195
+ 1.2688431112524712, 24.920367089248398, 971.8943164806875,
196
+ 0.011427556646114315, 0.11046638091243838, 1.413623342459821,
197
+ 30.967408782453557, 1347.0822820367298, 0.010033330264470307,
198
+ 0.11036663290917338, 1.6071533470570285, 40.063087746029936,
199
+ 1983.122843428482, 0.008950061496507258, 0.11038409179025618,
200
+ 1.802244865119193, 50.37194055362024, 2795.642700725923,
201
+ 0.008071078821135658, 0.11030474388885401, 1.9956465761433504,
202
+ 61.80742482572119, 3801.1566267818534, 0.007191031207777556,
203
+ 0.11026247851925586, 2.238160187262168, 77.7718015155818,
204
+ 5366.2543045751445, 0.00636834224248, 0.11038459886965334,
205
+ 2.5328963107984297, 99.49331844784753, 7760.4788389321075,
206
+ 0.005624259098118485, 0.11061042892966355, 2.879742607664547,
207
+ 128.34496770138628, 11358.529641572684, 0.0050354270614989555,
208
+ 0.11077939535297703, 3.2263279459292575, 160.85168205252265,
209
+ 15924.316523199741, 0.0044997853165982555, 0.1109947044760903,
210
+ 3.6244287189055613, 202.60233390369015, 22488.859063309606,
211
+ 0.004023601950058174, 0.1113196539516095, 4.07713905729421,
212
+ 255.6270320242126, 31825.565487014468, 0.0036024117873727094,
213
+ 0.111674765408554, 4.582933773135057, 321.9583486728612,
214
+ 44913.18963986413, 0.003201503089582304, 0.11205260813538065,
215
+ 5.191786833370116, 411.79333489752383, 64857.45024636,
216
+ 0.0028633044552448853, 0.11262330857296549, 5.864295861648949,
217
+ 522.7223161899905, 92521.84996562831, 0.0025691897303891965,
218
+ 0.11304434813712465, 6.584584405106342, 656.5615739804199,
219
+ 129999.19164812315, 0.0022992911894424675, 0.11343169867916175,
220
+ 7.4080129906658305, 828.2026426227864, 183860.98666225857,
221
+ 0.0020449922071108764, 0.11383789952917212, 8.388975556433872,
222
+ 1058.2750599896935, 265097.9025274183, 0.001831274615120854,
223
+ 0.11414945100919989, 9.419351803810935, 1330.564050780237,
224
+ 373223.2162438565, 0.0016363333454631633, 0.11454333418242145,
225
+ 10.6143816579462, 1683.787012481595, 530392.9089317025,
226
+ 0.0014598610433380044, 0.11484240207592301, 11.959688127956882,
227
+ 2132.0874753402027, 754758.9662704318, 0.0012985240015312626,
228
+ 0.11513579480243862, 13.514425358573531, 2715.5160990137824,
229
+ 1083490.9235064993, 0.0011614735761289934, 0.11537304189548002,
230
+ 15.171418602667567, 3415.195870828736, 1526592.554260445,
231
+ 0.0010347472698811352, 0.11554677847006009, 17.080800985009617,
232
+ 4322.412404600832, 2172012.2333119176, 0.0009232988811258664,
233
+ 0.1157201264344419, 19.20004861829407, 5453.349531598553,
234
+ 3075689.135821584, 0.0008228871862975205, 0.11602709326795038,
235
+ 21.65735242414206, 6920.203923780365, 4390869.389638642,
236
+ 0.00073528900066722, 0.11642075843897651, 24.40223571298994,
237
+ 8755.811207598026, 6238515.485413593, 0.0006602764384729194,
238
+ 0.11752920604817965, 27.694443541914293, 11171.386093291572,
239
+ 8948280.260726549, 0.0005935538977939806, 0.11851292825953147,
240
+ 31.325508920763063, 14174.185724149384, 12735505.873148222,
241
+ 0.0005310755355633124, 0.11913794514470308, 35.381052949627765,
242
+ 17987.010118815077, 18157886.71494382, 0.00047239949671590953,
243
+ 0.1190446731724092, 39.71342528048061, 22679.438775422022,
244
+ 25718483.571328573, 0.00041829129789387623, 0.11851586773659825,
245
+ 44.45299332965028, 28542.57147989741, 36391778.63686921,
246
+ 0.00037321512015419886, 0.11880681324908665, 50.0668539579632,
247
+ 36118.26128449941, 51739409.29004541, 0.0003315539616702064,
248
+ 0.1184752823034871, 56.04387059062639, 45383.29960621684,
249
+ 72976345.76679668, 0.00029456064937920213, 0.11831519416731286,
250
+ 62.91195073220101, 57265.53993693082, 103507463.43600245,
251
+ 0.00026301867496859703, 0.11862142241083726, 70.8217262087034,
252
+ 72383.14781936012, 146901598.49939138, 0.00023618734450420032,
253
+ 0.11966825454879482, 80.26535457124461, 92160.51176984518,
254
+ 210125966.835247, 0.00021165918071578316, 0.12043407382728061,
255
+ 90.7169587544247, 116975.56852918258, 299515943.218972,
256
+ 0.00018757727511329545, 0.11992440455576689, 101.49899864101785,
257
+ 147056.26174166967, 423080865.0307836, 0.00016654469159895833,
258
+ 0.11957908856805206, 113.65970431102812, 184937.67016486943,
259
+ 597533612.3026931, 0.00014717439179415048, 0.11872067604728138,
260
+ 126.77899683346702, 231758.58906776624, 841283678.3159915,
261
+ 0.00012868496382376066, 0.1166314722122684, 139.93635237349534,
262
+ 287417.30847929465, 1172231492.6328032, 0.00011225559452625302,
263
+ 0.11427619522772557, 154.0034283704458, 355281.4912295324,
264
+ 1627544511.322488, 9.879511142981067e-05, 0.11295574406808354,
265
+ 170.96532050841535, 442971.0111288653, 2279085852.2580123,
266
+ 8.71257780313587e-05, 0.11192758284428547, 190.35067416684697,
267
+ 554165.2523674504, 3203629323.93623, 7.665069027765277e-05,
268
+ 0.11060694607065294, 211.28835951100046, 690933.608546013,
269
+ 4486577387.093535, 6.734021094824451e-05, 0.10915848194710433,
270
+ 234.24338803525194, 860487.9079859136, 6276829044.8032465,
271
+ 5.9191625040287665e-05, 0.10776821865668373, 259.7454711820425,
272
+ 1071699.0387579766, 8780430224.544102, 5.1856803674907676e-05,
273
+ 0.10606444911641115, 287.1843540288165, 1331126.3723998806,
274
+ 12251687131.5685, 4.503421404759231e-05, 0.10347361247668461,
275
+ 314.7338642485931, 1638796.0697522392, 16944331963.203278,
276
+ 3.90470387455642e-05, 0.1007804070023012, 344.3427560918527,
277
+ 2014064.4865519698, 23392351979.057854, 3.46557661636393e-05,
278
+ 0.10046706610839032, 385.56603915081587, 2533036.2523656,
279
+ 33044724430.235435, 3.148745865254635e-05, 0.1025441570117926,
280
+ 442.09038234164746, 3262712.3882769793, 47815050050.199135,
281
+ 2.9790762078715404e-05, 0.1089845379379672, 527.8068231298969,
282
+ 4375751.903321453, 72035815708.42941, 2.8772639817606534e-05,
283
+ 0.11823636789048445, 643.2048194503195, 5989838.001888927,
284
+ 110764084330.93005, 2.7951691815106586e-05, 0.12903432664913705,
285
+ 788.5500418523591, 8249371.000613411, 171368308481.2427,
286
+ 2.6844392423114212e-05, 0.1392060709754626, 955.6296403631383,
287
+ 11230229.319931043, 262063016295.25085, 2.499458273851386e-05,
288
+ 0.14559344445184325, 1122.7022399726002, 14820229.698461473,
289
+ 388475270970.9214, 2.337386729019776e-05, 0.15294300496886065,
290
+ 1324.8158105672455, 19644861.137128454, 578442936182.7473,
291
+ 2.0081014872174113e-05, 0.14760215298210377, 1436.2385042492353,
292
+ 23923681.729276657, 791311658718.4193, 1.773374462991839e-05,
293
+ 0.14642752940923615, 1600.5596278736678, 29949429.82503553,
294
+ 1112815989293.9326, 1.5303115839590797e-05, 0.14194150045081785,
295
+ 1742.873058605698, 36634451.931305364, 1529085389160.7544,
296
+ 1.3148448731163076e-05, 0.13699368732998807, 1889.5284359054356,
297
+ 44614279.74469635, 2091762812969.9607, 1.1739194407590062e-05,
298
+ 0.13739553134643406, 2128.794599579694, 56462810.11822766,
299
+ 2973783283306.8145, 1.0293367506254706e-05, 0.13533033372723272,
300
+ 2355.372854690074, 70176508.28667311, 4151852759764.441,
301
+ 9.678312586863569e-06, 0.14293577249119244, 2794.531827932675,
302
+ 93528671.31952812, 6215821967224.52, -1.174086323572049e-05,
303
+ 0.1429501325944908, 3139.4804810720925, 118031680.16618933,
304
+ -6466892421886.174, -2.1188265307407812e-05, 0.1477108290912869,
305
+ 3644.1133424610953, 153900132.62392554, -4828013117542.036,
306
+ -8.614483025123122e-05, 0.16037100755883044, 4444.386620899393,
307
+ 210846007.89660168, -1766340937974.433, 4.981445776141726e-05,
308
+ 0.16053420251962536, 4997.558254401547, 266327328.4755411,
309
+ 3862250287024.725, 1.8500019169456637e-05, 0.15448417164977674,
310
+ 5402.289867444643, 323399508.1475582, 12152445411933.408,
311
+ -5.647882376069748e-05, 0.1406372975946189, 5524.633133597753,
312
+ 371512945.9909363, -4162951345292.1514, 2.8048523486337994e-05,
313
+ 0.13183417571186926, 5817.462495763679, 439447252.3728975,
314
+ 9294740538175.03]).reshape(89, 5)
315
+ b = np.ones(89, dtype=np.float64)
316
+ sol, rnorm = nnls(A, b)
317
+ assert_allclose(sol, np.array([0.61124315, 8.22262829, 0., 0., 0.]))
318
+ assert_allclose(rnorm, 1.0556460808977297)
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_nonlin.py ADDED
@@ -0,0 +1,534 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Unit tests for nonlinear solvers
2
+ Author: Ondrej Certik
3
+ May 2007
4
+ """
5
+ from numpy.testing import assert_
6
+ import pytest
7
+
8
+ from scipy.optimize import _nonlin as nonlin, root
9
+ from scipy.sparse import csr_array
10
+ from numpy import diag, dot
11
+ from numpy.linalg import inv
12
+ import numpy as np
13
+ import scipy
14
+
15
+ from .test_minpack import pressure_network
16
+
17
+ SOLVERS = {'anderson': nonlin.anderson,
18
+ 'diagbroyden': nonlin.diagbroyden,
19
+ 'linearmixing': nonlin.linearmixing,
20
+ 'excitingmixing': nonlin.excitingmixing,
21
+ 'broyden1': nonlin.broyden1,
22
+ 'broyden2': nonlin.broyden2,
23
+ 'krylov': nonlin.newton_krylov}
24
+ MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1,
25
+ 'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov}
26
+
27
+ # ----------------------------------------------------------------------------
28
+ # Test problems
29
+ # ----------------------------------------------------------------------------
30
+
31
+
32
+ def F(x):
33
+ x = np.asarray(x).T
34
+ d = diag([3, 2, 1.5, 1, 0.5])
35
+ c = 0.01
36
+ f = -d @ x - c * float(x.T @ x) * x
37
+ return f
38
+
39
+
40
+ F.xin = [1, 1, 1, 1, 1]
41
+ F.KNOWN_BAD = {}
42
+ F.JAC_KSP_BAD = {}
43
+ F.ROOT_JAC_KSP_BAD = {}
44
+
45
+
46
+ def F2(x):
47
+ return x
48
+
49
+
50
+ F2.xin = [1, 2, 3, 4, 5, 6]
51
+ F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
52
+ 'excitingmixing': nonlin.excitingmixing}
53
+ F2.JAC_KSP_BAD = {}
54
+ F2.ROOT_JAC_KSP_BAD = {}
55
+
56
+
57
+ def F2_lucky(x):
58
+ return x
59
+
60
+
61
+ F2_lucky.xin = [0, 0, 0, 0, 0, 0]
62
+ F2_lucky.KNOWN_BAD = {}
63
+ F2_lucky.JAC_KSP_BAD = {}
64
+ F2_lucky.ROOT_JAC_KSP_BAD = {}
65
+
66
+
67
+ def F3(x):
68
+ A = np.array([[-2, 1, 0.], [1, -2, 1], [0, 1, -2]])
69
+ b = np.array([1, 2, 3.])
70
+ return A @ x - b
71
+
72
+
73
+ F3.xin = [1, 2, 3]
74
+ F3.KNOWN_BAD = {}
75
+ F3.JAC_KSP_BAD = {}
76
+ F3.ROOT_JAC_KSP_BAD = {}
77
+
78
+
79
+ def F4_powell(x):
80
+ A = 1e4
81
+ return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)]
82
+
83
+
84
+ F4_powell.xin = [-1, -2]
85
+ F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
86
+ 'excitingmixing': nonlin.excitingmixing,
87
+ 'diagbroyden': nonlin.diagbroyden}
88
+ # In the extreme case, it does not converge for nolinear problem solved by
89
+ # MINRES and root problem solved by GMRES/BiCGStab/CGS/MINRES/TFQMR when using
90
+ # Krylov method to approximate Jacobian
91
+ F4_powell.JAC_KSP_BAD = {'minres'}
92
+ F4_powell.ROOT_JAC_KSP_BAD = {'gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr'}
93
+
94
+
95
+ def F5(x):
96
+ return pressure_network(x, 4, np.array([.5, .5, .5, .5]))
97
+
98
+
99
+ F5.xin = [2., 0, 2, 0]
100
+ F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
101
+ 'linearmixing': nonlin.linearmixing,
102
+ 'diagbroyden': nonlin.diagbroyden}
103
+ # In the extreme case, the Jacobian inversion yielded zero vector for nonlinear
104
+ # problem solved by CGS/MINRES and it does not converge for root problem solved
105
+ # by MINRES and when using Krylov method to approximate Jacobian
106
+ F5.JAC_KSP_BAD = {'cgs', 'minres'}
107
+ F5.ROOT_JAC_KSP_BAD = {'minres'}
108
+
109
+
110
+ def F6(x):
111
+ x1, x2 = x
112
+ J0 = np.array([[-4.256, 14.7],
113
+ [0.8394989, 0.59964207]])
114
+ v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6,
115
+ np.sin(x2 * np.exp(x1) - 1)])
116
+ return -np.linalg.solve(J0, v)
117
+
118
+
119
+ F6.xin = [-0.5, 1.4]
120
+ F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
121
+ 'linearmixing': nonlin.linearmixing,
122
+ 'diagbroyden': nonlin.diagbroyden}
123
+ F6.JAC_KSP_BAD = {}
124
+ F6.ROOT_JAC_KSP_BAD = {}
125
+
126
+
127
+ # ----------------------------------------------------------------------------
128
+ # Tests
129
+ # ----------------------------------------------------------------------------
130
+
131
+
132
+ class TestNonlin:
133
+ """
134
+ Check the Broyden methods for a few test problems.
135
+
136
+ broyden1, broyden2, and newton_krylov must succeed for
137
+ all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
138
+
139
+ """
140
+
141
+ def _check_nonlin_func(self, f, func, f_tol=1e-2):
142
+ # Test all methods mentioned in the class `KrylovJacobian`
143
+ if func == SOLVERS['krylov']:
144
+ for method in ['gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']:
145
+ if method in f.JAC_KSP_BAD:
146
+ continue
147
+
148
+ x = func(f, f.xin, method=method, line_search=None,
149
+ f_tol=f_tol, maxiter=200, verbose=0)
150
+ assert_(np.absolute(f(x)).max() < f_tol)
151
+
152
+ x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
153
+ assert_(np.absolute(f(x)).max() < f_tol)
154
+
155
+ def _check_root(self, f, method, f_tol=1e-2):
156
+ # Test Krylov methods
157
+ if method == 'krylov':
158
+ for jac_method in ['gmres', 'bicgstab', 'cgs', 'minres', 'tfqmr']:
159
+ if jac_method in f.ROOT_JAC_KSP_BAD:
160
+ continue
161
+
162
+ res = root(f, f.xin, method=method,
163
+ options={'ftol': f_tol, 'maxiter': 200,
164
+ 'disp': 0,
165
+ 'jac_options': {'method': jac_method}})
166
+ assert_(np.absolute(res.fun).max() < f_tol)
167
+
168
+ res = root(f, f.xin, method=method,
169
+ options={'ftol': f_tol, 'maxiter': 200, 'disp': 0})
170
+ assert_(np.absolute(res.fun).max() < f_tol)
171
+
172
+ @pytest.mark.xfail
173
+ def _check_func_fail(self, *a, **kw):
174
+ pass
175
+
176
+ @pytest.mark.filterwarnings('ignore::DeprecationWarning')
177
+ def test_problem_nonlin(self):
178
+ for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
179
+ for func in SOLVERS.values():
180
+ if func in f.KNOWN_BAD.values():
181
+ if func in MUST_WORK.values():
182
+ self._check_func_fail(f, func)
183
+ continue
184
+ self._check_nonlin_func(f, func)
185
+
186
+ @pytest.mark.filterwarnings('ignore::DeprecationWarning')
187
+ @pytest.mark.parametrize("method", ['lgmres', 'gmres', 'bicgstab', 'cgs',
188
+ 'minres', 'tfqmr'])
189
+ def test_tol_norm_called(self, method):
190
+ # Check that supplying tol_norm keyword to nonlin_solve works
191
+ self._tol_norm_used = False
192
+
193
+ def local_norm_func(x):
194
+ self._tol_norm_used = True
195
+ return np.absolute(x).max()
196
+
197
+ nonlin.newton_krylov(F, F.xin, method=method, f_tol=1e-2,
198
+ maxiter=200, verbose=0,
199
+ tol_norm=local_norm_func)
200
+ assert_(self._tol_norm_used)
201
+
202
+ @pytest.mark.filterwarnings('ignore::DeprecationWarning')
203
+ def test_problem_root(self):
204
+ for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
205
+ for meth in SOLVERS:
206
+ if meth in f.KNOWN_BAD:
207
+ if meth in MUST_WORK:
208
+ self._check_func_fail(f, meth)
209
+ continue
210
+ self._check_root(f, meth)
211
+
212
+ def test_no_convergence(self):
213
+ def wont_converge(x):
214
+ return 1e3 + x
215
+
216
+ with pytest.raises(scipy.optimize.NoConvergence):
217
+ nonlin.newton_krylov(wont_converge, xin=[0], maxiter=1)
218
+
219
+
220
+ class TestSecant:
221
+ """Check that some Jacobian approximations satisfy the secant condition"""
222
+
223
+ xs = [np.array([1., 2., 3., 4., 5.]),
224
+ np.array([2., 3., 4., 5., 1.]),
225
+ np.array([3., 4., 5., 1., 2.]),
226
+ np.array([4., 5., 1., 2., 3.]),
227
+ np.array([9., 1., 9., 1., 3.]),
228
+ np.array([0., 1., 9., 1., 3.]),
229
+ np.array([5., 5., 7., 1., 1.]),
230
+ np.array([1., 2., 7., 5., 1.]),]
231
+ fs = [x**2 - 1 for x in xs]
232
+
233
+ def _check_secant(self, jac_cls, npoints=1, **kw):
234
+ """
235
+ Check that the given Jacobian approximation satisfies secant
236
+ conditions for last `npoints` points.
237
+ """
238
+ jac = jac_cls(**kw)
239
+ jac.setup(self.xs[0], self.fs[0], None)
240
+ for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
241
+ jac.update(x, f)
242
+
243
+ for k in range(min(npoints, j+1)):
244
+ dx = self.xs[j-k+1] - self.xs[j-k]
245
+ df = self.fs[j-k+1] - self.fs[j-k]
246
+ assert_(np.allclose(dx, jac.solve(df)))
247
+
248
+ # Check that the `npoints` secant bound is strict
249
+ if j >= npoints:
250
+ dx = self.xs[j-npoints+1] - self.xs[j-npoints]
251
+ df = self.fs[j-npoints+1] - self.fs[j-npoints]
252
+ assert_(not np.allclose(dx, jac.solve(df)))
253
+
254
+ def test_broyden1(self):
255
+ self._check_secant(nonlin.BroydenFirst)
256
+
257
+ def test_broyden2(self):
258
+ self._check_secant(nonlin.BroydenSecond)
259
+
260
+ def test_broyden1_update(self):
261
+ # Check that BroydenFirst update works as for a dense matrix
262
+ jac = nonlin.BroydenFirst(alpha=0.1)
263
+ jac.setup(self.xs[0], self.fs[0], None)
264
+
265
+ B = np.identity(5) * (-1/0.1)
266
+
267
+ for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
268
+ df = f - self.fs[last_j]
269
+ dx = x - self.xs[last_j]
270
+ B += (df - dot(B, dx))[:, None] * dx[None, :] / dot(dx, dx)
271
+ jac.update(x, f)
272
+ assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))
273
+
274
+ def test_broyden2_update(self):
275
+ # Check that BroydenSecond update works as for a dense matrix
276
+ jac = nonlin.BroydenSecond(alpha=0.1)
277
+ jac.setup(self.xs[0], self.fs[0], None)
278
+
279
+ H = np.identity(5) * (-0.1)
280
+
281
+ for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
282
+ df = f - self.fs[last_j]
283
+ dx = x - self.xs[last_j]
284
+ H += (dx - dot(H, df))[:, None] * df[None, :] / dot(df, df)
285
+ jac.update(x, f)
286
+ assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))
287
+
288
+ def test_anderson(self):
289
+ # Anderson mixing (with w0=0) satisfies secant conditions
290
+ # for the last M iterates, see [Ey]_
291
+ #
292
+ # .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
293
+ self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
294
+
295
+
296
+ class TestLinear:
297
+ """Solve a linear equation;
298
+ some methods find the exact solution in a finite number of steps"""
299
+
300
+ def _check(self, jac, N, maxiter, complex=False, **kw):
301
+ np.random.seed(123)
302
+
303
+ A = np.random.randn(N, N)
304
+ if complex:
305
+ A = A + 1j*np.random.randn(N, N)
306
+ b = np.random.randn(N)
307
+ if complex:
308
+ b = b + 1j*np.random.randn(N)
309
+
310
+ def func(x):
311
+ return dot(A, x) - b
312
+
313
+ sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter,
314
+ f_tol=1e-6, line_search=None, verbose=0)
315
+ assert_(np.allclose(dot(A, sol), b, atol=1e-6))
316
+
317
+ def test_broyden1(self):
318
+ # Broyden methods solve linear systems exactly in 2*N steps
319
+ self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
320
+ self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)
321
+
322
+ def test_broyden2(self):
323
+ # Broyden methods solve linear systems exactly in 2*N steps
324
+ self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
325
+ self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)
326
+
327
+ def test_anderson(self):
328
+ # Anderson is rather similar to Broyden, if given enough storage space
329
+ self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
330
+ self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)
331
+
332
+ def test_krylov(self):
333
+ # Krylov methods solve linear systems exactly in N inner steps
334
+ self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
335
+ self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
336
+
337
+ def _check_autojac(self, A, b):
338
+ def func(x):
339
+ return A.dot(x) - b
340
+
341
+ def jac(v):
342
+ return A
343
+
344
+ sol = nonlin.nonlin_solve(func, np.zeros(b.shape[0]), jac, maxiter=2,
345
+ f_tol=1e-6, line_search=None, verbose=0)
346
+ np.testing.assert_allclose(A @ sol, b, atol=1e-6)
347
+ # test jac input as array -- not a function
348
+ sol = nonlin.nonlin_solve(func, np.zeros(b.shape[0]), A, maxiter=2,
349
+ f_tol=1e-6, line_search=None, verbose=0)
350
+ np.testing.assert_allclose(A @ sol, b, atol=1e-6)
351
+
352
+ def test_jac_sparse(self):
353
+ A = csr_array([[1, 2], [2, 1]])
354
+ b = np.array([1, -1])
355
+ self._check_autojac(A, b)
356
+ self._check_autojac((1 + 2j) * A, (2 + 2j) * b)
357
+
358
+ def test_jac_ndarray(self):
359
+ A = np.array([[1, 2], [2, 1]])
360
+ b = np.array([1, -1])
361
+ self._check_autojac(A, b)
362
+ self._check_autojac((1 + 2j) * A, (2 + 2j) * b)
363
+
364
+
365
+ class TestJacobianDotSolve:
366
+ """
367
+ Check that solve/dot methods in Jacobian approximations are consistent
368
+ """
369
+
370
+ def _func(self, x):
371
+ return x**2 - 1 + np.dot(self.A, x)
372
+
373
+ def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
374
+ np.random.seed(123)
375
+
376
+ N = 7
377
+
378
+ def rand(*a):
379
+ q = np.random.rand(*a)
380
+ if complex:
381
+ q = q + 1j*np.random.rand(*a)
382
+ return q
383
+
384
+ def assert_close(a, b, msg):
385
+ d = abs(a - b).max()
386
+ f = tol + abs(b).max()*tol
387
+ if d > f:
388
+ raise AssertionError(f'{msg}: err {d:g}')
389
+
390
+ self.A = rand(N, N)
391
+
392
+ # initialize
393
+ x0 = np.random.rand(N)
394
+ jac = jac_cls(**kw)
395
+ jac.setup(x0, self._func(x0), self._func)
396
+
397
+ # check consistency
398
+ for k in range(2*N):
399
+ v = rand(N)
400
+
401
+ if hasattr(jac, '__array__'):
402
+ Jd = np.array(jac)
403
+ if hasattr(jac, 'solve'):
404
+ Gv = jac.solve(v)
405
+ Gv2 = np.linalg.solve(Jd, v)
406
+ assert_close(Gv, Gv2, 'solve vs array')
407
+ if hasattr(jac, 'rsolve'):
408
+ Gv = jac.rsolve(v)
409
+ Gv2 = np.linalg.solve(Jd.T.conj(), v)
410
+ assert_close(Gv, Gv2, 'rsolve vs array')
411
+ if hasattr(jac, 'matvec'):
412
+ Jv = jac.matvec(v)
413
+ Jv2 = np.dot(Jd, v)
414
+ assert_close(Jv, Jv2, 'dot vs array')
415
+ if hasattr(jac, 'rmatvec'):
416
+ Jv = jac.rmatvec(v)
417
+ Jv2 = np.dot(Jd.T.conj(), v)
418
+ assert_close(Jv, Jv2, 'rmatvec vs array')
419
+
420
+ if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
421
+ Jv = jac.matvec(v)
422
+ Jv2 = jac.solve(jac.matvec(Jv))
423
+ assert_close(Jv, Jv2, 'dot vs solve')
424
+
425
+ if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
426
+ Jv = jac.rmatvec(v)
427
+ Jv2 = jac.rmatvec(jac.rsolve(Jv))
428
+ assert_close(Jv, Jv2, 'rmatvec vs rsolve')
429
+
430
+ x = rand(N)
431
+ jac.update(x, self._func(x))
432
+
433
+ def test_broyden1(self):
434
+ self._check_dot(nonlin.BroydenFirst, complex=False)
435
+ self._check_dot(nonlin.BroydenFirst, complex=True)
436
+
437
+ def test_broyden2(self):
438
+ self._check_dot(nonlin.BroydenSecond, complex=False)
439
+ self._check_dot(nonlin.BroydenSecond, complex=True)
440
+
441
+ def test_anderson(self):
442
+ self._check_dot(nonlin.Anderson, complex=False)
443
+ self._check_dot(nonlin.Anderson, complex=True)
444
+
445
+ def test_diagbroyden(self):
446
+ self._check_dot(nonlin.DiagBroyden, complex=False)
447
+ self._check_dot(nonlin.DiagBroyden, complex=True)
448
+
449
+ def test_linearmixing(self):
450
+ self._check_dot(nonlin.LinearMixing, complex=False)
451
+ self._check_dot(nonlin.LinearMixing, complex=True)
452
+
453
+ def test_excitingmixing(self):
454
+ self._check_dot(nonlin.ExcitingMixing, complex=False)
455
+ self._check_dot(nonlin.ExcitingMixing, complex=True)
456
+
457
+ def test_krylov(self):
458
+ self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-3)
459
+ self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-3)
460
+
461
+
462
+ class TestNonlinOldTests:
463
+ """ Test case for a simple constrained entropy maximization problem
464
+ (the machine translation example of Berger et al in
465
+ Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
466
+ """
467
+
468
+ def test_broyden1(self):
469
+ x = nonlin.broyden1(F, F.xin, iter=12, alpha=1)
470
+ assert_(nonlin.norm(x) < 1e-9)
471
+ assert_(nonlin.norm(F(x)) < 1e-9)
472
+
473
+ def test_broyden2(self):
474
+ x = nonlin.broyden2(F, F.xin, iter=12, alpha=1)
475
+ assert_(nonlin.norm(x) < 1e-9)
476
+ assert_(nonlin.norm(F(x)) < 1e-9)
477
+
478
+ def test_anderson(self):
479
+ x = nonlin.anderson(F, F.xin, iter=12, alpha=0.03, M=5)
480
+ assert_(nonlin.norm(x) < 0.33)
481
+
482
+ def test_linearmixing(self):
483
+ x = nonlin.linearmixing(F, F.xin, iter=60, alpha=0.5)
484
+ assert_(nonlin.norm(x) < 1e-7)
485
+ assert_(nonlin.norm(F(x)) < 1e-7)
486
+
487
+ def test_exciting(self):
488
+ x = nonlin.excitingmixing(F, F.xin, iter=20, alpha=0.5)
489
+ assert_(nonlin.norm(x) < 1e-5)
490
+ assert_(nonlin.norm(F(x)) < 1e-5)
491
+
492
+ def test_diagbroyden(self):
493
+ x = nonlin.diagbroyden(F, F.xin, iter=11, alpha=1)
494
+ assert_(nonlin.norm(x) < 1e-8)
495
+ assert_(nonlin.norm(F(x)) < 1e-8)
496
+
497
+ def test_root_broyden1(self):
498
+ res = root(F, F.xin, method='broyden1',
499
+ options={'nit': 12, 'jac_options': {'alpha': 1}})
500
+ assert_(nonlin.norm(res.x) < 1e-9)
501
+ assert_(nonlin.norm(res.fun) < 1e-9)
502
+
503
+ def test_root_broyden2(self):
504
+ res = root(F, F.xin, method='broyden2',
505
+ options={'nit': 12, 'jac_options': {'alpha': 1}})
506
+ assert_(nonlin.norm(res.x) < 1e-9)
507
+ assert_(nonlin.norm(res.fun) < 1e-9)
508
+
509
+ def test_root_anderson(self):
510
+ res = root(F, F.xin, method='anderson',
511
+ options={'nit': 12,
512
+ 'jac_options': {'alpha': 0.03, 'M': 5}})
513
+ assert_(nonlin.norm(res.x) < 0.33)
514
+
515
+ def test_root_linearmixing(self):
516
+ res = root(F, F.xin, method='linearmixing',
517
+ options={'nit': 60,
518
+ 'jac_options': {'alpha': 0.5}})
519
+ assert_(nonlin.norm(res.x) < 1e-7)
520
+ assert_(nonlin.norm(res.fun) < 1e-7)
521
+
522
+ def test_root_excitingmixing(self):
523
+ res = root(F, F.xin, method='excitingmixing',
524
+ options={'nit': 20,
525
+ 'jac_options': {'alpha': 0.5}})
526
+ assert_(nonlin.norm(res.x) < 1e-5)
527
+ assert_(nonlin.norm(res.fun) < 1e-5)
528
+
529
+ def test_root_diagbroyden(self):
530
+ res = root(F, F.xin, method='diagbroyden',
531
+ options={'nit': 11,
532
+ 'jac_options': {'alpha': 1}})
533
+ assert_(nonlin.norm(res.x) < 1e-8)
534
+ assert_(nonlin.norm(res.fun) < 1e-8)
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_optimize.py ADDED
The diff for this file is too large to render. See raw diff
 
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_regression.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Regression tests for optimize.
2
+
3
+ """
4
+ import numpy as np
5
+ from numpy.testing import assert_almost_equal
6
+ from pytest import raises as assert_raises
7
+
8
+ import scipy.optimize
9
+
10
+
11
+ class TestRegression:
12
+
13
+ def test_newton_x0_is_0(self):
14
+ # Regression test for gh-1601
15
+ tgt = 1
16
+ res = scipy.optimize.newton(lambda x: x - 1, 0)
17
+ assert_almost_equal(res, tgt)
18
+
19
+ def test_newton_integers(self):
20
+ # Regression test for gh-1741
21
+ root = scipy.optimize.newton(lambda x: x**2 - 1, x0=2,
22
+ fprime=lambda x: 2*x)
23
+ assert_almost_equal(root, 1.0)
24
+
25
+ def test_lmdif_errmsg(self):
26
+ # This shouldn't cause a crash on Python 3
27
+ class SomeError(Exception):
28
+ pass
29
+ counter = [0]
30
+
31
+ def func(x):
32
+ counter[0] += 1
33
+ if counter[0] < 3:
34
+ return x**2 - np.array([9, 10, 11])
35
+ else:
36
+ raise SomeError()
37
+ assert_raises(SomeError,
38
+ scipy.optimize.leastsq,
39
+ func, [1, 2, 3])
40
+
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_slsqp.py ADDED
@@ -0,0 +1,608 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unit test for SLSQP optimization.
3
+ """
4
+ from numpy.testing import (assert_, assert_array_almost_equal,
5
+ assert_allclose, assert_equal)
6
+ from pytest import raises as assert_raises
7
+ import pytest
8
+ import numpy as np
9
+
10
+ from scipy.optimize import fmin_slsqp, minimize, Bounds, NonlinearConstraint
11
+
12
+
13
+ class MyCallBack:
14
+ """pass a custom callback function
15
+
16
+ This makes sure it's being used.
17
+ """
18
+ def __init__(self):
19
+ self.been_called = False
20
+ self.ncalls = 0
21
+
22
+ def __call__(self, x):
23
+ self.been_called = True
24
+ self.ncalls += 1
25
+
26
+
27
+ class TestSLSQP:
28
+ """
29
+ Test SLSQP algorithm using Example 14.4 from Numerical Methods for
30
+ Engineers by Steven Chapra and Raymond Canale.
31
+ This example maximizes the function f(x) = 2*x*y + 2*x - x**2 - 2*y**2,
32
+ which has a maximum at x=2, y=1.
33
+ """
34
+ def setup_method(self):
35
+ self.opts = {'disp': False}
36
+
37
+ def fun(self, d, sign=1.0):
38
+ """
39
+ Arguments:
40
+ d - A list of two elements, where d[0] represents x and d[1] represents y
41
+ in the following equation.
42
+ sign - A multiplier for f. Since we want to optimize it, and the SciPy
43
+ optimizers can only minimize functions, we need to multiply it by
44
+ -1 to achieve the desired solution
45
+ Returns:
46
+ 2*x*y + 2*x - x**2 - 2*y**2
47
+
48
+ """
49
+ x = d[0]
50
+ y = d[1]
51
+ return sign*(2*x*y + 2*x - x**2 - 2*y**2)
52
+
53
+ def jac(self, d, sign=1.0):
54
+ """
55
+ This is the derivative of fun, returning a NumPy array
56
+ representing df/dx and df/dy.
57
+
58
+ """
59
+ x = d[0]
60
+ y = d[1]
61
+ dfdx = sign*(-2*x + 2*y + 2)
62
+ dfdy = sign*(2*x - 4*y)
63
+ return np.array([dfdx, dfdy], float)
64
+
65
+ def fun_and_jac(self, d, sign=1.0):
66
+ return self.fun(d, sign), self.jac(d, sign)
67
+
68
+ def f_eqcon(self, x, sign=1.0):
69
+ """ Equality constraint """
70
+ return np.array([x[0] - x[1]])
71
+
72
+ def fprime_eqcon(self, x, sign=1.0):
73
+ """ Equality constraint, derivative """
74
+ return np.array([[1, -1]])
75
+
76
+ def f_eqcon_scalar(self, x, sign=1.0):
77
+ """ Scalar equality constraint """
78
+ return self.f_eqcon(x, sign)[0]
79
+
80
+ def fprime_eqcon_scalar(self, x, sign=1.0):
81
+ """ Scalar equality constraint, derivative """
82
+ return self.fprime_eqcon(x, sign)[0].tolist()
83
+
84
+ def f_ieqcon(self, x, sign=1.0):
85
+ """ Inequality constraint """
86
+ return np.array([x[0] - x[1] - 1.0])
87
+
88
+ def fprime_ieqcon(self, x, sign=1.0):
89
+ """ Inequality constraint, derivative """
90
+ return np.array([[1, -1]])
91
+
92
+ def f_ieqcon2(self, x):
93
+ """ Vector inequality constraint """
94
+ return np.asarray(x)
95
+
96
+ def fprime_ieqcon2(self, x):
97
+ """ Vector inequality constraint, derivative """
98
+ return np.identity(x.shape[0])
99
+
100
+ # minimize
101
    def test_minimize_unbounded_approximated(self):
        # Minimize, method='SLSQP': unbounded, approximated jacobian.
        # None/False both request the default FD estimate; '2-point'/'3-point'
        # select explicit finite-difference schemes.
        jacs = [None, False, '2-point', '3-point']
        for jac in jacs:
            res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                           jac=jac, method='SLSQP',
                           options=self.opts)
            assert_(res['success'], res['message'])
            assert_allclose(res.x, [2, 1])
110
+
111
    def test_minimize_unbounded_given(self):
        # Minimize, method='SLSQP': unbounded, given Jacobian.
        # Unconstrained maximum of fun (sign=-1) is at (2, 1).
        res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                       jac=self.jac, method='SLSQP', options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [2, 1])
117
+
118
    def test_minimize_bounded_approximated(self):
        # Minimize, method='SLSQP': bounded, approximated jacobian.
        jacs = [None, False, '2-point', '3-point']
        for jac in jacs:
            # FD steps near the bound may evaluate outside the domain;
            # silence the resulting invalid-value warnings.
            with np.errstate(invalid='ignore'):
                res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                               jac=jac,
                               bounds=((2.5, None), (None, 0.5)),
                               method='SLSQP', options=self.opts)
            assert_(res['success'], res['message'])
            assert_allclose(res.x, [2.5, 0.5])
            assert_(2.5 <= res.x[0])
            assert_(res.x[1] <= 0.5)
131
+
132
    def test_minimize_unbounded_combined(self):
        # Minimize, method='SLSQP': unbounded, combined function and Jacobian.
        # jac=True means the objective returns (f, grad) as a pair.
        res = minimize(self.fun_and_jac, [-1.0, 1.0], args=(-1.0, ),
                       jac=True, method='SLSQP', options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [2, 1])
138
+
139
    def test_minimize_equality_approximated(self):
        # Minimize with method='SLSQP': equality constraint, approx. jacobian.
        # With x0 == x1 enforced, the constrained optimum moves to (1, 1).
        jacs = [None, False, '2-point', '3-point']
        for jac in jacs:
            res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                           jac=jac,
                           constraints={'type': 'eq',
                                        'fun': self.f_eqcon,
                                        'args': (-1.0, )},
                           method='SLSQP', options=self.opts)
            assert_(res['success'], res['message'])
            assert_allclose(res.x, [1, 1])
151
+
152
    def test_minimize_equality_given(self):
        # Minimize with method='SLSQP': equality constraint, given Jacobian.
        res = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
                       method='SLSQP', args=(-1.0,),
                       constraints={'type': 'eq', 'fun':self.f_eqcon,
                                    'args': (-1.0, )},
                       options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [1, 1])
161
+
162
    def test_minimize_equality_given2(self):
        # Minimize with method='SLSQP': equality constraint, given Jacobian
        # for fun and const. (constraint supplies its own 'jac' entry).
        res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
                       jac=self.jac, args=(-1.0,),
                       constraints={'type': 'eq',
                                    'fun': self.f_eqcon,
                                    'args': (-1.0, ),
                                    'jac': self.fprime_eqcon},
                       options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [1, 1])
174
+
175
    def test_minimize_equality_given_cons_scalar(self):
        # Minimize with method='SLSQP': scalar equality constraint, given
        # Jacobian for fun and const. (scalar fun / list-valued jac accepted).
        res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
                       jac=self.jac, args=(-1.0,),
                       constraints={'type': 'eq',
                                    'fun': self.f_eqcon_scalar,
                                    'args': (-1.0, ),
                                    'jac': self.fprime_eqcon_scalar},
                       options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [1, 1])
187
+
188
    def test_minimize_inequality_given(self):
        # Minimize with method='SLSQP': inequality constraint, given Jacobian.
        # The constraint x0 - x1 >= 1 is inactive at the optimum (2, 1).
        res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
                       jac=self.jac, args=(-1.0, ),
                       constraints={'type': 'ineq',
                                    'fun': self.f_ieqcon,
                                    'args': (-1.0, )},
                       options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [2, 1], atol=1e-3)
198
+
199
    def test_minimize_inequality_given_vector_constraints(self):
        # Minimize with method='SLSQP': vector inequality constraint, given
        # Jacobian. Constraint x >= 0 is satisfied at the optimum (2, 1).
        res = minimize(self.fun, [-1.0, 1.0], jac=self.jac,
                       method='SLSQP', args=(-1.0,),
                       constraints={'type': 'ineq',
                                    'fun': self.f_ieqcon2,
                                    'jac': self.fprime_ieqcon2},
                       options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [2, 1])
210
+
211
    def test_minimize_bounded_constraint(self):
        # when the constraint makes the solver go up against a parameter
        # bound make sure that the numerical differentiation of the
        # jacobian doesn't try to exceed that bound using a finite difference.
        # gh11403
        # The asserts inside c/f fail the test if any evaluation leaves
        # the [0, 1] box.
        def c(x):
            assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x
            return x[0] ** 0.5 + x[1]

        def f(x):
            assert 0 <= x[0] <= 1 and 0 <= x[1] <= 1, x
            return -x[0] ** 2 + x[1] ** 2

        cns = [NonlinearConstraint(c, 0, 1.5)]
        x0 = np.asarray([0.9, 0.5])
        bnd = Bounds([0., 0.], [1.0, 1.0])
        minimize(f, x0, method='SLSQP', bounds=bnd, constraints=cns)
228
+
229
    def test_minimize_bound_equality_given2(self):
        # Minimize with method='SLSQP': bounds, eq. const., given jac. for
        # fun. and const. Bounds clip the constrained optimum to (0.8, 0.8).
        res = minimize(self.fun, [-1.0, 1.0], method='SLSQP',
                       jac=self.jac, args=(-1.0, ),
                       bounds=[(-0.8, 1.), (-1, 0.8)],
                       constraints={'type': 'eq',
                                    'fun': self.f_eqcon,
                                    'args': (-1.0, ),
                                    'jac': self.fprime_eqcon},
                       options=self.opts)
        assert_(res['success'], res['message'])
        assert_allclose(res.x, [0.8, 0.8], atol=1e-3)
        assert_(-0.8 <= res.x[0] <= 1)
        assert_(-1 <= res.x[1] <= 0.8)
244
+
245
+ # fmin_slsqp
246
    # fmin_slsqp
    def test_unbounded_approximated(self):
        # SLSQP: unbounded, approximated Jacobian.
        # imode == 0 is the fmin_slsqp success exit code.
        res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ),
                         iprint = 0, full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [2, 1])
253
+
254
    def test_unbounded_given(self):
        # SLSQP: unbounded, given Jacobian (fprime keyword).
        res = fmin_slsqp(self.fun, [-1.0, 1.0], args=(-1.0, ),
                         fprime = self.jac, iprint = 0,
                         full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [2, 1])
262
+
263
    def test_equality_approximated(self):
        # SLSQP: equality constraint, approximated Jacobian.
        res = fmin_slsqp(self.fun,[-1.0,1.0], args=(-1.0,),
                         eqcons = [self.f_eqcon],
                         iprint = 0, full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [1, 1])
271
+
272
    def test_equality_given(self):
        # SLSQP: equality constraint, given Jacobian for the objective only.
        res = fmin_slsqp(self.fun, [-1.0, 1.0],
                         fprime=self.jac, args=(-1.0,),
                         eqcons = [self.f_eqcon], iprint = 0,
                         full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [1, 1])
281
+
282
    def test_equality_given2(self):
        # SLSQP: equality constraint, given Jacobian for fun and const.
        # (f_eqcons/fprime_eqcons take the vector-valued constraint form).
        res = fmin_slsqp(self.fun, [-1.0, 1.0],
                         fprime=self.jac, args=(-1.0,),
                         f_eqcons = self.f_eqcon,
                         fprime_eqcons = self.fprime_eqcon,
                         iprint = 0,
                         full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [1, 1])
293
+
294
    def test_inequality_given(self):
        # SLSQP: inequality constraint, given Jacobian.
        res = fmin_slsqp(self.fun, [-1.0, 1.0],
                         fprime=self.jac, args=(-1.0, ),
                         ieqcons = [self.f_ieqcon],
                         iprint = 0, full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [2, 1], decimal=3)
303
+
304
    def test_bound_equality_given2(self):
        # SLSQP: bounds, eq. const., given jac. for fun. and const.
        # Same problem as the minimize() variant; checks fmin_slsqp plumbing.
        res = fmin_slsqp(self.fun, [-1.0, 1.0],
                         fprime=self.jac, args=(-1.0, ),
                         bounds = [(-0.8, 1.), (-1, 0.8)],
                         f_eqcons = self.f_eqcon,
                         fprime_eqcons = self.fprime_eqcon,
                         iprint = 0, full_output = 1)
        x, fx, its, imode, smode = res
        assert_(imode == 0, imode)
        assert_array_almost_equal(x, [0.8, 0.8], decimal=3)
        assert_(-0.8 <= x[0] <= 1)
        assert_(-1 <= x[1] <= 0.8)
317
+
318
    def test_scalar_constraints(self):
        # Regression test for gh-2182
        # Scalar-returning constraints (not wrapped in a sequence) must work
        # both via ieqcons and f_ieqcons.
        x = fmin_slsqp(lambda z: z**2, [3.],
                       ieqcons=[lambda z: z[0] - 1],
                       iprint=0)
        assert_array_almost_equal(x, [1.])

        x = fmin_slsqp(lambda z: z**2, [3.],
                       f_ieqcons=lambda z: [z[0] - 1],
                       iprint=0)
        assert_array_almost_equal(x, [1.])
329
+
330
+ def test_integer_bounds(self):
331
+ # This should not raise an exception
332
+ fmin_slsqp(lambda z: z**2 - 1, [0], bounds=[[0, 1]], iprint=0)
333
+
334
    def test_array_bounds(self):
        # NumPy used to treat n-dimensional 1-element arrays as scalars
        # in some cases. The handling of `bounds` by `fmin_slsqp` still
        # supports this behavior. (second bound pair is array-valued)
        bounds = [(-np.inf, np.inf), (np.array([2]), np.array([3]))]
        x = fmin_slsqp(lambda z: np.sum(z**2 - 1), [2.5, 2.5], bounds=bounds,
                       iprint=0)
        assert_array_almost_equal(x, [0, 2])
342
+
343
+ def test_obj_must_return_scalar(self):
344
+ # Regression test for Github Issue #5433
345
+ # If objective function does not return a scalar, raises ValueError
346
+ with assert_raises(ValueError):
347
+ fmin_slsqp(lambda x: [0, 1], [1, 2, 3])
348
+
349
+ def test_obj_returns_scalar_in_list(self):
350
+ # Test for Github Issue #5433 and PR #6691
351
+ # Objective function should be able to return length-1 Python list
352
+ # containing the scalar
353
+ fmin_slsqp(lambda x: [0], [1, 2, 3], iprint=0)
354
+
355
    def test_callback(self):
        # Minimize, method='SLSQP': unbounded, approximated jacobian. Check for callback
        # The callback must be invoked exactly once per iteration.
        callback = MyCallBack()
        res = minimize(self.fun, [-1.0, 1.0], args=(-1.0, ),
                       method='SLSQP', callback=callback, options=self.opts)
        assert_(res['success'], res['message'])
        assert_(callback.been_called)
        assert_equal(callback.ncalls, res['nit'])
363
+
364
    def test_inconsistent_linearization(self):
        # SLSQP must be able to solve this problem, even if the
        # linearized problem at the starting point is infeasible.

        # Linearized constraints are
        #
        # 2*x0[0]*x[0] >= 1
        #
        # At x0 = [0, 1], the second constraint is clearly infeasible.
        # This triggers a call with n2==1 in the LSQ subroutine.
        x = [0, 1]

        def f1(x):
            # equality: x0 + x1 == 2
            return x[0] + x[1] - 2

        def f2(x):
            # inequality: x0**2 >= 1
            return x[0] ** 2 - 1

        sol = minimize(
            lambda x: x[0]**2 + x[1]**2,
            x,
            constraints=({'type':'eq','fun': f1},
                         {'type':'ineq','fun': f2}),
            bounds=((0,None), (0,None)),
            method='SLSQP')
        x = sol.x

        assert_allclose(f1(x), 0, atol=1e-8)
        assert_(f2(x) >= -1e-8)
        assert_(sol.success, sol)
391
+
392
+ def test_regression_5743(self):
393
+ # SLSQP must not indicate success for this problem,
394
+ # which is infeasible.
395
+ x = [1, 2]
396
+ sol = minimize(
397
+ lambda x: x[0]**2 + x[1]**2,
398
+ x,
399
+ constraints=({'type':'eq','fun': lambda x: x[0]+x[1]-1},
400
+ {'type':'ineq','fun': lambda x: x[0]-2}),
401
+ bounds=((0,None), (0,None)),
402
+ method='SLSQP')
403
+ assert_(not sol.success, sol)
404
+
405
+ def test_gh_6676(self):
406
+ def func(x):
407
+ return (x[0] - 1)**2 + 2*(x[1] - 1)**2 + 0.5*(x[2] - 1)**2
408
+
409
+ sol = minimize(func, [0, 0, 0], method='SLSQP')
410
+ assert_(sol.jac.shape == (3,))
411
+
412
    def test_invalid_bounds(self):
        # Raise correct error when lower bound is greater than upper bound.
        # See Github issue 6875. Each pair below has lb > ub in some slot.
        bounds_list = [
            ((1, 2), (2, 1)),
            ((2, 1), (1, 2)),
            ((2, 1), (2, 1)),
            ((np.inf, 0), (np.inf, 0)),
            ((1, -np.inf), (0, 1)),
        ]
        for bounds in bounds_list:
            with assert_raises(ValueError):
                minimize(self.fun, [-1.0, 1.0], bounds=bounds, method='SLSQP')
425
+
426
+ def test_bounds_clipping(self):
427
+ #
428
+ # SLSQP returns bogus results for initial guess out of bounds, gh-6859
429
+ #
430
+ def f(x):
431
+ return (x[0] - 1)**2
432
+
433
+ sol = minimize(f, [10], method='slsqp', bounds=[(None, 0)])
434
+ assert_(sol.success)
435
+ assert_allclose(sol.x, 0, atol=1e-10)
436
+
437
+ sol = minimize(f, [-10], method='slsqp', bounds=[(2, None)])
438
+ assert_(sol.success)
439
+ assert_allclose(sol.x, 2, atol=1e-10)
440
+
441
+ sol = minimize(f, [-10], method='slsqp', bounds=[(None, 0)])
442
+ assert_(sol.success)
443
+ assert_allclose(sol.x, 0, atol=1e-10)
444
+
445
+ sol = minimize(f, [10], method='slsqp', bounds=[(2, None)])
446
+ assert_(sol.success)
447
+ assert_allclose(sol.x, 2, atol=1e-10)
448
+
449
+ sol = minimize(f, [-0.5], method='slsqp', bounds=[(-1, 0)])
450
+ assert_(sol.success)
451
+ assert_allclose(sol.x, 0, atol=1e-10)
452
+
453
+ sol = minimize(f, [10], method='slsqp', bounds=[(-1, 0)])
454
+ assert_(sol.success)
455
+ assert_allclose(sol.x, 0, atol=1e-10)
456
+
457
+ def test_infeasible_initial(self):
458
+ # Check SLSQP behavior with infeasible initial point
459
+ def f(x):
460
+ x, = x
461
+ return x*x - 2*x + 1
462
+
463
+ cons_u = [{'type': 'ineq', 'fun': lambda x: 0 - x}]
464
+ cons_l = [{'type': 'ineq', 'fun': lambda x: x - 2}]
465
+ cons_ul = [{'type': 'ineq', 'fun': lambda x: 0 - x},
466
+ {'type': 'ineq', 'fun': lambda x: x + 1}]
467
+
468
+ sol = minimize(f, [10], method='slsqp', constraints=cons_u)
469
+ assert_(sol.success)
470
+ assert_allclose(sol.x, 0, atol=1e-10)
471
+
472
+ sol = minimize(f, [-10], method='slsqp', constraints=cons_l)
473
+ assert_(sol.success)
474
+ assert_allclose(sol.x, 2, atol=1e-10)
475
+
476
+ sol = minimize(f, [-10], method='slsqp', constraints=cons_u)
477
+ assert_(sol.success)
478
+ assert_allclose(sol.x, 0, atol=1e-10)
479
+
480
+ sol = minimize(f, [10], method='slsqp', constraints=cons_l)
481
+ assert_(sol.success)
482
+ assert_allclose(sol.x, 2, atol=1e-10)
483
+
484
+ sol = minimize(f, [-0.5], method='slsqp', constraints=cons_ul)
485
+ assert_(sol.success)
486
+ assert_allclose(sol.x, 0, atol=1e-10)
487
+
488
+ sol = minimize(f, [10], method='slsqp', constraints=cons_ul)
489
+ assert_(sol.success)
490
+ assert_allclose(sol.x, 0, atol=1e-10)
491
+
492
+ def test_inconsistent_inequalities(self):
493
+ # gh-7618
494
+
495
+ def cost(x):
496
+ return -1 * x[0] + 4 * x[1]
497
+
498
+ def ineqcons1(x):
499
+ return x[1] - x[0] - 1
500
+
501
+ def ineqcons2(x):
502
+ return x[0] - x[1]
503
+
504
+ # The inequalities are inconsistent, so no solution can exist:
505
+ #
506
+ # x1 >= x0 + 1
507
+ # x0 >= x1
508
+
509
+ x0 = (1,5)
510
+ bounds = ((-5, 5), (-5, 5))
511
+ cons = (dict(type='ineq', fun=ineqcons1), dict(type='ineq', fun=ineqcons2))
512
+ res = minimize(cost, x0, method='SLSQP', bounds=bounds, constraints=cons)
513
+
514
+ assert_(not res.success)
515
+
516
+ def test_new_bounds_type(self):
517
+ def f(x):
518
+ return x[0] ** 2 + x[1] ** 2
519
+ bounds = Bounds([1, 0], [np.inf, np.inf])
520
+ sol = minimize(f, [0, 0], method='slsqp', bounds=bounds)
521
+ assert_(sol.success)
522
+ assert_allclose(sol.x, [1, 0])
523
+
524
    def test_nested_minimization(self):
        # A minimize() call inside the objective of another minimize()
        # must not corrupt either solve (SLSQP keeps per-call state).

        class NestedProblem:

            def __init__(self):
                # Guard against the outer solve looping forever.
                self.F_outer_count = 0

            def F_outer(self, x):
                self.F_outer_count += 1
                if self.F_outer_count > 1000:
                    raise Exception("Nested minimization failed to terminate.")
                inner_res = minimize(self.F_inner, (3, 4), method="SLSQP")
                assert_(inner_res.success)
                assert_allclose(inner_res.x, [1, 1])
                return x[0]**2 + x[1]**2 + x[2]**2

            def F_inner(self, x):
                return (x[0] - 1)**2 + (x[1] - 1)**2

            def solve(self):
                outer_res = minimize(self.F_outer, (5, 5, 5), method="SLSQP")
                assert_(outer_res.success)
                assert_allclose(outer_res.x, [0, 0, 0])

        problem = NestedProblem()
        problem.solve()
550
+
551
    def test_gh1758(self):
        # the test suggested in gh1758
        # https://nlopt.readthedocs.io/en/latest/NLopt_Tutorial/
        # implement two equality constraints, in R^2.
        def fun(x):
            return np.sqrt(x[1])

        def f_eqcon(x):
            """ Equality constraint """
            return x[1] - (2 * x[0]) ** 3

        def f_eqcon2(x):
            """ Equality constraint """
            return x[1] - (-x[0] + 1) ** 3

        c1 = {'type': 'eq', 'fun': f_eqcon}
        c2 = {'type': 'eq', 'fun': f_eqcon2}

        res = minimize(fun, [8, 0.25], method='SLSQP',
                       constraints=[c1, c2], bounds=[(-0.5, 1), (0, 8)])

        # Reference optimum from the NLopt tutorial.
        np.testing.assert_allclose(res.fun, 0.5443310539518)
        np.testing.assert_allclose(res.x, [0.33333333, 0.2962963])
        assert res.success
575
+
576
+ def test_gh9640(self):
577
+ np.random.seed(10)
578
+ cons = ({'type': 'ineq', 'fun': lambda x: -x[0] - x[1] - 3},
579
+ {'type': 'ineq', 'fun': lambda x: x[1] + x[2] - 2})
580
+ bnds = ((-2, 2), (-2, 2), (-2, 2))
581
+
582
+ def target(x):
583
+ return 1
584
+ x0 = [-1.8869783504471584, -0.640096352696244, -0.8174212253407696]
585
+ res = minimize(target, x0, method='SLSQP', bounds=bnds, constraints=cons,
586
+ options={'disp':False, 'maxiter':10000})
587
+
588
+ # The problem is infeasible, so it cannot succeed
589
+ assert not res.success
590
+
591
    def test_parameters_stay_within_bounds(self):
        # gh11403. For some problems the SLSQP Fortran code suggests a step
        # outside one of the lower/upper bounds. When this happens
        # approx_derivative complains because it's being asked to evaluate
        # a gradient outside its domain.
        np.random.seed(1)
        bounds = Bounds(np.array([0.1]), np.array([1.0]))
        n_inputs = len(bounds.lb)
        # Random feasible starting point inside [lb, ub].
        x0 = np.array(bounds.lb + (bounds.ub - bounds.lb) *
                      np.random.random(n_inputs))

        def f(x):
            assert (x >= bounds.lb).all()
            return np.linalg.norm(x)

        # The warning is expected; the solve must still succeed.
        with pytest.warns(RuntimeWarning, match='x were outside bounds'):
            res = minimize(f, x0, method='SLSQP', bounds=bounds)
            assert res.success
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_trustregion.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Unit tests for trust-region optimization routines.
3
+
4
To run it in its simplest form::
  pytest test_trustregion.py
6
+
7
+ """
8
+ import pytest
9
+ import numpy as np
10
+ from numpy.testing import assert_, assert_equal, assert_allclose
11
+ from scipy.optimize import (minimize, rosen, rosen_der, rosen_hess,
12
+ rosen_hess_prod)
13
+
14
+
15
class Accumulator:
    """Callback recorder: counts invocations and sums the iterates."""

    def __init__(self):
        self.count = 0      # number of times the callback fired
        self.accum = None   # running elementwise sum of the iterates

    def __call__(self, x):
        self.count += 1
        if self.accum is not None:
            self.accum += x
        else:
            self.accum = np.array(x)
27
+
28
+
29
class TestTrustRegionSolvers:
    """Tests for the trust-region minimizers on the Rosenbrock function."""

    def setup_method(self):
        # Known optimum and two starting points of differing difficulty.
        self.x_opt = [1.0, 1.0]
        self.easy_guess = [2.0, 2.0]
        self.hard_guess = [-1.2, 1.0]

    def test_dogleg_accuracy(self):
        # test the accuracy and the return_all option
        x0 = self.hard_guess
        r = minimize(rosen, x0, jac=rosen_der, hess=rosen_hess, tol=1e-8,
                     method='dogleg', options={'return_all': True},)
        assert_allclose(x0, r['allvecs'][0])
        assert_allclose(r['x'], r['allvecs'][-1])
        assert_allclose(r['x'], self.x_opt)

    def test_dogleg_callback(self):
        # test the callback mechanism and the maxiter and return_all options
        accumulator = Accumulator()
        maxiter = 5
        r = minimize(rosen, self.hard_guess, jac=rosen_der, hess=rosen_hess,
                     callback=accumulator, method='dogleg',
                     options={'return_all': True, 'maxiter': maxiter},)
        # One callback per iteration; allvecs additionally holds x0.
        assert_equal(accumulator.count, maxiter)
        assert_equal(len(r['allvecs']), maxiter+1)
        assert_allclose(r['x'], r['allvecs'][-1])
        assert_allclose(sum(r['allvecs'][1:]), accumulator.accum)

    def test_dogleg_user_warning(self):
        # Hitting maxiter with disp=True must emit a RuntimeWarning.
        with pytest.warns(RuntimeWarning,
                          match=r'Maximum number of iterations'):
            minimize(rosen, self.hard_guess, jac=rosen_der,
                     hess=rosen_hess, method='dogleg',
                     options={'disp': True, 'maxiter': 1}, )

    def test_solver_concordance(self):
        # Assert that dogleg uses fewer iterations than ncg on the Rosenbrock
        # test function, although this does not necessarily mean
        # that dogleg is faster or better than ncg even for this function
        # and especially not for other test functions.
        f = rosen
        g = rosen_der
        h = rosen_hess
        for x0 in (self.easy_guess, self.hard_guess):
            r_dogleg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                                method='dogleg', options={'return_all': True})
            r_trust_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                                   method='trust-ncg',
                                   options={'return_all': True})
            r_trust_krylov = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                                      method='trust-krylov',
                                      options={'return_all': True})
            r_ncg = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                             method='newton-cg', options={'return_all': True})
            r_iterative = minimize(f, x0, jac=g, hess=h, tol=1e-8,
                                   method='trust-exact',
                                   options={'return_all': True})
            assert_allclose(self.x_opt, r_dogleg['x'])
            assert_allclose(self.x_opt, r_trust_ncg['x'])
            assert_allclose(self.x_opt, r_trust_krylov['x'])
            assert_allclose(self.x_opt, r_ncg['x'])
            assert_allclose(self.x_opt, r_iterative['x'])
            assert_(len(r_dogleg['allvecs']) < len(r_ncg['allvecs']))

    def test_trust_ncg_hessp(self):
        # trust-ncg accepts a Hessian-vector product instead of the Hessian.
        for x0 in (self.easy_guess, self.hard_guess, self.x_opt):
            r = minimize(rosen, x0, jac=rosen_der, hessp=rosen_hess_prod,
                         tol=1e-8, method='trust-ncg')
            assert_allclose(self.x_opt, r['x'])

    def test_trust_ncg_start_in_optimum(self):
        # Starting at the optimum must converge immediately, not diverge.
        r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
                     tol=1e-8, method='trust-ncg')
        assert_allclose(self.x_opt, r['x'])

    def test_trust_krylov_start_in_optimum(self):
        r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
                     tol=1e-8, method='trust-krylov')
        assert_allclose(self.x_opt, r['x'])

    def test_trust_exact_start_in_optimum(self):
        r = minimize(rosen, x0=self.x_opt, jac=rosen_der, hess=rosen_hess,
                     tol=1e-8, method='trust-exact')
        assert_allclose(self.x_opt, r['x'])
llava_next/lib/python3.10/site-packages/scipy/optimize/tests/test_zeros.py ADDED
@@ -0,0 +1,939 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from functools import lru_cache
4
+
5
+ from numpy.testing import (assert_warns, assert_,
6
+ assert_allclose,
7
+ assert_equal,
8
+ assert_array_equal,
9
+ suppress_warnings)
10
+ import numpy as np
11
+ from numpy import finfo, power, nan, isclose, sqrt, exp, sin, cos
12
+
13
+ from scipy import optimize
14
+ from scipy.optimize import (_zeros_py as zeros, newton, root_scalar,
15
+ OptimizeResult)
16
+
17
+ from scipy._lib._util import getfullargspec_no_self as _getfullargspec
18
+
19
+ # Import testing parameters
20
+ from scipy.optimize._tstutils import get_tests, functions as tstutils_functions
21
+
22
+ TOL = 4*np.finfo(float).eps # tolerance
23
+
24
+ _FLOAT_EPS = finfo(float).eps
25
+
26
+ bracket_methods = [zeros.bisect, zeros.ridder, zeros.brentq, zeros.brenth,
27
+ zeros.toms748]
28
+ gradient_methods = [zeros.newton]
29
+ all_methods = bracket_methods + gradient_methods
30
+
31
+ # A few test functions used frequently:
32
# A simple quadratic, (x-1)**2 - 2
33
def f1(x):
    """Quadratic test function x**2 - 2*x - 1, roots at 1 +/- sqrt(2)."""
    return x * x - 2 * x - 1
35
+
36
+
37
def f1_1(x):
    """First derivative of ``f1``."""
    return -2 + 2 * x
39
+
40
+
41
def f1_2(x):
    """Second derivative of ``f1`` (constant 2, broadcast to x's shape)."""
    return 0 * x + 2.0
43
+
44
+
45
def f1_and_p_and_pp(x):
    """Return (f1, f1', f1'') at ``x`` in one call, for Halley's method."""
    value, first, second = f1(x), f1_1(x), f1_2(x)
    return value, first, second
47
+
48
+
49
+ # Simple transcendental function
50
def f2(x):
    """Transcendental test function exp(x) - cos(x); has a root at 0."""
    return np.exp(x) - np.cos(x)
52
+
53
+
54
def f2_1(x):
    """First derivative of ``f2``."""
    return np.exp(x) + np.sin(x)
56
+
57
+
58
def f2_2(x):
    """Second derivative of ``f2``."""
    return np.exp(x) + np.cos(x)
60
+
61
+
62
+ # lru cached function
63
@lru_cache
def f_lrucached(x):
    """Identity function; cached to exercise gh-10846 in the root finders."""
    return x
66
+
67
+
68
class TestScalarRootFinders:
    # Basic tests for all scalar root finders

    # Shared absolute/relative tolerances for every method under test.
    xtol = 4 * np.finfo(float).eps
    rtol = 4 * np.finfo(float).eps

    def _run_one_test(self, tc, method, sig_args_keys=None,
                      sig_kwargs_keys=None, **kwargs):
        # Build the positional arguments from the test-case dict.
        method_args = []
        for k in sig_args_keys or []:
            if k not in tc:
                # If a,b not present use x0, x1. Similarly for f and func
                k = {'a': 'x0', 'b': 'x1', 'func': 'f'}.get(k, k)
            method_args.append(tc[k])

        method_kwargs = dict(**kwargs)
        method_kwargs.update({'full_output': True, 'disp': False})
        for k in sig_kwargs_keys or []:
            method_kwargs[k] = tc[k]

        root = tc.get('root')
        func_args = tc.get('args', ())

        try:
            r, rr = method(*method_args, args=func_args, **method_kwargs)
            return root, rr, tc
        except Exception:
            # A raised exception counts as "did not converge": synthesize a
            # failed RootResults rather than aborting the whole run.
            return root, zeros.RootResults(nan, -1, -1, zeros._EVALUEERR, method), tc

    def run_tests(self, tests, method, name, known_fail=None, **kwargs):
        r"""Run test-cases using the specified method and the supplied signature.

        Extract the arguments for the method call from the test case
        dictionary using the supplied keys for the method's signature."""
        # The methods have one of two base signatures:
        # (f, a, b, **kwargs)  # newton
        # (func, x0, **kwargs)  # bisect/brentq/...

        # FullArgSpec with args, varargs, varkw, defaults, ...
        sig = _getfullargspec(method)
        assert_(not sig.kwonlyargs)
        nDefaults = len(sig.defaults)
        nRequired = len(sig.args) - nDefaults
        sig_args_keys = sig.args[:nRequired]
        sig_kwargs_keys = []
        if name in ['secant', 'newton', 'halley']:
            if name in ['newton', 'halley']:
                sig_kwargs_keys.append('fprime')
                if name in ['halley']:
                    sig_kwargs_keys.append('fprime2')
            kwargs['tol'] = self.xtol
        else:
            kwargs['xtol'] = self.xtol
            kwargs['rtol'] = self.rtol

        results = [list(self._run_one_test(
            tc, method, sig_args_keys=sig_args_keys,
            sig_kwargs_keys=sig_kwargs_keys, **kwargs)) for tc in tests]
        # results= [[true root, full output, tc], ...]

        known_fail = known_fail or []
        notcvgd = [elt for elt in results if not elt[1].converged]
        notcvgd = [elt for elt in notcvgd if elt[-1]['ID'] not in known_fail]
        notcvged_IDS = [elt[-1]['ID'] for elt in notcvgd]
        assert_equal([len(notcvged_IDS), notcvged_IDS], [0, []])

        # The usable xtol and rtol depend on the test
        tols = {'xtol': self.xtol, 'rtol': self.rtol}
        tols.update(**kwargs)
        rtol = tols['rtol']
        atol = tols.get('tol', tols['xtol'])

        cvgd = [elt for elt in results if elt[1].converged]
        approx = [elt[1].root for elt in cvgd]
        correct = [elt[0] for elt in cvgd]
        # See if the root matches the reference value
        notclose = [[a] + elt for a, c, elt in zip(approx, correct, cvgd) if
                    not isclose(a, c, rtol=rtol, atol=atol)
                    and elt[-1]['ID'] not in known_fail]
        # If not, evaluate the function and see if is 0 at the purported root
        fvs = [tc['f'](aroot, *tc.get('args', tuple()))
               for aroot, c, fullout, tc in notclose]
        notclose = [[fv] + elt for fv, elt in zip(fvs, notclose) if fv != 0]
        assert_equal([notclose, len(notclose)], [[], 0])
        # Every result must also report the method that produced it.
        method_from_result = [result[1].method for result in results]
        expected_method = [name for _ in results]
        assert_equal(method_from_result, expected_method)

    def run_collection(self, collection, method, name, smoothness=None,
                       known_fail=None, **kwargs):
        r"""Run a collection of tests using the specified method.

        The name is used to determine some optional arguments."""
        tests = get_tests(collection, smoothness=smoothness)
        self.run_tests(tests, method, name, known_fail=known_fail, **kwargs)
163
+
164
+
165
class TestBracketMethods(TestScalarRootFinders):
    """Tests for the bracketing root finders (bisect/ridder/brentq/...)."""

    @pytest.mark.parametrize('method', bracket_methods)
    @pytest.mark.parametrize('function', tstutils_functions)
    def test_basic_root_scalar(self, method, function):
        # Tests bracketing root finders called via `root_scalar` on a small
        # set of simple problems, each of which has a root at `x=1`. Checks for
        # converged status and that the root was found.
        a, b = .5, sqrt(3)

        r = root_scalar(function, method=method.__name__, bracket=[a, b], x0=a,
                        xtol=self.xtol, rtol=self.rtol)
        assert r.converged
        assert_allclose(r.root, 1.0, atol=self.xtol, rtol=self.rtol)
        assert r.method == method.__name__

    @pytest.mark.parametrize('method', bracket_methods)
    @pytest.mark.parametrize('function', tstutils_functions)
    def test_basic_individual(self, method, function):
        # Tests individual bracketing root finders on a small set of simple
        # problems, each of which has a root at `x=1`. Checks for converged
        # status and that the root was found.
        a, b = .5, sqrt(3)
        root, r = method(function, a, b, xtol=self.xtol, rtol=self.rtol,
                         full_output=True)

        assert r.converged
        assert_allclose(root, 1.0, atol=self.xtol, rtol=self.rtol)

    @pytest.mark.parametrize('method', bracket_methods)
    def test_aps_collection(self, method):
        # Smooth problems from the APS collection.
        self.run_collection('aps', method, method.__name__, smoothness=1)

    @pytest.mark.parametrize('method', [zeros.bisect, zeros.ridder,
                                        zeros.toms748])
    def test_chandrupatla_collection(self, method):
        # ridder is known to miss fun7.4; tolerate only that failure.
        known_fail = {'fun7.4'} if method == zeros.ridder else {}
        self.run_collection('chandrupatla', method, method.__name__,
                            known_fail=known_fail)

    @pytest.mark.parametrize('method', bracket_methods)
    def test_lru_cached_individual(self, method):
        # check that https://github.com/scipy/scipy/issues/10846 is fixed
        # (`root_scalar` failed when passed a function that was `@lru_cache`d)
        a, b = -1, 1
        root, r = method(f_lrucached, a, b, full_output=True)
        assert r.converged
        assert_allclose(root, 0)
212
+
213
+
214
class TestNewton(TestScalarRootFinders):
    """Tests for the Newton / secant / Halley family of scalar solvers."""

    def test_newton_collections(self):
        # `newton` should solve the 'aps' and 'complex' collections, save
        # for a handful of problems known to fail.
        known_fail = ['aps.13.00']
        known_fail += ['aps.12.05', 'aps.12.17']  # fails under Windows Py27
        for collection in ['aps', 'complex']:
            self.run_collection(collection, zeros.newton, 'newton',
                                smoothness=2, known_fail=known_fail)

    def test_halley_collections(self):
        # Same collections solved with Halley's method; more known failures.
        known_fail = ['aps.12.06', 'aps.12.07', 'aps.12.08', 'aps.12.09',
                      'aps.12.10', 'aps.12.11', 'aps.12.12', 'aps.12.13',
                      'aps.12.14', 'aps.12.15', 'aps.12.16', 'aps.12.17',
                      'aps.12.18', 'aps.13.00']
        for collection in ['aps', 'complex']:
            self.run_collection(collection, zeros.newton, 'halley',
                                smoothness=2, known_fail=known_fail)

    def test_newton(self):
        # Exercise all four variants selected by the optional derivatives.
        for func, dfunc, d2func in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            x = zeros.newton(func, 3, tol=1e-6)  # secant, x1 generated
            assert_allclose(func(x), 0, atol=1e-6)
            x = zeros.newton(func, 3, x1=5, tol=1e-6)  # secant, x0 and x1
            assert_allclose(func(x), 0, atol=1e-6)
            x = zeros.newton(func, 3, fprime=dfunc, tol=1e-6)  # newton
            assert_allclose(func(x), 0, atol=1e-6)
            x = zeros.newton(func, 3, fprime=dfunc, fprime2=d2func,
                             tol=1e-6)  # halley
            assert_allclose(func(x), 0, atol=1e-6)

    def test_newton_by_name(self):
        r"""Invoke newton through root_scalar()"""
        for func, dfunc, _ in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            r = root_scalar(func, method='newton', x0=3, fprime=dfunc,
                            xtol=1e-6)
            assert_allclose(func(r.root), 0, atol=1e-6)
        for func, _, _ in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            # without f' -- falls back internally
            r = root_scalar(func, method='newton', x0=3, xtol=1e-6)
            assert_allclose(func(r.root), 0, atol=1e-6)

    def test_secant_by_name(self):
        r"""Invoke secant through root_scalar()"""
        for func, _, _ in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            r = root_scalar(func, method='secant', x0=3, x1=2, xtol=1e-6)
            assert_allclose(func(r.root), 0, atol=1e-6)
            r = root_scalar(func, method='secant', x0=3, x1=5, xtol=1e-6)
            assert_allclose(func(r.root), 0, atol=1e-6)
        for func, _, _ in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            # without x1 -- a second point must be synthesized
            r = root_scalar(func, method='secant', x0=3, xtol=1e-6)
            assert_allclose(func(r.root), 0, atol=1e-6)

    def test_halley_by_name(self):
        r"""Invoke halley through root_scalar()"""
        for func, dfunc, d2func in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
            r = root_scalar(func, method='halley', x0=3,
                            fprime=dfunc, fprime2=d2func, xtol=1e-6)
            assert_allclose(func(r.root), 0, atol=1e-6)

    def test_root_scalar_fail(self):
        # Halley's method requires both derivatives; each omission raises.
        message = 'fprime2 must be specified for halley'
        with pytest.raises(ValueError, match=message):
            root_scalar(f1, method='halley', fprime=f1_1, x0=3, xtol=1e-6)
        message = 'fprime must be specified for halley'
        with pytest.raises(ValueError, match=message):
            root_scalar(f1, method='halley', fprime2=f1_2, x0=3, xtol=1e-6)

    def test_array_newton(self):
        """test newton with array"""

        def func(x, *a):
            b = a[0] + x * a[3]
            return a[1] - a[2] * (np.exp(b / a[5]) - 1.0) - b / a[4] - x

        def dfunc(x, *a):
            b = a[3] / a[5]
            return -a[2] * np.exp(a[0] / a[5] + x * b) * b - a[3] / a[4] - 1

        def d2func(x, *a):
            b = a[3] / a[5]
            return -a[2] * np.exp(a[0] / a[5] + x * b) * b**2

        a0 = np.array([
            5.32725221, 5.48673747, 5.49539973,
            5.36387202, 4.80237316, 1.43764452,
            5.23063958, 5.46094772, 5.50512718,
            5.42046290
        ])
        a1 = (np.sin(range(10)) + 1.0) * 7.0
        args = (a0, a1, 1e-09, 0.004, 10, 0.27456)
        x0 = [7.0] * 10
        x_expected = (
            6.17264965, 11.7702805, 12.2219954,
            7.11017681, 1.18151293, 0.143707955,
            4.31928228, 10.5419107, 12.7552490,
            8.91225749
        )
        # newton
        x = zeros.newton(func, x0, dfunc, args)
        assert_allclose(x, x_expected)
        # halley
        x = zeros.newton(func, x0, dfunc, args, fprime2=d2func)
        assert_allclose(x, x_expected)
        # secant
        x = zeros.newton(func, x0, args=args)
        assert_allclose(x, x_expected)

    def test_array_newton_complex(self):
        def f(x):
            return x + 1+1j

        def fprime(x):
            return 1.0

        t = np.full(4, 1j)
        x = zeros.newton(f, t, fprime=fprime)
        assert_allclose(f(x), 0.)

        # should work even if x0 is not complex
        t = np.ones(4)
        x = zeros.newton(f, t, fprime=fprime)
        assert_allclose(f(x), 0.)

        x = zeros.newton(f, t)
        assert_allclose(f(x), 0.)

    def test_array_secant_active_zero_der(self):
        """test secant doesn't continue to iterate zero derivatives"""
        x = zeros.newton(lambda x, *a: x*x - a[0], x0=[4.123, 5],
                         args=[np.array([17, 25])])
        assert_allclose(x, (4.123105625617661, 5.0))

    def test_array_newton_integers(self):
        # secant with float x0
        x = zeros.newton(lambda y, z: z - y ** 2, [4.0] * 2,
                         args=([15.0, 17.0],))
        assert_allclose(x, (3.872983346207417, 4.123105625617661))
        # an integer x0 must be promoted to float internally
        x = zeros.newton(lambda y, z: z - y ** 2, [4] * 2, args=([15, 17],))
        assert_allclose(x, (3.872983346207417, 4.123105625617661))

    def test_array_newton_zero_der_failures(self):
        # a zero derivative everywhere warns ...
        assert_warns(RuntimeWarning, zeros.newton,
                     lambda y: y**2 - 2, [0., 0.], lambda y: 2 * y)
        # ... and full_output records the failures and zero_der flags
        with pytest.warns(RuntimeWarning):
            results = zeros.newton(lambda y: y**2 - 2, [0., 0.],
                                   lambda y: 2*y, full_output=True)
            assert_allclose(results.root, 0)
            assert results.zero_der.all()
            assert not results.converged.any()

    def test_newton_combined(self):
        # `fprime=True` / `fprime2=True` tell root_scalar the objective
        # returns the derivative(s) together with the function value, so
        # one combined call replaces two (or three) separate ones.
        def f1(x):
            return x ** 2 - 2 * x - 1

        def f1_1(x):
            return 2 * x - 2

        def f1_2(x):
            return 2.0 + 0 * x

        def f1_and_p_and_pp(x):
            return x**2 - 2*x-1, 2*x-2, 2.0

        sol0 = root_scalar(f1, method='newton', x0=3, fprime=f1_1)
        sol = root_scalar(f1_and_p_and_pp, method='newton', x0=3, fprime=True)
        assert_allclose(sol0.root, sol.root, atol=1e-8)
        assert_equal(2*sol.function_calls, sol0.function_calls)

        sol0 = root_scalar(f1, method='halley', x0=3, fprime=f1_1, fprime2=f1_2)
        sol = root_scalar(f1_and_p_and_pp, method='halley', x0=3, fprime2=True)
        assert_allclose(sol0.root, sol.root, atol=1e-8)
        assert_equal(3*sol.function_calls, sol0.function_calls)

    def test_newton_full_output(self, capsys):
        # Test the full_output capability, both when converging and not.
        # Simple polynomials avoid platform-dependent iteration counts
        # (e.g. from exp & trig).
        x0 = 3
        expected_counts = [(6, 7), (5, 10), (3, 9)]

        for derivs in range(3):
            kwargs = {'tol': 1e-6, 'full_output': True, }
            for k, v in [['fprime', f1_1], ['fprime2', f1_2]][:derivs]:
                kwargs[k] = v

            x, r = zeros.newton(f1, x0, disp=False, **kwargs)
            assert_(r.converged)
            assert_equal(x, r.root)
            assert_equal((r.iterations, r.function_calls),
                         expected_counts[derivs])
            if derivs == 0:
                assert r.function_calls <= r.iterations + 1
            else:
                assert_equal(r.function_calls, (derivs + 1) * r.iterations)

            # Repeat, allowing one fewer iteration to force a failure.
            iters = r.iterations - 1
            x, r = zeros.newton(f1, x0, maxiter=iters, disp=False, **kwargs)
            assert_(not r.converged)
            assert_equal(x, r.root)
            assert_equal(r.iterations, iters)

            if derivs == 1:
                # Check that the correct Exception is raised and
                # validate the start of the message.
                msg = 'Failed to converge after %d iterations, value is .*' % (iters)
                with pytest.raises(RuntimeError, match=msg):
                    x, r = zeros.newton(f1, x0, maxiter=iters, disp=True,
                                        **kwargs)

    def test_deriv_zero_warning(self):
        def func(x):
            return x ** 2 - 2.0

        def dfunc(x):
            return 2 * x

        # disp=False warns; disp=True raises
        assert_warns(RuntimeWarning, zeros.newton, func, 0.0, dfunc,
                     disp=False)
        with pytest.raises(RuntimeError, match='Derivative was zero'):
            zeros.newton(func, 0.0, dfunc)

    def test_newton_does_not_modify_x0(self):
        # https://github.com/scipy/scipy/issues/9964
        x0 = np.array([0.1, 3])
        x0_copy = x0.copy()  # Copy to test for equality.
        newton(np.sin, x0, np.cos)
        assert_array_equal(x0, x0_copy)

    def test_gh17570_defaults(self):
        # Previously, when fprime was not specified, root_scalar would
        # default to secant, and secant failed when x1 was not specified.
        # Check that without fprime the default is secant if x1 is given
        # and newton otherwise.
        res_newton_default = root_scalar(f1, method='newton', x0=3, xtol=1e-6)
        res_secant_default = root_scalar(f1, method='secant', x0=3, x1=2,
                                         xtol=1e-6)
        # `newton` uses the secant method when `x1` and `x2` are specified
        res_secant = newton(f1, x0=3, x1=2, tol=1e-6, full_output=True)[1]

        # all three found a root
        assert_allclose(f1(res_newton_default.root), 0, atol=1e-6)
        assert res_newton_default.root.shape == tuple()
        assert_allclose(f1(res_secant_default.root), 0, atol=1e-6)
        assert res_secant_default.root.shape == tuple()
        assert_allclose(f1(res_secant.root), 0, atol=1e-6)
        assert res_secant.root.shape == tuple()

        # Defaults are correct
        assert (res_secant_default.root
                == res_secant.root
                != res_newton_default.iterations)
        assert (res_secant_default.iterations
                == res_secant_default.function_calls - 1  # true for secant
                == res_secant.iterations
                != res_newton_default.iterations
                == res_newton_default.function_calls/2)  # newton 2-point diff

    @pytest.mark.parametrize('kwargs', [dict(), {'method': 'newton'}])
    def test_args_gh19090(self, kwargs):
        # Extra positional args must reach the callable unchanged.
        def f(x, a, b):
            assert a == 3
            assert b == 1
            return (x ** a - b)

        res = optimize.root_scalar(f, x0=3, args=(3, 1), **kwargs)
        assert res.converged
        assert_allclose(res.root, 1)

    @pytest.mark.parametrize('method', ['secant', 'newton'])
    def test_int_x0_gh19280(self, method):
        # Originally, `newton` ensured that only floats were passed to the
        # callable. This was inadvertently changed by gh-17669. Check that
        # it has been changed back.
        def f(x):
            # an integer raised to a negative integer power would fail
            return x**-2 - 2

        res = optimize.root_scalar(f, x0=1, method=method)
        assert res.converged
        assert_allclose(abs(res.root), 2**-0.5)
        assert res.root.dtype == np.dtype(np.float64)
488
+
489
+
490
def test_gh_5555():
    # Bracketing solvers must find the root of a simple linear function
    # to within the requested tolerances (regression test for gh-5555).
    root = 0.1

    def f(x):
        return x - root

    xtol = rtol = TOL
    for solver in [zeros.bisect, zeros.ridder]:
        found = solver(f, -1e8, 1e7, xtol=xtol, rtol=rtol)
        assert_allclose(root, found, atol=xtol, rtol=rtol,
                        err_msg='method %s' % solver.__name__)
502
+
503
+
504
def test_gh_5557():
    # Show that without the changes in 5557 brentq and brenth might
    # only achieve a tolerance of 2*(xtol + rtol*|res|).

    # f linearly interpolates (0, -0.1), (0.5, -0.1), and (1, 0.4).
    # What matters: |f(0)| < |f(1)| (brent takes 0 as the initial guess),
    # |f(0)| < atol (brent accepts 0 as the root), and the exact root lies
    # more than atol away from 0 (so brent can't otherwise achieve the
    # desired tolerance).
    def f(x):
        return -0.1 if x < 0.5 else x - 0.6

    atol = 0.51
    rtol = 4 * _FLOAT_EPS
    for solver in [zeros.brentq, zeros.brenth]:
        res = solver(f, 0, 1, xtol=atol, rtol=rtol)
        assert_allclose(0.6, res, atol=atol, rtol=rtol)
526
+
527
+
528
def test_brent_underflow_in_root_bracketing():
    # Deciding whether [a, b] brackets a root by testing f(a)*f(b) < 0 is
    # unreliable when the product underflows or overflows (gh-13737).
    underflow_scenario = (-450.0, -350.0, -400.0)
    overflow_scenario = (350.0, 450.0, 400.0)

    for lo, hi, root in [underflow_scenario, overflow_scenario]:
        c = np.exp(root)
        for solver in [zeros.brenth, zeros.brentq]:
            found = solver(lambda x: np.exp(x) - c, lo, hi)
            assert_allclose(root, found)
541
+
542
+
543
+ class TestRootResults:
544
+ r = zeros.RootResults(root=1.0, iterations=44, function_calls=46, flag=0,
545
+ method="newton")
546
+
547
+ def test_repr(self):
548
+ expected_repr = (" converged: True\n flag: converged"
549
+ "\n function_calls: 46\n iterations: 44\n"
550
+ " root: 1.0\n method: newton")
551
+ assert_equal(repr(self.r), expected_repr)
552
+
553
+ def test_type(self):
554
+ assert isinstance(self.r, OptimizeResult)
555
+
556
+
557
def test_complex_halley():
    """Test Halley's works with complex roots"""
    def f(x, *a):
        return a[0] * x**2 + a[1] * x + a[2]

    def f_1(x, *a):
        return 2 * a[0] * x + a[1]

    def f_2(x, *a):
        # The second derivative is the constant 2*a[0]; broadcast it by
        # hand when `x` is a sequence.
        retval = 2 * a[0]
        try:
            size = len(x)
        except TypeError:
            return retval
        else:
            return [retval] * size

    coeffs = (2.0, 3.0, 4.0)
    z = complex(1.0, 2.0)
    y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
    # (-0.75000000000000078+1.1989578808281789j)
    assert_allclose(f(y, *coeffs), 0, atol=1e-6)

    # The same problem vectorized over ten identical starting points.
    z = [z] * 10
    y = zeros.newton(f, z, args=coeffs, fprime=f_1, fprime2=f_2, tol=1e-6)
    assert_allclose(f(y, *coeffs), 0, atol=1e-6)
582
+ assert_allclose(f(y, *coeffs), 0, atol=1e-6)
583
+
584
+
585
def test_zero_der_nz_dp(capsys):
    """Test secant method with a non-zero dp, but an infinite newton step"""
    # Pick the symmetric function f = (x - 100)**2, with root x = 100, and
    # a starting point chosen so the generated secant is a flat line with
    # exactly zero slope.  100 is large enough to make this consistent.
    dx = np.finfo(float).eps ** 0.33
    # 100 - p0 = p1 - 100 = p0 * (1 + dx) + dx - 100
    # -> 200 = p0 * (2 + dx) + dx
    p0 = (200.0 - dx) / (2.0 + dx)
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "RMS of")
        x = zeros.newton(lambda y: (y - 100.0)**2, x0=[p0] * 10)
    assert_allclose(x, [100] * 10)

    # Scalar cases: with disp=False a warning is issued, with disp=True a
    # RuntimeError is raised instead.
    p0 = (2.0 - 1e-4) / (2.0 + 1e-4)
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "Tolerance of")
        x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=False)
    assert_allclose(x, 1)
    with pytest.raises(RuntimeError, match='Tolerance of'):
        x = zeros.newton(lambda y: (y - 1.0) ** 2, x0=p0, disp=True)

    p0 = (-2.0 + 1e-4) / (2.0 + 1e-4)
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "Tolerance of")
        x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=False)
    assert_allclose(x, -1)
    with pytest.raises(RuntimeError, match='Tolerance of'):
        x = zeros.newton(lambda y: (y + 1.0) ** 2, x0=p0, disp=True)
615
+
616
+
617
def test_array_newton_failures():
    """Test that array newton fails as expected"""
    # Colebrook friction-factor problem; physical constants kept for context.
    diameter = 0.10  # [m]
    roughness = 0.00015  # [m]
    rho = 988.1  # [kg/m**3]
    mu = 5.4790e-04  # [Pa*s]
    u = 2.488  # [m/s]
    reynolds_number = rho * u * diameter / mu  # Reynolds number

    def colebrook_eqn(darcy_friction, re, dia):
        return (1 / np.sqrt(darcy_friction) +
                2 * np.log10(roughness / 3.7 / dia +
                             2.51 / re / np.sqrt(darcy_friction)))

    # Some starting points fail to converge within maxiter -> warning.
    with pytest.warns(RuntimeWarning):
        result = zeros.newton(
            colebrook_eqn, x0=[0.01, 0.2, 0.02223, 0.3], maxiter=2,
            args=[reynolds_number, diameter], full_output=True
        )
        assert not result.converged.all()
    # When every element fails, a RuntimeError is raised instead.
    with pytest.raises(RuntimeError):
        zeros.newton(
            colebrook_eqn, x0=[0.01] * 2, maxiter=2,
            args=[reynolds_number, diameter], full_output=True
        )
648
+
649
+
650
# this test should **not** raise a RuntimeWarning
def test_gh8904_zeroder_at_root_fails():
    """Test that Newton or Halley don't warn if zero derivative at root"""

    # a function whose derivative is zero at its root
    def f_zeroder_root(x):
        return x**3 - x**2

    # secant, scalar then vectorized
    r = zeros.newton(f_zeroder_root, x0=0)
    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
    r = zeros.newton(f_zeroder_root, x0=[0]*10)
    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)

    # 1st derivative
    def fder(x):
        return 3 * x**2 - 2 * x

    # 2nd derivative
    def fder2(x):
        return 6*x - 2

    # newton and halley, scalar then vectorized
    r = zeros.newton(f_zeroder_root, x0=0, fprime=fder)
    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
    r = zeros.newton(f_zeroder_root, x0=0, fprime=fder,
                     fprime2=fder2)
    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
    r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder)
    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
    r = zeros.newton(f_zeroder_root, x0=[0]*10, fprime=fder,
                     fprime2=fder2)
    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)

    # If a root is found, no RuntimeWarning must be raised even if the
    # derivative there is zero.  From x0=0.5: fval = -0.125 and
    # fder = -0.25, so the next guess is 0.5 - (-0.125/-0.25) = 0, which
    # is the root; continuing from there would hit a zero derivative, so
    # the solver must return the root without warning.
    r = zeros.newton(f_zeroder_root, x0=0.5, fprime=fder)
    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
    # vectorized variant of the same
    r = zeros.newton(f_zeroder_root, x0=[0.5]*10, fprime=fder)
    assert_allclose(r, 0, atol=zeros._xtol, rtol=zeros._rtol)
    # doesn't apply to halley
697
+
698
+
699
def test_gh_8881():
    r"""Test that Halley's method realizes that the 2nd order adjustment
    is too big and drops off to the 1st order adjustment."""
    n = 9

    def f(x):
        return power(x, 1.0/n) - power(n, 1.0/n)

    def fp(x):
        return power(x, (1.0-n)/n)/n

    def fpp(x):
        return power(x, (1.0-2*n)/n) * (1.0/n) * (1.0-n)/n

    # The root is at x = 9; f has positive slope and x0 < root.
    x0 = 0.1
    # Newton succeeds in 8 iterations.
    rt, r = newton(f, x0, fprime=fp, full_output=True)
    assert r.converged
    # Before Issue 8881 / PR 8882, halley would send x in the wrong
    # direction.  Check that it now succeeds.
    rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True)
    assert r.converged
723
+
724
+
725
def test_gh_9608_preserve_array_shape():
    """
    Test that shape is preserved for array inputs even if fprime or fprime2 is
    scalar
    """
    def f(x):
        return x**2

    def fp(x):
        return 2 * x

    def fpp(x):
        return 2

    x0 = np.array([-2], dtype=np.float32)
    rt, r = newton(f, x0, fprime=fp, fprime2=fpp, full_output=True)
    assert r.converged

    # With more than one element, a scalar fprime2 cannot be indexed, so
    # this invocation must fail.
    x0_array = np.array([-2, -3], dtype=np.float32)
    with pytest.raises(IndexError):
        zeros.newton(
            f, x0_array, fprime=fp, fprime2=fpp, full_output=True
        )

    # An array-returning fprime2 of the right shape succeeds.
    def fpp_array(x):
        return np.full(np.shape(x), 2, dtype=np.float32)

    result = zeros.newton(
        f, x0_array, fprime=fp, fprime2=fpp_array, full_output=True
    )
    assert result.converged.all()
757
+
758
+
759
@pytest.mark.parametrize(
    "maximum_iterations,flag_expected",
    [(10, zeros.CONVERR), (100, zeros.CONVERGED)])
def test_gh9254_flag_if_maxiter_exceeded(maximum_iterations, flag_expected):
    """
    Test that if the maximum iterations is exceeded that the flag is not
    converged.
    """
    result = zeros.brentq(
        lambda x: ((1.2*x - 2.3)*x + 3.4)*x - 4.5,
        -30, 30, (), 1e-6, 1e-6, maximum_iterations,
        full_output=True, disp=False)
    assert result[1].flag == flag_expected
    if flag_expected == zeros.CONVERR:
        # hit the iteration cap without converging
        assert result[1].iterations == maximum_iterations
    elif flag_expected == zeros.CONVERGED:
        # converged before reaching the cap
        assert result[1].iterations < maximum_iterations
778
+
779
+
780
def test_gh9551_raise_error_if_disp_true():
    """Test that if disp is true then zero derivative raises RuntimeError"""

    def f(x):
        return x*x + 1

    def f_p(x):
        return 2*x

    # disp=False only warns ...
    assert_warns(RuntimeWarning, zeros.newton, f, 1.0, f_p, disp=False)
    # ... while the default disp=True raises with a precise message.
    with pytest.raises(
            RuntimeError,
            match=r'^Derivative was zero\. Failed to converge after \d+ iterations, '
                  r'value is [+-]?\d*\.\d+\.$'):
        zeros.newton(f, 1.0, f_p)
    # From a complex starting point the complex root is reachable.
    root = zeros.newton(f, complex(10.0, 10.0), f_p)
    assert_allclose(root, complex(0.0, 1.0))
797
+
798
+
799
@pytest.mark.parametrize('solver_name',
                         ['brentq', 'brenth', 'bisect', 'ridder', 'toms748'])
def test_gh3089_8394(solver_name):
    # gh-3089 and gh-8394 reported that bracketing solvers returned incorrect
    # results when they encountered NaNs. Check that this is resolved.
    def f(x):
        return np.nan

    solver = getattr(zeros, solver_name)
    with pytest.raises(ValueError, match="The function value at x..."):
        solver(f, 0, 1)
810
+
811
+
812
@pytest.mark.parametrize('method',
                         ['brentq', 'brenth', 'bisect', 'ridder', 'toms748'])
def test_gh18171(method):
    # As in gh-3089 / gh-8394 the callable returns NaN; `root_scalar` must
    # return normally but report unsuccessful convergence. See gh-18171.
    def f(x):
        f._count += 1
        return np.nan
    f._count = 0

    res = root_scalar(f, bracket=(0, 1), method=method)
    assert res.converged is False
    assert res.flag.startswith("The function value at x")
    assert res.function_calls == f._count
    assert str(res.root) in res.flag
828
+
829
+
830
@pytest.mark.parametrize('solver_name',
                         ['brentq', 'brenth', 'bisect', 'ridder', 'toms748'])
@pytest.mark.parametrize('rs_interface', [True, False])
def test_function_calls(solver_name, rs_interface):
    # Check that the bracketing solvers report the correct number of
    # function evaluations, through both calling interfaces.
    solver = ((lambda f, a, b, **kwargs: root_scalar(f, bracket=(a, b)))
              if rs_interface else getattr(zeros, solver_name))

    def f(x):
        f.calls += 1
        return x**2 - 1
    f.calls = 0

    res = solver(f, 0, 10, full_output=True)

    if rs_interface:
        assert res.function_calls == f.calls
    else:
        assert res[1].function_calls == f.calls
850
+
851
+
852
def test_gh_14486_converged_false():
    """Test that zero slope with secant method results in a converged=False"""
    def lhs(x):
        return x * np.exp(-x*x) - 0.07

    # Through root_scalar ...
    with pytest.warns(RuntimeWarning, match='Tolerance of'):
        res = root_scalar(lhs, method='secant', x0=-0.15, x1=1.0)
    assert not res.converged
    assert res.flag == 'convergence error'

    # ... and through newton directly.
    with pytest.warns(RuntimeWarning, match='Tolerance of'):
        res = newton(lhs, x0=-0.15, x1=1.0, disp=False, full_output=True)[1]
    assert not res.converged
    assert res.flag == 'convergence error'
866
+
867
+
868
@pytest.mark.parametrize('solver_name',
                         ['brentq', 'brenth', 'bisect', 'ridder', 'toms748'])
@pytest.mark.parametrize('rs_interface', [True, False])
def test_gh5584(solver_name, rs_interface):
    # gh-5584 reported that an underflow can cause sign checks in the
    # algorithm to fail. Check that this is resolved.
    solver = ((lambda f, a, b, **kwargs: root_scalar(f, bracket=(a, b)))
              if rs_interface else getattr(zeros, solver_name))

    def f(x):
        return 1e-200*x

    # Report failure when signs are the same
    with pytest.raises(ValueError, match='...must have different signs'):
        solver(f, -0.5, -0.4, full_output=True)

    # Solve successfully when signs are different
    res = solver(f, -0.5, 0.4, full_output=True)
    res = res if rs_interface else res[1]
    assert res.converged
    assert_allclose(res.root, 0, atol=1e-8)

    # Solve successfully when one side is negative zero
    res = solver(f, -0.5, float('-0.0'), full_output=True)
    res = res if rs_interface else res[1]
    assert res.converged
    assert_allclose(res.root, 0, atol=1e-8)
895
+
896
+
897
def test_gh13407():
    # gh-13407: the message produced by `toms748` when `rtol < eps` was
    # incorrect, and `toms748` accepts `rtol` as low as eps while the other
    # solvers raise at 4*eps.  Check the corrected message and that
    # `rtol=eps` can produce a lower function value than `rtol=4*eps`.
    def f(x):
        return x**3 - 2*x - 5

    xtol = 1e-300
    eps = np.finfo(float).eps
    x1 = zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=1*eps)
    f1 = f(x1)
    x4 = zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=4*eps)
    f4 = f(x4)
    assert f1 < f4

    # using old-style syntax to get exactly the same message
    message = fr"rtol too small \({eps/2:g} < {eps:g}\)"
    with pytest.raises(ValueError, match=message):
        zeros.toms748(f, 1e-10, 1e10, xtol=xtol, rtol=eps/2)
918
+
919
+
920
+ def test_newton_complex_gh10103():
921
+ # gh-10103 reported a problem when `newton` is pass a Python complex x0,
922
+ # no `fprime` (secant method), and no `x1` (`x1` must be constructed).
923
+ # Check that this is resolved.
924
+ def f(z):
925
+ return z - 1
926
+ res = newton(f, 1+1j)
927
+ assert_allclose(res, 1, atol=1e-12)
928
+
929
+ res = root_scalar(f, x0=1+1j, x1=2+1.5j, method='secant')
930
+ assert_allclose(res.root, 1, atol=1e-12)
931
+
932
+
933
@pytest.mark.parametrize('method', all_methods)
def test_maxiter_int_check_gh10236(method):
    # gh-10236: the error raised for a non-integer `maxiter` was hard to
    # interpret; gh-10907 made it a clear TypeError.  Validate the message.
    message = "'float' object cannot be interpreted as an integer"
    with pytest.raises(TypeError, match=message):
        method(f1, 0.0, 1.0, maxiter=72.45)