diff --git a/.gitattributes b/.gitattributes index 70c5f7ad9c5c5a2259f1cd035ef10173a331193b..f261ebd5478e9b69d753dd9a46ca6ae2bb29e808 100644 --- a/.gitattributes +++ b/.gitattributes @@ -50,3 +50,4 @@ colmap/lib/libpba.a filter=lfs diff=lfs merge=lfs -text colmap/lib/libpoisson_recon.a filter=lfs diff=lfs merge=lfs -text colmap/lib/libsift_gpu.a filter=lfs diff=lfs merge=lfs -text ceres/lib/libceres.a filter=lfs diff=lfs merge=lfs -text +ceres-v2/lib/libceres.a filter=lfs diff=lfs merge=lfs -text diff --git a/ceres-v2/include/autodiff_cost_function.h b/ceres-v2/include/autodiff_cost_function.h new file mode 100644 index 0000000000000000000000000000000000000000..cd256432a9889a3d02787cfbded016624e467127 --- /dev/null +++ b/ceres-v2/include/autodiff_cost_function.h @@ -0,0 +1,228 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Create CostFunctions as needed by the least squares framework, with +// Jacobians computed via automatic differentiation. For more +// information on automatic differentiation, see the wikipedia article +// at http://en.wikipedia.org/wiki/Automatic_differentiation +// +// To get an auto differentiated cost function, you must define a class with a +// templated operator() (a functor) that computes the cost function in terms of +// the template parameter T. The autodiff framework substitutes appropriate +// "jet" objects for T in order to compute the derivative when necessary, but +// this is hidden, and you should write the function as if T were a scalar type +// (e.g. a double-precision floating point number). +// +// The function must write the computed value in the last argument +// (the only non-const one) and return true to indicate +// success. Please see cost_function.h for details on how the return +// value maybe used to impose simple constraints on the parameter +// block. +// +// For example, consider a scalar error e = k - x'y, where both x and y are +// two-dimensional column vector parameters, the prime sign indicates +// transposition, and k is a constant. The form of this error, which is the +// difference between a constant and an expression, is a common pattern in least +// squares problems. 
For example, the value x'y might be the model expectation +// for a series of measurements, where there is an instance of the cost function +// for each measurement k. +// +// The actual cost added to the total problem is e^2, or (k - x'y)^2; however, +// the squaring is implicitly done by the optimization framework. +// +// To write an auto-differentiable cost function for the above model, first +// define the object +// +// class MyScalarCostFunctor { +// MyScalarCostFunctor(double k): k_(k) {} +// +// template +// bool operator()(const T* const x , const T* const y, T* e) const { +// e[0] = T(k_) - x[0] * y[0] + x[1] * y[1]; +// return true; +// } +// +// private: +// double k_; +// }; +// +// Note that in the declaration of operator() the input parameters x and y come +// first, and are passed as const pointers to arrays of T. If there were three +// input parameters, then the third input parameter would come after y. The +// output is always the last parameter, and is also a pointer to an array. In +// the example above, e is a scalar, so only e[0] is set. +// +// Then given this class definition, the auto differentiated cost function for +// it can be constructed as follows. +// +// CostFunction* cost_function +// = new AutoDiffCostFunction( +// new MyScalarCostFunctor(1.0)); ^ ^ ^ +// | | | +// Dimension of residual -----+ | | +// Dimension of x ---------------+ | +// Dimension of y ------------------+ +// +// In this example, there is usually an instance for each measurement of k. +// +// In the instantiation above, the template parameters following +// "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing a +// 1-dimensional output from two arguments, both 2-dimensional. +// +// AutoDiffCostFunction also supports cost functions with a +// runtime-determined number of residuals. 
For example: +// +// CostFunction* cost_function +// = new AutoDiffCostFunction( +// new CostFunctorWithDynamicNumResiduals(1.0), ^ ^ ^ +// runtime_number_of_residuals); <----+ | | | +// | | | | +// | | | | +// Actual number of residuals ------+ | | | +// Indicate dynamic number of residuals --------+ | | +// Dimension of x ------------------------------------+ | +// Dimension of y ---------------------------------------+ +// +// WARNING #1: Since the functor will get instantiated with different types for +// T, you must convert from other numeric types to T before mixing +// computations with other variables of type T. In the example above, this is +// seen where instead of using k_ directly, k_ is wrapped with T(k_). +// +// WARNING #2: A common beginner's error when first using autodiff cost +// functions is to get the sizing wrong. In particular, there is a tendency to +// set the template parameters to (dimension of residual, number of parameters) +// instead of passing a dimension parameter for *every parameter*. In the +// example above, that would be , which is missing +// the last '2' argument. Please be careful when setting the size parameters. + +#ifndef CERES_PUBLIC_AUTODIFF_COST_FUNCTION_H_ +#define CERES_PUBLIC_AUTODIFF_COST_FUNCTION_H_ + +#include + +#include "ceres/internal/autodiff.h" +#include "ceres/sized_cost_function.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { + +// A cost function which computes the derivative of the cost with respect to +// the parameters (a.k.a. the jacobian) using an auto differentiation framework. +// The first template argument is the functor object, described in the header +// comment. The second argument is the dimension of the residual (or +// ceres::DYNAMIC to indicate it will be set at runtime), and subsequent +// arguments describe the size of the Nth parameter, one per parameter. +// +// The constructors take ownership of the cost functor. 
+// +// If the number of residuals (argument kNumResiduals below) is +// ceres::DYNAMIC, then the two-argument constructor must be used. The +// second constructor takes a number of residuals (in addition to the +// templated number of residuals). This allows for varying the number +// of residuals for a single autodiff cost function at runtime. +template // Number of parameters in each parameter block. +class AutoDiffCostFunction final + : public SizedCostFunction { + public: + // Takes ownership of functor by default. Uses the template-provided + // value for the number of residuals ("kNumResiduals"). + explicit AutoDiffCostFunction(CostFunctor* functor, + Ownership ownership = TAKE_OWNERSHIP) + : functor_(functor), ownership_(ownership) { + static_assert(kNumResiduals != DYNAMIC, + "Can't run the fixed-size constructor if the number of " + "residuals is set to ceres::DYNAMIC."); + } + + // Takes ownership of functor by default. Ignores the template-provided + // kNumResiduals in favor of the "num_residuals" argument provided. + // + // This allows for having autodiff cost functions which return varying + // numbers of residuals at runtime. + AutoDiffCostFunction(CostFunctor* functor, + int num_residuals, + Ownership ownership = TAKE_OWNERSHIP) + : functor_(functor), ownership_(ownership) { + static_assert(kNumResiduals == DYNAMIC, + "Can't run the dynamic-size constructor if the number of " + "residuals is not ceres::DYNAMIC."); + SizedCostFunction::set_num_residuals(num_residuals); + } + + AutoDiffCostFunction(AutoDiffCostFunction&& other) + : functor_(std::move(other.functor_)), ownership_(other.ownership_) {} + + virtual ~AutoDiffCostFunction() { + // Manually release pointer if configured to not take ownership rather than + // deleting only if ownership is taken. + // This is to stay maximally compatible to old user code which may have + // forgotten to implement a virtual destructor, from when the + // AutoDiffCostFunction always took ownership. 
+ if (ownership_ == DO_NOT_TAKE_OWNERSHIP) { + functor_.release(); + } + } + + // Implementation details follow; clients of the autodiff cost function should + // not have to examine below here. + // + // To handle variadic cost functions, some template magic is needed. It's + // mostly hidden inside autodiff.h. + bool Evaluate(double const* const* parameters, + double* residuals, + double** jacobians) const override { + using ParameterDims = + typename SizedCostFunction::ParameterDims; + + if (!jacobians) { + return internal::VariadicEvaluate( + *functor_, parameters, residuals); + } + return internal::AutoDifferentiate( + *functor_, + parameters, + SizedCostFunction::num_residuals(), + residuals, + jacobians); + }; + + const CostFunctor& functor() const { return *functor_; } + + private: + std::unique_ptr functor_; + Ownership ownership_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_AUTODIFF_COST_FUNCTION_H_ diff --git a/ceres-v2/include/autodiff_first_order_function.h b/ceres-v2/include/autodiff_first_order_function.h new file mode 100644 index 0000000000000000000000000000000000000000..7c13f4239a6b8322be221b064983f06289e78e54 --- /dev/null +++ b/ceres-v2/include/autodiff_first_order_function.h @@ -0,0 +1,151 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. 
nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_AUTODIFF_FIRST_ORDER_FUNCTION_H_ +#define CERES_PUBLIC_AUTODIFF_FIRST_ORDER_FUNCTION_H_ + +#include + +#include "ceres/first_order_function.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/fixed_array.h" +#include "ceres/jet.h" +#include "ceres/types.h" + +namespace ceres { + +// Create FirstOrderFunctions as needed by the GradientProblem +// framework, with gradients computed via automatic +// differentiation. For more information on automatic differentiation, +// see the wikipedia article at +// http://en.wikipedia.org/wiki/Automatic_differentiation +// +// To get an auto differentiated function, you must define a class +// with a templated operator() (a functor) that computes the cost +// function in terms of the template parameter T. 
The autodiff +// framework substitutes appropriate "jet" objects for T in order to +// compute the derivative when necessary, but this is hidden, and you +// should write the function as if T were a scalar type (e.g. a +// double-precision floating point number). +// +// The function must write the computed value in the last argument +// (the only non-const one) and return true to indicate +// success. +// +// For example, consider a scalar error e = x'y - a, where both x and y are +// two-dimensional column vector parameters, the prime sign indicates +// transposition, and a is a constant. +// +// To write an auto-differentiable FirstOrderFunction for the above model, first +// define the object +// +// class QuadraticCostFunctor { +// public: +// explicit QuadraticCostFunctor(double a) : a_(a) {} +// template +// bool operator()(const T* const xy, T* cost) const { +// const T* const x = xy; +// const T* const y = xy + 2; +// *cost = x[0] * y[0] + x[1] * y[1] - T(a_); +// return true; +// } +// +// private: +// double a_; +// }; +// +// Note that in the declaration of operator() the input parameters xy come +// first, and are passed as const pointers to arrays of T. The +// output is the last parameter. +// +// Then given this class definition, the auto differentiated FirstOrderFunction +// for it can be constructed as follows. +// +// FirstOrderFunction* function = +// new AutoDiffFirstOrderFunction( +// new QuadraticCostFunctor(1.0))); +// +// In the instantiation above, the template parameters following +// "QuadraticCostFunctor", "4", describe the functor as computing a +// 1-dimensional output from a four dimensional vector. +// +// WARNING: Since the functor will get instantiated with different types for +// T, you must convert from other numeric types to T before mixing +// computations with other variables of type T. In the example above, this is +// seen where instead of using a_ directly, a_ is wrapped with T(a_). 
+ +template +class AutoDiffFirstOrderFunction final : public FirstOrderFunction { + public: + // Takes ownership of functor. + explicit AutoDiffFirstOrderFunction(FirstOrderFunctor* functor) + : functor_(functor) { + static_assert(kNumParameters > 0, "kNumParameters must be positive"); + } + + bool Evaluate(const double* const parameters, + double* cost, + double* gradient) const override { + if (gradient == nullptr) { + return (*functor_)(parameters, cost); + } + + using JetT = Jet; + internal::FixedArray x(kNumParameters); + for (int i = 0; i < kNumParameters; ++i) { + x[i].a = parameters[i]; + x[i].v.setZero(); + x[i].v[i] = 1.0; + } + + JetT output; + output.a = kImpossibleValue; + output.v.setConstant(kImpossibleValue); + + if (!(*functor_)(x.data(), &output)) { + return false; + } + + *cost = output.a; + VectorRef(gradient, kNumParameters) = output.v; + return true; + } + + int NumParameters() const override { return kNumParameters; } + + const FirstOrderFunctor& functor() const { return *functor_; } + + private: + std::unique_ptr functor_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_AUTODIFF_FIRST_ORDER_FUNCTION_H_ diff --git a/ceres-v2/include/autodiff_local_parameterization.h b/ceres-v2/include/autodiff_local_parameterization.h new file mode 100644 index 0000000000000000000000000000000000000000..5f9b04d06707ca08553aa2a523e8ee6b65b91417 --- /dev/null +++ b/ceres-v2/include/autodiff_local_parameterization.h @@ -0,0 +1,158 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sergey.vfx@gmail.com (Sergey Sharybin) +// mierle@gmail.com (Keir Mierle) +// sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_AUTODIFF_LOCAL_PARAMETERIZATION_H_ +#define CERES_PUBLIC_AUTODIFF_LOCAL_PARAMETERIZATION_H_ + +#include + +#include "ceres/internal/autodiff.h" +#include "ceres/local_parameterization.h" + +namespace ceres { + +// WARNING: LocalParameterizations are deprecated, so is +// AutoDiffLocalParameterization. They will be removed from Ceres Solver in +// version 2.2.0. Please use Manifolds and AutoDiffManifold instead. + +// Create local parameterization with Jacobians computed via automatic +// differentiation. 
For more information on local parameterizations, +// see include/ceres/local_parameterization.h +// +// To get an auto differentiated local parameterization, you must define +// a class with a templated operator() (a functor) that computes +// +// x_plus_delta = Plus(x, delta); +// +// the template parameter T. The autodiff framework substitutes appropriate +// "Jet" objects for T in order to compute the derivative when necessary, but +// this is hidden, and you should write the function as if T were a scalar type +// (e.g. a double-precision floating point number). +// +// The function must write the computed value in the last argument (the only +// non-const one) and return true to indicate success. +// +// For example, Quaternions have a three dimensional local +// parameterization. It's plus operation can be implemented as (taken +// from internal/ceres/auto_diff_local_parameterization_test.cc) +// +// struct QuaternionPlus { +// template +// bool operator()(const T* x, const T* delta, T* x_plus_delta) const { +// const T squared_norm_delta = +// delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]; +// +// T q_delta[4]; +// if (squared_norm_delta > T(0.0)) { +// T norm_delta = sqrt(squared_norm_delta); +// const T sin_delta_by_delta = sin(norm_delta) / norm_delta; +// q_delta[0] = cos(norm_delta); +// q_delta[1] = sin_delta_by_delta * delta[0]; +// q_delta[2] = sin_delta_by_delta * delta[1]; +// q_delta[3] = sin_delta_by_delta * delta[2]; +// } else { +// // We do not just use q_delta = [1,0,0,0] here because that is a +// // constant and when used for automatic differentiation will +// // lead to a zero derivative. Instead we take a first order +// // approximation and evaluate it at zero. 
+// q_delta[0] = T(1.0); +// q_delta[1] = delta[0]; +// q_delta[2] = delta[1]; +// q_delta[3] = delta[2]; +// } +// +// QuaternionProduct(q_delta, x, x_plus_delta); +// return true; +// } +// }; +// +// Then given this struct, the auto differentiated local +// parameterization can now be constructed as +// +// LocalParameterization* local_parameterization = +// new AutoDiffLocalParameterization; +// | | +// Global Size ---------------+ | +// Local Size -------------------+ +// +// WARNING: Since the functor will get instantiated with different types for +// T, you must to convert from other numeric types to T before mixing +// computations with other variables of type T. In the example above, this is +// seen where instead of using k_ directly, k_ is wrapped with T(k_). + +template +class CERES_DEPRECATED_WITH_MSG("Use AutoDiffManifold instead.") + AutoDiffLocalParameterization : public LocalParameterization { + public: + AutoDiffLocalParameterization() : functor_(new Functor()) {} + + // Takes ownership of functor. 
+ explicit AutoDiffLocalParameterization(Functor* functor) + : functor_(functor) {} + + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override { + return (*functor_)(x, delta, x_plus_delta); + } + + bool ComputeJacobian(const double* x, double* jacobian) const override { + double zero_delta[kLocalSize]; + for (int i = 0; i < kLocalSize; ++i) { + zero_delta[i] = 0.0; + } + + double x_plus_delta[kGlobalSize]; + for (int i = 0; i < kGlobalSize; ++i) { + x_plus_delta[i] = 0.0; + } + + const double* parameter_ptrs[2] = {x, zero_delta}; + double* jacobian_ptrs[2] = {nullptr, jacobian}; + return internal::AutoDifferentiate< + kGlobalSize, + internal::StaticParameterDims>( + *functor_, parameter_ptrs, kGlobalSize, x_plus_delta, jacobian_ptrs); + } + + int GlobalSize() const override { return kGlobalSize; } + int LocalSize() const override { return kLocalSize; } + + const Functor& functor() const { return *functor_; } + + private: + std::unique_ptr functor_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_AUTODIFF_LOCAL_PARAMETERIZATION_H_ diff --git a/ceres-v2/include/autodiff_manifold.h b/ceres-v2/include/autodiff_manifold.h new file mode 100644 index 0000000000000000000000000000000000000000..3063e19e8023c8944b9424b25e2b2aa453439c59 --- /dev/null +++ b/ceres-v2/include/autodiff_manifold.h @@ -0,0 +1,259 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_AUTODIFF_MANIFOLD_H_ +#define CERES_PUBLIC_AUTODIFF_MANIFOLD_H_ + +#include + +#include "ceres/internal/autodiff.h" +#include "ceres/manifold.h" + +namespace ceres { + +// Create a Manifold with Jacobians computed via automatic differentiation. 
For +// more information on manifolds, see include/ceres/manifold.h +// +// To get an auto differentiated manifold, you must define a class/struct with +// templated Plus and Minus functions that compute +// +// x_plus_delta = Plus(x, delta); +// y_minus_x = Minus(y, x); +// +// Where, x, y and x_plus_y are vectors on the manifold in the ambient space (so +// they are kAmbientSize vectors) and delta, y_minus_x are vectors in the +// tangent space (so they are kTangentSize vectors). +// +// The Functor should have the signature: +// +// struct Functor { +// template +// bool Plus(const T* x, const T* delta, T* x_plus_delta) const; +// +// template +// bool Minus(const T* y, const T* x, T* y_minus_x) const; +// }; +// +// Observe that the Plus and Minus operations are templated on the parameter T. +// The autodiff framework substitutes appropriate "Jet" objects for T in order +// to compute the derivative when necessary. This is the same mechanism that is +// used to compute derivatives when using AutoDiffCostFunction. +// +// Plus and Minus should return true if the computation is successful and false +// otherwise, in which case the result will not be used. +// +// Given this Functor, the corresponding Manifold can be constructed as: +// +// AutoDiffManifold manifold; +// +// As a concrete example consider the case of Quaternions. Quaternions form a +// three dimensional manifold embedded in R^4, i.e. they have an ambient +// dimension of 4 and their tangent space has dimension 3. The following Functor +// (taken from autodiff_manifold_test.cc) defines the Plus and Minus operations +// on the Quaternion manifold: +// +// NOTE: The following is only used for illustration purposes. Ceres Solver +// ships with optimized production grade QuaternionManifold implementation. See +// manifold.h. +// +// This functor assumes that the quaternions are laid out as [w,x,y,z] in +// memory, i.e. the real or scalar part is the first coordinate. 
+// +// struct QuaternionFunctor { +// template +// bool Plus(const T* x, const T* delta, T* x_plus_delta) const { +// const T squared_norm_delta = +// delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]; +// +// T q_delta[4]; +// if (squared_norm_delta > T(0.0)) { +// T norm_delta = sqrt(squared_norm_delta); +// const T sin_delta_by_delta = sin(norm_delta) / norm_delta; +// q_delta[0] = cos(norm_delta); +// q_delta[1] = sin_delta_by_delta * delta[0]; +// q_delta[2] = sin_delta_by_delta * delta[1]; +// q_delta[3] = sin_delta_by_delta * delta[2]; +// } else { +// // We do not just use q_delta = [1,0,0,0] here because that is a +// // constant and when used for automatic differentiation will +// // lead to a zero derivative. Instead we take a first order +// // approximation and evaluate it at zero. +// q_delta[0] = T(1.0); +// q_delta[1] = delta[0]; +// q_delta[2] = delta[1]; +// q_delta[3] = delta[2]; +// } +// +// QuaternionProduct(q_delta, x, x_plus_delta); +// return true; +// } +// +// template +// bool Minus(const T* y, const T* x, T* y_minus_x) const { +// T minus_x[4] = {x[0], -x[1], -x[2], -x[3]}; +// T ambient_y_minus_x[4]; +// QuaternionProduct(y, minus_x, ambient_y_minus_x); +// T u_norm = sqrt(ambient_y_minus_x[1] * ambient_y_minus_x[1] + +// ambient_y_minus_x[2] * ambient_y_minus_x[2] + +// ambient_y_minus_x[3] * ambient_y_minus_x[3]); +// if (u_norm > 0.0) { +// T theta = atan2(u_norm, ambient_y_minus_x[0]); +// y_minus_x[0] = theta * ambient_y_minus_x[1] / u_norm; +// y_minus_x[1] = theta * ambient_y_minus_x[2] / u_norm; +// y_minus_x[2] = theta * ambient_y_minus_x[3] / u_norm; +// } else { +// // We do not use [0,0,0] here because even though the value part is +// // a constant, the derivative part is not. 
+// y_minus_x[0] = ambient_y_minus_x[1]; +// y_minus_x[1] = ambient_y_minus_x[2]; +// y_minus_x[2] = ambient_y_minus_x[3]; +// } +// return true; +// } +// }; +// +// Then given this struct, the auto differentiated Quaternion Manifold can now +// be constructed as +// +// Manifold* manifold = new AutoDiffManifold; + +template +class AutoDiffManifold final : public Manifold { + public: + AutoDiffManifold() : functor_(std::make_unique()) {} + + // Takes ownership of functor. + explicit AutoDiffManifold(Functor* functor) : functor_(functor) {} + + int AmbientSize() const override { return kAmbientSize; } + int TangentSize() const override { return kTangentSize; } + + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override { + return functor_->Plus(x, delta, x_plus_delta); + } + + bool PlusJacobian(const double* x, double* jacobian) const override; + + bool Minus(const double* y, + const double* x, + double* y_minus_x) const override { + return functor_->Minus(y, x, y_minus_x); + } + + bool MinusJacobian(const double* x, double* jacobian) const override; + + const Functor& functor() const { return *functor_; } + + private: + std::unique_ptr functor_; +}; + +namespace internal { + +// The following two helper structs are needed to interface the Plus and Minus +// methods of the ManifoldFunctor with the automatic differentiation which +// expects a Functor with operator(). 
+template +struct PlusWrapper { + explicit PlusWrapper(const Functor& functor) : functor(functor) {} + template + bool operator()(const T* x, const T* delta, T* x_plus_delta) const { + return functor.Plus(x, delta, x_plus_delta); + } + const Functor& functor; +}; + +template +struct MinusWrapper { + explicit MinusWrapper(const Functor& functor) : functor(functor) {} + template + bool operator()(const T* y, const T* x, T* y_minus_x) const { + return functor.Minus(y, x, y_minus_x); + } + const Functor& functor; +}; +} // namespace internal + +template +bool AutoDiffManifold::PlusJacobian( + const double* x, double* jacobian) const { + double zero_delta[kTangentSize]; + for (int i = 0; i < kTangentSize; ++i) { + zero_delta[i] = 0.0; + } + + double x_plus_delta[kAmbientSize]; + for (int i = 0; i < kAmbientSize; ++i) { + x_plus_delta[i] = 0.0; + } + + const double* parameter_ptrs[2] = {x, zero_delta}; + + // PlusJacobian is D_2 Plus(x,0) so we only need to compute the Jacobian + // w.r.t. the second argument. + double* jacobian_ptrs[2] = {nullptr, jacobian}; + return internal::AutoDifferentiate< + kAmbientSize, + internal::StaticParameterDims>( + internal::PlusWrapper(*functor_), + parameter_ptrs, + kAmbientSize, + x_plus_delta, + jacobian_ptrs); +} + +template +bool AutoDiffManifold::MinusJacobian( + const double* x, double* jacobian) const { + double y_minus_x[kTangentSize]; + for (int i = 0; i < kTangentSize; ++i) { + y_minus_x[i] = 0.0; + } + + const double* parameter_ptrs[2] = {x, x}; + + // MinusJacobian is D_1 Minus(x,x), so we only need to compute the Jacobian + // w.r.t. the first argument. 
+ double* jacobian_ptrs[2] = {jacobian, nullptr}; + return internal::AutoDifferentiate< + kTangentSize, + internal::StaticParameterDims>( + internal::MinusWrapper(*functor_), + parameter_ptrs, + kTangentSize, + y_minus_x, + jacobian_ptrs); +} + +} // namespace ceres + +#endif // CERES_PUBLIC_AUTODIFF_MANIFOLD_H_ diff --git a/ceres-v2/include/c_api.h b/ceres-v2/include/c_api.h new file mode 100644 index 0000000000000000000000000000000000000000..1be8ca2e0773e424e0137468377f43469216f445 --- /dev/null +++ b/ceres-v2/include/c_api.h @@ -0,0 +1,148 @@ +/* Ceres Solver - A fast non-linear least squares minimizer + * Copyright 2019 Google Inc. All rights reserved. + * http://ceres-solver.org/ + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * - Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * - Neither the name of Google Inc. nor the names of its contributors may be + * used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * Author: mierle@gmail.com (Keir Mierle) + * + * A minimal C API for Ceres. Not all functionality is included. This API is + * not intended for clients of Ceres, but is instead intended for easing the + * process of binding Ceres to other languages. + * + * Currently this is a work in progress. + */ + +#ifndef CERES_PUBLIC_C_API_H_ +#define CERES_PUBLIC_C_API_H_ + +// clang-format off +#include "ceres/internal/export.h" +#include "ceres/internal/disable_warnings.h" +// clang-format on + +#ifdef __cplusplus +extern "C" { +#endif + +/* Init the Ceres private data. Must be called before anything else. */ +CERES_EXPORT void ceres_init(); + +/* Equivalent to CostFunction::Evaluate() in the C++ API. + * + * The user may keep private information inside the opaque user_data object. + * The pointer here is the same one passed in the ceres_add_residual_block(). + */ +typedef int (*ceres_cost_function_t)(void* user_data, + double** parameters, + double* residuals, + double** jacobians); + +/* Equivalent to LossFunction::Evaluate() from the C++ API. */ +typedef void (*ceres_loss_function_t)(void* user_data, + double squared_norm, + double out[3]); + +/* Create callback data for Ceres' stock loss functions. + * + * Ceres has several loss functions available by default, and these functions + * expose those to the C API. 
To use the stock loss functions, call + * ceres_create_*_loss_data(), which internally creates an instance of one of + * the stock loss functions (for example ceres::CauchyLoss), and pass the + * returned "loss_function_data" along with the ceres_stock_loss_function to + * ceres_add_residual_block(). + * + * For example: + * + * void* cauchy_loss_function_data = + * ceres_create_cauchy_loss_function_data(1.2, 0.0); + * ceres_problem_add_residual_block( + * problem, + * my_cost_function, + * my_cost_function_data, + * ceres_stock_loss_function, + * cauchy_loss_function_data, + * 1, + * 2, + * parameter_sizes, + * parameter_pointers); + * ... + * ceres_free_stock_loss_function_data(cauchy_loss_function_data); + * + * See loss_function.h for the details of each loss function. + */ +CERES_EXPORT void* ceres_create_huber_loss_function_data(double a); +CERES_EXPORT void* ceres_create_softl1_loss_function_data(double a); +CERES_EXPORT void* ceres_create_cauchy_loss_function_data(double a); +CERES_EXPORT void* ceres_create_arctan_loss_function_data(double a); +CERES_EXPORT void* ceres_create_tolerant_loss_function_data(double a, double b); + +/* Free the given stock loss function data. */ +CERES_EXPORT void ceres_free_stock_loss_function_data(void* loss_function_data); + +/* This is an implementation of ceres_loss_function_t contained within Ceres + * itself, intended as a way to access the various stock Ceres loss functions + * from the C API. This should be passed to ceres_add_residual() below, in + * combination with a user_data pointer generated by + * ceres_create_stock_loss_function() above. */ +CERES_EXPORT void ceres_stock_loss_function(void* user_data, + double squared_norm, + double out[3]); + +/* Equivalent to Problem from the C++ API. 
*/ +struct ceres_problem_s; +typedef struct ceres_problem_s ceres_problem_t; + +struct ceres_residual_block_id_s; +typedef struct ceres_residual_block_id_s ceres_residual_block_id_t; + +/* Create and destroy a problem */ +/* TODO(keir): Add options for the problem. */ +CERES_EXPORT ceres_problem_t* ceres_create_problem(); +CERES_EXPORT void ceres_free_problem(ceres_problem_t* problem); + +/* Add a residual block. */ +CERES_EXPORT ceres_residual_block_id_t* ceres_problem_add_residual_block( + ceres_problem_t* problem, + ceres_cost_function_t cost_function, + void* cost_function_data, + ceres_loss_function_t loss_function, + void* loss_function_data, + int num_residuals, + int num_parameter_blocks, + int* parameter_block_sizes, + double** parameters); + +CERES_EXPORT void ceres_solve(ceres_problem_t* problem); + +/* TODO(keir): Figure out a way to pass a config in. */ + +#ifdef __cplusplus +} +#endif + +#include "ceres/internal/reenable_warnings.h" + +#endif /* CERES_PUBLIC_C_API_H_ */ diff --git a/ceres-v2/include/ceres.h b/ceres-v2/include/ceres.h new file mode 100644 index 0000000000000000000000000000000000000000..c32477d42543bb054f174ec6d784734aaf7f36b4 --- /dev/null +++ b/ceres-v2/include/ceres.h @@ -0,0 +1,74 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. 
nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: keir@google.com (Keir Mierle) +// +// This is a forwarding header containing the public symbols exported from +// Ceres. Anything in the "ceres" namespace is available for use. 
+ +#ifndef CERES_PUBLIC_CERES_H_ +#define CERES_PUBLIC_CERES_H_ + +#include "ceres/autodiff_cost_function.h" +#include "ceres/autodiff_first_order_function.h" +#include "ceres/autodiff_local_parameterization.h" +#include "ceres/autodiff_manifold.h" +#include "ceres/conditioned_cost_function.h" +#include "ceres/context.h" +#include "ceres/cost_function.h" +#include "ceres/cost_function_to_functor.h" +#include "ceres/covariance.h" +#include "ceres/crs_matrix.h" +#include "ceres/dynamic_autodiff_cost_function.h" +#include "ceres/dynamic_cost_function.h" +#include "ceres/dynamic_cost_function_to_functor.h" +#include "ceres/dynamic_numeric_diff_cost_function.h" +#include "ceres/evaluation_callback.h" +#include "ceres/first_order_function.h" +#include "ceres/gradient_checker.h" +#include "ceres/gradient_problem.h" +#include "ceres/gradient_problem_solver.h" +#include "ceres/iteration_callback.h" +#include "ceres/jet.h" +#include "ceres/line_manifold.h" +#include "ceres/local_parameterization.h" +#include "ceres/loss_function.h" +#include "ceres/manifold.h" +#include "ceres/numeric_diff_cost_function.h" +#include "ceres/numeric_diff_first_order_function.h" +#include "ceres/numeric_diff_options.h" +#include "ceres/ordered_groups.h" +#include "ceres/problem.h" +#include "ceres/product_manifold.h" +#include "ceres/sized_cost_function.h" +#include "ceres/solver.h" +#include "ceres/sphere_manifold.h" +#include "ceres/types.h" +#include "ceres/version.h" + +#endif // CERES_PUBLIC_CERES_H_ diff --git a/ceres-v2/include/conditioned_cost_function.h b/ceres-v2/include/conditioned_cost_function.h new file mode 100644 index 0000000000000000000000000000000000000000..e4c3decbfd5b32af4ee3353bba76b43f010f4f87 --- /dev/null +++ b/ceres-v2/include/conditioned_cost_function.h @@ -0,0 +1,101 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. 
+// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: wjr@google.com (William Rucklidge) +// +// This file contains a cost function that can apply a transformation to +// each residual value before they are square-summed. 
+ +#ifndef CERES_PUBLIC_CONDITIONED_COST_FUNCTION_H_ +#define CERES_PUBLIC_CONDITIONED_COST_FUNCTION_H_ + +#include +#include + +#include "ceres/cost_function.h" +#include "ceres/internal/disable_warnings.h" +#include "ceres/types.h" + +namespace ceres { + +// This class allows you to apply different conditioning to the residual +// values of a wrapped cost function. An example where this is useful is +// where you have an existing cost function that produces N values, but you +// want the total cost to be something other than just the sum of these +// squared values - maybe you want to apply a different scaling to some +// values, to change their contribution to the cost. +// +// Usage: +// +// // my_cost_function produces N residuals +// CostFunction* my_cost_function = ... +// CHECK_EQ(N, my_cost_function->num_residuals()); +// vector conditioners; +// +// // Make N 1x1 cost functions (1 parameter, 1 residual) +// CostFunction* f_1 = ... +// conditioners.push_back(f_1); +// ... +// CostFunction* f_N = ... +// conditioners.push_back(f_N); +// ConditionedCostFunction* ccf = +// new ConditionedCostFunction(my_cost_function, conditioners); +// +// Now ccf's residual i (i=0..N-1) will be passed through the i'th conditioner. +// +// ccf_residual[i] = f_i(my_cost_function_residual[i]) +// +// and the Jacobian will be affected appropriately. +class CERES_EXPORT ConditionedCostFunction final : public CostFunction { + public: + // Builds a cost function based on a wrapped cost function, and a + // per-residual conditioner. Takes ownership of all of the wrapped cost + // functions, or not, depending on the ownership parameter. Conditioners + // may be nullptr, in which case the corresponding residual is not modified. + // + // The conditioners can repeat. 
+ ConditionedCostFunction(CostFunction* wrapped_cost_function, + const std::vector& conditioners, + Ownership ownership); + ~ConditionedCostFunction() override; + + bool Evaluate(double const* const* parameters, + double* residuals, + double** jacobians) const override; + + private: + std::unique_ptr wrapped_cost_function_; + std::vector conditioners_; + Ownership ownership_; +}; + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_CONDITIONED_COST_FUNCTION_H_ diff --git a/ceres-v2/include/context.h b/ceres-v2/include/context.h new file mode 100644 index 0000000000000000000000000000000000000000..6c6e8f4c95359e5b905c4afc284cb88e9bdfb3a6 --- /dev/null +++ b/ceres-v2/include/context.h @@ -0,0 +1,58 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: vitus@google.com (Michael Vitus) + +#ifndef CERES_PUBLIC_CONTEXT_H_ +#define CERES_PUBLIC_CONTEXT_H_ + +#include "ceres/internal/export.h" + +namespace ceres { + +// A global context for processing data in Ceres. This provides a mechanism to +// allow Ceres to reuse items that are expensive to create between multiple +// calls; for example, thread pools. The same Context can be used on multiple +// Problems, either serially or in parallel. When using it with multiple +// Problems at the same time, they may end up contending for resources +// (e.g. threads) managed by the Context. +class CERES_EXPORT Context { + public: + Context(); + Context(const Context&) = delete; + void operator=(const Context&) = delete; + + virtual ~Context(); + + // Creates a context object and the caller takes ownership. + static Context* Create(); +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_CONTEXT_H_ diff --git a/ceres-v2/include/cost_function.h b/ceres-v2/include/cost_function.h new file mode 100644 index 0000000000000000000000000000000000000000..fef972b75af43b615241697d891e67fb8832a7eb --- /dev/null +++ b/ceres-v2/include/cost_function.h @@ -0,0 +1,144 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. 
+// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// keir@google.com (Keir Mierle) +// +// This is the interface through which the least squares solver accesses the +// residual and Jacobian of the least squares problem. Users are expected to +// subclass CostFunction to define their own terms in the least squares problem. 
+// +// It is recommended that users define templated residual functors for use as +// arguments for AutoDiffCostFunction (see autodiff_cost_function.h), instead of +// directly implementing the CostFunction interface. This often results in both +// shorter code and faster execution than hand-coded derivatives. However, +// specialized cases may demand direct implementation of the lower-level +// CostFunction interface; for example, this is true when calling legacy code +// which is not templated on numeric types. + +#ifndef CERES_PUBLIC_COST_FUNCTION_H_ +#define CERES_PUBLIC_COST_FUNCTION_H_ + +#include +#include + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" + +namespace ceres { + +// This class implements the computation of the cost (a.k.a. residual) terms as +// a function of the input (control) variables, and is the interface for users +// to describe their least squares problem to Ceres. In other words, this is the +// modeling layer between users and the Ceres optimizer. The signature of the +// function (number and sizes of input parameter blocks and number of outputs) +// is stored in parameter_block_sizes_ and num_residuals_ respectively. User +// code inheriting from this class is expected to set these two members with the +// corresponding accessors. This information will be verified by the Problem +// when added with AddResidualBlock(). +class CERES_EXPORT CostFunction { + public: + CostFunction(); + CostFunction(const CostFunction&) = delete; + void operator=(const CostFunction&) = delete; + + virtual ~CostFunction(); + + // Inputs: + // + // parameters is an array of pointers to arrays containing the + // various parameter blocks. parameters has the same number of + // elements as parameter_block_sizes_. Parameter blocks are in the + // same order as parameter_block_sizes_, i.e., + // + // parameters_[i] = double[parameter_block_sizes_[i]] + // + // Outputs: + // + // residuals is an array of size num_residuals_. 
+ // + // jacobians is an array of size parameter_block_sizes_ containing + // pointers to storage for jacobian blocks corresponding to each + // parameter block. Jacobian blocks are in the same order as + // parameter_block_sizes, i.e. jacobians[i], is an + // array that contains num_residuals_* parameter_block_sizes_[i] + // elements. Each jacobian block is stored in row-major order, i.e., + // + // jacobians[i][r*parameter_block_size_[i] + c] = + // d residual[r] / d parameters[i][c] + // + // If jacobians is nullptr, then no derivatives are returned; this is + // the case when computing cost only. If jacobians[i] is nullptr, then + // the jacobian block corresponding to the i'th parameter block must + // not be returned. + // + // The return value indicates whether the computation of the + // residuals and/or jacobians was successful or not. + // + // This can be used to communicate numerical failures in jacobian + // computations for instance. + // + // A more interesting and common use is to impose constraints on the + // parameters. If the initial values of the parameter blocks satisfy + // the constraints, then returning false whenever the constraints + // are not satisfied will prevent the solver from moving into the + // infeasible region. This is not a very sophisticated mechanism for + // enforcing constraints, but is often good enough. + // + // Note that it is important that the initial values of the + // parameter block must be feasible, otherwise the solver will + // declare a numerical problem at iteration 0. 
+ virtual bool Evaluate(double const* const* parameters, + double* residuals, + double** jacobians) const = 0; + + const std::vector& parameter_block_sizes() const { + return parameter_block_sizes_; + } + + int num_residuals() const { return num_residuals_; } + + protected: + std::vector* mutable_parameter_block_sizes() { + return ¶meter_block_sizes_; + } + + void set_num_residuals(int num_residuals) { num_residuals_ = num_residuals; } + + private: + // Cost function signature metadata: number of inputs & their sizes, + // number of outputs (residuals). + std::vector parameter_block_sizes_; + int num_residuals_; +}; + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_COST_FUNCTION_H_ diff --git a/ceres-v2/include/cost_function_to_functor.h b/ceres-v2/include/cost_function_to_functor.h new file mode 100644 index 0000000000000000000000000000000000000000..08a8050c5f8701085b3087f957d1b445c8c9bea2 --- /dev/null +++ b/ceres-v2/include/cost_function_to_functor.h @@ -0,0 +1,171 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// CostFunctionToFunctor is an adapter class that allows users to use +// SizedCostFunction objects in templated functors which are to be used for +// automatic differentiation. This allows the user to seamlessly mix +// analytic, numeric and automatic differentiation. +// +// For example, let us assume that +// +// class IntrinsicProjection : public SizedCostFunction<2, 5, 3> { +// public: +// IntrinsicProjection(const double* observation); +// bool Evaluate(double const* const* parameters, +// double* residuals, +// double** jacobians) const override; +// }; +// +// is a cost function that implements the projection of a point in its +// local coordinate system onto its image plane and subtracts it from +// the observed point projection. It can compute its residual and +// jacobians either via analytic or numerical differentiation. +// +// Now we would like to compose the action of this CostFunction with +// the action of camera extrinsics, i.e., rotation and +// translation. 
Say we have a templated function +// +// template +// void RotateAndTranslatePoint(const T* rotation, +// const T* translation, +// const T* point, +// T* result); +// +// Then we can now do the following, +// +// struct CameraProjection { +// CameraProjection(const double* observation) +// : intrinsic_projection_(new IntrinsicProjection(observation)) { +// } +// template +// bool operator()(const T* rotation, +// const T* translation, +// const T* intrinsics, +// const T* point, +// T* residual) const { +// T transformed_point[3]; +// RotateAndTranslatePoint(rotation, translation, point, transformed_point); +// +// // Note that we call intrinsic_projection_, just like it was +// // any other templated functor. +// +// return intrinsic_projection_(intrinsics, transformed_point, residual); +// } +// +// private: +// CostFunctionToFunctor<2,5,3> intrinsic_projection_; +// }; + +#ifndef CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_ +#define CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_ + +#include +#include +#include +#include +#include + +#include "ceres/cost_function.h" +#include "ceres/dynamic_cost_function_to_functor.h" +#include "ceres/internal/export.h" +#include "ceres/internal/fixed_array.h" +#include "ceres/internal/parameter_dims.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { + +template +class CostFunctionToFunctor { + public: + // Takes ownership of cost_function. 
+ explicit CostFunctionToFunctor(CostFunction* cost_function) + : cost_functor_(cost_function) { + CHECK(cost_function != nullptr); + CHECK(kNumResiduals > 0 || kNumResiduals == DYNAMIC); + + const std::vector& parameter_block_sizes = + cost_function->parameter_block_sizes(); + const int num_parameter_blocks = ParameterDims::kNumParameterBlocks; + CHECK_EQ(static_cast(parameter_block_sizes.size()), + num_parameter_blocks); + + if (parameter_block_sizes.size() == num_parameter_blocks) { + for (int block = 0; block < num_parameter_blocks; ++block) { + CHECK_EQ(ParameterDims::GetDim(block), parameter_block_sizes[block]) + << "Parameter block size missmatch. The specified static parameter " + "block dimension does not match the one from the cost function."; + } + } + + CHECK_EQ(accumulate( + parameter_block_sizes.begin(), parameter_block_sizes.end(), 0), + ParameterDims::kNumParameters); + } + + template + bool operator()(const T* p1, Ts*... ps) const { + // Add one because of residual block. + static_assert(sizeof...(Ts) + 1 == ParameterDims::kNumParameterBlocks + 1, + "Invalid number of parameter blocks specified."); + + auto params = std::make_tuple(p1, ps...); + + // Extract residual pointer from params. The residual pointer is the + // last pointer. + constexpr int kResidualIndex = ParameterDims::kNumParameterBlocks; + T* residuals = std::get(params); + + // Extract parameter block pointers from params. 
+ using Indices = + std::make_integer_sequence; + std::array parameter_blocks = + GetParameterPointers(params, Indices()); + + return cost_functor_(parameter_blocks.data(), residuals); + } + + private: + using ParameterDims = internal::StaticParameterDims; + + template + static std::array + GetParameterPointers(const Tuple& paramPointers, + std::integer_sequence) { + return std::array{ + {std::get(paramPointers)...}}; + } + + DynamicCostFunctionToFunctor cost_functor_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_ diff --git a/ceres-v2/include/covariance.h b/ceres-v2/include/covariance.h new file mode 100644 index 0000000000000000000000000000000000000000..60bcc80b80f282a878176a1c2f92783c50902573 --- /dev/null +++ b/ceres-v2/include/covariance.h @@ -0,0 +1,459 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_COVARIANCE_H_ +#define CERES_PUBLIC_COVARIANCE_H_ + +#include +#include +#include + +#include "ceres/internal/config.h" +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/types.h" + +namespace ceres { + +class Problem; + +namespace internal { +class CovarianceImpl; +} // namespace internal + +// WARNING +// ======= +// It is very easy to use this class incorrectly without understanding +// the underlying mathematics. Please read and understand the +// documentation completely before attempting to use it. +// +// +// This class allows the user to evaluate the covariance for a +// non-linear least squares problem and provides random access to its +// blocks +// +// Background +// ========== +// One way to assess the quality of the solution returned by a +// non-linear least squares solver is to analyze the covariance of the +// solution. +// +// Let us consider the non-linear regression problem +// +// y = f(x) + N(0, I) +// +// i.e., the observation y is a random non-linear function of the +// independent variable x with mean f(x) and identity covariance. 
Then +// the maximum likelihood estimate of x given observations y is the +// solution to the non-linear least squares problem: +// +// x* = arg min_x |f(x) - y|^2 +// +// And the covariance of x* is given by +// +// C(x*) = inverse[J'(x*)J(x*)] +// +// Here J(x*) is the Jacobian of f at x*. The above formula assumes +// that J(x*) has full column rank. +// +// If J(x*) is rank deficient, then the covariance matrix C(x*) is +// also rank deficient and is given by +// +// C(x*) = pseudoinverse[J'(x*)J(x*)] +// +// Note that in the above, we assumed that the covariance +// matrix for y was identity. This is an important assumption. If this +// is not the case and we have +// +// y = f(x) + N(0, S) +// +// Where S is a positive semi-definite matrix denoting the covariance +// of y, then the maximum likelihood problem to be solved is +// +// x* = arg min_x f'(x) inverse[S] f(x) +// +// and the corresponding covariance estimate of x* is given by +// +// C(x*) = inverse[J'(x*) inverse[S] J(x*)] +// +// So, if it is the case that the observations being fitted to have a +// covariance matrix not equal to identity, then it is the user's +// responsibility that the corresponding cost functions are correctly +// scaled, e.g. in the above case the cost function for this problem +// should evaluate S^{-1/2} f(x) instead of just f(x), where S^{-1/2} +// is the inverse square root of the covariance matrix S. +// +// This class allows the user to evaluate the covariance for a +// non-linear least squares problem and provides random access to its +// blocks. The computation assumes that the CostFunctions compute +// residuals such that their covariance is identity. +// +// Since the computation of the covariance matrix requires computing +// the inverse of a potentially large matrix, this can involve a +// rather large amount of time and memory. However, it is usually the +// case that the user is only interested in a small part of the +// covariance matrix. 
Quite often just the block diagonal. This class +// allows the user to specify the parts of the covariance matrix that +// she is interested in and then uses this information to only compute +// and store those parts of the covariance matrix. +// +// Rank of the Jacobian +// -------------------- +// As we noted above, if the jacobian is rank deficient, then the +// inverse of J'J is not defined and instead a pseudo inverse needs to +// be computed. +// +// The rank deficiency in J can be structural -- columns which are +// always known to be zero or numerical -- depending on the exact +// values in the Jacobian. +// +// Structural rank deficiency occurs when the problem contains +// parameter blocks that are constant. This class correctly handles +// structural rank deficiency like that. +// +// Numerical rank deficiency, where the rank of the matrix cannot be +// predicted by its sparsity structure and requires looking at its +// numerical values is more complicated. Here again there are two +// cases. +// +// a. The rank deficiency arises from overparameterization. e.g., a +// four dimensional quaternion used to parameterize SO(3), which is +// a three dimensional manifold. In cases like this, the user should +// use an appropriate LocalParameterization/Manifold. Not only will this lead +// to better numerical behaviour of the Solver, it will also expose +// the rank deficiency to the Covariance object so that it can +// handle it correctly. +// +// b. More general numerical rank deficiency in the Jacobian +// requires the computation of the so called Singular Value +// Decomposition (SVD) of J'J. We do not know how to do this for +// large sparse matrices efficiently. For small and moderate sized +// problems this is done using dense linear algebra. +// +// Gauge Invariance +// ---------------- +// In structure from motion (3D reconstruction) problems, the +// reconstruction is ambiguous up to a similarity transform. This is +// known as a Gauge Ambiguity. 
Handling Gauges correctly requires the +// use of SVD or custom inversion algorithms. For small problems the +// user can use the dense algorithm. For more details see +// +// Ken-ichi Kanatani, Daniel D. Morris: Gauges and gauge +// transformations for uncertainty description of geometric structure +// with indeterminacy. IEEE Transactions on Information Theory 47(5): +// 2017-2028 (2001) +// +// Example Usage +// ============= +// +// double x[3]; +// double y[2]; +// +// Problem problem; +// problem.AddParameterBlock(x, 3); +// problem.AddParameterBlock(y, 2); +// +// +// +// Covariance::Options options; +// Covariance covariance(options); +// +// std::vector> covariance_blocks; +// covariance_blocks.push_back(make_pair(x, x)); +// covariance_blocks.push_back(make_pair(y, y)); +// covariance_blocks.push_back(make_pair(x, y)); +// +// CHECK(covariance.Compute(covariance_blocks, &problem)); +// +// double covariance_xx[3 * 3]; +// double covariance_yy[2 * 2]; +// double covariance_xy[3 * 2]; +// covariance.GetCovarianceBlock(x, x, covariance_xx) +// covariance.GetCovarianceBlock(y, y, covariance_yy) +// covariance.GetCovarianceBlock(x, y, covariance_xy) +// +class CERES_EXPORT Covariance { + public: + struct CERES_EXPORT Options { + // Sparse linear algebra library to use when a sparse matrix + // factorization is being used to compute the covariance matrix. + // + // Currently this only applies to SPARSE_QR. + SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type = +#if !defined(CERES_NO_SUITESPARSE) + SUITE_SPARSE; +#else + // Eigen's QR factorization is always available. + EIGEN_SPARSE; +#endif + + // Ceres supports two different algorithms for covariance + // estimation, which represent different tradeoffs in speed, + // accuracy and reliability. + // + // 1. DENSE_SVD uses Eigen's JacobiSVD to perform the + // computations. 
It computes the singular value decomposition + // + // U * D * V' = J + // + // and then uses it to compute the pseudo inverse of J'J as + // + // pseudoinverse[J'J] = V * pseudoinverse[D^2] * V' + // + // It is an accurate but slow method and should only be used + // for small to moderate sized problems. It can handle + // full-rank as well as rank deficient Jacobians. + // + // 2. SPARSE_QR uses the sparse QR factorization algorithm + // to compute the decomposition + // + // Q * R = J + // + // [J'J]^-1 = [R'*R]^-1 + // + // SPARSE_QR is not capable of computing the covariance if the + // Jacobian is rank deficient. Depending on the value of + // Covariance::Options::sparse_linear_algebra_library_type, either + // Eigen's Sparse QR factorization algorithm will be used or + // SuiteSparse's high performance SuiteSparseQR algorithm will be + // used. + CovarianceAlgorithmType algorithm_type = SPARSE_QR; + + // If the Jacobian matrix is near singular, then inverting J'J + // will result in unreliable results, e.g, if + // + // J = [1.0 1.0 ] + // [1.0 1.0000001 ] + // + // which is essentially a rank deficient matrix, we have + // + // inv(J'J) = [ 2.0471e+14 -2.0471e+14] + // [-2.0471e+14 2.0471e+14] + // + // This is not a useful result. Therefore, by default + // Covariance::Compute will return false if a rank deficient + // Jacobian is encountered. How rank deficiency is detected + // depends on the algorithm being used. + // + // 1. DENSE_SVD + // + // min_sigma / max_sigma < sqrt(min_reciprocal_condition_number) + // + // where min_sigma and max_sigma are the minimum and maxiumum + // singular values of J respectively. + // + // 2. SPARSE_QR + // + // rank(J) < num_col(J) + // + // Here rank(J) is the estimate of the rank of J returned by the + // sparse QR factorization algorithm. It is a fairly reliable + // indication of rank deficiency. 
+ // + double min_reciprocal_condition_number = 1e-14; + + // When using DENSE_SVD, the user has more control in dealing with + // singular and near singular covariance matrices. + // + // As mentioned above, when the covariance matrix is near + // singular, instead of computing the inverse of J'J, the + // Moore-Penrose pseudoinverse of J'J should be computed. + // + // If J'J has the eigen decomposition (lambda_i, e_i), where + // lambda_i is the i^th eigenvalue and e_i is the corresponding + // eigenvector, then the inverse of J'J is + // + // inverse[J'J] = sum_i e_i e_i' / lambda_i + // + // and computing the pseudo inverse involves dropping terms from + // this sum that correspond to small eigenvalues. + // + // How terms are dropped is controlled by + // min_reciprocal_condition_number and null_space_rank. + // + // If null_space_rank is non-negative, then the smallest + // null_space_rank eigenvalue/eigenvectors are dropped + // irrespective of the magnitude of lambda_i. If the ratio of the + // smallest non-zero eigenvalue to the largest eigenvalue in the + // truncated matrix is still below + // min_reciprocal_condition_number, then the Covariance::Compute() + // will fail and return false. + // + // Setting null_space_rank = -1 drops all terms for which + // + // lambda_i / lambda_max < min_reciprocal_condition_number. + // + // This option has no effect on the SUITE_SPARSE_QR and + // EIGEN_SPARSE_QR algorithms. + int null_space_rank = 0; + + int num_threads = 1; + + // Even though the residual blocks in the problem may contain loss + // functions, setting apply_loss_function to false will turn off + // the application of the loss function to the output of the cost + // function and in turn its effect on the covariance. + // + // TODO(sameergaarwal): Expand this based on Jim's experiments. + bool apply_loss_function = true; + }; + + explicit Covariance(const Options& options); + ~Covariance(); + + // Compute a part of the covariance matrix. 
+ // + // The vector covariance_blocks, indexes into the covariance matrix + // block-wise using pairs of parameter blocks. This allows the + // covariance estimation algorithm to only compute and store these + // blocks. + // + // Since the covariance matrix is symmetric, if the user passes + // (block1, block2), then GetCovarianceBlock can be called with + // block1, block2 as well as block2, block1. + // + // covariance_blocks cannot contain duplicates. Bad things will + // happen if they do. + // + // Note that the list of covariance_blocks is only used to determine + // what parts of the covariance matrix are computed. The full + // Jacobian is used to do the computation, i.e. they do not have an + // impact on what part of the Jacobian is used for computation. + // + // The return value indicates the success or failure of the + // covariance computation. Please see the documentation for + // Covariance::Options for more on the conditions under which this + // function returns false. + bool Compute(const std::vector>& + covariance_blocks, + Problem* problem); + + // Compute a part of the covariance matrix. + // + // The vector parameter_blocks contains the parameter blocks that + // are used for computing the covariance matrix. From this vector + // all covariance pairs are generated. This allows the covariance + // estimation algorithm to only compute and store these blocks. + // + // parameter_blocks cannot contain duplicates. Bad things will + // happen if they do. + // + // Note that the list of covariance_blocks is only used to determine + // what parts of the covariance matrix are computed. The full + // Jacobian is used to do the computation, i.e. they do not have an + // impact on what part of the Jacobian is used for computation. + // + // The return value indicates the success or failure of the + // covariance computation. 
Please see the documentation for + // Covariance::Options for more on the conditions under which this + // function returns false. + bool Compute(const std::vector& parameter_blocks, + Problem* problem); + + // Return the block of the cross-covariance matrix corresponding to + // parameter_block1 and parameter_block2. + // + // Compute must be called before the first call to + // GetCovarianceBlock and the pair OR the pair must have been present in the vector + // covariance_blocks when Compute was called. Otherwise + // GetCovarianceBlock will return false. + // + // covariance_block must point to a memory location that can store a + // parameter_block1_size x parameter_block2_size matrix. The + // returned covariance will be a row-major matrix. + bool GetCovarianceBlock(const double* parameter_block1, + const double* parameter_block2, + double* covariance_block) const; + + // Return the block of the cross-covariance matrix corresponding to + // parameter_block1 and parameter_block2. + // Returns cross-covariance in the tangent space if a local + // parameterization is associated with either parameter block; + // else returns cross-covariance in the ambient space. + // + // Compute must be called before the first call to + // GetCovarianceBlock and the pair OR the pair must have been present in the vector + // covariance_blocks when Compute was called. Otherwise + // GetCovarianceBlock will return false. + // + // covariance_block must point to a memory location that can store a + // parameter_block1_local_size x parameter_block2_local_size matrix. The + // returned covariance will be a row-major matrix. + bool GetCovarianceBlockInTangentSpace(const double* parameter_block1, + const double* parameter_block2, + double* covariance_block) const; + + // Return the covariance matrix corresponding to all parameter_blocks. 
+ // + // Compute must be called before calling GetCovarianceMatrix and all + // parameter_blocks must have been present in the vector + // parameter_blocks when Compute was called. Otherwise + // GetCovarianceMatrix returns false. + // + // covariance_matrix must point to a memory location that can store + // the size of the covariance matrix. The covariance matrix will be + // a square matrix whose row and column count is equal to the sum of + // the sizes of the individual parameter blocks. The covariance + // matrix will be a row-major matrix. + bool GetCovarianceMatrix(const std::vector& parameter_blocks, + double* covariance_matrix) const; + + // Return the covariance matrix corresponding to parameter_blocks + // in the tangent space if a local parameterization is associated + // with one of the parameter blocks else returns the covariance + // matrix in the ambient space. + // + // Compute must be called before calling GetCovarianceMatrix and all + // parameter_blocks must have been present in the vector + // parameters_blocks when Compute was called. Otherwise + // GetCovarianceMatrix returns false. + // + // covariance_matrix must point to a memory location that can store + // the size of the covariance matrix. The covariance matrix will be + // a square matrix whose row and column count is equal to the sum of + // the sizes of the tangent spaces of the individual parameter + // blocks. The covariance matrix will be a row-major matrix. 
+ bool GetCovarianceMatrixInTangentSpace( + const std::vector& parameter_blocks, + double* covariance_matrix) const; + + private: + std::unique_ptr impl_; +}; + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_COVARIANCE_H_ diff --git a/ceres-v2/include/crs_matrix.h b/ceres-v2/include/crs_matrix.h new file mode 100644 index 0000000000000000000000000000000000000000..faa0f9885289a2419bc4f2e985810a98d97bef4f --- /dev/null +++ b/ceres-v2/include/crs_matrix.h @@ -0,0 +1,87 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_CRS_MATRIX_H_ +#define CERES_PUBLIC_CRS_MATRIX_H_ + +#include + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" + +namespace ceres { + +// A compressed row sparse matrix used primarily for communicating the +// Jacobian matrix to the user. +struct CERES_EXPORT CRSMatrix { + CRSMatrix() = default; + + int num_rows{0}; + int num_cols{0}; + + // A compressed row matrix stores its contents in three arrays, + // rows, cols and values. + // + // rows is a num_rows + 1 sized array that points into the cols and + // values array. For each row i: + // + // cols[rows[i]] ... cols[rows[i + 1] - 1] are the indices of the + // non-zero columns of row i. + // + // values[rows[i]] .. values[rows[i + 1] - 1] are the values of the + // corresponding entries. + // + // cols and values contain as many entries as there are non-zeros in + // the matrix. 
+ // + // e.g, consider the 3x4 sparse matrix + // + // [ 0 10 0 4 ] + // [ 0 2 -3 2 ] + // [ 1 2 0 0 ] + // + // The three arrays will be: + // + // + // -row0- ---row1--- -row2- + // rows = [ 0, 2, 5, 7] + // cols = [ 1, 3, 1, 2, 3, 0, 1] + // values = [10, 4, 2, -3, 2, 1, 2] + + std::vector cols; + std::vector rows; + std::vector values; +}; + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_CRS_MATRIX_H_ diff --git a/ceres-v2/include/cubic_interpolation.h b/ceres-v2/include/cubic_interpolation.h new file mode 100644 index 0000000000000000000000000000000000000000..3ca6b11b407471bc362207ab71df34d294d3a2ca --- /dev/null +++ b/ceres-v2/include/cubic_interpolation.h @@ -0,0 +1,436 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_CUBIC_INTERPOLATION_H_ +#define CERES_PUBLIC_CUBIC_INTERPOLATION_H_ + +#include "Eigen/Core" +#include "ceres/internal/export.h" +#include "glog/logging.h" + +namespace ceres { + +// Given samples from a function sampled at four equally spaced points, +// +// p0 = f(-1) +// p1 = f(0) +// p2 = f(1) +// p3 = f(2) +// +// Evaluate the cubic Hermite spline (also known as the Catmull-Rom +// spline) at a point x that lies in the interval [0, 1]. +// +// This is also the interpolation kernel (for the case of a = 0.5) as +// proposed by R. Keys, in: +// +// "Cubic convolution interpolation for digital image processing". +// IEEE Transactions on Acoustics, Speech, and Signal Processing +// 29 (6): 1153-1160. +// +// For more details see +// +// http://en.wikipedia.org/wiki/Cubic_Hermite_spline +// http://en.wikipedia.org/wiki/Bicubic_interpolation +// +// f if not nullptr will contain the interpolated function values. +// dfdx if not nullptr will contain the interpolated derivative values. 
+template +void CubicHermiteSpline(const Eigen::Matrix& p0, + const Eigen::Matrix& p1, + const Eigen::Matrix& p2, + const Eigen::Matrix& p3, + const double x, + double* f, + double* dfdx) { + using VType = Eigen::Matrix; + const VType a = 0.5 * (-p0 + 3.0 * p1 - 3.0 * p2 + p3); + const VType b = 0.5 * (2.0 * p0 - 5.0 * p1 + 4.0 * p2 - p3); + const VType c = 0.5 * (-p0 + p2); + const VType d = p1; + + // Use Horner's rule to evaluate the function value and its + // derivative. + + // f = ax^3 + bx^2 + cx + d + if (f != nullptr) { + Eigen::Map(f, kDataDimension) = d + x * (c + x * (b + x * a)); + } + + // dfdx = 3ax^2 + 2bx + c + if (dfdx != nullptr) { + Eigen::Map(dfdx, kDataDimension) = c + x * (2.0 * b + 3.0 * a * x); + } +} + +// Given as input an infinite one dimensional grid, which provides the +// following interface. +// +// class Grid { +// public: +// enum { DATA_DIMENSION = 2; }; +// void GetValue(int n, double* f) const; +// }; +// +// Here, GetValue gives the value of a function f (possibly vector +// valued) for any integer n. +// +// The enum DATA_DIMENSION indicates the dimensionality of the +// function being interpolated. For example if you are interpolating +// rotations in axis-angle format over time, then DATA_DIMENSION = 3. +// +// CubicInterpolator uses cubic Hermite splines to produce a smooth +// approximation to it that can be used to evaluate the f(x) and f'(x) +// at any point on the real number line. +// +// For more details on cubic interpolation see +// +// http://en.wikipedia.org/wiki/Cubic_Hermite_spline +// +// Example usage: +// +// const double data[] = {1.0, 2.0, 5.0, 6.0}; +// Grid1D grid(data, 0, 4); +// CubicInterpolator> interpolator(grid); +// double f, dfdx; +// interpolator.Evaluator(1.5, &f, &dfdx); +template +class CubicInterpolator { + public: + explicit CubicInterpolator(const Grid& grid) : grid_(grid) { + // The + casts the enum into an int before doing the + // comparison. 
It is needed to prevent + // "-Wunnamed-type-template-args" related errors. + CHECK_GE(+Grid::DATA_DIMENSION, 1); + } + + void Evaluate(double x, double* f, double* dfdx) const { + const int n = std::floor(x); + Eigen::Matrix p0, p1, p2, p3; + grid_.GetValue(n - 1, p0.data()); + grid_.GetValue(n, p1.data()); + grid_.GetValue(n + 1, p2.data()); + grid_.GetValue(n + 2, p3.data()); + CubicHermiteSpline(p0, p1, p2, p3, x - n, f, dfdx); + } + + // The following two Evaluate overloads are needed for interfacing + // with automatic differentiation. The first is for when a scalar + // evaluation is done, and the second one is for when Jets are used. + void Evaluate(const double& x, double* f) const { Evaluate(x, f, nullptr); } + + template + void Evaluate(const JetT& x, JetT* f) const { + double fx[Grid::DATA_DIMENSION], dfdx[Grid::DATA_DIMENSION]; + Evaluate(x.a, fx, dfdx); + for (int i = 0; i < Grid::DATA_DIMENSION; ++i) { + f[i].a = fx[i]; + f[i].v = dfdx[i] * x.v; + } + } + + private: + const Grid& grid_; +}; + +// An object that implements an infinite one dimensional grid needed +// by the CubicInterpolator where the source of the function values is +// an array of type T on the interval +// +// [begin, ..., end - 1] +// +// Since the input array is finite and the grid is infinite, values +// outside this interval needs to be computed. Grid1D uses the value +// from the nearest edge. +// +// The function being provided can be vector valued, in which case +// kDataDimension > 1. The dimensional slices of the function maybe +// interleaved, or they maybe stacked, i.e, if the function has +// kDataDimension = 2, if kInterleaved = true, then it is stored as +// +// f01, f02, f11, f12 .... +// +// and if kInterleaved = false, then it is stored as +// +// f01, f11, .. fn1, f02, f12, .. 
, fn2 +// +template +struct Grid1D { + public: + enum { DATA_DIMENSION = kDataDimension }; + + Grid1D(const T* data, const int begin, const int end) + : data_(data), begin_(begin), end_(end), num_values_(end - begin) { + CHECK_LT(begin, end); + } + + EIGEN_STRONG_INLINE void GetValue(const int n, double* f) const { + const int idx = (std::min)((std::max)(begin_, n), end_ - 1) - begin_; + if (kInterleaved) { + for (int i = 0; i < kDataDimension; ++i) { + f[i] = static_cast(data_[kDataDimension * idx + i]); + } + } else { + for (int i = 0; i < kDataDimension; ++i) { + f[i] = static_cast(data_[i * num_values_ + idx]); + } + } + } + + private: + const T* data_; + const int begin_; + const int end_; + const int num_values_; +}; + +// Given as input an infinite two dimensional grid like object, which +// provides the following interface: +// +// struct Grid { +// enum { DATA_DIMENSION = 1 }; +// void GetValue(int row, int col, double* f) const; +// }; +// +// Where, GetValue gives us the value of a function f (possibly vector +// valued) for any pairs of integers (row, col), and the enum +// DATA_DIMENSION indicates the dimensionality of the function being +// interpolated. For example if you are interpolating a color image +// with three channels (Red, Green & Blue), then DATA_DIMENSION = 3. +// +// BiCubicInterpolator uses the cubic convolution interpolation +// algorithm of R. Keys, to produce a smooth approximation to it that +// can be used to evaluate the f(r,c), df(r, c)/dr and df(r,c)/dc at +// any point in the real plane. +// +// For more details on the algorithm used here see: +// +// "Cubic convolution interpolation for digital image processing". +// Robert G. Keys, IEEE Trans. on Acoustics, Speech, and Signal +// Processing 29 (6): 1153-1160, 1981. 
+// +// http://en.wikipedia.org/wiki/Cubic_Hermite_spline +// http://en.wikipedia.org/wiki/Bicubic_interpolation +// +// Example usage: +// +// const double data[] = {1.0, 3.0, -1.0, 4.0, +// 3.6, 2.1, 4.2, 2.0, +// 2.0, 1.0, 3.1, 5.2}; +// Grid2D grid(data, 3, 4); +// BiCubicInterpolator> interpolator(grid); +// double f, dfdr, dfdc; +// interpolator.Evaluate(1.2, 2.5, &f, &dfdr, &dfdc); + +template +class BiCubicInterpolator { + public: + explicit BiCubicInterpolator(const Grid& grid) : grid_(grid) { + // The + casts the enum into an int before doing the + // comparison. It is needed to prevent + // "-Wunnamed-type-template-args" related errors. + CHECK_GE(+Grid::DATA_DIMENSION, 1); + } + + // Evaluate the interpolated function value and/or its + // derivative. Uses the nearest point on the grid boundary if r or + // c is out of bounds. + void Evaluate( + double r, double c, double* f, double* dfdr, double* dfdc) const { + // BiCubic interpolation requires 16 values around the point being + // evaluated. We will use pij, to indicate the elements of the + // 4x4 grid of values. + // + // col + // p00 p01 p02 p03 + // row p10 p11 p12 p13 + // p20 p21 p22 p23 + // p30 p31 p32 p33 + // + // The point (r,c) being evaluated is assumed to lie in the square + // defined by p11, p12, p22 and p21. + + const int row = std::floor(r); + const int col = std::floor(c); + + Eigen::Matrix p0, p1, p2, p3; + + // Interpolate along each of the four rows, evaluating the function + // value and the horizontal derivative in each row. 
+ Eigen::Matrix f0, f1, f2, f3; + Eigen::Matrix df0dc, df1dc, df2dc, df3dc; + + grid_.GetValue(row - 1, col - 1, p0.data()); + grid_.GetValue(row - 1, col, p1.data()); + grid_.GetValue(row - 1, col + 1, p2.data()); + grid_.GetValue(row - 1, col + 2, p3.data()); + CubicHermiteSpline( + p0, p1, p2, p3, c - col, f0.data(), df0dc.data()); + + grid_.GetValue(row, col - 1, p0.data()); + grid_.GetValue(row, col, p1.data()); + grid_.GetValue(row, col + 1, p2.data()); + grid_.GetValue(row, col + 2, p3.data()); + CubicHermiteSpline( + p0, p1, p2, p3, c - col, f1.data(), df1dc.data()); + + grid_.GetValue(row + 1, col - 1, p0.data()); + grid_.GetValue(row + 1, col, p1.data()); + grid_.GetValue(row + 1, col + 1, p2.data()); + grid_.GetValue(row + 1, col + 2, p3.data()); + CubicHermiteSpline( + p0, p1, p2, p3, c - col, f2.data(), df2dc.data()); + + grid_.GetValue(row + 2, col - 1, p0.data()); + grid_.GetValue(row + 2, col, p1.data()); + grid_.GetValue(row + 2, col + 1, p2.data()); + grid_.GetValue(row + 2, col + 2, p3.data()); + CubicHermiteSpline( + p0, p1, p2, p3, c - col, f3.data(), df3dc.data()); + + // Interpolate vertically the interpolated value from each row and + // compute the derivative along the columns. + CubicHermiteSpline(f0, f1, f2, f3, r - row, f, dfdr); + if (dfdc != nullptr) { + // Interpolate vertically the derivative along the columns. + CubicHermiteSpline( + df0dc, df1dc, df2dc, df3dc, r - row, dfdc, nullptr); + } + } + + // The following two Evaluate overloads are needed for interfacing + // with automatic differentiation. The first is for when a scalar + // evaluation is done, and the second one is for when Jets are used. 
+ void Evaluate(const double& r, const double& c, double* f) const { + Evaluate(r, c, f, nullptr, nullptr); + } + + template + void Evaluate(const JetT& r, const JetT& c, JetT* f) const { + double frc[Grid::DATA_DIMENSION]; + double dfdr[Grid::DATA_DIMENSION]; + double dfdc[Grid::DATA_DIMENSION]; + Evaluate(r.a, c.a, frc, dfdr, dfdc); + for (int i = 0; i < Grid::DATA_DIMENSION; ++i) { + f[i].a = frc[i]; + f[i].v = dfdr[i] * r.v + dfdc[i] * c.v; + } + } + + private: + const Grid& grid_; +}; + +// An object that implements an infinite two dimensional grid needed +// by the BiCubicInterpolator where the source of the function values +// is an grid of type T on the grid +// +// [(row_start, col_start), ..., (row_start, col_end - 1)] +// [ ... ] +// [(row_end - 1, col_start), ..., (row_end - 1, col_end - 1)] +// +// Since the input grid is finite and the grid is infinite, values +// outside this interval needs to be computed. Grid2D uses the value +// from the nearest edge. +// +// The function being provided can be vector valued, in which case +// kDataDimension > 1. The data maybe stored in row or column major +// format and the various dimensional slices of the function maybe +// interleaved, or they maybe stacked, i.e, if the function has +// kDataDimension = 2, is stored in row-major format and if +// kInterleaved = true, then it is stored as +// +// f001, f002, f011, f012, ... +// +// A commonly occuring example are color images (RGB) where the three +// channels are stored interleaved. +// +// If kInterleaved = false, then it is stored as +// +// f001, f011, ..., fnm1, f002, f012, ... 
+template +struct Grid2D { + public: + enum { DATA_DIMENSION = kDataDimension }; + + Grid2D(const T* data, + const int row_begin, + const int row_end, + const int col_begin, + const int col_end) + : data_(data), + row_begin_(row_begin), + row_end_(row_end), + col_begin_(col_begin), + col_end_(col_end), + num_rows_(row_end - row_begin), + num_cols_(col_end - col_begin), + num_values_(num_rows_ * num_cols_) { + CHECK_GE(kDataDimension, 1); + CHECK_LT(row_begin, row_end); + CHECK_LT(col_begin, col_end); + } + + EIGEN_STRONG_INLINE void GetValue(const int r, const int c, double* f) const { + const int row_idx = + (std::min)((std::max)(row_begin_, r), row_end_ - 1) - row_begin_; + const int col_idx = + (std::min)((std::max)(col_begin_, c), col_end_ - 1) - col_begin_; + + const int n = (kRowMajor) ? num_cols_ * row_idx + col_idx + : num_rows_ * col_idx + row_idx; + + if (kInterleaved) { + for (int i = 0; i < kDataDimension; ++i) { + f[i] = static_cast(data_[kDataDimension * n + i]); + } + } else { + for (int i = 0; i < kDataDimension; ++i) { + f[i] = static_cast(data_[i * num_values_ + n]); + } + } + } + + private: + const T* data_; + const int row_begin_; + const int row_end_; + const int col_begin_; + const int col_end_; + const int num_rows_; + const int num_cols_; + const int num_values_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_CUBIC_INTERPOLATOR_H_ diff --git a/ceres-v2/include/dynamic_autodiff_cost_function.h b/ceres-v2/include/dynamic_autodiff_cost_function.h new file mode 100644 index 0000000000000000000000000000000000000000..c21d0517f27f5cfa1e9d37d9aa12b61cb9e23a70 --- /dev/null +++ b/ceres-v2/include/dynamic_autodiff_cost_function.h @@ -0,0 +1,274 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. 
+// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// mierle@gmail.com (Keir Mierle) + +#ifndef CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_ +#define CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_ + +#include +#include +#include +#include + +#include "ceres/dynamic_cost_function.h" +#include "ceres/internal/fixed_array.h" +#include "ceres/jet.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { + +// This autodiff implementation differs from the one found in +// autodiff_cost_function.h by supporting autodiff on cost functions +// with variable numbers of parameters with variable sizes. With the +// other implementation, all the sizes (both the number of parameter +// blocks and the size of each block) must be fixed at compile time. +// +// The functor API differs slightly from the API for fixed size +// autodiff; the expected interface for the cost functors is: +// +// struct MyCostFunctor { +// template +// bool operator()(T const* const* parameters, T* residuals) const { +// // Use parameters[i] to access the i'th parameter block. +// } +// }; +// +// Since the sizing of the parameters is done at runtime, you must +// also specify the sizes after creating the dynamic autodiff cost +// function. For example: +// +// DynamicAutoDiffCostFunction cost_function( +// new MyCostFunctor()); +// cost_function.AddParameterBlock(5); +// cost_function.AddParameterBlock(10); +// cost_function.SetNumResiduals(21); +// +// Under the hood, the implementation evaluates the cost function +// multiple times, computing a small set of the derivatives (four by +// default, controlled by the Stride template parameter) with each +// pass. There is a tradeoff with the size of the passes; you may want +// to experiment with the stride. +template +class DynamicAutoDiffCostFunction final : public DynamicCostFunction { + public: + // Takes ownership by default. 
+ explicit DynamicAutoDiffCostFunction(CostFunctor* functor, + Ownership ownership = TAKE_OWNERSHIP) + : functor_(functor), ownership_(ownership) {} + + DynamicAutoDiffCostFunction(DynamicAutoDiffCostFunction&& other) + : functor_(std::move(other.functor_)), ownership_(other.ownership_) {} + + ~DynamicAutoDiffCostFunction() override { + // Manually release pointer if configured to not take ownership + // rather than deleting only if ownership is taken. This is to + // stay maximally compatible to old user code which may have + // forgotten to implement a virtual destructor, from when the + // AutoDiffCostFunction always took ownership. + if (ownership_ == DO_NOT_TAKE_OWNERSHIP) { + functor_.release(); + } + } + + bool Evaluate(double const* const* parameters, + double* residuals, + double** jacobians) const override { + CHECK_GT(num_residuals(), 0) + << "You must call DynamicAutoDiffCostFunction::SetNumResiduals() " + << "before DynamicAutoDiffCostFunction::Evaluate()."; + + if (jacobians == nullptr) { + return (*functor_)(parameters, residuals); + } + + // The difficulty with Jets, as implemented in Ceres, is that they were + // originally designed for strictly compile-sized use. At this point, there + // is a large body of code that assumes inside a cost functor it is + // acceptable to do e.g. T(1.5) and get an appropriately sized jet back. + // + // Unfortunately, it is impossible to communicate the expected size of a + // dynamically sized jet to the static instantiations that existing code + // depends on. + // + // To work around this issue, the solution here is to evaluate the + // jacobians in a series of passes, each one computing Stride * + // num_residuals() derivatives. This is done with small, fixed-size jets. 
+ const int num_parameter_blocks = + static_cast(parameter_block_sizes().size()); + const int num_parameters = std::accumulate( + parameter_block_sizes().begin(), parameter_block_sizes().end(), 0); + + // Allocate scratch space for the strided evaluation. + using JetT = Jet; + internal::FixedArray input_jets( + num_parameters); + internal::FixedArray output_jets( + num_residuals()); + + // Make the parameter pack that is sent to the functor (reused). + internal::FixedArray*> jet_parameters( + num_parameter_blocks, nullptr); + int num_active_parameters = 0; + + // To handle constant parameters between non-constant parameter blocks, the + // start position --- a raw parameter index --- of each contiguous block of + // non-constant parameters is recorded in start_derivative_section. + std::vector start_derivative_section; + bool in_derivative_section = false; + int parameter_cursor = 0; + + // Discover the derivative sections and set the parameter values. + for (int i = 0; i < num_parameter_blocks; ++i) { + jet_parameters[i] = &input_jets[parameter_cursor]; + + const int parameter_block_size = parameter_block_sizes()[i]; + if (jacobians[i] != nullptr) { + if (!in_derivative_section) { + start_derivative_section.push_back(parameter_cursor); + in_derivative_section = true; + } + + num_active_parameters += parameter_block_size; + } else { + in_derivative_section = false; + } + + for (int j = 0; j < parameter_block_size; ++j, parameter_cursor++) { + input_jets[parameter_cursor].a = parameters[i][j]; + } + } + + if (num_active_parameters == 0) { + return (*functor_)(parameters, residuals); + } + // When `num_active_parameters % Stride != 0` then it can be the case + // that `active_parameter_count < Stride` while parameter_cursor is less + // than the total number of parameters and with no remaining non-constant + // parameter blocks. 
Pushing parameter_cursor (the total number of + // parameters) as a final entry to start_derivative_section is required + // because if a constant parameter block is encountered after the + // last non-constant block then current_derivative_section is incremented + // and would otherwise index an invalid position in + // start_derivative_section. Setting the final element to the total number + // of parameters means that this can only happen at most once in the loop + // below. + start_derivative_section.push_back(parameter_cursor); + + // Evaluate all of the strides. Each stride is a chunk of the derivative to + // evaluate, typically some size proportional to the size of the SIMD + // registers of the CPU. + int num_strides = static_cast( + ceil(num_active_parameters / static_cast(Stride))); + + int current_derivative_section = 0; + int current_derivative_section_cursor = 0; + + for (int pass = 0; pass < num_strides; ++pass) { + // Set most of the jet components to zero, except for + // non-constant #Stride parameters. + const int initial_derivative_section = current_derivative_section; + const int initial_derivative_section_cursor = + current_derivative_section_cursor; + + int active_parameter_count = 0; + parameter_cursor = 0; + + for (int i = 0; i < num_parameter_blocks; ++i) { + for (int j = 0; j < parameter_block_sizes()[i]; + ++j, parameter_cursor++) { + input_jets[parameter_cursor].v.setZero(); + if (active_parameter_count < Stride && + parameter_cursor >= + (start_derivative_section[current_derivative_section] + + current_derivative_section_cursor)) { + if (jacobians[i] != nullptr) { + input_jets[parameter_cursor].v[active_parameter_count] = 1.0; + ++active_parameter_count; + ++current_derivative_section_cursor; + } else { + ++current_derivative_section; + current_derivative_section_cursor = 0; + } + } + } + } + + if (!(*functor_)(&jet_parameters[0], &output_jets[0])) { + return false; + } + + // Copy the pieces of the jacobians into their final place. 
+ active_parameter_count = 0; + + current_derivative_section = initial_derivative_section; + current_derivative_section_cursor = initial_derivative_section_cursor; + + for (int i = 0, parameter_cursor = 0; i < num_parameter_blocks; ++i) { + for (int j = 0; j < parameter_block_sizes()[i]; + ++j, parameter_cursor++) { + if (active_parameter_count < Stride && + parameter_cursor >= + (start_derivative_section[current_derivative_section] + + current_derivative_section_cursor)) { + if (jacobians[i] != nullptr) { + for (int k = 0; k < num_residuals(); ++k) { + jacobians[i][k * parameter_block_sizes()[i] + j] = + output_jets[k].v[active_parameter_count]; + } + ++active_parameter_count; + ++current_derivative_section_cursor; + } else { + ++current_derivative_section; + current_derivative_section_cursor = 0; + } + } + } + } + + // Only copy the residuals over once (even though we compute them on + // every loop). + if (pass == num_strides - 1) { + for (int k = 0; k < num_residuals(); ++k) { + residuals[k] = output_jets[k].a; + } + } + } + return true; + } + + private: + std::unique_ptr functor_; + Ownership ownership_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_ diff --git a/ceres-v2/include/dynamic_cost_function.h b/ceres-v2/include/dynamic_cost_function.h new file mode 100644 index 0000000000000000000000000000000000000000..c84a366dafbe6318c8caa925342ec6cba61046d3 --- /dev/null +++ b/ceres-v2/include/dynamic_cost_function.h @@ -0,0 +1,57 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_ +#define CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_ + +#include "ceres/cost_function.h" +#include "ceres/internal/disable_warnings.h" + +namespace ceres { + +// A common base class for DynamicAutoDiffCostFunction and +// DynamicNumericDiffCostFunction which depend on methods that can add +// parameter blocks and set the number of residuals at run time. 
+class CERES_EXPORT DynamicCostFunction : public CostFunction { + public: + virtual void AddParameterBlock(int size) { + mutable_parameter_block_sizes()->push_back(size); + } + + virtual void SetNumResiduals(int num_residuals) { + set_num_residuals(num_residuals); + } +}; + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_ diff --git a/ceres-v2/include/dynamic_cost_function_to_functor.h b/ceres-v2/include/dynamic_cost_function_to_functor.h new file mode 100644 index 0000000000000000000000000000000000000000..5b5feaaf58e6327a5d6f4d66ccd624b53672fb43 --- /dev/null +++ b/ceres-v2/include/dynamic_cost_function_to_functor.h @@ -0,0 +1,194 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// dgossow@google.com (David Gossow) + +#ifndef CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_ +#define CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_ + +#include +#include +#include + +#include "ceres/dynamic_cost_function.h" +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/internal/fixed_array.h" +#include "glog/logging.h" + +namespace ceres { + +// DynamicCostFunctionToFunctor allows users to use CostFunction +// objects in templated functors which are to be used for automatic +// differentiation. It works similar to CostFunctionToFunctor, with the +// difference that it allows you to wrap a cost function with dynamic numbers +// of parameters and residuals. +// +// For example, let us assume that +// +// class IntrinsicProjection : public CostFunction { +// public: +// IntrinsicProjection(const double* observation); +// bool Evaluate(double const* const* parameters, +// double* residuals, +// double** jacobians) const override; +// }; +// +// is a cost function that implements the projection of a point in its +// local coordinate system onto its image plane and subtracts it from +// the observed point projection. It can compute its residual and +// either via analytic or numerical differentiation can compute its +// jacobians. The intrinsics are passed in as parameters[0] and the point as +// parameters[1]. 
+// +// Now we would like to compose the action of this CostFunction with +// the action of camera extrinsics, i.e., rotation and +// translation. Say we have a templated function +// +// template +// void RotateAndTranslatePoint(double const* const* parameters, +// double* residuals); +// +// Then we can now do the following, +// +// struct CameraProjection { +// CameraProjection(const double* observation) +// : intrinsic_projection_.(new IntrinsicProjection(observation)) { +// } +// template +// bool operator()(T const* const* parameters, +// T* residual) const { +// const T* rotation = parameters[0]; +// const T* translation = parameters[1]; +// const T* intrinsics = parameters[2]; +// const T* point = parameters[3]; +// T transformed_point[3]; +// RotateAndTranslatePoint(rotation, translation, point, transformed_point); +// +// // Note that we call intrinsic_projection_, just like it was +// // any other templated functor. +// const T* projection_parameters[2]; +// projection_parameters[0] = intrinsics; +// projection_parameters[1] = transformed_point; +// return intrinsic_projection_(projection_parameters, residual); +// } +// +// private: +// DynamicCostFunctionToFunctor intrinsic_projection_; +// }; +class CERES_EXPORT DynamicCostFunctionToFunctor { + public: + // Takes ownership of cost_function. 
+ explicit DynamicCostFunctionToFunctor(CostFunction* cost_function) + : cost_function_(cost_function) { + CHECK(cost_function != nullptr); + } + + bool operator()(double const* const* parameters, double* residuals) const { + return cost_function_->Evaluate(parameters, residuals, nullptr); + } + + template + bool operator()(JetT const* const* inputs, JetT* output) const { + const std::vector& parameter_block_sizes = + cost_function_->parameter_block_sizes(); + const int num_parameter_blocks = + static_cast(parameter_block_sizes.size()); + const int num_residuals = cost_function_->num_residuals(); + const int num_parameters = std::accumulate( + parameter_block_sizes.begin(), parameter_block_sizes.end(), 0); + + internal::FixedArray parameters(num_parameters); + internal::FixedArray parameter_blocks(num_parameter_blocks); + internal::FixedArray jacobians(num_residuals * num_parameters); + internal::FixedArray jacobian_blocks(num_parameter_blocks); + internal::FixedArray residuals(num_residuals); + + // Build a set of arrays to get the residuals and jacobians from + // the CostFunction wrapped by this functor. + double* parameter_ptr = parameters.data(); + double* jacobian_ptr = jacobians.data(); + for (int i = 0; i < num_parameter_blocks; ++i) { + parameter_blocks[i] = parameter_ptr; + jacobian_blocks[i] = jacobian_ptr; + for (int j = 0; j < parameter_block_sizes[i]; ++j) { + *parameter_ptr++ = inputs[i][j].a; + } + jacobian_ptr += num_residuals * parameter_block_sizes[i]; + } + + if (!cost_function_->Evaluate(parameter_blocks.data(), + residuals.data(), + jacobian_blocks.data())) { + return false; + } + + // Now that we have the incoming Jets, which are carrying the + // partial derivatives of each of the inputs w.r.t to some other + // underlying parameters. The derivative of the outputs of the + // cost function w.r.t to the same underlying parameters can now + // be computed by applying the chain rule. 
+ // + // d output[i] d output[i] d input[j] + // -------------- = sum_j ----------- * ------------ + // d parameter[k] d input[j] d parameter[k] + // + // d input[j] + // -------------- = inputs[j], so + // d parameter[k] + // + // outputJet[i] = sum_k jacobian[i][k] * inputJet[k] + // + // The following loop, iterates over the residuals, computing one + // output jet at a time. + for (int i = 0; i < num_residuals; ++i) { + output[i].a = residuals[i]; + output[i].v.setZero(); + + for (int j = 0; j < num_parameter_blocks; ++j) { + const int32_t block_size = parameter_block_sizes[j]; + for (int k = 0; k < parameter_block_sizes[j]; ++k) { + output[i].v += + jacobian_blocks[j][i * block_size + k] * inputs[j][k].v; + } + } + } + + return true; + } + + private: + std::unique_ptr cost_function_; +}; + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_ diff --git a/ceres-v2/include/dynamic_numeric_diff_cost_function.h b/ceres-v2/include/dynamic_numeric_diff_cost_function.h new file mode 100644 index 0000000000000000000000000000000000000000..e1892e8ba4a63d496af45f323c28ec3bd55234a1 --- /dev/null +++ b/ceres-v2/include/dynamic_numeric_diff_cost_function.h @@ -0,0 +1,164 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. 
nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: mierle@gmail.com (Keir Mierle) +// sameeragarwal@google.com (Sameer Agarwal) +// thadh@gmail.com (Thad Hughes) +// tbennun@gmail.com (Tal Ben-Nun) + +#ifndef CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_ +#define CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_ + +#include +#include +#include +#include + +#include "ceres/dynamic_cost_function.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/numeric_diff.h" +#include "ceres/internal/parameter_dims.h" +#include "ceres/numeric_diff_options.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { + +// This numeric diff implementation differs from the one found in +// numeric_diff_cost_function.h by supporting numericdiff on cost +// functions with variable numbers of parameters with variable +// sizes. With the other implementation, all the sizes (both the +// number of parameter blocks and the size of each block) must be +// fixed at compile time. 
+// +// The functor API differs slightly from the API for fixed size +// numeric diff; the expected interface for the cost functors is: +// +// struct MyCostFunctor { +// bool operator()(double const* +// const* parameters, +// double* residuals) const { +// // Use parameters[i] to access the i'th parameter block. +// } +// } +// +// Since the sizing of the parameters is done at runtime, you must +// also specify the sizes after creating the +// DynamicNumericDiffCostFunction. For example: +// +// DynamicAutoDiffCostFunction cost_function( +// new MyCostFunctor()); +// cost_function.AddParameterBlock(5); +// cost_function.AddParameterBlock(10); +// cost_function.SetNumResiduals(21); +template +class DynamicNumericDiffCostFunction final : public DynamicCostFunction { + public: + explicit DynamicNumericDiffCostFunction( + const CostFunctor* functor, + Ownership ownership = TAKE_OWNERSHIP, + const NumericDiffOptions& options = NumericDiffOptions()) + : functor_(functor), ownership_(ownership), options_(options) {} + + DynamicNumericDiffCostFunction(DynamicNumericDiffCostFunction&& other) + : functor_(std::move(other.functor_)), ownership_(other.ownership_) {} + + ~DynamicNumericDiffCostFunction() override { + if (ownership_ != TAKE_OWNERSHIP) { + functor_.release(); + } + } + + bool Evaluate(double const* const* parameters, + double* residuals, + double** jacobians) const override { + using internal::NumericDiff; + CHECK_GT(num_residuals(), 0) + << "You must call DynamicNumericDiffCostFunction::SetNumResiduals() " + << "before DynamicNumericDiffCostFunction::Evaluate()."; + + const std::vector& block_sizes = parameter_block_sizes(); + CHECK(!block_sizes.empty()) + << "You must call DynamicNumericDiffCostFunction::AddParameterBlock() " + << "before DynamicNumericDiffCostFunction::Evaluate()."; + + const bool status = + internal::VariadicEvaluate( + *functor_.get(), parameters, residuals); + if (jacobians == nullptr || !status) { + return status; + } + + // Create local 
space for a copy of the parameters which will get mutated. + int parameters_size = accumulate(block_sizes.begin(), block_sizes.end(), 0); + std::vector parameters_copy(parameters_size); + std::vector parameters_references_copy(block_sizes.size()); + parameters_references_copy[0] = ¶meters_copy[0]; + for (size_t block = 1; block < block_sizes.size(); ++block) { + parameters_references_copy[block] = + parameters_references_copy[block - 1] + block_sizes[block - 1]; + } + + // Copy the parameters into the local temp space. + for (size_t block = 0; block < block_sizes.size(); ++block) { + memcpy(parameters_references_copy[block], + parameters[block], + block_sizes[block] * sizeof(*parameters[block])); + } + + for (size_t block = 0; block < block_sizes.size(); ++block) { + if (jacobians[block] != nullptr && + !NumericDiff:: + EvaluateJacobianForParameterBlock(functor_.get(), + residuals, + options_, + this->num_residuals(), + block, + block_sizes[block], + ¶meters_references_copy[0], + jacobians[block])) { + return false; + } + } + return true; + } + + private: + std::unique_ptr functor_; + Ownership ownership_; + NumericDiffOptions options_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_ diff --git a/ceres-v2/include/evaluation_callback.h b/ceres-v2/include/evaluation_callback.h new file mode 100644 index 0000000000000000000000000000000000000000..495d565047a5fe014ffb8e8a00e7850ddea2377c --- /dev/null +++ b/ceres-v2/include/evaluation_callback.h @@ -0,0 +1,80 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: mierle@gmail.com (Keir Mierle) + +#ifndef CERES_PUBLIC_EVALUATION_CALLBACK_H_ +#define CERES_PUBLIC_EVALUATION_CALLBACK_H_ + +#include "ceres/internal/export.h" + +namespace ceres { + +// Using this callback interface, Ceres can notify you when it is +// about to evaluate the residuals or jacobians. With the callback, +// you can share computation between residual blocks by doing the +// shared computation in PrepareForEvaluation() before Ceres calls +// CostFunction::Evaluate(). It also enables caching results between a +// pure residual evaluation and a residual & jacobian evaluation, via +// the new_evaluation_point argument. +// +// One use case for this callback is if the cost function compute is +// moved to the GPU. 
In that case, the prepare call does the actual +// cost function evaluation, and subsequent calls from Ceres to the +// actual cost functions merely copy the results from the GPU onto the +// corresponding blocks for Ceres to plug into the solver. +// +// NOTE: Ceres provides no mechanism to share data other than the +// notification from the callback. Users must provide access to +// pre-computed shared data to their cost functions behind the scenes; +// this all happens without Ceres knowing. +// +// One approach is to put a pointer to the shared data in each cost +// function (recommended) or to use a global shared variable +// (discouraged; bug-prone). As far as Ceres is concerned, it is +// evaluating cost functions like any other; it just so happens that +// behind the scenes the cost functions reuse pre-computed data to +// execute faster. +class CERES_EXPORT EvaluationCallback { + public: + virtual ~EvaluationCallback(); + + // Called before Ceres requests residuals or jacobians for a given setting of + // the parameters. User parameters (the double* values provided to the cost + // functions) are fixed until the next call to PrepareForEvaluation(). If + // new_evaluation_point == true, then this is a new point that is different + // from the last evaluated point. Otherwise, it is the same point that was + // evaluated previously (either jacobian or residual) and the user can use + // cached results from previous evaluations. 
+ virtual void PrepareForEvaluation(bool evaluate_jacobians, + bool new_evaluation_point) = 0; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_EVALUATION_CALLBACK_H_ diff --git a/ceres-v2/include/first_order_function.h b/ceres-v2/include/first_order_function.h new file mode 100644 index 0000000000000000000000000000000000000000..d718b6679cead0c7f9da6f800108c5a15b449761 --- /dev/null +++ b/ceres-v2/include/first_order_function.h @@ -0,0 +1,54 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_ +#define CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_ + +#include "ceres/internal/export.h" + +namespace ceres { + +// A FirstOrderFunction object implements the evaluation of a function +// and its gradient. +class CERES_EXPORT FirstOrderFunction { + public: + virtual ~FirstOrderFunction(); + + // cost is never null. gradient may be null. The return value + // indicates whether the evaluation was successful or not. + virtual bool Evaluate(const double* const parameters, + double* cost, + double* gradient) const = 0; + virtual int NumParameters() const = 0; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_ diff --git a/ceres-v2/include/gradient_checker.h b/ceres-v2/include/gradient_checker.h new file mode 100644 index 0000000000000000000000000000000000000000..178fa2b0dd2c073d9a11ace23dda6cfd4ff3b56f --- /dev/null +++ b/ceres-v2/include/gradient_checker.h @@ -0,0 +1,189 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// Copyright 2007 Google Inc. All Rights Reserved. 
+// +// Authors: wjr@google.com (William Rucklidge), +// keir@google.com (Keir Mierle), +// dgossow@google.com (David Gossow) + +#ifndef CERES_PUBLIC_GRADIENT_CHECKER_H_ +#define CERES_PUBLIC_GRADIENT_CHECKER_H_ + +#include +#include +#include + +#include "ceres/cost_function.h" +#include "ceres/dynamic_numeric_diff_cost_function.h" +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/export.h" +#include "ceres/internal/fixed_array.h" +#include "ceres/local_parameterization.h" +#include "ceres/manifold.h" +#include "glog/logging.h" + +namespace ceres { + +// GradientChecker compares the Jacobians returned by a cost function against +// derivatives estimated using finite differencing. +// +// The condition enforced is that +// +// (J_actual(i, j) - J_numeric(i, j)) +// ------------------------------------ < relative_precision +// max(J_actual(i, j), J_numeric(i, j)) +// +// where J_actual(i, j) is the jacobian as computed by the supplied cost +// function (by the user) multiplied by the local parameterization Jacobian +// and J_numeric is the jacobian as computed by finite differences, multiplied +// by the local parameterization Jacobian as well. +// +// How to use: Fill in an array of pointers to parameter blocks for your +// CostFunction, and then call Probe(). Check that the return value is 'true'. +class CERES_EXPORT GradientChecker { + public: + // This constructor will not take ownership of the cost function or local + // parameterizations. + // + // function: The cost function to probe. + // + // local_parameterizations: A vector of local parameterizations, one for each + // parameter block. May be nullptr or contain nullptrs to indicate that the + // respective parameter does not have a local parameterization. + // + // options: Options to use for numerical differentiation. + // + // NOTE: This constructor is deprecated and will be removed in the next public + // release of Ceres Solver. 
Please transition to using the Manifold based + // version. + CERES_DEPRECATED_WITH_MSG( + "Local Parameterizations are deprecated. Use the constructor that uses " + "Manifolds instead.") + GradientChecker( + const CostFunction* function, + const std::vector* local_parameterizations, + const NumericDiffOptions& options); + + // This will not take ownership of the cost function or manifolds. + // + // function: The cost function to probe. + // + // manifolds: A vector of manifolds for each parameter. May be nullptr or + // contain nullptrs to indicate that the respective parameter blocks are + // Euclidean. + // + // options: Options to use for numerical differentiation. + GradientChecker(const CostFunction* function, + const std::vector* manifolds, + const NumericDiffOptions& options); + ~GradientChecker(); + + // Contains results from a call to Probe for later inspection. + struct CERES_EXPORT ProbeResults { + // The return value of the cost function. + bool return_value; + + // Computed residual vector. + Vector residuals; + + // The sizes of the Jacobians below are dictated by the cost function's + // parameter block size and residual block sizes. If a parameter block has a + // manifold associated with it, the size of the "local" Jacobian will be + // determined by the dimension of the manifold (which is the same as the + // dimension of the tangent space) and residual block size, otherwise it + // will be identical to the regular Jacobian. + + // Derivatives as computed by the cost function. + std::vector jacobians; + + // Derivatives as computed by the cost function in local space. + std::vector local_jacobians; + + // Derivatives as computed by numerical differentiation in local space. + std::vector numeric_jacobians; + + // Derivatives as computed by numerical differentiation in local space. + std::vector local_numeric_jacobians; + + // Contains the maximum relative error found in the local Jacobians. 
+ double maximum_relative_error; + + // If an error was detected, this will contain a detailed description of + // that error. + std::string error_log; + }; + + // Call the cost function, compute alternative Jacobians using finite + // differencing and compare results. If manifolds are given, the Jacobians + // will be multiplied by the manifold Jacobians before performing the check, + // which effectively means that all errors along the null space of the + // manifold will be ignored. Returns false if the Jacobians don't match, the + // cost function return false, or if a cost function returns a different + // residual when called with a Jacobian output argument vs. calling it + // without. Otherwise returns true. + // + // parameters: The parameter values at which to probe. + // relative_precision: A threshold for the relative difference between the + // Jacobians. If the Jacobians differ by more than this amount, then the + // probe fails. + // results: On return, the Jacobians (and other information) will be stored + // here. May be nullptr. + // + // Returns true if no problems are detected and the difference between the + // Jacobians is less than error_tolerance. + bool Probe(double const* const* parameters, + double relative_precision, + ProbeResults* results) const; + + private: + GradientChecker() = delete; + GradientChecker(const GradientChecker&) = delete; + void operator=(const GradientChecker&) = delete; + + // This bool is used to determine whether the constructor with the + // LocalParameterizations is called or the one with Manifolds is called. If + // the former, then the vector of manifolds is a vector of ManifoldAdapter + // objects which we own and should be deleted. If the latter then they are + // real Manifold objects owned by the caller and will not be deleted. 
+ // + // This bool is only needed during the LocalParameterization to Manifold + // transition, once this transition is complete the LocalParameterization + // based constructor and this bool will be removed. + const bool delete_manifolds_ = false; + + std::vector manifolds_; + const CostFunction* function_; + std::unique_ptr finite_diff_cost_function_; +}; + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_GRADIENT_CHECKER_H_ diff --git a/ceres-v2/include/gradient_problem.h b/ceres-v2/include/gradient_problem.h new file mode 100644 index 0000000000000000000000000000000000000000..b6a8b86742145c7f13fabc389c7f6bf49f677db5 --- /dev/null +++ b/ceres-v2/include/gradient_problem.h @@ -0,0 +1,185 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_GRADIENT_PROBLEM_H_ +#define CERES_PUBLIC_GRADIENT_PROBLEM_H_ + +#include + +#include "ceres/first_order_function.h" +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/local_parameterization.h" +#include "ceres/manifold.h" + +namespace ceres { + +class FirstOrderFunction; + +// Instances of GradientProblem represent general non-linear +// optimization problems that must be solved using just the value of +// the objective function and its gradient. + +// Unlike the Problem class, which can only be used to model non-linear least +// squares problems, instances of GradientProblem are not restricted in the form +// of the objective function. +// +// Structurally GradientProblem is a composition of a FirstOrderFunction and +// optionally a Manifold. +// +// The FirstOrderFunction is responsible for evaluating the cost and gradient of +// the objective function. +// +// The Manifold is responsible for going back and forth between the ambient +// space and the local tangent space. (See manifold.h for more details). When a +// Manifold is not provided, then the tangent space is assumed to coincide with +// the ambient Euclidean space that the gradient vector lives in. 
+// +// Example usage: +// +// The following demonstrate the problem construction for Rosenbrock's function +// +// f(x,y) = (1-x)^2 + 100(y - x^2)^2; +// +// class Rosenbrock : public ceres::FirstOrderFunction { +// public: +// virtual ~Rosenbrock() {} +// +// virtual bool Evaluate(const double* parameters, +// double* cost, +// double* gradient) const { +// const double x = parameters[0]; +// const double y = parameters[1]; +// +// cost[0] = (1.0 - x) * (1.0 - x) + 100.0 * (y - x * x) * (y - x * x); +// if (gradient != nullptr) { +// gradient[0] = -2.0 * (1.0 - x) - 200.0 * (y - x * x) * 2.0 * x; +// gradient[1] = 200.0 * (y - x * x); +// } +// return true; +// }; +// +// virtual int NumParameters() const { return 2; }; +// }; +// +// ceres::GradientProblem problem(new Rosenbrock()); +// +// NOTE: We are currently in the process of transitioning from +// LocalParameterization to Manifolds in the Ceres API. During this period, +// GradientProblem will support using both Manifold and LocalParameterization +// objects interchangably. For methods in the API affected by this change, see +// their documentation below. +class CERES_EXPORT GradientProblem { + public: + // Takes ownership of the function. + explicit GradientProblem(FirstOrderFunction* function); + + // Takes ownership of the function and the parameterization. + // + // NOTE: This constructor is deprecated and will be removed in the next public + // release of Ceres Solver. Please move to using the Manifold based + // constructor. + CERES_DEPRECATED_WITH_MSG( + "LocalParameterizations are deprecated. Please use the constructor that " + "uses Manifold instead.") + GradientProblem(FirstOrderFunction* function, + LocalParameterization* parameterization); + + // Takes ownership of the function and the manifold. + GradientProblem(FirstOrderFunction* function, Manifold* manifold); + + int NumParameters() const; + + // Dimension of the manifold (and its tangent space). 
+ // + // During the transition from LocalParameterization to Manifold, this method + // reports the LocalSize of the LocalParameterization or the TangentSize of + // the Manifold object associated with this problem. + int NumTangentParameters() const; + + // Dimension of the manifold (and its tangent space). + // + // NOTE: This method is deprecated and will be removed in the next public + // release of Ceres Solver. Please move to using NumTangentParameters() + // instead. + int NumLocalParameters() const { return NumTangentParameters(); } + + // This call is not thread safe. + bool Evaluate(const double* parameters, double* cost, double* gradient) const; + bool Plus(const double* x, const double* delta, double* x_plus_delta) const; + + const FirstOrderFunction* function() const { return function_.get(); } + FirstOrderFunction* mutable_function() { return function_.get(); } + + // NOTE: During the transition from LocalParameterization to Manifold we need + // to support both The LocalParameterization and Manifold based constructors. + // + // When the user uses the LocalParameterization, internally the solver will + // wrap it in a ManifoldAdapter object and return it when manifold or + // mutable_manifold are called. + // + // As a result this method will return a non-nullptr result if a Manifold or a + // LocalParameterization was used when constructing the GradientProblem. + const Manifold* manifold() const { return manifold_.get(); } + Manifold* mutable_manifold() { return manifold_.get(); } + + // If the problem is constructed without a LocalParameterization or with a + // Manifold this method will return a nullptr. + // + // NOTE: This method is deprecated and will be removed in the next public + // release of Ceres Solver. 
+ CERES_DEPRECATED_WITH_MSG("Use Manifolds instead.") + const LocalParameterization* parameterization() const { + return parameterization_.get(); + } + + // If the problem is constructed without a LocalParameterization or with a + // Manifold this method will return a nullptr. + // + // NOTE: This method is deprecated and will be removed in the next public + // release of Ceres Solver. + CERES_DEPRECATED_WITH_MSG("Use Manifolds instead.") + LocalParameterization* mutable_parameterization() { + return parameterization_.get(); + } + + private: + std::unique_ptr function_; + CERES_DEPRECATED_WITH_MSG("") + std::unique_ptr parameterization_; + std::unique_ptr manifold_; + std::unique_ptr scratch_; +}; + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_GRADIENT_PROBLEM_H_ diff --git a/ceres-v2/include/gradient_problem_solver.h b/ceres-v2/include/gradient_problem_solver.h new file mode 100644 index 0000000000000000000000000000000000000000..b6290c80c28f4bd81bb6556e468ecba0b7d3ed64 --- /dev/null +++ b/ceres-v2/include/gradient_problem_solver.h @@ -0,0 +1,357 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_GRADIENT_PROBLEM_SOLVER_H_ +#define CERES_PUBLIC_GRADIENT_PROBLEM_SOLVER_H_ + +#include +#include +#include + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/internal/port.h" +#include "ceres/iteration_callback.h" +#include "ceres/types.h" + +namespace ceres { + +class GradientProblem; + +class CERES_EXPORT GradientProblemSolver { + public: + virtual ~GradientProblemSolver(); + + // The options structure contains, not surprisingly, options that control how + // the solver operates. The defaults should be suitable for a wide range of + // problems; however, better performance is often obtainable with tweaking. + // + // The constants are defined inside types.h + struct CERES_EXPORT Options { + // Returns true if the options struct has a valid + // configuration. Returns false otherwise, and fills in *error + // with a message describing the problem. 
+ bool IsValid(std::string* error) const; + + // Minimizer options ---------------------------------------- + LineSearchDirectionType line_search_direction_type = LBFGS; + LineSearchType line_search_type = WOLFE; + NonlinearConjugateGradientType nonlinear_conjugate_gradient_type = + FLETCHER_REEVES; + + // The LBFGS hessian approximation is a low rank approximation to + // the inverse of the Hessian matrix. The rank of the + // approximation determines (linearly) the space and time + // complexity of using the approximation. Higher the rank, the + // better is the quality of the approximation. The increase in + // quality is however is bounded for a number of reasons. + // + // 1. The method only uses secant information and not actual + // derivatives. + // + // 2. The Hessian approximation is constrained to be positive + // definite. + // + // So increasing this rank to a large number will cost time and + // space complexity without the corresponding increase in solution + // quality. There are no hard and fast rules for choosing the + // maximum rank. The best choice usually requires some problem + // specific experimentation. + // + // For more theoretical and implementation details of the LBFGS + // method, please see: + // + // Nocedal, J. (1980). "Updating Quasi-Newton Matrices with + // Limited Storage". Mathematics of Computation 35 (151): 773-782. + int max_lbfgs_rank = 20; + + // As part of the (L)BFGS update step (BFGS) / right-multiply step (L-BFGS), + // the initial inverse Hessian approximation is taken to be the Identity. + // However, Oren showed that using instead I * \gamma, where \gamma is + // chosen to approximate an eigenvalue of the true inverse Hessian can + // result in improved convergence in a wide variety of cases. Setting + // use_approximate_eigenvalue_bfgs_scaling to true enables this scaling. 
+ // + // It is important to note that approximate eigenvalue scaling does not + // always improve convergence, and that it can in fact significantly degrade + // performance for certain classes of problem, which is why it is disabled + // by default. In particular it can degrade performance when the + // sensitivity of the problem to different parameters varies significantly, + // as in this case a single scalar factor fails to capture this variation + // and detrimentally downscales parts of the jacobian approximation which + // correspond to low-sensitivity parameters. It can also reduce the + // robustness of the solution to errors in the jacobians. + // + // Oren S.S., Self-scaling variable metric (SSVM) algorithms + // Part II: Implementation and experiments, Management Science, + // 20(5), 863-874, 1974. + bool use_approximate_eigenvalue_bfgs_scaling = false; + + // Degree of the polynomial used to approximate the objective + // function. Valid values are BISECTION, QUADRATIC and CUBIC. + // + // BISECTION corresponds to pure backtracking search with no + // interpolation. + LineSearchInterpolationType line_search_interpolation_type = CUBIC; + + // If during the line search, the step_size falls below this + // value, it is truncated to zero. + double min_line_search_step_size = 1e-9; + + // Line search parameters. + + // Solving the line search problem exactly is computationally + // prohibitive. Fortunately, line search based optimization + // algorithms can still guarantee convergence if instead of an + // exact solution, the line search algorithm returns a solution + // which decreases the value of the objective function + // sufficiently. More precisely, we are looking for a step_size + // s.t. 
+ // + // f(step_size) <= f(0) + sufficient_decrease * f'(0) * step_size + // + double line_search_sufficient_function_decrease = 1e-4; + + // In each iteration of the line search, + // + // new_step_size >= max_line_search_step_contraction * step_size + // + // Note that by definition, for contraction: + // + // 0 < max_step_contraction < min_step_contraction < 1 + // + double max_line_search_step_contraction = 1e-3; + + // In each iteration of the line search, + // + // new_step_size <= min_line_search_step_contraction * step_size + // + // Note that by definition, for contraction: + // + // 0 < max_step_contraction < min_step_contraction < 1 + // + double min_line_search_step_contraction = 0.6; + + // Maximum number of trial step size iterations during each line search, + // if a step size satisfying the search conditions cannot be found within + // this number of trials, the line search will terminate. + int max_num_line_search_step_size_iterations = 20; + + // Maximum number of restarts of the line search direction algorithm before + // terminating the optimization. Restarts of the line search direction + // algorithm occur when the current algorithm fails to produce a new descent + // direction. This typically indicates a numerical failure, or a breakdown + // in the validity of the approximations used. + int max_num_line_search_direction_restarts = 5; + + // The strong Wolfe conditions consist of the Armijo sufficient + // decrease condition, and an additional requirement that the + // step-size be chosen s.t. the _magnitude_ ('strong' Wolfe + // conditions) of the gradient along the search direction + // decreases sufficiently. Precisely, this second condition + // is that we seek a step_size s.t. + // + // |f'(step_size)| <= sufficient_curvature_decrease * |f'(0)| + // + // Where f() is the line search objective and f'() is the derivative + // of f w.r.t step_size (d f / d step_size). 
+ double line_search_sufficient_curvature_decrease = 0.9; + + // During the bracketing phase of the Wolfe search, the step size is + // increased until either a point satisfying the Wolfe conditions is + // found, or an upper bound for a bracket containing a point satisfying + // the conditions is found. Precisely, at each iteration of the + // expansion: + // + // new_step_size <= max_step_expansion * step_size. + // + // By definition for expansion, max_step_expansion > 1.0. + double max_line_search_step_expansion = 10.0; + + // Maximum number of iterations for the minimizer to run for. + int max_num_iterations = 50; + + // Maximum time for which the minimizer should run for. + double max_solver_time_in_seconds = 1e9; + + // Minimizer terminates when + // + // (new_cost - old_cost) < function_tolerance * old_cost; + // + double function_tolerance = 1e-6; + + // Minimizer terminates when + // + // max_i |x - Project(Plus(x, -g(x))| < gradient_tolerance + // + // This value should typically be 1e-4 * function_tolerance. + double gradient_tolerance = 1e-10; + + // Minimizer terminates when + // + // |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance) + // + double parameter_tolerance = 1e-8; + + // Logging options --------------------------------------------------------- + + LoggingType logging_type = PER_MINIMIZER_ITERATION; + + // By default the Minimizer progress is logged to VLOG(1), which + // is sent to STDERR depending on the vlog level. If this flag is + // set to true, and logging_type is not SILENT, the logging output + // is sent to STDOUT. + bool minimizer_progress_to_stdout = false; + + // If true, the user's parameter blocks are updated at the end of + // every Minimizer iteration, otherwise they are updated when the + // Minimizer terminates. This is useful if, for example, the user + // wishes to visualize the state of the optimization every + // iteration. 
+ bool update_state_every_iteration = false; + + // Callbacks that are executed at the end of each iteration of the + // Minimizer. An iteration may terminate midway, either due to + // numerical failures or because one of the convergence tests has + // been satisfied. In this case none of the callbacks are + // executed. + + // Callbacks are executed in the order that they are specified in + // this vector. By default, parameter blocks are updated only at + // the end of the optimization, i.e when the Minimizer + // terminates. This behaviour is controlled by + // update_state_every_variable. If the user wishes to have access + // to the update parameter blocks when his/her callbacks are + // executed, then set update_state_every_iteration to true. + // + // The solver does NOT take ownership of these pointers. + std::vector callbacks; + }; + + struct CERES_EXPORT Summary { + // A brief one line description of the state of the solver after + // termination. + std::string BriefReport() const; + + // A full multiline description of the state of the solver after + // termination. + std::string FullReport() const; + + bool IsSolutionUsable() const; + + // Minimizer summary ------------------------------------------------- + TerminationType termination_type = FAILURE; + + // Reason why the solver terminated. + std::string message = "ceres::GradientProblemSolve was not called."; + + // Cost of the problem (value of the objective function) before + // the optimization. + double initial_cost = -1.0; + + // Cost of the problem (value of the objective function) after the + // optimization. + double final_cost = -1.0; + + // IterationSummary for each minimizer iteration in order. + std::vector iterations; + + // Number of times the cost (and not the gradient) was evaluated. + int num_cost_evaluations = -1; + + // Number of times the gradient (and the cost) were evaluated. 
+ int num_gradient_evaluations = -1; + + // Sum total of all time spent inside Ceres when Solve is called. + double total_time_in_seconds = -1.0; + + // Time (in seconds) spent evaluating the cost. + double cost_evaluation_time_in_seconds = -1.0; + + // Time (in seconds) spent evaluating the gradient. + double gradient_evaluation_time_in_seconds = -1.0; + + // Time (in seconds) spent minimizing the interpolating polynomial + // to compute the next candidate step size as part of a line search. + double line_search_polynomial_minimization_time_in_seconds = -1.0; + + // Number of parameters in the problem. + int num_parameters = -1; + + // Dimension of the tangent space of the problem. + CERES_DEPRECATED_WITH_MSG("Use num_tangent_parameters.") + int num_local_parameters = -1; + + // Dimension of the tangent space of the problem. + int num_tangent_parameters = -1; + + // Type of line search direction used. + LineSearchDirectionType line_search_direction_type = LBFGS; + + // Type of the line search algorithm used. + LineSearchType line_search_type = WOLFE; + + // When performing line search, the degree of the polynomial used + // to approximate the objective function. + LineSearchInterpolationType line_search_interpolation_type = CUBIC; + + // If the line search direction is NONLINEAR_CONJUGATE_GRADIENT, + // then this indicates the particular variant of non-linear + // conjugate gradient used. + NonlinearConjugateGradientType nonlinear_conjugate_gradient_type = + FLETCHER_REEVES; + + // If the type of the line search direction is LBFGS, then this + // indicates the rank of the Hessian approximation. + int max_lbfgs_rank = -1; + }; + + // Once a least squares problem has been built, this function takes + // the problem and optimizes it based on the values of the options + // parameters. Upon return, a detailed summary of the work performed + // by the preprocessor, the non-linear minimizer and the linear + // solver are reported in the summary object. 
+ virtual void Solve(const GradientProblemSolver::Options& options, + const GradientProblem& problem, + double* parameters, + GradientProblemSolver::Summary* summary); +}; + +// Helper function which avoids going through the interface. +CERES_EXPORT void Solve(const GradientProblemSolver::Options& options, + const GradientProblem& problem, + double* parameters, + GradientProblemSolver::Summary* summary); + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_GRADIENT_PROBLEM_SOLVER_H_ diff --git a/ceres-v2/include/internal/array_selector.h b/ceres-v2/include/internal/array_selector.h new file mode 100644 index 0000000000000000000000000000000000000000..b4db012f00bb1df516129edbfdd3bd59dd12e3b0 --- /dev/null +++ b/ceres-v2/include/internal/array_selector.h @@ -0,0 +1,97 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2020 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: darius.rueckert@fau.de (Darius Rueckert) +// + +#ifndef CERES_PUBLIC_INTERNAL_ARRAY_SELECTOR_H_ +#define CERES_PUBLIC_INTERNAL_ARRAY_SELECTOR_H_ + +#include +#include + +#include "ceres/internal/fixed_array.h" +#include "ceres/types.h" + +namespace ceres { +namespace internal { + +// StaticFixedArray selects the best array implementation based on template +// arguments. If the size is not known at compile-time, pass +// ceres::DYNAMIC as a size-template argument. 
+// +// Three different containers are selected in different scenarios: +// +// num_elements == DYNAMIC: +// -> ceres::internal::FixedArray(size) + +// num_elements != DYNAMIC && num_elements <= max_stack_size +// -> std::array + +// num_elements != DYNAMIC && num_elements > max_stack_size +// -> std::vector(num_elements) +// +template +struct ArraySelector {}; + +template +struct ArraySelector + : ceres::internal::FixedArray { + explicit ArraySelector(int s) + : ceres::internal::FixedArray(s) {} +}; + +template +struct ArraySelector + : std::array { + explicit ArraySelector(int s) { CHECK_EQ(s, num_elements); } +}; + +template +struct ArraySelector + : std::vector { + explicit ArraySelector(int s) : std::vector(s) { + CHECK_EQ(s, num_elements); + } +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_ARRAY_SELECTOR_H_ diff --git a/ceres-v2/include/internal/autodiff.h b/ceres-v2/include/internal/autodiff.h new file mode 100644 index 0000000000000000000000000000000000000000..c796618cd2db078960a2a9e52ce5ab6e4100e3b5 --- /dev/null +++ b/ceres-v2/include/internal/autodiff.h @@ -0,0 +1,365 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: keir@google.com (Keir Mierle) +// +// Computation of the Jacobian matrix for vector-valued functions of multiple +// variables, using automatic differentiation based on the implementation of +// dual numbers in jet.h. Before reading the rest of this file, it is advisable +// to read jet.h's header comment in detail. +// +// The helper wrapper AutoDifferentiate() computes the jacobian of +// functors with templated operator() taking this form: +// +// struct F { +// template +// bool operator()(const T *x, const T *y, ..., T *z) { +// // Compute z[] based on x[], y[], ... +// // return true if computation succeeded, false otherwise. +// } +// }; +// +// All inputs and outputs may be vector-valued. +// +// To understand how jets are used to compute the jacobian, a +// picture may help. 
Consider a vector-valued function, F, returning 3 +// dimensions and taking a vector-valued parameter of 4 dimensions: +// +// y x +// [ * ] F [ * ] +// [ * ] <--- [ * ] +// [ * ] [ * ] +// [ * ] +// +// Similar to the 2-parameter example for f described in jet.h, computing the +// jacobian dy/dx is done by substituting a suitable jet object for x and all +// intermediate steps of the computation of F. Since x is has 4 dimensions, use +// a Jet. +// +// Before substituting a jet object for x, the dual components are set +// appropriately for each dimension of x: +// +// y x +// [ * | * * * * ] f [ * | 1 0 0 0 ] x0 +// [ * | * * * * ] <--- [ * | 0 1 0 0 ] x1 +// [ * | * * * * ] [ * | 0 0 1 0 ] x2 +// ---+--- [ * | 0 0 0 1 ] x3 +// | ^ ^ ^ ^ +// dy/dx | | | +----- infinitesimal for x3 +// | | +------- infinitesimal for x2 +// | +--------- infinitesimal for x1 +// +----------- infinitesimal for x0 +// +// The reason to set the internal 4x4 submatrix to the identity is that we wish +// to take the derivative of y separately with respect to each dimension of x. +// Each column of the 4x4 identity is therefore for a single component of the +// independent variable x. +// +// Then the jacobian of the mapping, dy/dx, is the 3x4 sub-matrix of the +// extended y vector, indicated in the above diagram. +// +// Functors with multiple parameters +// --------------------------------- +// In practice, it is often convenient to use a function f of two or more +// vector-valued parameters, for example, x[3] and z[6]. Unfortunately, the jet +// framework is designed for a single-parameter vector-valued input. The wrapper +// in this file addresses this issue adding support for functions with one or +// more parameter vectors. 
+// +// To support multiple parameters, all the parameter vectors are concatenated +// into one and treated as a single parameter vector, except that since the +// functor expects different inputs, we need to construct the jets as if they +// were part of a single parameter vector. The extended jets are passed +// separately for each parameter. +// +// For example, consider a functor F taking two vector parameters, p[2] and +// q[3], and producing an output y[4]: +// +// struct F { +// template +// bool operator()(const T *p, const T *q, T *z) { +// // ... +// } +// }; +// +// In this case, the necessary jet type is Jet. Here is a +// visualization of the jet objects in this case: +// +// Dual components for p ----+ +// | +// -+- +// y [ * | 1 0 | 0 0 0 ] --- p[0] +// [ * | 0 1 | 0 0 0 ] --- p[1] +// [ * | . . | + + + ] | +// [ * | . . | + + + ] v +// [ * | . . | + + + ] <--- F(p, q) +// [ * | . . | + + + ] ^ +// ^^^ ^^^^^ | +// dy/dp dy/dq [ * | 0 0 | 1 0 0 ] --- q[0] +// [ * | 0 0 | 0 1 0 ] --- q[1] +// [ * | 0 0 | 0 0 1 ] --- q[2] +// --+-- +// | +// Dual components for q --------------+ +// +// where the 4x2 submatrix (marked with ".") and 4x3 submatrix (marked with "+" +// of y in the above diagram are the derivatives of y with respect to p and q +// respectively. This is how autodiff works for functors taking multiple vector +// valued arguments (up to 6). +// +// Jacobian null pointers (nullptr) +// -------------------------------- +// In general, the functions below will accept nullptr for all or some of the +// Jacobian parameters, meaning that those Jacobians will not be computed. 
+ +#ifndef CERES_PUBLIC_INTERNAL_AUTODIFF_H_ +#define CERES_PUBLIC_INTERNAL_AUTODIFF_H_ + +#include +#include +#include + +#include "ceres/internal/array_selector.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/fixed_array.h" +#include "ceres/internal/parameter_dims.h" +#include "ceres/internal/variadic_evaluate.h" +#include "ceres/jet.h" +#include "ceres/types.h" +#include "glog/logging.h" + +// If the number of parameters exceeds this values, the corresponding jets are +// placed on the heap. This will reduce performance by a factor of 2-5 on +// current compilers. +#ifndef CERES_AUTODIFF_MAX_PARAMETERS_ON_STACK +#define CERES_AUTODIFF_MAX_PARAMETERS_ON_STACK 50 +#endif + +#ifndef CERES_AUTODIFF_MAX_RESIDUALS_ON_STACK +#define CERES_AUTODIFF_MAX_RESIDUALS_ON_STACK 20 +#endif + +namespace ceres { +namespace internal { + +// Extends src by a 1st order perturbation for every dimension and puts it in +// dst. The size of src is N. Since this is also used for perturbations in +// blocked arrays, offset is used to shift which part of the jet the +// perturbation occurs. This is used to set up the extended x augmented by an +// identity matrix. The JetT type should be a Jet type, and T should be a +// numeric type (e.g. double). For example, +// +// 0 1 2 3 4 5 6 7 8 +// dst[0] [ * | . . | 1 0 0 | . . . ] +// dst[1] [ * | . . | 0 1 0 | . . . ] +// dst[2] [ * | . . | 0 0 1 | . . . ] +// +// is what would get put in dst if N was 3, offset was 3, and the jet type JetT +// was 8-dimensional. +template +struct Make1stOrderPerturbation { + public: + inline static void Apply(const T* src, JetT* dst) { + if (j == 0) { + DCHECK(src); + DCHECK(dst); + } + dst[j] = JetT(src[j], j + Offset); + Make1stOrderPerturbation::Apply(src, dst); + } +}; + +template +struct Make1stOrderPerturbation { + public: + static void Apply(const T* /* NOT USED */, JetT* /* NOT USED */) {} +}; + +// Calls Make1stOrderPerturbation for every parameter block. 
+// +// Example: +// If one having three parameter blocks with dimensions (3, 2, 4), the call +// Make1stOrderPerturbations::Apply(params, x); +// will result in the following calls to Make1stOrderPerturbation: +// Make1stOrderPerturbation<0, 3, 0>::Apply(params[0], x + 0); +// Make1stOrderPerturbation<0, 2, 3>::Apply(params[1], x + 3); +// Make1stOrderPerturbation<0, 4, 5>::Apply(params[2], x + 5); +template +struct Make1stOrderPerturbations; + +template +struct Make1stOrderPerturbations, + ParameterIdx, + Offset> { + template + inline static void Apply(T const* const* parameters, JetT* x) { + Make1stOrderPerturbation<0, N, Offset, T, JetT>::Apply( + parameters[ParameterIdx], x + Offset); + Make1stOrderPerturbations, + ParameterIdx + 1, + Offset + N>::Apply(parameters, x); + } +}; + +// End of 'recursion'. Nothing more to do. +template +struct Make1stOrderPerturbations, + ParameterIdx, + Total> { + template + static void Apply(T const* const* /* NOT USED */, JetT* /* NOT USED */) {} +}; + +// Takes the 0th order part of src, assumed to be a Jet type, and puts it in +// dst. This is used to pick out the "vector" part of the extended y. +template +inline void Take0thOrderPart(int M, const JetT* src, T dst) { + DCHECK(src); + for (int i = 0; i < M; ++i) { + dst[i] = src[i].a; + } +} + +// Takes N 1st order parts, starting at index N0, and puts them in the M x N +// matrix 'dst'. This is used to pick out the "matrix" parts of the extended y. +template +inline void Take1stOrderPart(const int M, const JetT* src, T* dst) { + DCHECK(src); + DCHECK(dst); + for (int i = 0; i < M; ++i) { + Eigen::Map>(dst + N * i, N) = + src[i].v.template segment(N0); + } +} + +// Calls Take1stOrderPart for every parameter block. 
+// +// Example: +// If one having three parameter blocks with dimensions (3, 2, 4), the call +// Take1stOrderParts::Apply(num_outputs, +// output, +// jacobians); +// will result in the following calls to Take1stOrderPart: +// if (jacobians[0]) { +// Take1stOrderPart<0, 3>(num_outputs, output, jacobians[0]); +// } +// if (jacobians[1]) { +// Take1stOrderPart<3, 2>(num_outputs, output, jacobians[1]); +// } +// if (jacobians[2]) { +// Take1stOrderPart<5, 4>(num_outputs, output, jacobians[2]); +// } +template +struct Take1stOrderParts; + +template +struct Take1stOrderParts, + ParameterIdx, + Offset> { + template + inline static void Apply(int num_outputs, JetT* output, T** jacobians) { + if (jacobians[ParameterIdx]) { + Take1stOrderPart(num_outputs, output, jacobians[ParameterIdx]); + } + Take1stOrderParts, + ParameterIdx + 1, + Offset + N>::Apply(num_outputs, output, jacobians); + } +}; + +// End of 'recursion'. Nothing more to do. +template +struct Take1stOrderParts, ParameterIdx, Offset> { + template + static void Apply(int /* NOT USED*/, + JetT* /* NOT USED*/, + T** /* NOT USED */) {} +}; + +template +inline bool AutoDifferentiate(const Functor& functor, + T const* const* parameters, + int dynamic_num_outputs, + T* function_value, + T** jacobians) { + using JetT = Jet; + using Parameters = typename ParameterDims::Parameters; + + if (kNumResiduals != DYNAMIC) { + DCHECK_EQ(kNumResiduals, dynamic_num_outputs); + } + + ArraySelector + parameters_as_jets(ParameterDims::kNumParameters); + + // Pointers to the beginning of each parameter block + std::array unpacked_parameters = + ParameterDims::GetUnpackedParameters(parameters_as_jets.data()); + + // If the number of residuals is fixed, we use the template argument as the + // number of outputs. Otherwise we use the num_outputs parameter. Note: The + // ?-operator here is compile-time evaluated, therefore num_outputs is also + // a compile-time constant for functors with fixed residuals. 
+ const int num_outputs = + kNumResiduals == DYNAMIC ? dynamic_num_outputs : kNumResiduals; + DCHECK_GT(num_outputs, 0); + + ArraySelector + residuals_as_jets(num_outputs); + + // Invalidate the output Jets, so that we can detect if the user + // did not assign values to all of them. + for (int i = 0; i < num_outputs; ++i) { + residuals_as_jets[i].a = kImpossibleValue; + residuals_as_jets[i].v.setConstant(kImpossibleValue); + } + + Make1stOrderPerturbations::Apply(parameters, + parameters_as_jets.data()); + + if (!VariadicEvaluate( + functor, unpacked_parameters.data(), residuals_as_jets.data())) { + return false; + } + + Take0thOrderPart(num_outputs, residuals_as_jets.data(), function_value); + Take1stOrderParts::Apply( + num_outputs, residuals_as_jets.data(), jacobians); + + return true; +} + +} // namespace internal +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_AUTODIFF_H_ diff --git a/ceres-v2/include/internal/config.h b/ceres-v2/include/internal/config.h new file mode 100644 index 0000000000000000000000000000000000000000..a3aa08fdd7f2a248e67e2498199e24f47c2918fa --- /dev/null +++ b/ceres-v2/include/internal/config.h @@ -0,0 +1,123 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. 
nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: alexs.mac@gmail.com (Alex Stewart) + +// Configuration options for Ceres. +// +// Do not edit this file, it was automatically configured by CMake when +// Ceres was compiled with the relevant configuration for the machine +// on which Ceres was compiled. +// +// Ceres Developers: All options should have the same name as their mapped +// CMake options, in the preconfigured version of this file +// all options should be enclosed in '@'. + +#ifndef CERES_PUBLIC_INTERNAL_CONFIG_H_ +#define CERES_PUBLIC_INTERNAL_CONFIG_H_ + +// If defined, use the LGPL code in Eigen. +#define CERES_USE_EIGEN_SPARSE + +// If defined, Ceres was compiled without LAPACK. +// #define CERES_NO_LAPACK + +// If defined, Ceres was compiled without SuiteSparse. +// #define CERES_NO_SUITESPARSE + +// If defined, Ceres was compiled without CXSparse. +// #define CERES_NO_CXSPARSE + +// If defined, Ceres was compiled without CUDA. +// #define CERES_NO_CUDA + +// If defined, Ceres was compiled without Apple's Accelerate framework solvers. 
+#define CERES_NO_ACCELERATE_SPARSE + +#if defined(CERES_NO_SUITESPARSE) && \ + defined(CERES_NO_ACCELERATE_SPARSE) && \ + defined(CERES_NO_CXSPARSE) && \ + !defined(CERES_USE_EIGEN_SPARSE) // NOLINT +// If defined Ceres was compiled without any sparse linear algebra support. +#define CERES_NO_SPARSE +#endif + +// If defined, Ceres was compiled without Schur specializations. +// #define CERES_RESTRICT_SCHUR_SPECIALIZATION + +// If defined, Ceres was compiled to use Eigen instead of hardcoded BLAS +// routines. +// #define CERES_NO_CUSTOM_BLAS + +// If defined, Ceres was compiled without multithreading support. +// #define CERES_NO_THREADS +// If defined Ceres was compiled with OpenMP multithreading. +// #define CERES_USE_OPENMP +// If defined Ceres was compiled with modern C++ multithreading. +#define CERES_USE_CXX_THREADS + +// If defined, Ceres was compiled with a version MSVC >= 2005 which +// deprecated the standard POSIX names for bessel functions, replacing them +// with underscore prefixed versions (e.g. j0() -> _j0()). +// #define CERES_MSVC_USE_UNDERSCORE_PREFIXED_BESSEL_FUNCTIONS + +#if defined(CERES_USE_OPENMP) +#if defined(CERES_USE_CXX_THREADS) || defined(CERES_NO_THREADS) +#error CERES_USE_OPENMP is mutually exclusive to CERES_USE_CXX_THREADS and CERES_NO_THREADS +#endif +#elif defined(CERES_USE_CXX_THREADS) +#if defined(CERES_USE_OPENMP) || defined(CERES_NO_THREADS) +#error CERES_USE_CXX_THREADS is mutually exclusive to CERES_USE_OPENMP, CERES_USE_CXX_THREADS and CERES_NO_THREADS +#endif +#elif defined(CERES_NO_THREADS) +#if defined(CERES_USE_OPENMP) || defined(CERES_USE_CXX_THREADS) +#error CERES_NO_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_USE_CXX_THREADS +#endif +#else +# error One of CERES_USE_OPENMP, CERES_USE_CXX_THREADS or CERES_NO_THREADS must be defined. +#endif + +// CERES_NO_SPARSE should be automatically defined by config.h if Ceres was +// compiled without any sparse back-end. 
Verify that it has not subsequently +// been inconsistently redefined. +#if defined(CERES_NO_SPARSE) +#if !defined(CERES_NO_SUITESPARSE) +#error CERES_NO_SPARSE requires CERES_NO_SUITESPARSE. +#endif +#if !defined(CERES_NO_CXSPARSE) +#error CERES_NO_SPARSE requires CERES_NO_CXSPARSE +#endif +#if !defined(CERES_NO_ACCELERATE_SPARSE) +#error CERES_NO_SPARSE requires CERES_NO_ACCELERATE_SPARSE +#endif +#if defined(CERES_USE_EIGEN_SPARSE) +#error CERES_NO_SPARSE requires !CERES_USE_EIGEN_SPARSE +#endif +#endif + +#endif // CERES_PUBLIC_INTERNAL_CONFIG_H_ diff --git a/ceres-v2/include/internal/disable_warnings.h b/ceres-v2/include/internal/disable_warnings.h new file mode 100644 index 0000000000000000000000000000000000000000..d7766a0a08fdbec186f389dfe84e7ca952e62fa5 --- /dev/null +++ b/ceres-v2/include/internal/disable_warnings.h @@ -0,0 +1,44 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2015 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// This file has the sole purpose to silence warnings when including Ceres. + +// This is not your usual header guard. The macro CERES_WARNINGS_DISABLED +// shows up again in reenable_warnings.h. +#ifndef CERES_WARNINGS_DISABLED +#define CERES_WARNINGS_DISABLED + +#ifdef _MSC_VER +#pragma warning(push) +// Disable the warning C4251 which is triggered by stl classes in +// Ceres' public interface. To quote MSDN: "C4251 can be ignored " +// "if you are deriving from a type in the Standard C++ Library" +#pragma warning(disable : 4251) +#endif + +#endif // CERES_WARNINGS_DISABLED diff --git a/ceres-v2/include/internal/eigen.h b/ceres-v2/include/internal/eigen.h new file mode 100644 index 0000000000000000000000000000000000000000..111cc7a07bb4735768cbe1fe760df5e667cc7a08 --- /dev/null +++ b/ceres-v2/include/internal/eigen.h @@ -0,0 +1,75 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2015 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_INTERNAL_EIGEN_H_ +#define CERES_INTERNAL_EIGEN_H_ + +#include "Eigen/Core" + +namespace ceres { + +using Vector = Eigen::Matrix; +using Matrix = + Eigen::Matrix; +using VectorRef = Eigen::Map; +using MatrixRef = Eigen::Map; +using ConstVectorRef = Eigen::Map; +using ConstMatrixRef = Eigen::Map; + +// Column major matrices for DenseSparseMatrix/DenseQRSolver +using ColMajorMatrix = + Eigen::Matrix; + +using ColMajorMatrixRef = + Eigen::Map>; + +using ConstColMajorMatrixRef = + Eigen::Map>; + +// C++ does not support templated typdefs, thus the need for this +// struct so that we can support statically sized Matrix and Maps. 
+template +struct EigenTypes { + using Matrix = + Eigen::Matrix; + + using MatrixRef = Eigen::Map; + using ConstMatrixRef = Eigen::Map; + using Vector = Eigen::Matrix; + using VectorRef = Eigen::Map>; + using ConstVectorRef = Eigen::Map>; +}; + +} // namespace ceres + +#endif // CERES_INTERNAL_EIGEN_H_ diff --git a/ceres-v2/include/internal/export.h b/ceres-v2/include/internal/export.h new file mode 100644 index 0000000000000000000000000000000000000000..c85bc5ca65dc763637ed31fa9b2ca6fb25fb54f8 --- /dev/null +++ b/ceres-v2/include/internal/export.h @@ -0,0 +1,42 @@ + +#ifndef CERES_EXPORT_H +#define CERES_EXPORT_H + +#ifdef CERES_STATIC_DEFINE +# define CERES_EXPORT +# define CERES_NO_EXPORT +#else +# ifndef CERES_EXPORT +# ifdef ceres_EXPORTS + /* We are building this library */ +# define CERES_EXPORT +# else + /* We are using this library */ +# define CERES_EXPORT +# endif +# endif + +# ifndef CERES_NO_EXPORT +# define CERES_NO_EXPORT +# endif +#endif + +#ifndef CERES_DEPRECATED +# define CERES_DEPRECATED __attribute__ ((__deprecated__)) +#endif + +#ifndef CERES_DEPRECATED_EXPORT +# define CERES_DEPRECATED_EXPORT CERES_EXPORT CERES_DEPRECATED +#endif + +#ifndef CERES_DEPRECATED_NO_EXPORT +# define CERES_DEPRECATED_NO_EXPORT CERES_NO_EXPORT CERES_DEPRECATED +#endif + +#if 0 /* DEFINE_NO_DEPRECATED */ +# ifndef CERES_NO_DEPRECATED +# define CERES_NO_DEPRECATED +# endif +#endif + +#endif /* CERES_EXPORT_H */ diff --git a/ceres-v2/include/internal/fixed_array.h b/ceres-v2/include/internal/fixed_array.h new file mode 100644 index 0000000000000000000000000000000000000000..dcbddcd3a1d14698dc2d07799b842aa9c9f111d6 --- /dev/null +++ b/ceres-v2/include/internal/fixed_array.h @@ -0,0 +1,467 @@ +// Copyright 2018 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: fixed_array.h +// ----------------------------------------------------------------------------- +// +// A `FixedArray` represents a non-resizable array of `T` where the length of +// the array can be determined at run-time. It is a good replacement for +// non-standard and deprecated uses of `alloca()` and variable length arrays +// within the GCC extension. (See +// https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html). +// +// `FixedArray` allocates small arrays inline, keeping performance fast by +// avoiding heap operations. It also helps reduce the chances of +// accidentally overflowing your stack if large input is passed to +// your function. + +#ifndef CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_ +#define CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_ + +#include // For Eigen::aligned_allocator +#include +#include +#include +#include +#include +#include + +#include "ceres/internal/memory.h" +#include "glog/logging.h" + +namespace ceres { +namespace internal { + +constexpr static auto kFixedArrayUseDefault = static_cast(-1); + +// The default fixed array allocator. +// +// As one can not easily detect if a struct contains or inherits from a fixed +// size Eigen type, to be safe the Eigen::aligned_allocator is used by default. +// But trivial types can never contain Eigen types, so std::allocator is used to +// safe some heap memory. 
+template +using FixedArrayDefaultAllocator = + typename std::conditional::value, + std::allocator, + Eigen::aligned_allocator>::type; + +// ----------------------------------------------------------------------------- +// FixedArray +// ----------------------------------------------------------------------------- +// +// A `FixedArray` provides a run-time fixed-size array, allocating a small array +// inline for efficiency. +// +// Most users should not specify an `inline_elements` argument and let +// `FixedArray` automatically determine the number of elements +// to store inline based on `sizeof(T)`. If `inline_elements` is specified, the +// `FixedArray` implementation will use inline storage for arrays with a +// length <= `inline_elements`. +// +// Note that a `FixedArray` constructed with a `size_type` argument will +// default-initialize its values by leaving trivially constructible types +// uninitialized (e.g. int, int[4], double), and others default-constructed. +// This matches the behavior of c-style arrays and `std::array`, but not +// `std::vector`. +// +// Note that `FixedArray` does not provide a public allocator; if it requires a +// heap allocation, it will do so with global `::operator new[]()` and +// `::operator delete[]()`, even if T provides class-scope overrides for these +// operators. +template > +class FixedArray { + static_assert(!std::is_array::value || std::extent::value > 0, + "Arrays with unknown bounds cannot be used with FixedArray."); + + static constexpr size_t kInlineBytesDefault = 256; + + using AllocatorTraits = std::allocator_traits; + // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17, + // but this seems to be mostly pedantic. 
+ template + using EnableIfForwardIterator = typename std::enable_if::iterator_category, + std::forward_iterator_tag>::value>::type; + static constexpr bool DefaultConstructorIsNonTrivial() { + return !std::is_trivially_default_constructible::value; + } + + public: + using allocator_type = typename AllocatorTraits::allocator_type; + using value_type = typename AllocatorTraits::value_type; + using pointer = typename AllocatorTraits::pointer; + using const_pointer = typename AllocatorTraits::const_pointer; + using reference = value_type&; + using const_reference = const value_type&; + using size_type = typename AllocatorTraits::size_type; + using difference_type = typename AllocatorTraits::difference_type; + using iterator = pointer; + using const_iterator = const_pointer; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + static constexpr size_type inline_elements = + (N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type) + : static_cast(N)); + + FixedArray(const FixedArray& other, + const allocator_type& a = allocator_type()) + : FixedArray(other.begin(), other.end(), a) {} + + FixedArray(FixedArray&& other, const allocator_type& a = allocator_type()) + : FixedArray(std::make_move_iterator(other.begin()), + std::make_move_iterator(other.end()), + a) {} + + // Creates an array object that can store `n` elements. + // Note that trivially constructible elements will be uninitialized. + explicit FixedArray(size_type n, const allocator_type& a = allocator_type()) + : storage_(n, a) { + if (DefaultConstructorIsNonTrivial()) { + ConstructRange(storage_.alloc(), storage_.begin(), storage_.end()); + } + } + + // Creates an array initialized with `n` copies of `val`. 
+ FixedArray(size_type n, + const value_type& val, + const allocator_type& a = allocator_type()) + : storage_(n, a) { + ConstructRange(storage_.alloc(), storage_.begin(), storage_.end(), val); + } + + // Creates an array initialized with the size and contents of `init_list`. + FixedArray(std::initializer_list init_list, + const allocator_type& a = allocator_type()) + : FixedArray(init_list.begin(), init_list.end(), a) {} + + // Creates an array initialized with the elements from the input + // range. The array's size will always be `std::distance(first, last)`. + // REQUIRES: Iterator must be a forward_iterator or better. + template * = nullptr> + FixedArray(Iterator first, + Iterator last, + const allocator_type& a = allocator_type()) + : storage_(std::distance(first, last), a) { + CopyRange(storage_.alloc(), storage_.begin(), first, last); + } + + ~FixedArray() noexcept { + for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) { + AllocatorTraits::destroy(storage_.alloc(), cur); + } + } + + // Assignments are deleted because they break the invariant that the size of a + // `FixedArray` never changes. + void operator=(FixedArray&&) = delete; + void operator=(const FixedArray&) = delete; + + // FixedArray::size() + // + // Returns the length of the fixed array. + size_type size() const { return storage_.size(); } + + // FixedArray::max_size() + // + // Returns the largest possible value of `std::distance(begin(), end())` for a + // `FixedArray`. This is equivalent to the most possible addressable bytes + // over the number of bytes taken by T. + constexpr size_type max_size() const { + return (std::numeric_limits::max)() / sizeof(value_type); + } + + // FixedArray::empty() + // + // Returns whether or not the fixed array is empty. + bool empty() const { return size() == 0; } + + // FixedArray::memsize() + // + // Returns the memory size of the fixed array in bytes. 
+ size_t memsize() const { return size() * sizeof(value_type); } + + // FixedArray::data() + // + // Returns a const T* pointer to elements of the `FixedArray`. This pointer + // can be used to access (but not modify) the contained elements. + const_pointer data() const { return AsValueType(storage_.begin()); } + + // Overload of FixedArray::data() to return a T* pointer to elements of the + // fixed array. This pointer can be used to access and modify the contained + // elements. + pointer data() { return AsValueType(storage_.begin()); } + + // FixedArray::operator[] + // + // Returns a reference the ith element of the fixed array. + // REQUIRES: 0 <= i < size() + reference operator[](size_type i) { + DCHECK_LT(i, size()); + return data()[i]; + } + + // Overload of FixedArray::operator()[] to return a const reference to the + // ith element of the fixed array. + // REQUIRES: 0 <= i < size() + const_reference operator[](size_type i) const { + DCHECK_LT(i, size()); + return data()[i]; + } + + // FixedArray::front() + // + // Returns a reference to the first element of the fixed array. + reference front() { return *begin(); } + + // Overload of FixedArray::front() to return a reference to the first element + // of a fixed array of const values. + const_reference front() const { return *begin(); } + + // FixedArray::back() + // + // Returns a reference to the last element of the fixed array. + reference back() { return *(end() - 1); } + + // Overload of FixedArray::back() to return a reference to the last element + // of a fixed array of const values. + const_reference back() const { return *(end() - 1); } + + // FixedArray::begin() + // + // Returns an iterator to the beginning of the fixed array. + iterator begin() { return data(); } + + // Overload of FixedArray::begin() to return a const iterator to the + // beginning of the fixed array. 
+ const_iterator begin() const { return data(); } + + // FixedArray::cbegin() + // + // Returns a const iterator to the beginning of the fixed array. + const_iterator cbegin() const { return begin(); } + + // FixedArray::end() + // + // Returns an iterator to the end of the fixed array. + iterator end() { return data() + size(); } + + // Overload of FixedArray::end() to return a const iterator to the end of the + // fixed array. + const_iterator end() const { return data() + size(); } + + // FixedArray::cend() + // + // Returns a const iterator to the end of the fixed array. + const_iterator cend() const { return end(); } + + // FixedArray::rbegin() + // + // Returns a reverse iterator from the end of the fixed array. + reverse_iterator rbegin() { return reverse_iterator(end()); } + + // Overload of FixedArray::rbegin() to return a const reverse iterator from + // the end of the fixed array. + const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + + // FixedArray::crbegin() + // + // Returns a const reverse iterator from the end of the fixed array. + const_reverse_iterator crbegin() const { return rbegin(); } + + // FixedArray::rend() + // + // Returns a reverse iterator from the beginning of the fixed array. + reverse_iterator rend() { return reverse_iterator(begin()); } + + // Overload of FixedArray::rend() for returning a const reverse iterator + // from the beginning of the fixed array. + const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + // FixedArray::crend() + // + // Returns a reverse iterator from the beginning of the fixed array. + const_reverse_iterator crend() const { return rend(); } + + // FixedArray::fill() + // + // Assigns the given `value` to all elements in the fixed array. + void fill(const value_type& val) { std::fill(begin(), end(), val); } + + // Relational operators. 
Equality operators are elementwise using + // `operator==`, while order operators order FixedArrays lexicographically. + friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) { + return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); + } + + friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) { + return !(lhs == rhs); + } + + friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) { + return std::lexicographical_compare( + lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); + } + + friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) { + return rhs < lhs; + } + + friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) { + return !(rhs < lhs); + } + + friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) { + return !(lhs < rhs); + } + + private: + // StorageElement + // + // For FixedArrays with a C-style-array value_type, StorageElement is a POD + // wrapper struct called StorageElementWrapper that holds the value_type + // instance inside. This is needed for construction and destruction of the + // entire array regardless of how many dimensions it has. For all other cases, + // StorageElement is just an alias of value_type. + // + // Maintainer's Note: The simpler solution would be to simply wrap value_type + // in a struct whether it's an array or not. That causes some paranoid + // diagnostics to misfire, believing that 'data()' returns a pointer to a + // single element, rather than the packed array that it really is. + // e.g.: + // + // FixedArray buf(1); + // sprintf(buf.data(), "foo"); + // + // error: call to int __builtin___sprintf_chk(etc...) 
+ // will always overflow destination buffer [-Werror] + // + template ::type, + size_t InnerN = std::extent::value> + struct StorageElementWrapper { + InnerT array[InnerN]; + }; + + using StorageElement = + typename std::conditional::value, + StorageElementWrapper, + value_type>::type; + + static pointer AsValueType(pointer ptr) { return ptr; } + static pointer AsValueType(StorageElementWrapper* ptr) { + return std::addressof(ptr->array); + } + + static_assert(sizeof(StorageElement) == sizeof(value_type), ""); + static_assert(alignof(StorageElement) == alignof(value_type), ""); + + class NonEmptyInlinedStorage { + public: + StorageElement* data() { return reinterpret_cast(buff_); } + void AnnotateConstruct(size_type) {} + void AnnotateDestruct(size_type) {} + + // #ifdef ADDRESS_SANITIZER + // void* RedzoneBegin() { return &redzone_begin_; } + // void* RedzoneEnd() { return &redzone_end_ + 1; } + // #endif // ADDRESS_SANITIZER + + private: + // ADDRESS_SANITIZER_REDZONE(redzone_begin_); + alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])]; + // ADDRESS_SANITIZER_REDZONE(redzone_end_); + }; + + class EmptyInlinedStorage { + public: + StorageElement* data() { return nullptr; } + void AnnotateConstruct(size_type) {} + void AnnotateDestruct(size_type) {} + }; + + using InlinedStorage = + typename std::conditional::type; + + // Storage + // + // An instance of Storage manages the inline and out-of-line memory for + // instances of FixedArray. This guarantees that even when construction of + // individual elements fails in the FixedArray constructor body, the + // destructor for Storage will still be called and out-of-line memory will be + // properly deallocated. 
+ // + class Storage : public InlinedStorage { + public: + Storage(size_type n, const allocator_type& a) + : size_alloc_(n, a), data_(InitializeData()) {} + + ~Storage() noexcept { + if (UsingInlinedStorage(size())) { + InlinedStorage::AnnotateDestruct(size()); + } else { + AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size()); + } + } + + size_type size() const { return std::get<0>(size_alloc_); } + StorageElement* begin() const { return data_; } + StorageElement* end() const { return begin() + size(); } + allocator_type& alloc() { return std::get<1>(size_alloc_); } + + private: + static bool UsingInlinedStorage(size_type n) { + return n <= inline_elements; + } + + StorageElement* InitializeData() { + if (UsingInlinedStorage(size())) { + InlinedStorage::AnnotateConstruct(size()); + return InlinedStorage::data(); + } else { + return reinterpret_cast( + AllocatorTraits::allocate(alloc(), size())); + } + } + + // Using std::tuple and not absl::CompressedTuple, as it has a lot of + // dependencies to other absl headers. + std::tuple size_alloc_; + StorageElement* data_; + }; + + Storage storage_; +}; + +template +constexpr size_t FixedArray::kInlineBytesDefault; + +template +constexpr typename FixedArray::size_type + FixedArray::inline_elements; + +} // namespace internal +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_ diff --git a/ceres-v2/include/internal/householder_vector.h b/ceres-v2/include/internal/householder_vector.h new file mode 100644 index 0000000000000000000000000000000000000000..7700208be222e0e14742a635d6405fa753a411a3 --- /dev/null +++ b/ceres-v2/include/internal/householder_vector.h @@ -0,0 +1,96 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2015 Google Inc. All rights reserved. 
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: vitus@google.com (Michael Vitus) + +#ifndef CERES_PUBLIC_INTERNAL_HOUSEHOLDER_VECTOR_H_ +#define CERES_PUBLIC_INTERNAL_HOUSEHOLDER_VECTOR_H_ + +#include "Eigen/Core" +#include "glog/logging.h" + +namespace ceres { +namespace internal { + +// Algorithm 5.1.1 from 'Matrix Computations' by Golub et al. (Johns Hopkins +// Studies in Mathematical Sciences) but using the nth element of the input +// vector as pivot instead of first. 
This computes the vector v with v(n) = 1 +// and beta such that H = I - beta * v * v^T is orthogonal and +// H * x = ||x||_2 * e_n. +// +// NOTE: Some versions of MSVC have trouble deducing the type of v if +// you do not specify all the template arguments explicitly. +template +void ComputeHouseholderVector(const XVectorType& x, + Eigen::Matrix* v, + Scalar* beta) { + CHECK(beta != nullptr); + CHECK(v != nullptr); + CHECK_GT(x.rows(), 1); + CHECK_EQ(x.rows(), v->rows()); + + Scalar sigma = x.head(x.rows() - 1).squaredNorm(); + *v = x; + (*v)(v->rows() - 1) = Scalar(1.0); + + *beta = Scalar(0.0); + const Scalar& x_pivot = x(x.rows() - 1); + + if (sigma <= Scalar(std::numeric_limits::epsilon())) { + if (x_pivot < Scalar(0.0)) { + *beta = Scalar(2.0); + } + return; + } + + const Scalar mu = sqrt(x_pivot * x_pivot + sigma); + Scalar v_pivot = Scalar(1.0); + + if (x_pivot <= Scalar(0.0)) { + v_pivot = x_pivot - mu; + } else { + v_pivot = -sigma / (x_pivot + mu); + } + + *beta = Scalar(2.0) * v_pivot * v_pivot / (sigma + v_pivot * v_pivot); + + v->head(v->rows() - 1) /= v_pivot; +} + +template +typename Derived::PlainObject ApplyHouseholderVector( + const XVectorType& y, + const Eigen::MatrixBase& v, + const typename Derived::Scalar& beta) { + return (y - v * (beta * (v.transpose() * y))); +} + +} // namespace internal +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_HOUSEHOLDER_VECTOR_H_ diff --git a/ceres-v2/include/internal/integer_sequence_algorithm.h b/ceres-v2/include/internal/integer_sequence_algorithm.h new file mode 100644 index 0000000000000000000000000000000000000000..777c119a77fd195c6d5c1566b96b79b224f48451 --- /dev/null +++ b/ceres-v2/include/internal/integer_sequence_algorithm.h @@ -0,0 +1,291 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. 
+// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: jodebo_beck@gmx.de (Johannes Beck) +// sergiu.deitsch@gmail.com (Sergiu Deitsch) +// +// Algorithms to be used together with integer_sequence, like computing the sum +// or the exclusive scan (sometimes called exclusive prefix sum) at compile +// time. 
+ +#ifndef CERES_PUBLIC_INTERNAL_INTEGER_SEQUENCE_ALGORITHM_H_ +#define CERES_PUBLIC_INTERNAL_INTEGER_SEQUENCE_ALGORITHM_H_ + +#include + +#include "ceres/jet_fwd.h" + +namespace ceres { +namespace internal { + +// Implementation of calculating the sum of an integer sequence. +// Recursively instantiate SumImpl and calculate the sum of the N first +// numbers. This reduces the number of instantiations and speeds up +// compilation. +// +// Examples: +// 1) integer_sequence: +// Value = 5 +// +// 2) integer_sequence: +// Value = 4 + 2 + SumImpl>::Value +// Value = 4 + 2 + 0 +// +// 3) integer_sequence: +// Value = 2 + 1 + SumImpl>::Value +// Value = 2 + 1 + 4 +template +struct SumImpl; + +// Strip of and sum the first number. +template +struct SumImpl> { + static constexpr T Value = + N + SumImpl>::Value; +}; + +// Strip of and sum the first two numbers. +template +struct SumImpl> { + static constexpr T Value = + N1 + N2 + SumImpl>::Value; +}; + +// Strip of and sum the first four numbers. +template +struct SumImpl> { + static constexpr T Value = + N1 + N2 + N3 + N4 + SumImpl>::Value; +}; + +// Only one number is left. 'Value' is just that number ('recursion' ends). +template +struct SumImpl> { + static constexpr T Value = N; +}; + +// No number is left. 'Value' is the identity element (for sum this is zero). +template +struct SumImpl> { + static constexpr T Value = T(0); +}; + +// Calculate the sum of an integer sequence. The resulting sum will be stored in +// 'Value'. +template +class Sum { + using T = typename Seq::value_type; + + public: + static constexpr T Value = SumImpl::Value; +}; + +// Implementation of calculating an exclusive scan (exclusive prefix sum) of an +// integer sequence. Exclusive means that the i-th input element is not included +// in the i-th sum. Calculating the exclusive scan for an input array I results +// in the following output R: +// +// R[0] = 0 +// R[1] = I[0]; +// R[2] = I[0] + I[1]; +// R[3] = I[0] + I[1] + I[2]; +// ... 
+//
+// In C++17 std::exclusive_scan does the same operation at runtime (but
+// cannot be used to calculate the prefix sum at compile time). See
+// https://en.cppreference.com/w/cpp/algorithm/exclusive_scan for a more
+// detailed description.
+//
+// Example for integer_sequence<int, 1, 4, 3> (seq := integer_sequence):
+//                   T  , Sum,          Ns...   ,          Rs...
+// ExclusiveScanImpl<int,   0, seq<int, 1, 4, 3>, seq<int         >>
+// ExclusiveScanImpl<int,   1, seq<int,    4, 3>, seq<int, 0      >>
+// ExclusiveScanImpl<int,   5, seq<int,       3>, seq<int, 0, 1   >>
+// ExclusiveScanImpl<int,   8, seq<int         >, seq<int, 0, 1, 5>>
+//                                                ^^^^^^^^^^^^^^^^^
+//                                                resulting sequence
+template <typename T, T Sum, typename SeqIn, typename SeqOut>
+struct ExclusiveScanImpl;
+
+template <typename T, T Sum, T N, T... Ns, T... Rs>
+struct ExclusiveScanImpl<T,
+                         Sum,
+                         std::integer_sequence<T, N, Ns...>,
+                         std::integer_sequence<T, Rs...>> {
+  using Type =
+      typename ExclusiveScanImpl<T,
+                                 Sum + N,
+                                 std::integer_sequence<T, Ns...>,
+                                 std::integer_sequence<T, Rs..., Sum>>::Type;
+};
+
+// End of 'recursion'. The resulting type is SeqOut.
+template <typename T, T Sum, typename SeqOut>
+struct ExclusiveScanImpl<T, Sum, std::integer_sequence<T>, SeqOut> {
+  using Type = SeqOut;
+};
+
+// Calculates the exclusive scan of the specified integer sequence. The last
+// element (the total) is not included in the resulting sequence so they have
+// same length. This means the exclusive scan of integer_sequence<int, 1, 2,
+// 3> will be integer_sequence<int, 0, 1, 3>.
+template <typename Seq>
+class ExclusiveScanT {
+  using T = typename Seq::value_type;
+
+ public:
+  using Type =
+      typename ExclusiveScanImpl<T, T(0), Seq, std::integer_sequence<T>>::Type;
+};
+
+// Helper to use exclusive scan without typename.
+template <typename Seq>
+using ExclusiveScan = typename ExclusiveScanT<Seq>::Type;
+
+// Removes all elements from a integer sequence corresponding to specified
+// ValueToRemove.
+//
+// This type should not be used directly but instead RemoveValue.
+template <typename T, T ValueToRemove, typename... Sequence>
+struct RemoveValueImpl;
+
+// Final filtered sequence
+template <typename T, T ValueToRemove, T... Values>
+struct RemoveValueImpl<T,
+                       ValueToRemove,
+                       std::integer_sequence<T, Values...>,
+                       std::integer_sequence<T>> {
+  using type = std::integer_sequence<T, Values...>;
+};
+
+// Found a matching value
+template <typename T, T ValueToRemove, T... Head, T... Tail>
+struct RemoveValueImpl<T,
+                       ValueToRemove,
+                       std::integer_sequence<T, Head...>,
+                       std::integer_sequence<T, ValueToRemove, Tail...>>
+    : RemoveValueImpl<T,
+                      ValueToRemove,
+                      std::integer_sequence<T, Head...>,
+                      std::integer_sequence<T, Tail...>> {};
+
+// Move one element from the tail to the head
+template <typename T, T ValueToRemove, T... Head, T MiddleValue, T... Tail>
+struct RemoveValueImpl<T,
+                       ValueToRemove,
+                       std::integer_sequence<T, Head...>,
+                       std::integer_sequence<T, MiddleValue, Tail...>>
+    : RemoveValueImpl<T,
+                      ValueToRemove,
+                      std::integer_sequence<T, Head..., MiddleValue>,
+                      std::integer_sequence<T, Tail...>> {};
+
+// Start recursion by splitting the integer sequence into two separate ones
+template <typename T, T ValueToRemove, T... Values>
+struct RemoveValueImpl<T, ValueToRemove, std::integer_sequence<T, Values...>>
+    : RemoveValueImpl<T,
+                      ValueToRemove,
+                      std::integer_sequence<T>,
+                      std::integer_sequence<T, Values...>> {};
+
+// RemoveValue takes an integer Sequence of arbitrary type and removes all
+// elements matching ValueToRemove.
+//
+// In contrast to RemoveValueImpl, this implementation deduces the value type
+// eliminating the need to specify it explicitly.
+//
+// As an example, RemoveValue<std::integer_sequence<int, 1, 2, 3>, 4>::type
+// will not transform the type of the original sequence. However,
+// RemoveValue<std::integer_sequence<int, 0, 0, 2>, 2>::type will generate a
+// new sequence of type std::integer_sequence<int, 0, 0> by removing the
+// value 2.
+template <typename Sequence, typename Sequence::value_type ValueToRemove>
+struct RemoveValue
+    : RemoveValueImpl<typename Sequence::value_type, ValueToRemove, Sequence> {
+};
+
+// Convenience template alias for RemoveValue.
+template <typename Sequence, typename Sequence::value_type ValueToRemove>
+using RemoveValue_t = typename RemoveValue<Sequence, ValueToRemove>::type;
+
+// Determines whether the values of an integer sequence are all the same.
+//
+// The integer sequence must contain at least one value. The predicate is
+// undefined for empty sequences. The evaluation result of the predicate for
+// a sequence containing only one value is defined to be true.
+template <typename Sequence>
+struct AreAllEqual;
+
+// The predicate result for a sequence containing one element is defined to
+// be true.
+template <typename T, T Value>
+struct AreAllEqual<std::integer_sequence<T, Value>> : std::true_type {};
+
+// Recursion end.
+template <typename T, T Value1, T Value2>
+struct AreAllEqual<std::integer_sequence<T, Value1, Value2>>
+    : std::integral_constant<bool, Value1 == Value2> {};
+
+// Recursion for sequences containing at least two elements.
+template +// clang-format off +struct AreAllEqual > + : std::integral_constant +< + bool, + AreAllEqual >::value && + AreAllEqual >::value +> +// clang-format on +{}; + +// Convenience variable template for AreAllEqual. +template +constexpr bool AreAllEqual_v = AreAllEqual::value; + +// Predicate determining whether an integer sequence is either empty or all +// values are equal. +template +struct IsEmptyOrAreAllEqual; + +// Empty case. +template +struct IsEmptyOrAreAllEqual> : std::true_type {}; + +// General case for sequences containing at least one value. +template +struct IsEmptyOrAreAllEqual> + : AreAllEqual> {}; + +// Convenience variable template for IsEmptyOrAreAllEqual. +template +constexpr bool IsEmptyOrAreAllEqual_v = IsEmptyOrAreAllEqual::value; + +} // namespace internal +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_INTEGER_SEQUENCE_ALGORITHM_H_ diff --git a/ceres-v2/include/internal/jet_traits.h b/ceres-v2/include/internal/jet_traits.h new file mode 100644 index 0000000000000000000000000000000000000000..2a38c05b7dab672b9807ee9a7502fb9c795f9ac5 --- /dev/null +++ b/ceres-v2/include/internal/jet_traits.h @@ -0,0 +1,223 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. 
nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sergiu.deitsch@gmail.com (Sergiu Deitsch) +// + +#ifndef CERES_PUBLIC_INTERNAL_JET_TRAITS_H_ +#define CERES_PUBLIC_INTERNAL_JET_TRAITS_H_ + +#include +#include +#include + +#include "ceres/internal/integer_sequence_algorithm.h" +#include "ceres/jet_fwd.h" + +namespace ceres { +namespace internal { + +// Predicate that determines whether T is a Jet. +template +struct IsJet : std::false_type {}; + +template +struct IsJet> : std::true_type {}; + +// Convenience variable template for IsJet. +template +constexpr bool IsJet_v = IsJet::value; + +// Predicate that determines whether any of the Types is a Jet. +template +struct AreAnyJet : std::false_type {}; + +template +struct AreAnyJet : AreAnyJet {}; + +template +struct AreAnyJet, Types...> : std::true_type {}; + +// Convenience variable template for AreAnyJet. +template +constexpr bool AreAnyJet_v = AreAnyJet::value; + +// Extracts the underlying floating-point from a type T. 
+template <typename T>
+struct UnderlyingScalar {
+  using type = T;
+};
+
+// A Jet's underlying scalar is found by recursing into its scalar part
+// (which may itself be a Jet).
+template <typename T, int N>
+struct UnderlyingScalar<Jet<T, N>> : UnderlyingScalar<T> {};
+
+// Convenience template alias for UnderlyingScalar type trait.
+template <typename T>
+using UnderlyingScalar_t = typename UnderlyingScalar<T>::type;
+
+// Predicate determining whether all Types in the pack are the same.
+//
+// Specifically, the predicate applies std::is_same recursively to pairs of
+// Types in the pack.
+//
+// The predicate is defined only for template packs containing at least two
+// types.
+template <typename T1, typename T2, typename... Types>
+// clang-format off
+struct AreAllSame : std::integral_constant
+<
+    bool,
+    AreAllSame<T1, T2>::value &&
+    AreAllSame<T2, Types...>::value
+>
+// clang-format on
+{};
+
+// AreAllSame pairwise test.
+template <typename T1, typename T2>
+struct AreAllSame<T1, T2> : std::is_same<T1, T2> {};
+
+// Convenience variable template for AreAllSame.
+template <typename... Types>
+constexpr bool AreAllSame_v = AreAllSame<Types...>::value;
+
+// Determines the rank of a type. This allows to ensure that types passed as
+// arguments are compatible to each other. The rank of Jet is determined by
+// the dimensions of the dual part. The rank of a scalar is always 0.
+// Non-specialized types default to a rank of -1.
+template <typename T, typename E = void>
+struct Rank : std::integral_constant<int, -1> {};
+
+// The rank of a scalar is 0.
+template <typename T>
+struct Rank<T, std::enable_if_t<std::is_scalar<T>::value>>
+    : std::integral_constant<int, 0> {};
+
+// The rank of a Jet is given by its dimensionality.
+template <typename T, int N>
+struct Rank<Jet<T, N>> : std::integral_constant<int, N> {};
+
+// Convenience variable template for Rank.
+template <typename T>
+constexpr int Rank_v = Rank<T>::value;
+
+// Constructs an integer sequence of ranks for each of the Types in the pack.
+template <typename... Types>
+using Ranks_t = std::integer_sequence<int, Rank_v<Types>...>;
+
+// Returns the scalar part of a type. This overload acts as an identity.
+template <typename T>
+constexpr decltype(auto) AsScalar(T&& value) noexcept {
+  return std::forward<T>(value);
+}
+
+// Recursively unwraps the scalar part of a Jet until a non-Jet scalar type
+// is encountered.
+template <typename T, int N>
+constexpr decltype(auto) AsScalar(const Jet<T, N>& value) noexcept(
+    noexcept(AsScalar(value.a))) {
+  return AsScalar(value.a);
+}
+
+} // namespace internal
+
+// Type trait ensuring at least one of the types is a Jet,
+// the underlying scalar types are the same and Jet dimensions match.
+//
+// The type trait can be further specialized if necessary.
+//
+// This trait is a candidate for a concept definition once C++20 features can
+// be used.
+template <typename... Types>
+// clang-format off
+struct CompatibleJetOperands : std::integral_constant
+<
+    bool,
+    // At least one of the types is a Jet
+    internal::AreAnyJet_v<Types...> &&
+    // The underlying floating-point types are exactly the same
+    internal::AreAllSame_v<internal::UnderlyingScalar_t<std::decay_t<Types>>...> &&
+    // Non-zero ranks of types are equal
+    internal::IsEmptyOrAreAllEqual_v<internal::RemoveValue_t<internal::Ranks_t<std::decay_t<Types>...>, 0>>
+>
+// clang-format on
+{};
+
+// Single Jet operand is always compatible.
+template <typename T, int N>
+struct CompatibleJetOperands<Jet<T, N>> : std::true_type {};
+
+// Single non-Jet operand is always incompatible.
+template <typename T>
+struct CompatibleJetOperands<T> : std::false_type {};
+
+// Empty operands are always incompatible.
+template <>
+struct CompatibleJetOperands<> : std::false_type {};
+
+// Convenience variable template ensuring at least one of the types is a Jet,
+// the underlying scalar types are the same and Jet dimensions match.
+//
+// This trait is a candidate for a concept definition once C++20 features can
+// be used.
+template <typename... Types>
+constexpr bool CompatibleJetOperands_v = CompatibleJetOperands<Types...>::value;
+
+// Type trait ensuring at least one of the types is a Jet,
+// the underlying scalar types are compatible among each other and Jet
+// dimensions match.
+//
+// The type trait can be further specialized if necessary.
+//
+// This trait is a candidate for a concept definition once C++20 features can
+// be used.
+template +// clang-format off +struct PromotableJetOperands : std::integral_constant +< + bool, + // Types can be compatible among each other + internal::AreAnyJet_v && + // Non-zero ranks of types are equal + internal::IsEmptyOrAreAllEqual_v, 0>> +> +// clang-format on +{}; + +// Convenience variable template ensuring at least one of the types is a Jet, +// the underlying scalar types are compatible among each other and Jet +// dimensions match. +// +// This trait is a candidate for a concept definition once C++20 features can +// be used. +template +constexpr bool PromotableJetOperands_v = PromotableJetOperands::value; + +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_JET_TRAITS_H_ diff --git a/ceres-v2/include/internal/line_parameterization.h b/ceres-v2/include/internal/line_parameterization.h new file mode 100644 index 0000000000000000000000000000000000000000..eda390148df9b5de170f2d2c2b867941c865399d --- /dev/null +++ b/ceres-v2/include/internal/line_parameterization.h @@ -0,0 +1,183 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2020 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: jodebo_beck@gmx.de (Johannes Beck) +// + +#ifndef CERES_PUBLIC_INTERNAL_LINE_PARAMETERIZATION_H_ +#define CERES_PUBLIC_INTERNAL_LINE_PARAMETERIZATION_H_ + +#include "householder_vector.h" + +namespace ceres { + +template +bool LineParameterization::Plus( + const double* x_ptr, + const double* delta_ptr, + double* x_plus_delta_ptr) const { + // We seek a box plus operator of the form + // + // [o*, d*] = Plus([o, d], [delta_o, delta_d]) + // + // where o is the origin point, d is the direction vector, delta_o is + // the delta of the origin point and delta_d the delta of the direction and + // o* and d* is the updated origin point and direction. + // + // We separate the Plus operator into the origin point and directional part + // d* = Plus_d(d, delta_d) + // o* = Plus_o(o, d, delta_o) + // + // The direction update function Plus_d is the same as for the homogeneous + // vector parameterization: + // + // d* = H_{v(d)} [0.5 sinc(0.5 |delta_d|) delta_d, cos(0.5 |delta_d|)]^T + // + // where H is the householder matrix + // H_{v} = I - (2 / |v|^2) v v^T + // and + // v(d) = d - sign(d_n) |d| e_n. 
+ // + // The origin point update function Plus_o is defined as + // + // o* = o + H_{v(d)} [0.5 delta_o, 0]^T. + + static constexpr int kDim = AmbientSpaceDimension; + using AmbientVector = Eigen::Matrix; + using AmbientVectorRef = Eigen::Map>; + using ConstAmbientVectorRef = + Eigen::Map>; + using ConstTangentVectorRef = + Eigen::Map>; + + ConstAmbientVectorRef o(x_ptr); + ConstAmbientVectorRef d(x_ptr + kDim); + + ConstTangentVectorRef delta_o(delta_ptr); + ConstTangentVectorRef delta_d(delta_ptr + kDim - 1); + AmbientVectorRef o_plus_delta(x_plus_delta_ptr); + AmbientVectorRef d_plus_delta(x_plus_delta_ptr + kDim); + + const double norm_delta_d = delta_d.norm(); + + o_plus_delta = o; + + // Shortcut for zero delta direction. + if (norm_delta_d == 0.0) { + d_plus_delta = d; + + if (delta_o.isZero(0.0)) { + return true; + } + } + + // Calculate the householder transformation which is needed for f_d and f_o. + AmbientVector v; + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. + internal::ComputeHouseholderVector( + d, &v, &beta); + + if (norm_delta_d != 0.0) { + // Map the delta from the minimum representation to the over parameterized + // homogeneous vector. See section A6.9.2 on page 624 of Hartley & Zisserman + // (2nd Edition) for a detailed description. Note there is a typo on Page + // 625, line 4 so check the book errata. + const double norm_delta_div_2 = 0.5 * norm_delta_d; + const double sin_delta_by_delta = + std::sin(norm_delta_div_2) / norm_delta_div_2; + + // Apply the delta update to remain on the unit sphere. See section A6.9.3 + // on page 625 of Hartley & Zisserman (2nd Edition) for a detailed + // description. 
+ AmbientVector y; + y.template head() = 0.5 * sin_delta_by_delta * delta_d; + y[kDim - 1] = std::cos(norm_delta_div_2); + + d_plus_delta = d.norm() * (y - v * (beta * (v.transpose() * y))); + } + + // The null space is in the direction of the line, so the tangent space is + // perpendicular to the line direction. This is achieved by using the + // householder matrix of the direction and allow only movements + // perpendicular to e_n. + // + // The factor of 0.5 is used to be consistent with the line direction + // update. + AmbientVector y; + y << 0.5 * delta_o, 0; + o_plus_delta += y - v * (beta * (v.transpose() * y)); + + return true; +} + +template +bool LineParameterization::ComputeJacobian( + const double* x_ptr, double* jacobian_ptr) const { + static constexpr int kDim = AmbientSpaceDimension; + using AmbientVector = Eigen::Matrix; + using ConstAmbientVectorRef = + Eigen::Map>; + using MatrixRef = Eigen::Map< + Eigen::Matrix>; + + ConstAmbientVectorRef d(x_ptr + kDim); + MatrixRef jacobian(jacobian_ptr); + + // Clear the Jacobian as only half of the matrix is not zero. + jacobian.setZero(); + + AmbientVector v; + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. + internal::ComputeHouseholderVector( + d, &v, &beta); + + // The Jacobian is equal to J = 0.5 * H.leftCols(kDim - 1) where H is + // the Householder matrix (H = I - beta * v * v') for the origin point. For + // the line direction part the Jacobian is scaled by the norm of the + // direction. 
+ for (int i = 0; i < kDim - 1; ++i) { + jacobian.block(0, i, kDim, 1) = -0.5 * beta * v(i) * v; + jacobian.col(i)(i) += 0.5; + } + + jacobian.template block(kDim, kDim - 1) = + jacobian.template block(0, 0) * d.norm(); + return true; +} + +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_LINE_PARAMETERIZATION_H_ diff --git a/ceres-v2/include/internal/memory.h b/ceres-v2/include/internal/memory.h new file mode 100644 index 0000000000000000000000000000000000000000..45c5b67c353db52a29345289470fee563c404754 --- /dev/null +++ b/ceres-v2/include/internal/memory.h @@ -0,0 +1,90 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: memory.h +// ----------------------------------------------------------------------------- +// +// This header file contains utility functions for managing the creation and +// conversion of smart pointers. This file is an extension to the C++ +// standard library header file. + +#ifndef CERES_PUBLIC_INTERNAL_MEMORY_H_ +#define CERES_PUBLIC_INTERNAL_MEMORY_H_ + +#include + +#ifdef CERES_HAVE_EXCEPTIONS +#define CERES_INTERNAL_TRY try +#define CERES_INTERNAL_CATCH_ANY catch (...) 
+#define CERES_INTERNAL_RETHROW \ + do { \ + throw; \ + } while (false) +#else // CERES_HAVE_EXCEPTIONS +#define CERES_INTERNAL_TRY if (true) +#define CERES_INTERNAL_CATCH_ANY else if (false) +#define CERES_INTERNAL_RETHROW \ + do { \ + } while (false) +#endif // CERES_HAVE_EXCEPTIONS + +namespace ceres { +namespace internal { + +template +void ConstructRange(Allocator& alloc, + Iterator first, + Iterator last, + const Args&... args) { + for (Iterator cur = first; cur != last; ++cur) { + CERES_INTERNAL_TRY { + std::allocator_traits::construct( + alloc, std::addressof(*cur), args...); + } + CERES_INTERNAL_CATCH_ANY { + while (cur != first) { + --cur; + std::allocator_traits::destroy(alloc, std::addressof(*cur)); + } + CERES_INTERNAL_RETHROW; + } + } +} + +template +void CopyRange(Allocator& alloc, + Iterator destination, + InputIterator first, + InputIterator last) { + for (Iterator cur = destination; first != last; + static_cast(++cur), static_cast(++first)) { + CERES_INTERNAL_TRY { + std::allocator_traits::construct( + alloc, std::addressof(*cur), *first); + } + CERES_INTERNAL_CATCH_ANY { + while (cur != destination) { + --cur; + std::allocator_traits::destroy(alloc, std::addressof(*cur)); + } + CERES_INTERNAL_RETHROW; + } + } +} + +} // namespace internal +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_MEMORY_H_ diff --git a/ceres-v2/include/internal/numeric_diff.h b/ceres-v2/include/internal/numeric_diff.h new file mode 100644 index 0000000000000000000000000000000000000000..351845c05fbd1a2cdb0350a74a51062976e9a792 --- /dev/null +++ b/ceres-v2/include/internal/numeric_diff.h @@ -0,0 +1,508 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2015 Google Inc. All rights reserved. 
+// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// mierle@gmail.com (Keir Mierle) +// tbennun@gmail.com (Tal Ben-Nun) +// +// Finite differencing routines used by NumericDiffCostFunction. 
+ +#ifndef CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_ +#define CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_ + +#include +#include + +#include "Eigen/Dense" +#include "Eigen/StdVector" +#include "ceres/cost_function.h" +#include "ceres/internal/fixed_array.h" +#include "ceres/internal/variadic_evaluate.h" +#include "ceres/numeric_diff_options.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { +namespace internal { + +// This is split from the main class because C++ doesn't allow partial template +// specializations for member functions. The alternative is to repeat the main +// class for differing numbers of parameters, which is also unfortunate. +template +struct NumericDiff { + // Mutates parameters but must restore them before return. + static bool EvaluateJacobianForParameterBlock( + const CostFunctor* functor, + const double* residuals_at_eval_point, + const NumericDiffOptions& options, + int num_residuals, + int parameter_block_index, + int parameter_block_size, + double** parameters, + double* jacobian) { + using Eigen::ColMajor; + using Eigen::Map; + using Eigen::Matrix; + using Eigen::RowMajor; + + DCHECK(jacobian); + + const int num_residuals_internal = + (kNumResiduals != ceres::DYNAMIC ? kNumResiduals : num_residuals); + const int parameter_block_index_internal = + (kParameterBlock != ceres::DYNAMIC ? kParameterBlock + : parameter_block_index); + const int parameter_block_size_internal = + (kParameterBlockSize != ceres::DYNAMIC ? kParameterBlockSize + : parameter_block_size); + + using ResidualVector = Matrix; + using ParameterVector = Matrix; + + // The convoluted reasoning for choosing the Row/Column major + // ordering of the matrix is an artifact of the restrictions in + // Eigen that prevent it from creating RowMajor matrices with a + // single column. In these cases, we ask for a ColMajor matrix. 
+ using JacobianMatrix = + Matrix; + + Map parameter_jacobian( + jacobian, num_residuals_internal, parameter_block_size_internal); + + Map x_plus_delta( + parameters[parameter_block_index_internal], + parameter_block_size_internal); + ParameterVector x(x_plus_delta); + ParameterVector step_size = + x.array().abs() * ((kMethod == RIDDERS) + ? options.ridders_relative_initial_step_size + : options.relative_step_size); + + // It is not a good idea to make the step size arbitrarily + // small. This will lead to problems with round off and numerical + // instability when dividing by the step size. The general + // recommendation is to not go down below sqrt(epsilon). + double min_step_size = std::sqrt(std::numeric_limits::epsilon()); + + // For Ridders' method, the initial step size is required to be large, + // thus ridders_relative_initial_step_size is used. + if (kMethod == RIDDERS) { + min_step_size = + (std::max)(min_step_size, options.ridders_relative_initial_step_size); + } + + // For each parameter in the parameter block, use finite differences to + // compute the derivative for that parameter. 
+ FixedArray temp_residual_array(num_residuals_internal); + FixedArray residual_array(num_residuals_internal); + Map residuals(residual_array.data(), + num_residuals_internal); + + for (int j = 0; j < parameter_block_size_internal; ++j) { + const double delta = (std::max)(min_step_size, step_size(j)); + + if (kMethod == RIDDERS) { + if (!EvaluateRiddersJacobianColumn(functor, + j, + delta, + options, + num_residuals_internal, + parameter_block_size_internal, + x.data(), + residuals_at_eval_point, + parameters, + x_plus_delta.data(), + temp_residual_array.data(), + residual_array.data())) { + return false; + } + } else { + if (!EvaluateJacobianColumn(functor, + j, + delta, + num_residuals_internal, + parameter_block_size_internal, + x.data(), + residuals_at_eval_point, + parameters, + x_plus_delta.data(), + temp_residual_array.data(), + residual_array.data())) { + return false; + } + } + + parameter_jacobian.col(j).matrix() = residuals; + } + return true; + } + + static bool EvaluateJacobianColumn(const CostFunctor* functor, + int parameter_index, + double delta, + int num_residuals, + int parameter_block_size, + const double* x_ptr, + const double* residuals_at_eval_point, + double** parameters, + double* x_plus_delta_ptr, + double* temp_residuals_ptr, + double* residuals_ptr) { + using Eigen::Map; + using Eigen::Matrix; + + using ResidualVector = Matrix; + using ParameterVector = Matrix; + + Map x(x_ptr, parameter_block_size); + Map x_plus_delta(x_plus_delta_ptr, parameter_block_size); + + Map residuals(residuals_ptr, num_residuals); + Map temp_residuals(temp_residuals_ptr, num_residuals); + + // Mutate 1 element at a time and then restore. + x_plus_delta(parameter_index) = x(parameter_index) + delta; + + if (!VariadicEvaluate( + *functor, parameters, residuals.data())) { + return false; + } + + // Compute this column of the jacobian in 3 steps: + // 1. Store residuals for the forward part. + // 2. Subtract residuals for the backward (or 0) part. + // 3. 
Divide out the run. + double one_over_delta = 1.0 / delta; + if (kMethod == CENTRAL || kMethod == RIDDERS) { + // Compute the function on the other side of x(parameter_index). + x_plus_delta(parameter_index) = x(parameter_index) - delta; + + if (!VariadicEvaluate( + *functor, parameters, temp_residuals.data())) { + return false; + } + + residuals -= temp_residuals; + one_over_delta /= 2; + } else { + // Forward difference only; reuse existing residuals evaluation. + residuals -= + Map(residuals_at_eval_point, num_residuals); + } + + // Restore x_plus_delta. + x_plus_delta(parameter_index) = x(parameter_index); + + // Divide out the run to get slope. + residuals *= one_over_delta; + + return true; + } + + // This numeric difference implementation uses adaptive differentiation + // on the parameters to obtain the Jacobian matrix. The adaptive algorithm + // is based on Ridders' method for adaptive differentiation, which creates + // a Romberg tableau from varying step sizes and extrapolates the + // intermediate results to obtain the current computational error. + // + // References: + // C.J.F. Ridders, Accurate computation of F'(x) and F'(x) F"(x), Advances + // in Engineering Software (1978), Volume 4, Issue 2, April 1982, + // Pages 75-76, ISSN 0141-1195, + // http://dx.doi.org/10.1016/S0141-1195(82)80057-0. 
+ static bool EvaluateRiddersJacobianColumn( + const CostFunctor* functor, + int parameter_index, + double delta, + const NumericDiffOptions& options, + int num_residuals, + int parameter_block_size, + const double* x_ptr, + const double* residuals_at_eval_point, + double** parameters, + double* x_plus_delta_ptr, + double* temp_residuals_ptr, + double* residuals_ptr) { + using Eigen::aligned_allocator; + using Eigen::Map; + using Eigen::Matrix; + + using ResidualVector = Matrix; + using ResidualCandidateMatrix = + Matrix; + using ParameterVector = Matrix; + + Map x(x_ptr, parameter_block_size); + Map x_plus_delta(x_plus_delta_ptr, parameter_block_size); + + Map residuals(residuals_ptr, num_residuals); + Map temp_residuals(temp_residuals_ptr, num_residuals); + + // In order for the algorithm to converge, the step size should be + // initialized to a value that is large enough to produce a significant + // change in the function. + // As the derivative is estimated, the step size decreases. + // By default, the step sizes are chosen so that the middle column + // of the Romberg tableau uses the input delta. + double current_step_size = + delta * pow(options.ridders_step_shrink_factor, + options.max_num_ridders_extrapolations / 2); + + // Double-buffering temporary differential candidate vectors + // from previous step size. + ResidualCandidateMatrix stepsize_candidates_a( + num_residuals, options.max_num_ridders_extrapolations); + ResidualCandidateMatrix stepsize_candidates_b( + num_residuals, options.max_num_ridders_extrapolations); + ResidualCandidateMatrix* current_candidates = &stepsize_candidates_a; + ResidualCandidateMatrix* previous_candidates = &stepsize_candidates_b; + + // Represents the computational error of the derivative. This variable is + // initially set to a large value, and is set to the difference between + // current and previous finite difference extrapolations. 
+ // norm_error is supposed to decrease as the finite difference tableau + // generation progresses, serving both as an estimate for differentiation + // error and as a measure of differentiation numerical stability. + double norm_error = (std::numeric_limits::max)(); + + // Loop over decreasing step sizes until: + // 1. Error is smaller than a given value (ridders_epsilon), + // 2. Maximal order of extrapolation reached, or + // 3. Extrapolation becomes numerically unstable. + for (int i = 0; i < options.max_num_ridders_extrapolations; ++i) { + // Compute the numerical derivative at this step size. + if (!EvaluateJacobianColumn(functor, + parameter_index, + current_step_size, + num_residuals, + parameter_block_size, + x.data(), + residuals_at_eval_point, + parameters, + x_plus_delta.data(), + temp_residuals.data(), + current_candidates->col(0).data())) { + // Something went wrong; bail. + return false; + } + + // Store initial results. + if (i == 0) { + residuals = current_candidates->col(0); + } + + // Shrink differentiation step size. + current_step_size /= options.ridders_step_shrink_factor; + + // Extrapolation factor for Richardson acceleration method (see below). + double richardson_factor = options.ridders_step_shrink_factor * + options.ridders_step_shrink_factor; + for (int k = 1; k <= i; ++k) { + // Extrapolate the various orders of finite differences using + // the Richardson acceleration method. + current_candidates->col(k) = + (richardson_factor * current_candidates->col(k - 1) - + previous_candidates->col(k - 1)) / + (richardson_factor - 1.0); + + richardson_factor *= options.ridders_step_shrink_factor * + options.ridders_step_shrink_factor; + + // Compute the difference between the previous value and the current. + double candidate_error = (std::max)( + (current_candidates->col(k) - current_candidates->col(k - 1)) + .norm(), + (current_candidates->col(k) - previous_candidates->col(k - 1)) + .norm()); + + // If the error has decreased, update results. 
+ if (candidate_error <= norm_error) { + norm_error = candidate_error; + residuals = current_candidates->col(k); + + // If the error is small enough, stop. + if (norm_error < options.ridders_epsilon) { + break; + } + } + } + + // After breaking out of the inner loop, declare convergence. + if (norm_error < options.ridders_epsilon) { + break; + } + + // Check to see if the current gradient estimate is numerically unstable. + // If so, bail out and return the last stable result. + if (i > 0) { + double tableau_error = + (current_candidates->col(i) - previous_candidates->col(i - 1)) + .norm(); + + // Compare current error to the chosen candidate's error. + if (tableau_error >= 2 * norm_error) { + break; + } + } + + std::swap(current_candidates, previous_candidates); + } + return true; + } +}; + +// This function calls NumericDiff<...>::EvaluateJacobianForParameterBlock for +// each parameter block. +// +// Example: +// A call to +// EvaluateJacobianForParameterBlocks>( +// functor, +// residuals_at_eval_point, +// options, +// num_residuals, +// parameters, +// jacobians); +// will result in the following calls to +// NumericDiff<...>::EvaluateJacobianForParameterBlock: +// +// if (jacobians[0] != nullptr) { +// if (!NumericDiff< +// CostFunctor, +// method, +// kNumResiduals, +// StaticParameterDims<2, 3>, +// 0, +// 2>::EvaluateJacobianForParameterBlock(functor, +// residuals_at_eval_point, +// options, +// num_residuals, +// 0, +// 2, +// parameters, +// jacobians[0])) { +// return false; +// } +// } +// if (jacobians[1] != nullptr) { +// if (!NumericDiff< +// CostFunctor, +// method, +// kNumResiduals, +// StaticParameterDims<2, 3>, +// 1, +// 3>::EvaluateJacobianForParameterBlock(functor, +// residuals_at_eval_point, +// options, +// num_residuals, +// 1, +// 3, +// parameters, +// jacobians[1])) { +// return false; +// } +// } +template +struct EvaluateJacobianForParameterBlocks; + +template +struct EvaluateJacobianForParameterBlocks, + ParameterIdx> { + 
template + static bool Apply(const CostFunctor* functor, + const double* residuals_at_eval_point, + const NumericDiffOptions& options, + int num_residuals, + double** parameters, + double** jacobians) { + if (jacobians[ParameterIdx] != nullptr) { + if (!NumericDiff< + CostFunctor, + method, + kNumResiduals, + ParameterDims, + ParameterIdx, + N>::EvaluateJacobianForParameterBlock(functor, + residuals_at_eval_point, + options, + num_residuals, + ParameterIdx, + N, + parameters, + jacobians[ParameterIdx])) { + return false; + } + } + + return EvaluateJacobianForParameterBlocks, + ParameterIdx + 1>:: + template Apply(functor, + residuals_at_eval_point, + options, + num_residuals, + parameters, + jacobians); + } +}; + +// End of 'recursion'. Nothing more to do. +template +struct EvaluateJacobianForParameterBlocks, + ParameterIdx> { + template + static bool Apply(const CostFunctor* /* NOT USED*/, + const double* /* NOT USED*/, + const NumericDiffOptions& /* NOT USED*/, + int /* NOT USED*/, + double** /* NOT USED*/, + double** /* NOT USED*/) { + return true; + } +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_ diff --git a/ceres-v2/include/internal/parameter_dims.h b/ceres-v2/include/internal/parameter_dims.h new file mode 100644 index 0000000000000000000000000000000000000000..240210614162beceea4466f2ea30fdf92a5ced0e --- /dev/null +++ b/ceres-v2/include/internal/parameter_dims.h @@ -0,0 +1,124 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2018 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: jodebo_beck@gmx.de (Johannes Beck) + +#ifndef CERES_PUBLIC_INTERNAL_PARAMETER_DIMS_H_ +#define CERES_PUBLIC_INTERNAL_PARAMETER_DIMS_H_ + +#include +#include + +#include "ceres/internal/integer_sequence_algorithm.h" + +namespace ceres { +namespace internal { + +// Checks, whether the given parameter block sizes are valid. Valid means every +// dimension is bigger than zero. +constexpr bool IsValidParameterDimensionSequence(std::integer_sequence) { + return true; +} + +template +constexpr bool IsValidParameterDimensionSequence( + std::integer_sequence) { + return (N <= 0) ? false + : IsValidParameterDimensionSequence( + std::integer_sequence()); +} + +// Helper class that represents the parameter dimensions. 
The parameter +// dimensions are either dynamic or the sizes are known at compile time. It is +// used to pass parameter block dimensions around (e.g. between functions or +// classes). +// +// As an example if one have three parameter blocks with dimensions (2, 4, 1), +// one would use 'StaticParameterDims<2, 4, 1>' which is a synonym for +// 'ParameterDims'. +// For dynamic parameter dims, one would just use 'DynamicParameterDims', which +// is a synonym for 'ParameterDims'. +template +class ParameterDims { + public: + using Parameters = std::integer_sequence; + + // The parameter dimensions are only valid if all parameter block dimensions + // are greater than zero. + static constexpr bool kIsValid = + IsValidParameterDimensionSequence(Parameters()); + static_assert(kIsValid, + "Invalid parameter block dimension detected. Each parameter " + "block dimension must be bigger than zero."); + + static constexpr bool kIsDynamic = IsDynamic; + static constexpr int kNumParameterBlocks = sizeof...(Ns); + static_assert(kIsDynamic || kNumParameterBlocks > 0, + "At least one parameter block must be specified."); + + static constexpr int kNumParameters = + Sum>::Value; + + static constexpr int GetDim(int dim) { return params_[dim]; } + + // If one has all parameters packed into a single array this function unpacks + // the parameters. + template + static inline std::array GetUnpackedParameters( + T* ptr) { + using Offsets = ExclusiveScan; + return GetUnpackedParameters(ptr, Offsets()); + } + + private: + template + static inline std::array GetUnpackedParameters( + T* ptr, std::integer_sequence) { + return std::array{{ptr + Indices...}}; + } + + static constexpr std::array params_{Ns...}; +}; + +// Even static constexpr member variables needs to be defined (not only +// declared). As the ParameterDims class is tempalted this definition must +// be in the header file. 
+template +constexpr std::array::kNumParameterBlocks> + ParameterDims::params_; + +// Using declarations for static and dynamic parameter dims. This makes client +// code easier to read. +template +using StaticParameterDims = ParameterDims; +using DynamicParameterDims = ParameterDims; + +} // namespace internal +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_PARAMETER_DIMS_H_ diff --git a/ceres-v2/include/internal/port.h b/ceres-v2/include/internal/port.h new file mode 100644 index 0000000000000000000000000000000000000000..4275b0e15c361c2a741e71fe95c2640c83f58921 --- /dev/null +++ b/ceres-v2/include/internal/port.h @@ -0,0 +1,88 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: keir@google.com (Keir Mierle) + +#ifndef CERES_PUBLIC_INTERNAL_PORT_H_ +#define CERES_PUBLIC_INTERNAL_PORT_H_ + +// A macro to mark a function/variable/class as deprecated. +// We use compiler specific attributes rather than the c++ +// attribute because they do not mix well with each other. +#if defined(_MSC_VER) +#define CERES_DEPRECATED_WITH_MSG(message) __declspec(deprecated(message)) +#elif defined(__GNUC__) +#define CERES_DEPRECATED_WITH_MSG(message) __attribute__((deprecated(message))) +#else +// In the worst case fall back to c++ attribute. +#define CERES_DEPRECATED_WITH_MSG(message) [[deprecated(message)]] +#endif + +#ifndef CERES_GET_FLAG +#define CERES_GET_FLAG(X) X +#endif + +// Indicates whether C++17 is currently active +#ifndef CERES_HAS_CPP17 +#if __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +#define CERES_HAS_CPP17 +#endif // __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= + // 201703L) +#endif // !defined(CERES_HAS_CPP17) + +// Indicates whether C++20 is currently active +#ifndef CERES_HAS_CPP20 +#if __cplusplus >= 202002L || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) +#define CERES_HAS_CPP20 +#endif // __cplusplus >= 202002L || (defined(_MSVC_LANG) && _MSVC_LANG >= + // 202002L) +#endif // !defined(CERES_HAS_CPP20) + +// Prevents symbols from being substituted by the corresponding macro definition +// under the same name. 
For instance, min and max are defined as macros on +// Windows (unless NOMINMAX is defined) which causes compilation errors when +// defining or referencing symbols under the same name. +// +// To be robust in all cases particularly when NOMINMAX cannot be used, use this +// macro to annotate min/max declarations/definitions. Examples: +// +// int max CERES_PREVENT_MACRO_SUBSTITUTION(); +// min CERES_PREVENT_MACRO_SUBSTITUTION(a, b); +// max CERES_PREVENT_MACRO_SUBSTITUTION(a, b); +// +// NOTE: In case the symbols for which the substitution must be prevented are +// used within another macro, the substitution must be inhibited using parens as +// +// (std::numerical_limits::max)() +// +// since the helper macro will not work here. Do not use this technique in +// general case, because it will prevent argument-dependent lookup (ADL). +// +#define CERES_PREVENT_MACRO_SUBSTITUTION // Yes, it's empty + +#endif // CERES_PUBLIC_INTERNAL_PORT_H_ diff --git a/ceres-v2/include/internal/reenable_warnings.h b/ceres-v2/include/internal/reenable_warnings.h new file mode 100644 index 0000000000000000000000000000000000000000..2c5db061fd7a8d2be0c86c571ee4cf0ef4f128da --- /dev/null +++ b/ceres-v2/include/internal/reenable_warnings.h @@ -0,0 +1,38 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2015 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. 
nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// + +// This is not your usual header guard. See disable_warnings.h +#ifdef CERES_WARNINGS_DISABLED +#undef CERES_WARNINGS_DISABLED + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +#endif // CERES_WARNINGS_DISABLED diff --git a/ceres-v2/include/internal/sphere_manifold_functions.h b/ceres-v2/include/internal/sphere_manifold_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..5be3321a5792933be1860a9b542dfa1fc6083fe3 --- /dev/null +++ b/ceres-v2/include/internal/sphere_manifold_functions.h @@ -0,0 +1,162 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: vitus@google.com (Mike Vitus) +// jodebo_beck@gmx.de (Johannes Beck) + +#ifndef CERES_PUBLIC_INTERNAL_SPHERE_MANIFOLD_HELPERS_H_ +#define CERES_PUBLIC_INTERNAL_SPHERE_MANIFOLD_HELPERS_H_ + +#include "ceres/internal/householder_vector.h" + +// This module contains functions to compute the SphereManifold plus and minus +// operator and their Jacobians. +// +// As the parameters to these functions are shared between them, they are +// described here: The following variable names are used: +// Plus(x, delta) = x + delta = x_plus_delta, +// Minus(y, x) = y - x = y_minus_x. +// +// The remaining ones are v and beta which describe the Householder +// transformation of x, and norm_delta which is the norm of delta. 
+// +// The types of x, y, x_plus_delta and y_minus_x need to be equivalent to +// Eigen::Matrix and the type of delta needs +// to be equivalent to Eigen::Matrix. +// +// The type of Jacobian plus needs to be equivalent to Eigen::Matrix and for +// Jacobian minus Eigen::Matrix. +// +// For all vector / matrix inputs and outputs, template parameters are +// used in order to allow also Eigen::Ref and Eigen block expressions to +// be passed to the function. + +namespace ceres { +namespace internal { + +template +inline void ComputeSphereManifoldPlus(const VT& v, + double beta, + const XT& x, + const DeltaT& delta, + double norm_delta, + XPlusDeltaT* x_plus_delta) { + constexpr int AmbientDim = VT::RowsAtCompileTime; + + // Map the delta from the minimum representation to the over parameterized + // homogeneous vector. See B.2 p.25 equation (106) - (107) for more details. + const double norm_delta_div_2 = 0.5 * norm_delta; + const double sin_delta_by_delta = + std::sin(norm_delta_div_2) / norm_delta_div_2; + + Eigen::Matrix y(v.size()); + y << 0.5 * sin_delta_by_delta * delta, std::cos(norm_delta_div_2); + + // Apply the delta update to remain on the sphere. + *x_plus_delta = x.norm() * ApplyHouseholderVector(y, v, beta); +} + +template +inline void ComputeSphereManifoldPlusJacobian(const VT& x, + JacobianT* jacobian) { + constexpr int AmbientSpaceDim = VT::RowsAtCompileTime; + using AmbientVector = Eigen::Matrix; + const int ambient_size = x.size(); + const int tangent_size = x.size() - 1; + + AmbientVector v(ambient_size); + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. + ComputeHouseholderVector(x, &v, &beta); + + // The Jacobian is equal to J = 0.5 * H.leftCols(size_ - 1) where H is the + // Householder matrix (H = I - beta * v * v'). 
+ for (int i = 0; i < tangent_size; ++i) { + (*jacobian).col(i) = -0.5 * beta * v(i) * v; + (*jacobian)(i, i) += 0.5; + } + (*jacobian) *= x.norm(); +} + +template +inline void ComputeSphereManifoldMinus( + const VT& v, double beta, const XT& x, const YT& y, YMinusXT* y_minus_x) { + constexpr int AmbientSpaceDim = VT::RowsAtCompileTime; + constexpr int TangentSpaceDim = + AmbientSpaceDim == Eigen::Dynamic ? Eigen::Dynamic : AmbientSpaceDim - 1; + using AmbientVector = Eigen::Matrix; + + const int tanget_size = v.size() - 1; + + const AmbientVector hy = ApplyHouseholderVector(y, v, beta) / x.norm(); + + // Calculate y - x. See B.2 p.25 equation (108). + double y_last = hy[tanget_size]; + double hy_norm = hy.template head(tanget_size).norm(); + if (hy_norm == 0.0) { + y_minus_x->setZero(); + } else { + *y_minus_x = 2.0 * std::atan2(hy_norm, y_last) / hy_norm * + hy.template head(tanget_size); + } +} + +template +inline void ComputeSphereManifoldMinusJacobian(const VT& x, + JacobianT* jacobian) { + constexpr int AmbientSpaceDim = VT::RowsAtCompileTime; + using AmbientVector = Eigen::Matrix; + const int ambient_size = x.size(); + const int tangent_size = x.size() - 1; + + AmbientVector v(ambient_size); + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. + ComputeHouseholderVector(x, &v, &beta); + + // The Jacobian is equal to J = 2.0 * H.leftCols(size_ - 1) where H is the + // Householder matrix (H = I - beta * v * v'). 
+ for (int i = 0; i < tangent_size; ++i) { + (*jacobian).row(i) = -2.0 * beta * v(i) * v; + (*jacobian)(i, i) += 2.0; + } + (*jacobian) /= x.norm(); +} + +} // namespace internal +} // namespace ceres + +#endif diff --git a/ceres-v2/include/internal/variadic_evaluate.h b/ceres-v2/include/internal/variadic_evaluate.h new file mode 100644 index 0000000000000000000000000000000000000000..b8408237cc335dfd270c4a2634e8af57b18bbd4a --- /dev/null +++ b/ceres-v2/include/internal/variadic_evaluate.h @@ -0,0 +1,113 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2015 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// mierle@gmail.com (Keir Mierle) +// jodebo_beck@gmx.de (Johannes Beck) + +#ifndef CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_ +#define CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_ + +#include +#include +#include + +#include "ceres/cost_function.h" +#include "ceres/internal/parameter_dims.h" + +namespace ceres { +namespace internal { + +// For fixed size cost functors +template +inline bool VariadicEvaluateImpl(const Functor& functor, + T const* const* input, + T* output, + std::false_type /*is_dynamic*/, + std::integer_sequence) { + static_assert(sizeof...(Indices), + "Invalid number of parameter blocks. 
At least one parameter " + "block must be specified."); + return functor(input[Indices]..., output); +} + +// For dynamic sized cost functors +template +inline bool VariadicEvaluateImpl(const Functor& functor, + T const* const* input, + T* output, + std::true_type /*is_dynamic*/, + std::integer_sequence) { + return functor(input, output); +} + +// For ceres cost functors (not ceres::CostFunction) +template +inline bool VariadicEvaluateImpl(const Functor& functor, + T const* const* input, + T* output, + const void* /* NOT USED */) { + using ParameterBlockIndices = + std::make_integer_sequence; + using IsDynamic = std::integral_constant; + return VariadicEvaluateImpl( + functor, input, output, IsDynamic(), ParameterBlockIndices()); +} + +// For ceres::CostFunction +template +inline bool VariadicEvaluateImpl(const Functor& functor, + T const* const* input, + T* output, + const CostFunction* /* NOT USED */) { + return functor.Evaluate(input, output, nullptr); +} + +// Variadic evaluate is a helper function to evaluate ceres cost function or +// functors using an input, output and the parameter dimensions. There are +// several ways different possibilities: +// 1) If the passed functor is a 'ceres::CostFunction' its evaluate method is +// called. +// 2) If the functor is not a 'ceres::CostFunction' and the specified parameter +// dims is dynamic, the functor must have the following signature +// 'bool(T const* const* input, T* output)'. +// 3) If the functor is not a 'ceres::CostFunction' and the specified parameter +// dims is not dynamic, the input is expanded by using the number of parameter +// blocks. The signature of the functor must have the following signature +// 'bool()(const T* i_1, const T* i_2, ... const T* i_n, T* output)'. 
+template +inline bool VariadicEvaluate(const Functor& functor, + T const* const* input, + T* output) { + return VariadicEvaluateImpl(functor, input, output, &functor); +} + +} // namespace internal +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_ diff --git a/ceres-v2/include/iteration_callback.h b/ceres-v2/include/iteration_callback.h new file mode 100644 index 0000000000000000000000000000000000000000..3d7e8e94f3049ff79535d10aafffdead837a3ec5 --- /dev/null +++ b/ceres-v2/include/iteration_callback.h @@ -0,0 +1,204 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// When an iteration callback is specified, Ceres calls the callback +// after each minimizer step (if the minimizer has not converged) and +// passes it an IterationSummary object, defined below. + +#ifndef CERES_PUBLIC_ITERATION_CALLBACK_H_ +#define CERES_PUBLIC_ITERATION_CALLBACK_H_ + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/types.h" + +namespace ceres { + +// This struct describes the state of the optimizer after each +// iteration of the minimization. +struct CERES_EXPORT IterationSummary { + // Current iteration number. + int iteration = 0; + + // Step was numerically valid, i.e., all values are finite and the + // step reduces the value of the linearized model. + // + // Note: step_is_valid is always true when iteration = 0. + bool step_is_valid = false; + + // Step did not reduce the value of the objective function + // sufficiently, but it was accepted because of the relaxed + // acceptance criterion used by the non-monotonic trust region + // algorithm. + // + // Note: step_is_nonmonotonic is always false when iteration = 0; + bool step_is_nonmonotonic = false; + + // Whether or not the minimizer accepted this step or not. 
If the + // ordinary trust region algorithm is used, this means that the + // relative reduction in the objective function value was greater + // than Solver::Options::min_relative_decrease. However, if the + // non-monotonic trust region algorithm is used + // (Solver::Options:use_nonmonotonic_steps = true), then even if the + // relative decrease is not sufficient, the algorithm may accept the + // step and the step is declared successful. + // + // Note: step_is_successful is always true when iteration = 0. + bool step_is_successful = false; + + // Value of the objective function. + double cost = 0.0; + + // Change in the value of the objective function in this + // iteration. This can be positive or negative. + double cost_change = 0.0; + + // Infinity norm of the gradient vector. + double gradient_max_norm = 0.0; + + // 2-norm of the gradient vector. + double gradient_norm = 0.0; + + // 2-norm of the size of the step computed by the optimization + // algorithm. + double step_norm = 0.0; + + // For trust region algorithms, the ratio of the actual change in + // cost and the change in the cost of the linearized approximation. + double relative_decrease = 0.0; + + // Size of the trust region at the end of the current iteration. For + // the Levenberg-Marquardt algorithm, the regularization parameter + // mu = 1.0 / trust_region_radius. + double trust_region_radius = 0.0; + + // For the inexact step Levenberg-Marquardt algorithm, this is the + // relative accuracy with which the Newton(LM) step is solved. This + // number affects only the iterative solvers capable of solving + // linear systems inexactly. Factorization-based exact solvers + // ignore it. + double eta = 0.0; + + // Step sized computed by the line search algorithm. + double step_size = 0.0; + + // Number of function value evaluations used by the line search algorithm. + int line_search_function_evaluations = 0; + + // Number of function gradient evaluations used by the line search algorithm. 
+ int line_search_gradient_evaluations = 0; + + // Number of iterations taken by the line search algorithm. + int line_search_iterations = 0; + + // Number of iterations taken by the linear solver to solve for the + // Newton step. + int linear_solver_iterations = 0; + + // All times reported below are wall times. + + // Time (in seconds) spent inside the minimizer loop in the current + // iteration. + double iteration_time_in_seconds = 0.0; + + // Time (in seconds) spent inside the trust region step solver. + double step_solver_time_in_seconds = 0.0; + + // Time (in seconds) since the user called Solve(). + double cumulative_time_in_seconds = 0.0; +}; + +// Interface for specifying callbacks that are executed at the end of +// each iteration of the Minimizer. The solver uses the return value +// of operator() to decide whether to continue solving or to +// terminate. The user can return three values. +// +// SOLVER_ABORT indicates that the callback detected an abnormal +// situation. The solver returns without updating the parameter blocks +// (unless Solver::Options::update_state_every_iteration is set +// true). Solver returns with Solver::Summary::termination_type set to +// USER_ABORT. +// +// SOLVER_TERMINATE_SUCCESSFULLY indicates that there is no need to +// optimize anymore (some user specified termination criterion has +// been met). Solver returns with Solver::Summary::termination_type +// set to USER_SUCCESS. +// +// SOLVER_CONTINUE indicates that the solver should continue +// optimizing. +// +// For example, the following Callback is used internally by Ceres to +// log the progress of the optimization. +// +// Callback for logging the state of the minimizer to STDERR or STDOUT +// depending on the user's preferences and logging level. 
+// +// class LoggingCallback : public IterationCallback { +// public: +// explicit LoggingCallback(bool log_to_stdout) +// : log_to_stdout_(log_to_stdout) {} +// +// CallbackReturnType operator()(const IterationSummary& summary) { +// const char* kReportRowFormat = +// "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e " +// "rho:% 3.2e mu:% 3.2e eta:% 3.2e li:% 3d"; +// string output = StringPrintf(kReportRowFormat, +// summary.iteration, +// summary.cost, +// summary.cost_change, +// summary.gradient_max_norm, +// summary.step_norm, +// summary.relative_decrease, +// summary.trust_region_radius, +// summary.eta, +// summary.linear_solver_iterations); +// if (log_to_stdout_) { +// cout << output << endl; +// } else { +// VLOG(1) << output; +// } +// return SOLVER_CONTINUE; +// } +// +// private: +// const bool log_to_stdout_; +// }; +// +class CERES_EXPORT IterationCallback { + public: + virtual ~IterationCallback(); + virtual CallbackReturnType operator()(const IterationSummary& summary) = 0; +}; + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_ITERATION_CALLBACK_H_ diff --git a/ceres-v2/include/jet.h b/ceres-v2/include/jet.h new file mode 100644 index 0000000000000000000000000000000000000000..fba1e2ab6e0541c84b77effde2124f69665edc03 --- /dev/null +++ b/ceres-v2/include/jet.h @@ -0,0 +1,1387 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: keir@google.com (Keir Mierle) +// +// A simple implementation of N-dimensional dual numbers, for automatically +// computing exact derivatives of functions. +// +// While a complete treatment of the mechanics of automatic differentiation is +// beyond the scope of this header (see +// http://en.wikipedia.org/wiki/Automatic_differentiation for details), the +// basic idea is to extend normal arithmetic with an extra element, "e," often +// denoted with the greek symbol epsilon, such that e != 0 but e^2 = 0. Dual +// numbers are extensions of the real numbers analogous to complex numbers: +// whereas complex numbers augment the reals by introducing an imaginary unit i +// such that i^2 = -1, dual numbers introduce an "infinitesimal" unit e such +// that e^2 = 0. 
Dual numbers have two components: the "real" component and the +// "infinitesimal" component, generally written as x + y*e. Surprisingly, this +// leads to a convenient method for computing exact derivatives without needing +// to manipulate complicated symbolic expressions. +// +// For example, consider the function +// +// f(x) = x^2 , +// +// evaluated at 10. Using normal arithmetic, f(10) = 100, and df/dx(10) = 20. +// Next, argument 10 with an infinitesimal to get: +// +// f(10 + e) = (10 + e)^2 +// = 100 + 2 * 10 * e + e^2 +// = 100 + 20 * e -+- +// -- | +// | +--- This is zero, since e^2 = 0 +// | +// +----------------- This is df/dx! +// +// Note that the derivative of f with respect to x is simply the infinitesimal +// component of the value of f(x + e). So, in order to take the derivative of +// any function, it is only necessary to replace the numeric "object" used in +// the function with one extended with infinitesimals. The class Jet, defined in +// this header, is one such example of this, where substitution is done with +// templates. +// +// To handle derivatives of functions taking multiple arguments, different +// infinitesimals are used, one for each variable to take the derivative of. For +// example, consider a scalar function of two scalar parameters x and y: +// +// f(x, y) = x^2 + x * y +// +// Following the technique above, to compute the derivatives df/dx and df/dy for +// f(1, 3) involves doing two evaluations of f, the first time replacing x with +// x + e, the second time replacing y with y + e. 
+// +// For df/dx: +// +// f(1 + e, y) = (1 + e)^2 + (1 + e) * 3 +// = 1 + 2 * e + 3 + 3 * e +// = 4 + 5 * e +// +// --> df/dx = 5 +// +// For df/dy: +// +// f(1, 3 + e) = 1^2 + 1 * (3 + e) +// = 1 + 3 + e +// = 4 + e +// +// --> df/dy = 1 +// +// To take the gradient of f with the implementation of dual numbers ("jets") in +// this file, it is necessary to create a single jet type which has components +// for the derivative in x and y, and passing them to a templated version of f: +// +// template +// T f(const T &x, const T &y) { +// return x * x + x * y; +// } +// +// // The "2" means there should be 2 dual number components. +// // It computes the partial derivative at x=10, y=20. +// Jet x(10, 0); // Pick the 0th dual number for x. +// Jet y(20, 1); // Pick the 1st dual number for y. +// Jet z = f(x, y); +// +// LOG(INFO) << "df/dx = " << z.v[0] +// << "df/dy = " << z.v[1]; +// +// Most users should not use Jet objects directly; a wrapper around Jet objects, +// which makes computing the derivative, gradient, or jacobian of templated +// functors simple, is in autodiff.h. Even autodiff.h should not be used +// directly; instead autodiff_cost_function.h is typically the file of interest. +// +// For the more mathematically inclined, this file implements first-order +// "jets". A 1st order jet is an element of the ring +// +// T[N] = T[t_1, ..., t_N] / (t_1, ..., t_N)^2 +// +// which essentially means that each jet consists of a "scalar" value 'a' from T +// and a 1st order perturbation vector 'v' of length N: +// +// x = a + \sum_i v[i] t_i +// +// A shorthand is to write an element as x = a + u, where u is the perturbation. +// Then, the main point about the arithmetic of jets is that the product of +// perturbations is zero: +// +// (a + u) * (b + v) = ab + av + bu + uv +// = ab + (av + bu) + 0 +// +// which is what operator* implements below. Addition is simpler: +// +// (a + u) + (b + v) = (a + b) + (u + v). 
+// +// The only remaining question is how to evaluate the function of a jet, for +// which we use the chain rule: +// +// f(a + u) = f(a) + f'(a) u +// +// where f'(a) is the (scalar) derivative of f at a. +// +// By pushing these things through sufficiently and suitably templated +// functions, we can do automatic differentiation. Just be sure to turn on +// function inlining and common-subexpression elimination, or it will be very +// slow! +// +// WARNING: Most Ceres users should not directly include this file or know the +// details of how jets work. Instead the suggested method for automatic +// derivatives is to use autodiff_cost_function.h, which is a wrapper around +// both jets.h and autodiff.h to make taking derivatives of cost functions for +// use in Ceres easier. + +#ifndef CERES_PUBLIC_JET_H_ +#define CERES_PUBLIC_JET_H_ + +#include +#include +#include +#include // NOLINT +#include +#include +#include +#include + +#include "Eigen/Core" +#include "ceres/internal/jet_traits.h" +#include "ceres/internal/port.h" +#include "ceres/jet_fwd.h" + +// Here we provide partial specializations of std::common_type for the Jet class +// to allow determining a Jet type with a common underlying arithmetic type. +// Such an arithmetic type can be either a scalar or an another Jet. An example +// for a common type, say, between a float and a Jet is a Jet (i.e., std::common_type_t> and +// ceres::Jet refer to the same type.) +// +// The partial specialization are also used for determining compatible types by +// means of SFINAE and thus allow such types to be expressed as operands of +// logical comparison operators. Missing (partial) specialization of +// std::common_type for a particular (custom) type will therefore disable the +// use of comparison operators defined by Ceres. 
+// +// Since these partial specializations are used as SFINAE constraints, they +// enable standard promotion rules between various scalar types and consequently +// their use in comparison against a Jet without providing implicit +// conversions from a scalar, such as an int, to a Jet (see the implementation +// of logical comparison operators below). + +template +struct std::common_type> { + using type = ceres::Jet, N>; +}; + +template +struct std::common_type, U> { + using type = ceres::Jet, N>; +}; + +template +struct std::common_type, ceres::Jet> { + using type = ceres::Jet, N>; +}; + +namespace ceres { + +template +struct Jet { + enum { DIMENSION = N }; + using Scalar = T; + + // Default-construct "a" because otherwise this can lead to false errors about + // uninitialized uses when other classes relying on default constructed T + // (where T is a Jet). This usually only happens in opt mode. Note that + // the C++ standard mandates that e.g. default constructed doubles are + // initialized to 0.0; see sections 8.5 of the C++03 standard. + Jet() : a() { v.setConstant(Scalar()); } + + // Constructor from scalar: a + 0. + explicit Jet(const T& value) { + a = value; + v.setConstant(Scalar()); + } + + // Constructor from scalar plus variable: a + t_i. 
+ Jet(const T& value, int k) { + a = value; + v.setConstant(Scalar()); + v[k] = T(1.0); + } + + // Constructor from scalar and vector part + // The use of Eigen::DenseBase allows Eigen expressions + // to be passed in without being fully evaluated until + // they are assigned to v + template + EIGEN_STRONG_INLINE Jet(const T& a, const Eigen::DenseBase& v) + : a(a), v(v) {} + + // Compound operators + Jet& operator+=(const Jet& y) { + *this = *this + y; + return *this; + } + + Jet& operator-=(const Jet& y) { + *this = *this - y; + return *this; + } + + Jet& operator*=(const Jet& y) { + *this = *this * y; + return *this; + } + + Jet& operator/=(const Jet& y) { + *this = *this / y; + return *this; + } + + // Compound with scalar operators. + Jet& operator+=(const T& s) { + *this = *this + s; + return *this; + } + + Jet& operator-=(const T& s) { + *this = *this - s; + return *this; + } + + Jet& operator*=(const T& s) { + *this = *this * s; + return *this; + } + + Jet& operator/=(const T& s) { + *this = *this / s; + return *this; + } + + // The scalar part. + T a; + + // The infinitesimal part. + Eigen::Matrix v; + + // This struct needs to have an Eigen aligned operator new as it contains + // fixed-size Eigen types. + EIGEN_MAKE_ALIGNED_OPERATOR_NEW +}; + +// Unary + +template +inline Jet const& operator+(const Jet& f) { + return f; +} + +// TODO(keir): Try adding __attribute__((always_inline)) to these functions to +// see if it causes a performance increase. 
+ +// Unary - +template +inline Jet operator-(const Jet& f) { + return Jet(-f.a, -f.v); +} + +// Binary + +template +inline Jet operator+(const Jet& f, const Jet& g) { + return Jet(f.a + g.a, f.v + g.v); +} + +// Binary + with a scalar: x + s +template +inline Jet operator+(const Jet& f, T s) { + return Jet(f.a + s, f.v); +} + +// Binary + with a scalar: s + x +template +inline Jet operator+(T s, const Jet& f) { + return Jet(f.a + s, f.v); +} + +// Binary - +template +inline Jet operator-(const Jet& f, const Jet& g) { + return Jet(f.a - g.a, f.v - g.v); +} + +// Binary - with a scalar: x - s +template +inline Jet operator-(const Jet& f, T s) { + return Jet(f.a - s, f.v); +} + +// Binary - with a scalar: s - x +template +inline Jet operator-(T s, const Jet& f) { + return Jet(s - f.a, -f.v); +} + +// Binary * +template +inline Jet operator*(const Jet& f, const Jet& g) { + return Jet(f.a * g.a, f.a * g.v + f.v * g.a); +} + +// Binary * with a scalar: x * s +template +inline Jet operator*(const Jet& f, T s) { + return Jet(f.a * s, f.v * s); +} + +// Binary * with a scalar: s * x +template +inline Jet operator*(T s, const Jet& f) { + return Jet(f.a * s, f.v * s); +} + +// Binary / +template +inline Jet operator/(const Jet& f, const Jet& g) { + // This uses: + // + // a + u (a + u)(b - v) (a + u)(b - v) + // ----- = -------------- = -------------- + // b + v (b + v)(b - v) b^2 + // + // which holds because v*v = 0. 
+ const T g_a_inverse = T(1.0) / g.a; + const T f_a_by_g_a = f.a * g_a_inverse; + return Jet(f_a_by_g_a, (f.v - f_a_by_g_a * g.v) * g_a_inverse); +} + +// Binary / with a scalar: s / x +template +inline Jet operator/(T s, const Jet& g) { + const T minus_s_g_a_inverse2 = -s / (g.a * g.a); + return Jet(s / g.a, g.v * minus_s_g_a_inverse2); +} + +// Binary / with a scalar: x / s +template +inline Jet operator/(const Jet& f, T s) { + const T s_inverse = T(1.0) / s; + return Jet(f.a * s_inverse, f.v * s_inverse); +} + +// Binary comparison operators for both scalars and jets. At least one of the +// operands must be a Jet. Promotable scalars (e.g., int, float, double etc.) +// can appear on either side of the operator. std::common_type_t is used as an +// SFINAE constraint to selectively enable compatible operand types. This allows +// comparison, for instance, against int literals without implicit conversion. +// In case the Jet arithmetic type is a Jet itself, a recursive expansion of Jet +// value is performed. +#define CERES_DEFINE_JET_COMPARISON_OPERATOR(op) \ + template >* = nullptr> \ + constexpr bool operator op(const Lhs& f, const Rhs& g) noexcept( \ + noexcept(internal::AsScalar(f) op internal::AsScalar(g))) { \ + using internal::AsScalar; \ + return AsScalar(f) op AsScalar(g); \ + } +CERES_DEFINE_JET_COMPARISON_OPERATOR(<) // NOLINT +CERES_DEFINE_JET_COMPARISON_OPERATOR(<=) // NOLINT +CERES_DEFINE_JET_COMPARISON_OPERATOR(>) // NOLINT +CERES_DEFINE_JET_COMPARISON_OPERATOR(>=) // NOLINT +CERES_DEFINE_JET_COMPARISON_OPERATOR(==) // NOLINT +CERES_DEFINE_JET_COMPARISON_OPERATOR(!=) // NOLINT +#undef CERES_DEFINE_JET_COMPARISON_OPERATOR + +// Pull some functions from namespace std. +// +// This is necessary because we want to use the same name (e.g. 'sqrt') for +// double-valued and Jet-valued functions, but we are not allowed to put +// Jet-valued functions inside namespace std. 
+using std::abs; +using std::acos; +using std::asin; +using std::atan; +using std::atan2; +using std::cbrt; +using std::ceil; +using std::copysign; +using std::cos; +using std::cosh; +using std::erf; +using std::erfc; +using std::exp; +using std::exp2; +using std::expm1; +using std::fdim; +using std::floor; +using std::fma; +using std::fmax; +using std::fmin; +using std::fpclassify; +using std::hypot; +using std::isfinite; +using std::isinf; +using std::isnan; +using std::isnormal; +using std::log; +using std::log10; +using std::log1p; +using std::log2; +using std::norm; +using std::pow; +using std::signbit; +using std::sin; +using std::sinh; +using std::sqrt; +using std::tan; +using std::tanh; + +// MSVC (up to 1930) defines quiet comparison functions as template functions +// which causes compilation errors due to ambiguity in the template parameter +// type resolution for using declarations in the ceres namespace. Workaround the +// issue by defining specific overload and bypass MSVC standard library +// definitions. 
+#if defined(_MSC_VER) +inline bool isgreater(double lhs, + double rhs) noexcept(noexcept(std::isgreater(lhs, rhs))) { + return std::isgreater(lhs, rhs); +} +inline bool isless(double lhs, + double rhs) noexcept(noexcept(std::isless(lhs, rhs))) { + return std::isless(lhs, rhs); +} +inline bool islessequal(double lhs, + double rhs) noexcept(noexcept(std::islessequal(lhs, + rhs))) { + return std::islessequal(lhs, rhs); +} +inline bool isgreaterequal(double lhs, double rhs) noexcept( + noexcept(std::isgreaterequal(lhs, rhs))) { + return std::isgreaterequal(lhs, rhs); +} +inline bool islessgreater(double lhs, double rhs) noexcept( + noexcept(std::islessgreater(lhs, rhs))) { + return std::islessgreater(lhs, rhs); +} +inline bool isunordered(double lhs, + double rhs) noexcept(noexcept(std::isunordered(lhs, + rhs))) { + return std::isunordered(lhs, rhs); +} +#else +using std::isgreater; +using std::isgreaterequal; +using std::isless; +using std::islessequal; +using std::islessgreater; +using std::isunordered; +#endif + +#ifdef CERES_HAS_CPP20 +using std::lerp; +using std::midpoint; +#endif // defined(CERES_HAS_CPP20) + +// Legacy names from pre-C++11 days. +// clang-format off +CERES_DEPRECATED_WITH_MSG("ceres::IsFinite will be removed in a future Ceres Solver release. Please use ceres::isfinite.") +inline bool IsFinite(double x) { return std::isfinite(x); } +CERES_DEPRECATED_WITH_MSG("ceres::IsInfinite will be removed in a future Ceres Solver release. Please use ceres::isinf.") +inline bool IsInfinite(double x) { return std::isinf(x); } +CERES_DEPRECATED_WITH_MSG("ceres::IsNaN will be removed in a future Ceres Solver release. Please use ceres::isnan.") +inline bool IsNaN(double x) { return std::isnan(x); } +CERES_DEPRECATED_WITH_MSG("ceres::IsNormal will be removed in a future Ceres Solver release. 
Please use ceres::isnormal.") +inline bool IsNormal(double x) { return std::isnormal(x); } +// clang-format on + +// In general, f(a + h) ~= f(a) + f'(a) h, via the chain rule. + +// abs(x + h) ~= abs(x) + sgn(x)h +template +inline Jet abs(const Jet& f) { + return Jet(abs(f.a), copysign(T(1), f.a) * f.v); +} + +// copysign(a, b) composes a float with the magnitude of a and the sign of b. +// Therefore, the function can be formally defined as +// +// copysign(a, b) = sgn(b)|a| +// +// where +// +// d/dx |x| = sgn(x) +// d/dx sgn(x) = 2δ(x) +// +// sgn(x) being the signum function. Differentiating copysign(a, b) with respect +// to a and b gives: +// +// d/da sgn(b)|a| = sgn(a) sgn(b) +// d/db sgn(b)|a| = 2|a|δ(b) +// +// with the dual representation given by +// +// copysign(a + da, b + db) ~= sgn(b)|a| + (sgn(a)sgn(b) da + 2|a|δ(b) db) +// +// where δ(b) is the Dirac delta function. +template +inline Jet copysign(const Jet& f, const Jet g) { + // The Dirac delta function δ(b) is undefined at b=0 (here it's + // infinite) and 0 everywhere else. + T d = fpclassify(g) == FP_ZERO ? std::numeric_limits::infinity() : T(0); + T sa = copysign(T(1), f.a); // sgn(a) + T sb = copysign(T(1), g.a); // sgn(b) + // The second part of the infinitesimal is 2|a|δ(b) which is either infinity + // or 0 unless a or any of the values of the b infinitesimal are 0. In the + // latter case, the corresponding values become NaNs (multiplying 0 by + // infinity gives NaN). We drop the constant factor 2 since it does not change + // the result (its values will still be either 0, infinity or NaN). + return Jet(copysign(f.a, g.a), sa * sb * f.v + abs(f.a) * d * g.v); +} + +// log(a + h) ~= log(a) + h / a +template +inline Jet log(const Jet& f) { + const T a_inverse = T(1.0) / f.a; + return Jet(log(f.a), f.v * a_inverse); +} + +// log10(a + h) ~= log10(a) + h / (a log(10)) +template +inline Jet log10(const Jet& f) { + // Most compilers will expand log(10) to a constant. 
+ const T a_inverse = T(1.0) / (f.a * log(T(10.0))); + return Jet(log10(f.a), f.v * a_inverse); +} + +// log1p(a + h) ~= log1p(a) + h / (1 + a) +template +inline Jet log1p(const Jet& f) { + const T a_inverse = T(1.0) / (T(1.0) + f.a); + return Jet(log1p(f.a), f.v * a_inverse); +} + +// exp(a + h) ~= exp(a) + exp(a) h +template +inline Jet exp(const Jet& f) { + const T tmp = exp(f.a); + return Jet(tmp, tmp * f.v); +} + +// expm1(a + h) ~= expm1(a) + exp(a) h +template +inline Jet expm1(const Jet& f) { + const T tmp = expm1(f.a); + const T expa = tmp + T(1.0); // exp(a) = expm1(a) + 1 + return Jet(tmp, expa * f.v); +} + +// sqrt(a + h) ~= sqrt(a) + h / (2 sqrt(a)) +template +inline Jet sqrt(const Jet& f) { + const T tmp = sqrt(f.a); + const T two_a_inverse = T(1.0) / (T(2.0) * tmp); + return Jet(tmp, f.v * two_a_inverse); +} + +// cos(a + h) ~= cos(a) - sin(a) h +template +inline Jet cos(const Jet& f) { + return Jet(cos(f.a), -sin(f.a) * f.v); +} + +// acos(a + h) ~= acos(a) - 1 / sqrt(1 - a^2) h +template +inline Jet acos(const Jet& f) { + const T tmp = -T(1.0) / sqrt(T(1.0) - f.a * f.a); + return Jet(acos(f.a), tmp * f.v); +} + +// sin(a + h) ~= sin(a) + cos(a) h +template +inline Jet sin(const Jet& f) { + return Jet(sin(f.a), cos(f.a) * f.v); +} + +// asin(a + h) ~= asin(a) + 1 / sqrt(1 - a^2) h +template +inline Jet asin(const Jet& f) { + const T tmp = T(1.0) / sqrt(T(1.0) - f.a * f.a); + return Jet(asin(f.a), tmp * f.v); +} + +// tan(a + h) ~= tan(a) + (1 + tan(a)^2) h +template +inline Jet tan(const Jet& f) { + const T tan_a = tan(f.a); + const T tmp = T(1.0) + tan_a * tan_a; + return Jet(tan_a, tmp * f.v); +} + +// atan(a + h) ~= atan(a) + 1 / (1 + a^2) h +template +inline Jet atan(const Jet& f) { + const T tmp = T(1.0) / (T(1.0) + f.a * f.a); + return Jet(atan(f.a), tmp * f.v); +} + +// sinh(a + h) ~= sinh(a) + cosh(a) h +template +inline Jet sinh(const Jet& f) { + return Jet(sinh(f.a), cosh(f.a) * f.v); +} + +// cosh(a + h) ~= cosh(a) + sinh(a) h +template 
+inline Jet cosh(const Jet& f) { + return Jet(cosh(f.a), sinh(f.a) * f.v); +} + +// tanh(a + h) ~= tanh(a) + (1 - tanh(a)^2) h +template +inline Jet tanh(const Jet& f) { + const T tanh_a = tanh(f.a); + const T tmp = T(1.0) - tanh_a * tanh_a; + return Jet(tanh_a, tmp * f.v); +} + +// The floor function should be used with extreme care as this operation will +// result in a zero derivative which provides no information to the solver. +// +// floor(a + h) ~= floor(a) + 0 +template +inline Jet floor(const Jet& f) { + return Jet(floor(f.a)); +} + +// The ceil function should be used with extreme care as this operation will +// result in a zero derivative which provides no information to the solver. +// +// ceil(a + h) ~= ceil(a) + 0 +template +inline Jet ceil(const Jet& f) { + return Jet(ceil(f.a)); +} + +// Some new additions to C++11: + +// cbrt(a + h) ~= cbrt(a) + h / (3 a ^ (2/3)) +template +inline Jet cbrt(const Jet& f) { + const T derivative = T(1.0) / (T(3.0) * cbrt(f.a * f.a)); + return Jet(cbrt(f.a), f.v * derivative); +} + +// exp2(x + h) = 2^(x+h) ~= 2^x + h*2^x*log(2) +template +inline Jet exp2(const Jet& f) { + const T tmp = exp2(f.a); + const T derivative = tmp * log(T(2)); + return Jet(tmp, f.v * derivative); +} + +// log2(x + h) ~= log2(x) + h / (x * log(2)) +template +inline Jet log2(const Jet& f) { + const T derivative = T(1.0) / (f.a * log(T(2))); + return Jet(log2(f.a), f.v * derivative); +} + +// Like sqrt(x^2 + y^2), +// but acts to prevent underflow/overflow for small/large x/y. +// Note that the function is non-smooth at x=y=0, +// so the derivative is undefined there. 
+template +inline Jet hypot(const Jet& x, const Jet& y) { + // d/da sqrt(a) = 0.5 / sqrt(a) + // d/dx x^2 + y^2 = 2x + // So by the chain rule: + // d/dx sqrt(x^2 + y^2) = 0.5 / sqrt(x^2 + y^2) * 2x = x / sqrt(x^2 + y^2) + // d/dy sqrt(x^2 + y^2) = y / sqrt(x^2 + y^2) + const T tmp = hypot(x.a, y.a); + return Jet(tmp, x.a / tmp * x.v + y.a / tmp * y.v); +} + +#ifdef CERES_HAS_CPP17 +// Like sqrt(x^2 + y^2 + z^2), +// but acts to prevent underflow/overflow for small/large x/y/z. +// Note that the function is non-smooth at x=y=z=0, +// so the derivative is undefined there. +template +inline Jet hypot(const Jet& x, + const Jet& y, + const Jet& z) { + // d/da sqrt(a) = 0.5 / sqrt(a) + // d/dx x^2 + y^2 + z^2 = 2x + // So by the chain rule: + // d/dx sqrt(x^2 + y^2 + z^2) + // = 0.5 / sqrt(x^2 + y^2 + z^2) * 2x + // = x / sqrt(x^2 + y^2 + z^2) + // d/dy sqrt(x^2 + y^2 + z^2) = y / sqrt(x^2 + y^2 + z^2) + // d/dz sqrt(x^2 + y^2 + z^2) = z / sqrt(x^2 + y^2 + z^2) + const T tmp = hypot(x.a, y.a, z.a); + return Jet(tmp, x.a / tmp * x.v + y.a / tmp * y.v + z.a / tmp * z.v); +} +#endif // defined(CERES_HAS_CPP17) + +// Like x * y + z but rounded only once. +template +inline Jet fma(const Jet& x, + const Jet& y, + const Jet& z) { + // d/dx fma(x, y, z) = y + // d/dy fma(x, y, z) = x + // d/dz fma(x, y, z) = 1 + return Jet(fma(x.a, y.a, z.a), y.a * x.v + x.a * y.v + z.v); +} + +// Returns the larger of the two arguments. NaNs are treated as missing data. +// +// NOTE: This function is NOT subject to any of the error conditions specified +// in `math_errhandling`. +template >* = nullptr> +inline decltype(auto) fmax(const Lhs& f, const Rhs& g) { + using J = std::common_type_t; + return (isnan(g) || isgreater(f, g)) ? J{f} : J{g}; +} + +// Returns the smaller of the two arguments. NaNs are treated as missing data. +// +// NOTE: This function is NOT subject to any of the error conditions specified +// in `math_errhandling`. 
+template >* = nullptr> +inline decltype(auto) fmin(const Lhs& f, const Rhs& g) { + using J = std::common_type_t; + return (isnan(f) || isless(g, f)) ? J{g} : J{f}; +} + +// Returns the positive difference (f - g) of two arguments and zero if f <= g. +// If at least one argument is NaN, a NaN is return. +// +// NOTE At least one of the argument types must be a Jet, the other one can be a +// scalar. In case both arguments are Jets, their dimensionality must match. +template >* = nullptr> +inline decltype(auto) fdim(const Lhs& f, const Rhs& g) { + using J = std::common_type_t; + if (isnan(f) || isnan(g)) { + return std::numeric_limits::quiet_NaN(); + } + return isgreater(f, g) ? J{f - g} : J{}; +} + +// erf is defined as an integral that cannot be expressed analytically +// however, the derivative is trivial to compute +// erf(x + h) = erf(x) + h * 2*exp(-x^2)/sqrt(pi) +template +inline Jet erf(const Jet& x) { + // We evaluate the constant as follows: + // 2 / sqrt(pi) = 1 / sqrt(atan(1.)) + // On POSIX sytems it is defined as M_2_SQRTPI, but this is not + // portable and the type may not be T. The above expression + // evaluates to full precision with IEEE arithmetic and, since it's + // constant, the compiler can generate exactly the same code. gcc + // does so even at -O0. + return Jet(erf(x.a), x.v * exp(-x.a * x.a) * (T(1) / sqrt(atan(T(1))))); +} + +// erfc(x) = 1-erf(x) +// erfc(x + h) = erfc(x) + h * (-2*exp(-x^2)/sqrt(pi)) +template +inline Jet erfc(const Jet& x) { + // See in erf() above for the evaluation of the constant in the derivative. + return Jet(erfc(x.a), + -x.v * exp(-x.a * x.a) * (T(1) / sqrt(atan(T(1))))); +} + +// Bessel functions of the first kind with integer order equal to 0, 1, n. +// +// Microsoft has deprecated the j[0,1,n]() POSIX Bessel functions in favour of +// _j[0,1,n](). 
Where available on MSVC, use _j[0,1,n]() to avoid deprecated +// function errors in client code (the specific warning is suppressed when +// Ceres itself is built). +inline double BesselJ0(double x) { +#if defined(CERES_MSVC_USE_UNDERSCORE_PREFIXED_BESSEL_FUNCTIONS) + return _j0(x); +#else + return j0(x); +#endif +} +inline double BesselJ1(double x) { +#if defined(CERES_MSVC_USE_UNDERSCORE_PREFIXED_BESSEL_FUNCTIONS) + return _j1(x); +#else + return j1(x); +#endif +} +inline double BesselJn(int n, double x) { +#if defined(CERES_MSVC_USE_UNDERSCORE_PREFIXED_BESSEL_FUNCTIONS) + return _jn(n, x); +#else + return jn(n, x); +#endif +} + +// For the formulae of the derivatives of the Bessel functions see the book: +// Olver, Lozier, Boisvert, Clark, NIST Handbook of Mathematical Functions, +// Cambridge University Press 2010. +// +// Formulae are also available at http://dlmf.nist.gov + +// See formula http://dlmf.nist.gov/10.6#E3 +// j0(a + h) ~= j0(a) - j1(a) h +template +inline Jet BesselJ0(const Jet& f) { + return Jet(BesselJ0(f.a), -BesselJ1(f.a) * f.v); +} + +// See formula http://dlmf.nist.gov/10.6#E1 +// j1(a + h) ~= j1(a) + 0.5 ( j0(a) - j2(a) ) h +template +inline Jet BesselJ1(const Jet& f) { + return Jet(BesselJ1(f.a), + T(0.5) * (BesselJ0(f.a) - BesselJn(2, f.a)) * f.v); +} + +// See formula http://dlmf.nist.gov/10.6#E1 +// j_n(a + h) ~= j_n(a) + 0.5 ( j_{n-1}(a) - j_{n+1}(a) ) h +template +inline Jet BesselJn(int n, const Jet& f) { + return Jet( + BesselJn(n, f.a), + T(0.5) * (BesselJn(n - 1, f.a) - BesselJn(n + 1, f.a)) * f.v); +} + +// Classification and comparison functionality referencing only the scalar part +// of a Jet. To classify the derivatives (e.g., for sanity checks), the dual +// part should be referenced explicitly. For instance, to check whether the +// derivatives of a Jet 'f' are reasonable, one can use +// +// isfinite(f.v.array()).all() +// !isnan(f.v.array()).any() +// +// etc., depending on the desired semantics. 
+// +// NOTE: Floating-point classification and comparison functions and operators +// should be used with care as no derivatives can be propagated by such +// functions directly but only by expressions resulting from corresponding +// conditional statements. At the same time, conditional statements can possibly +// introduce a discontinuity in the cost function making it impossible to +// evaluate its derivative and thus the optimization problem intractable. + +// Determines whether the scalar part of the Jet is finite. +template +inline bool isfinite(const Jet& f) { + return isfinite(f.a); +} + +// Determines whether the scalar part of the Jet is infinite. +template +inline bool isinf(const Jet& f) { + return isinf(f.a); +} + +// Determines whether the scalar part of the Jet is NaN. +template +inline bool isnan(const Jet& f) { + return isnan(f.a); +} + +// Determines whether the scalar part of the Jet is neither zero, subnormal, +// infinite, nor NaN. +template +inline bool isnormal(const Jet& f) { + return isnormal(f.a); +} + +// Determines whether the scalar part of the Jet f is less than the scalar +// part of g. +// +// NOTE: This function does NOT set any floating-point exceptions. +template >* = nullptr> +inline bool isless(const Lhs& f, const Rhs& g) { + using internal::AsScalar; + return isless(AsScalar(f), AsScalar(g)); +} + +// Determines whether the scalar part of the Jet f is greater than the scalar +// part of g. +// +// NOTE: This function does NOT set any floating-point exceptions. +template >* = nullptr> +inline bool isgreater(const Lhs& f, const Rhs& g) { + using internal::AsScalar; + return isgreater(AsScalar(f), AsScalar(g)); +} + +// Determines whether the scalar part of the Jet f is less than or equal to the +// scalar part of g. +// +// NOTE: This function does NOT set any floating-point exceptions. 
+template >* = nullptr> +inline bool islessequal(const Lhs& f, const Rhs& g) { + using internal::AsScalar; + return islessequal(AsScalar(f), AsScalar(g)); +} + +// Determines whether the scalar part of the Jet f is less than or greater than +// (f < g || f > g) the scalar part of g. +// +// NOTE: This function does NOT set any floating-point exceptions. +template >* = nullptr> +inline bool islessgreater(const Lhs& f, const Rhs& g) { + using internal::AsScalar; + return islessgreater(AsScalar(f), AsScalar(g)); +} + +// Determines whether the scalar part of the Jet f is greater than or equal to +// the scalar part of g. +// +// NOTE: This function does NOT set any floating-point exceptions. +template >* = nullptr> +inline bool isgreaterequal(const Lhs& f, const Rhs& g) { + using internal::AsScalar; + return isgreaterequal(AsScalar(f), AsScalar(g)); +} + +// Determines if either of the scalar parts of the arguments are NaN and +// thus cannot be ordered with respect to each other. +template >* = nullptr> +inline bool isunordered(const Lhs& f, const Rhs& g) { + using internal::AsScalar; + return isunordered(AsScalar(f), AsScalar(g)); +} + +// Categorize scalar part as zero, subnormal, normal, infinite, NaN, or +// implementation-defined. +template +inline int fpclassify(const Jet& f) { + return fpclassify(f.a); +} + +// Determines whether the scalar part of the argument is negative. +template +inline bool signbit(const Jet& f) { + return signbit(f.a); +} + +// Legacy functions from the pre-C++11 days. +template +CERES_DEPRECATED_WITH_MSG( + "ceres::IsFinite will be removed in a future Ceres Solver release. Please " + "use ceres::isfinite.") +inline bool IsFinite(const Jet& f) { + return isfinite(f); +} + +template +CERES_DEPRECATED_WITH_MSG( + "ceres::IsNaN will be removed in a future Ceres Solver release. 
Please use " + "ceres::isnan.") +inline bool IsNaN(const Jet& f) { + return isnan(f); +} + +template +CERES_DEPRECATED_WITH_MSG( + "ceres::IsNormal will be removed in a future Ceres Solver release. Please " + "use ceres::isnormal.") +inline bool IsNormal(const Jet& f) { + return isnormal(f); +} + +// The jet is infinite if any part of the jet is infinite. +template +CERES_DEPRECATED_WITH_MSG( + "ceres::IsInfinite will be removed in a future Ceres Solver release. " + "Please use ceres::isinf.") +inline bool IsInfinite(const Jet& f) { + return isinf(f); +} + +#ifdef CERES_HAS_CPP20 +// Computes the linear interpolation a + t(b - a) between a and b at the value +// t. For arguments outside of the range 0 <= t <= 1, the values are +// extrapolated. +// +// Differentiating lerp(a, b, t) with respect to a, b, and t gives: +// +// d/da lerp(a, b, t) = 1 - t +// d/db lerp(a, b, t) = t +// d/dt lerp(a, b, t) = b - a +// +// with the dual representation given by +// +// lerp(a + da, b + db, t + dt) +// ~= lerp(a, b, t) + (1 - t) da + t db + (b - a) dt . +template +inline Jet lerp(const Jet& a, + const Jet& b, + const Jet& t) { + return Jet{lerp(a.a, b.a, t.a), + (T(1) - t.a) * a.v + t.a * b.v + (b.a - a.a) * t.v}; +} + +// Computes the midpoint a + (b - a) / 2. +// +// Differentiating midpoint(a, b) with respect to a and b gives: +// +// d/da midpoint(a, b) = 1/2 +// d/db midpoint(a, b) = 1/2 +// +// with the dual representation given by +// +// midpoint(a + da, b + db) ~= midpoint(a, b) + (da + db) / 2 . +template +inline Jet midpoint(const Jet& a, const Jet& b) { + Jet result{midpoint(a.a, b.a)}; + // To avoid overflow in the differential, compute + // (da + db) / 2 using midpoint. 
+ for (int i = 0; i < N; ++i) { + result.v[i] = midpoint(a.v[i], b.v[i]); + } + return result; +} +#endif // defined(CERES_HAS_CPP20) + +// atan2(b + db, a + da) ~= atan2(b, a) + (- b da + a db) / (a^2 + b^2) +// +// In words: the rate of change of theta is 1/r times the rate of +// change of (x, y) in the positive angular direction. +template +inline Jet atan2(const Jet& g, const Jet& f) { + // Note order of arguments: + // + // f = a + da + // g = b + db + + T const tmp = T(1.0) / (f.a * f.a + g.a * g.a); + return Jet(atan2(g.a, f.a), tmp * (-g.a * f.v + f.a * g.v)); +} + +// Computes the square x^2 of a real number x (not the Euclidean L^2 norm as +// the name might suggest). +// +// NOTE: While std::norm is primarily intended for computing the squared +// magnitude of a std::complex<> number, the current Jet implementation does not +// support mixing a scalar T in its real part and std::complex and in the +// infinitesimal. Mixed Jet support is necessary for the type decay from +// std::complex to T (the squared magnitude of a complex number is always +// real) performed by std::norm. +// +// norm(x + h) ~= norm(x) + 2x h +template +inline Jet norm(const Jet& f) { + return Jet(norm(f.a), T(2) * f.a * f.v); +} + +// pow -- base is a differentiable function, exponent is a constant. +// (a+da)^p ~= a^p + p*a^(p-1) da +template +inline Jet pow(const Jet& f, double g) { + T const tmp = g * pow(f.a, g - T(1.0)); + return Jet(pow(f.a, g), tmp * f.v); +} + +// pow -- base is a constant, exponent is a differentiable function. +// We have various special cases, see the comment for pow(Jet, Jet) for +// analysis: +// +// 1. For f > 0 we have: (f)^(g + dg) ~= f^g + f^g log(f) dg +// +// 2. For f == 0 and g > 0 we have: (f)^(g + dg) ~= f^g +// +// 3. For f < 0 and integer g we have: (f)^(g + dg) ~= f^g but if dg +// != 0, the derivatives are not defined and we return NaN. 
+ +template +inline Jet pow(T f, const Jet& g) { + Jet result; + + if (fpclassify(f) == FP_ZERO && g > 0) { + // Handle case 2. + result = Jet(T(0.0)); + } else { + if (f < 0 && g == floor(g.a)) { // Handle case 3. + result = Jet(pow(f, g.a)); + for (int i = 0; i < N; i++) { + if (fpclassify(g.v[i]) != FP_ZERO) { + // Return a NaN when g.v != 0. + result.v[i] = std::numeric_limits::quiet_NaN(); + } + } + } else { + // Handle case 1. + T const tmp = pow(f, g.a); + result = Jet(tmp, log(f) * tmp * g.v); + } + } + + return result; +} + +// pow -- both base and exponent are differentiable functions. This has a +// variety of special cases that require careful handling. +// +// 1. For f > 0: +// (f + df)^(g + dg) ~= f^g + f^(g - 1) * (g * df + f * log(f) * dg) +// The numerical evaluation of f * log(f) for f > 0 is well behaved, even for +// extremely small values (e.g. 1e-99). +// +// 2. For f == 0 and g > 1: (f + df)^(g + dg) ~= 0 +// This cases is needed because log(0) can not be evaluated in the f > 0 +// expression. However the function f*log(f) is well behaved around f == 0 +// and its limit as f-->0 is zero. +// +// 3. For f == 0 and g == 1: (f + df)^(g + dg) ~= 0 + df +// +// 4. For f == 0 and 0 < g < 1: The value is finite but the derivatives are not. +// +// 5. For f == 0 and g < 0: The value and derivatives of f^g are not finite. +// +// 6. For f == 0 and g == 0: The C standard incorrectly defines 0^0 to be 1 +// "because there are applications that can exploit this definition". We +// (arbitrarily) decree that derivatives here will be nonfinite, since that +// is consistent with the behavior for f == 0, g < 0 and 0 < g < 1. +// Practically any definition could have been justified because mathematical +// consistency has been lost at this point. +// +// 7. For f < 0, g integer, dg == 0: (f + df)^(g + dg) ~= f^g + g * f^(g - 1) df +// This is equivalent to the case where f is a differentiable function and g +// is a constant (to first order). +// +// 8. 
For f < 0, g integer, dg != 0: The value is finite but the derivatives are +// not, because any change in the value of g moves us away from the point +// with a real-valued answer into the region with complex-valued answers. +// +// 9. For f < 0, g noninteger: The value and derivatives of f^g are not finite. + +template +inline Jet pow(const Jet& f, const Jet& g) { + Jet result; + + if (fpclassify(f) == FP_ZERO && g >= 1) { + // Handle cases 2 and 3. + if (g > 1) { + result = Jet(T(0.0)); + } else { + result = f; + } + + } else { + if (f < 0 && g == floor(g.a)) { + // Handle cases 7 and 8. + T const tmp = g.a * pow(f.a, g.a - T(1.0)); + result = Jet(pow(f.a, g.a), tmp * f.v); + for (int i = 0; i < N; i++) { + if (fpclassify(g.v[i]) != FP_ZERO) { + // Return a NaN when g.v != 0. + result.v[i] = T(std::numeric_limits::quiet_NaN()); + } + } + } else { + // Handle the remaining cases. For cases 4,5,6,9 we allow the log() + // function to generate -HUGE_VAL or NaN, since those cases result in a + // nonfinite derivative. + T const tmp1 = pow(f.a, g.a); + T const tmp2 = g.a * pow(f.a, g.a - T(1.0)); + T const tmp3 = tmp1 * log(f.a); + result = Jet(tmp1, tmp2 * f.v + tmp3 * g.v); + } + } + + return result; +} + +// Note: This has to be in the ceres namespace for argument dependent lookup to +// function correctly. Otherwise statements like CHECK_LE(x, 2.0) fail with +// strange compile errors. 
+template +inline std::ostream& operator<<(std::ostream& s, const Jet& z) { + s << "[" << z.a << " ; "; + for (int i = 0; i < N; ++i) { + s << z.v[i]; + if (i != N - 1) { + s << ", "; + } + } + s << "]"; + return s; +} +} // namespace ceres + +namespace std { +template +struct numeric_limits> { + static constexpr bool is_specialized = true; + static constexpr bool is_signed = std::numeric_limits::is_signed; + static constexpr bool is_integer = std::numeric_limits::is_integer; + static constexpr bool is_exact = std::numeric_limits::is_exact; + static constexpr bool has_infinity = std::numeric_limits::has_infinity; + static constexpr bool has_quiet_NaN = std::numeric_limits::has_quiet_NaN; + static constexpr bool has_signaling_NaN = + std::numeric_limits::has_signaling_NaN; + static constexpr bool is_iec559 = std::numeric_limits::is_iec559; + static constexpr bool is_bounded = std::numeric_limits::is_bounded; + static constexpr bool is_modulo = std::numeric_limits::is_modulo; + + static constexpr std::float_denorm_style has_denorm = + std::numeric_limits::has_denorm; + static constexpr std::float_round_style round_style = + std::numeric_limits::round_style; + + static constexpr int digits = std::numeric_limits::digits; + static constexpr int digits10 = std::numeric_limits::digits10; + static constexpr int max_digits10 = std::numeric_limits::max_digits10; + static constexpr int radix = std::numeric_limits::radix; + static constexpr int min_exponent = std::numeric_limits::min_exponent; + static constexpr int min_exponent10 = std::numeric_limits::max_exponent10; + static constexpr int max_exponent = std::numeric_limits::max_exponent; + static constexpr int max_exponent10 = std::numeric_limits::max_exponent10; + static constexpr bool traps = std::numeric_limits::traps; + static constexpr bool tinyness_before = + std::numeric_limits::tinyness_before; + + static constexpr ceres::Jet min + CERES_PREVENT_MACRO_SUBSTITUTION() noexcept { + return 
ceres::Jet((std::numeric_limits::min)()); + } + static constexpr ceres::Jet lowest() noexcept { + return ceres::Jet(std::numeric_limits::lowest()); + } + static constexpr ceres::Jet epsilon() noexcept { + return ceres::Jet(std::numeric_limits::epsilon()); + } + static constexpr ceres::Jet round_error() noexcept { + return ceres::Jet(std::numeric_limits::round_error()); + } + static constexpr ceres::Jet infinity() noexcept { + return ceres::Jet(std::numeric_limits::infinity()); + } + static constexpr ceres::Jet quiet_NaN() noexcept { + return ceres::Jet(std::numeric_limits::quiet_NaN()); + } + static constexpr ceres::Jet signaling_NaN() noexcept { + return ceres::Jet(std::numeric_limits::signaling_NaN()); + } + static constexpr ceres::Jet denorm_min() noexcept { + return ceres::Jet(std::numeric_limits::denorm_min()); + } + + static constexpr ceres::Jet max + CERES_PREVENT_MACRO_SUBSTITUTION() noexcept { + return ceres::Jet((std::numeric_limits::max)()); + } +}; + +} // namespace std + +namespace Eigen { + +// Creating a specialization of NumTraits enables placing Jet objects inside +// Eigen arrays, getting all the goodness of Eigen combined with autodiff. +template +struct NumTraits> { + using Real = ceres::Jet; + using NonInteger = ceres::Jet; + using Nested = ceres::Jet; + using Literal = ceres::Jet; + + static typename ceres::Jet dummy_precision() { + return ceres::Jet(1e-12); + } + + static inline Real epsilon() { + return Real(std::numeric_limits::epsilon()); + } + + static inline int digits10() { return NumTraits::digits10(); } + + enum { + IsComplex = 0, + IsInteger = 0, + IsSigned, + ReadCost = 1, + AddCost = 1, + // For Jet types, multiplication is more expensive than addition. + MulCost = 3, + HasFloatingPoint = 1, + RequireInitialization = 1 + }; + + template + struct Div { + enum { +#if defined(EIGEN_VECTORIZE_AVX) + AVX = true, +#else + AVX = false, +#endif + + // Assuming that for Jets, division is as expensive as + // multiplication. 
+ Cost = 3 + }; + }; + + static inline Real highest() { return Real((std::numeric_limits::max)()); } + static inline Real lowest() { return Real(-(std::numeric_limits::max)()); } +}; + +// Specifying the return type of binary operations between Jets and scalar types +// allows you to perform matrix/array operations with Eigen matrices and arrays +// such as addition, subtraction, multiplication, and division where one Eigen +// matrix/array is of type Jet and the other is a scalar type. This improves +// performance by using the optimized scalar-to-Jet binary operations but +// is only available on Eigen versions >= 3.3 +template +struct ScalarBinaryOpTraits, T, BinaryOp> { + using ReturnType = ceres::Jet; +}; +template +struct ScalarBinaryOpTraits, BinaryOp> { + using ReturnType = ceres::Jet; +}; + +} // namespace Eigen + +#endif // CERES_PUBLIC_JET_H_ diff --git a/ceres-v2/include/jet_fwd.h b/ceres-v2/include/jet_fwd.h new file mode 100644 index 0000000000000000000000000000000000000000..fbb6286958cf48e006d98c5ec2797a15b52eaaed --- /dev/null +++ b/ceres-v2/include/jet_fwd.h @@ -0,0 +1,44 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sergiu.deitsch@gmail.com (Sergiu Deitsch)
+//
+
+#ifndef CERES_PUBLIC_JET_FWD_H_
+#define CERES_PUBLIC_JET_FWD_H_
+
+namespace ceres {
+
+// Jet forward declaration necessary for the following partial specialization
+// of std::common_type and type traits. The template header was stripped by
+// the extraction and is restored here to match jet.h's Jet<T, N>.
+template <typename T, int N>
+struct Jet;
+
+}  // namespace ceres
+
+#endif  // CERES_PUBLIC_JET_FWD_H_
diff --git a/ceres-v2/include/line_manifold.h b/ceres-v2/include/line_manifold.h
new file mode 100644
index 0000000000000000000000000000000000000000..f8f1b235220c74ba863360c29fd95f63988c6575
--- /dev/null
+++ b/ceres-v2/include/line_manifold.h
@@ -0,0 +1,304 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2022 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: jodebo_beck@gmx.de (Johannes Beck) +// + +#ifndef CERES_PUBLIC_LINE_MANIFOLD_H_ +#define CERES_PUBLIC_LINE_MANIFOLD_H_ + +#include +#include +#include +#include +#include + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/internal/householder_vector.h" +#include "ceres/internal/sphere_manifold_functions.h" +#include "ceres/manifold.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { +// This provides a manifold for lines, where the line is +// over-parameterized by an origin point and a direction vector. So the +// parameter vector size needs to be two times the ambient space dimension, +// where the first half is interpreted as the origin point and the second half +// as the direction. 
+// +// The plus operator for the line direction is the same as for the +// SphereManifold. The update of the origin point is +// perpendicular to the line direction before the update. +// +// This manifold is a special case of the affine Grassmannian +// manifold (see https://en.wikipedia.org/wiki/Affine_Grassmannian_(manifold)) +// for the case Graff_1(R^n). +// +// The class works with dynamic and static ambient space dimensions. If the +// ambient space dimensions is known at compile time use +// +// LineManifold<3> manifold; +// +// If the ambient space dimensions is not known at compile time the template +// parameter needs to be set to ceres::DYNAMIC and the actual dimension needs +// to be provided as a constructor argument: +// +// LineManifold manifold(ambient_dim); +// +template +class LineManifold final : public Manifold { + public: + static_assert(AmbientSpaceDimension == DYNAMIC || AmbientSpaceDimension >= 2, + "The ambient space must be at least 2."); + static_assert(ceres::DYNAMIC == Eigen::Dynamic, + "ceres::DYNAMIC needs to be the same as Eigen::Dynamic."); + + LineManifold(); + explicit LineManifold(int size); + + int AmbientSize() const override { return 2 * size_; } + int TangentSize() const override { return 2 * (size_ - 1); } + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool PlusJacobian(const double* x, double* jacobian) const override; + bool Minus(const double* y, + const double* x, + double* y_minus_x) const override; + bool MinusJacobian(const double* x, double* jacobian) const override; + + private: + static constexpr bool IsDynamic = (AmbientSpaceDimension == ceres::DYNAMIC); + static constexpr int TangentSpaceDimension = + IsDynamic ? ceres::DYNAMIC : AmbientSpaceDimension - 1; + + static constexpr int DAmbientSpaceDimension = + IsDynamic ? ceres::DYNAMIC : 2 * AmbientSpaceDimension; + static constexpr int DTangentSpaceDimension = + IsDynamic ? 
ceres::DYNAMIC : 2 * TangentSpaceDimension; + + using AmbientVector = Eigen::Matrix; + using TangentVector = Eigen::Matrix; + using MatrixPlusJacobian = Eigen::Matrix; + using MatrixMinusJacobian = Eigen::Matrix; + + const int size_{AmbientSpaceDimension}; +}; + +template +LineManifold::LineManifold() + : size_{AmbientSpaceDimension} { + static_assert( + AmbientSpaceDimension != Eigen::Dynamic, + "The size is set to dynamic. Please call the constructor with a size."); +} + +template +LineManifold::LineManifold(int size) : size_{size} { + if (AmbientSpaceDimension != Eigen::Dynamic) { + CHECK_EQ(AmbientSpaceDimension, size) + << "Specified size by template parameter differs from the supplied " + "one."; + } else { + CHECK_GT(size_, 1) + << "The size of the manifold needs to be greater than 1."; + } +} + +template +bool LineManifold::Plus(const double* x_ptr, + const double* delta_ptr, + double* x_plus_delta_ptr) const { + // We seek a box plus operator of the form + // + // [o*, d*] = Plus([o, d], [delta_o, delta_d]) + // + // where o is the origin point, d is the direction vector, delta_o is + // the delta of the origin point and delta_d the delta of the direction and + // o* and d* is the updated origin point and direction. + // + // We separate the Plus operator into the origin point and directional part + // d* = Plus_d(d, delta_d) + // o* = Plus_o(o, d, delta_o) + // + // The direction update function Plus_d is the same as as the SphereManifold: + // + // d* = H_{v(d)} [0.5 sinc(0.5 |delta_d|) delta_d, cos(0.5 |delta_d|)]^T + // + // where H is the householder matrix + // H_{v} = I - (2 / |v|^2) v v^T + // and + // v(d) = d - sign(d_n) |d| e_n. + // + // The origin point update function Plus_o is defined as + // + // o* = o + H_{v(d)} [0.5 delta_o, 0]^T. 
+ + Eigen::Map o(x_ptr, size_); + Eigen::Map d(x_ptr + size_, size_); + + Eigen::Map delta_o(delta_ptr, size_ - 1); + Eigen::Map delta_d(delta_ptr + size_ - 1, size_ - 1); + Eigen::Map o_plus_delta(x_plus_delta_ptr, size_); + Eigen::Map d_plus_delta(x_plus_delta_ptr + size_, size_); + + const double norm_delta_d = delta_d.norm(); + + o_plus_delta = o; + + // Shortcut for zero delta direction. + if (norm_delta_d == 0.0) { + d_plus_delta = d; + + if (delta_o.isZero(0.0)) { + return true; + } + } + + // Calculate the householder transformation which is needed for f_d and f_o. + AmbientVector v(size_); + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. + internal::ComputeHouseholderVector, + double, + AmbientSpaceDimension>(d, &v, &beta); + + if (norm_delta_d != 0.0) { + internal::ComputeSphereManifoldPlus( + v, beta, d, delta_d, norm_delta_d, &d_plus_delta); + } + + // The null space is in the direction of the line, so the tangent space is + // perpendicular to the line direction. This is achieved by using the + // householder matrix of the direction and allow only movements + // perpendicular to e_n. + // + // The factor of 0.5 is used to be consistent with the line direction + // update. + AmbientVector y(size_); + y << 0.5 * delta_o, 0; + o_plus_delta += internal::ApplyHouseholderVector(y, v, beta); + + return true; +} + +template +bool LineManifold::PlusJacobian( + const double* x_ptr, double* jacobian_ptr) const { + Eigen::Map d(x_ptr + size_, size_); + Eigen::Map jacobian( + jacobian_ptr, 2 * size_, 2 * (size_ - 1)); + + // Clear the Jacobian as only half of the matrix is not zero. 
+ jacobian.setZero(); + + auto jacobian_d = + jacobian + .template topLeftCorner( + size_, size_ - 1); + auto jacobian_o = jacobian.template bottomRightCorner( + size_, size_ - 1); + internal::ComputeSphereManifoldPlusJacobian(d, &jacobian_d); + jacobian_o = jacobian_d; + return true; +} + +template +bool LineManifold::Minus(const double* y_ptr, + const double* x_ptr, + double* y_minus_x) const { + Eigen::Map y_o(y_ptr, size_); + Eigen::Map y_d(y_ptr + size_, size_); + Eigen::Map x_o(x_ptr, size_); + Eigen::Map x_d(x_ptr + size_, size_); + + Eigen::Map y_minus_x_o(y_minus_x, size_ - 1); + Eigen::Map y_minus_x_d(y_minus_x + size_ - 1, size_ - 1); + + AmbientVector v(size_); + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. + internal::ComputeHouseholderVector, + double, + AmbientSpaceDimension>(x_d, &v, &beta); + + internal::ComputeSphereManifoldMinus(v, beta, x_d, y_d, &y_minus_x_d); + + AmbientVector delta_o = y_o - x_o; + const AmbientVector h_delta_o = + 2.0 * internal::ApplyHouseholderVector(delta_o, v, beta); + y_minus_x_o = h_delta_o.template head(size_ - 1); + + return true; +} + +template +bool LineManifold::MinusJacobian( + const double* x_ptr, double* jacobian_ptr) const { + Eigen::Map d(x_ptr + size_, size_); + Eigen::Map jacobian( + jacobian_ptr, 2 * (size_ - 1), 2 * size_); + + // Clear the Jacobian as only half of the matrix is not zero. 
+ jacobian.setZero(); + + auto jacobian_d = + jacobian + .template topLeftCorner( + size_ - 1, size_); + auto jacobian_o = jacobian.template bottomRightCorner( + size_ - 1, size_); + internal::ComputeSphereManifoldMinusJacobian(d, &jacobian_d); + jacobian_o = jacobian_d; + + return true; +} + +} // namespace ceres + +// clang-format off +#include "ceres/internal/reenable_warnings.h" +// clang-format on + +#endif // CERES_PUBLIC_LINE_MANIFOLD_H_ diff --git a/ceres-v2/include/local_parameterization.h b/ceres-v2/include/local_parameterization.h new file mode 100644 index 0000000000000000000000000000000000000000..5815dd17d15915812aaa61fe69f773201b21afa7 --- /dev/null +++ b/ceres-v2/include/local_parameterization.h @@ -0,0 +1,371 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: keir@google.com (Keir Mierle) +// sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_ +#define CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_ + +#include +#include +#include + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/internal/port.h" + +namespace ceres { + +// WARNING: LocalParameterizations are deprecated. They will be removed from +// Ceres Solver in version 2.2.0. Please use Manifolds instead. + +// Purpose: Sometimes parameter blocks x can overparameterize a problem +// +// min f(x) +// x +// +// In that case it is desirable to choose a parameterization for the +// block itself to remove the null directions of the cost. More +// generally, if x lies on a manifold of a smaller dimension than the +// ambient space that it is embedded in, then it is numerically and +// computationally more effective to optimize it using a +// parameterization that lives in the tangent space of that manifold +// at each point. +// +// For example, a sphere in three dimensions is a 2 dimensional +// manifold, embedded in a three dimensional space. At each point on +// the sphere, the plane tangent to it defines a two dimensional +// tangent space. For a cost function defined on this sphere, given a +// point x, moving in the direction normal to the sphere at that point +// is not useful. 
Thus a better way to do a local optimization is to +// optimize over two dimensional vector delta in the tangent space at +// that point and then "move" to the point x + delta, where the move +// operation involves projecting back onto the sphere. Doing so +// removes a redundant dimension from the optimization, making it +// numerically more robust and efficient. +// +// More generally we can define a function +// +// x_plus_delta = Plus(x, delta), +// +// where x_plus_delta has the same size as x, and delta is of size +// less than or equal to x. The function Plus, generalizes the +// definition of vector addition. Thus it satisfies the identify +// +// Plus(x, 0) = x, for all x. +// +// A trivial version of Plus is when delta is of the same size as x +// and +// +// Plus(x, delta) = x + delta +// +// A more interesting case if x is two dimensional vector, and the +// user wishes to hold the first coordinate constant. Then, delta is a +// scalar and Plus is defined as +// +// Plus(x, delta) = x + [0] * delta +// [1] +// +// An example that occurs commonly in Structure from Motion problems +// is when camera rotations are parameterized using Quaternion. There, +// it is useful to only make updates orthogonal to that 4-vector +// defining the quaternion. One way to do this is to let delta be a 3 +// dimensional vector and define Plus to be +// +// Plus(x, delta) = [cos(|delta|), sin(|delta|) delta / |delta|] * x +// +// The multiplication between the two 4-vectors on the RHS is the +// standard quaternion product. +// +// Given f and a point x, optimizing f can now be restated as +// +// min f(Plus(x, delta)) +// delta +// +// Given a solution delta to this problem, the optimal value is then +// given by +// +// x* = Plus(x, delta) +// +// The class LocalParameterization defines the function Plus and its +// Jacobian which is needed to compute the Jacobian of f w.r.t delta. 
+class CERES_DEPRECATED_WITH_MSG( + "LocalParameterizations will be removed from the Ceres Solver API in " + "version 2.2.0. Use Manifolds instead.") + CERES_EXPORT LocalParameterization { + public: + virtual ~LocalParameterization(); + + // Generalization of the addition operation, + // + // x_plus_delta = Plus(x, delta) + // + // with the condition that Plus(x, 0) = x. + // + virtual bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const = 0; + + // The jacobian of Plus(x, delta) w.r.t delta at delta = 0. + // + // jacobian is a row-major GlobalSize() x LocalSize() matrix. + virtual bool ComputeJacobian(const double* x, double* jacobian) const = 0; + + // local_matrix = global_matrix * jacobian + // + // global_matrix is a num_rows x GlobalSize row major matrix. + // local_matrix is a num_rows x LocalSize row major matrix. + // jacobian(x) is the matrix returned by ComputeJacobian at x. + // + // This is only used by GradientProblem. For most normal uses, it is + // okay to use the default implementation. + virtual bool MultiplyByJacobian(const double* x, + const int num_rows, + const double* global_matrix, + double* local_matrix) const; + + // Size of x. + virtual int GlobalSize() const = 0; + + // Size of delta. 
+ virtual int LocalSize() const = 0; +}; + +// Some basic parameterizations + +// Identity Parameterization: Plus(x, delta) = x + delta +class CERES_DEPRECATED_WITH_MSG("Use EuclideanManifold instead.") + CERES_EXPORT IdentityParameterization : public LocalParameterization { + public: + explicit IdentityParameterization(int size); + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool ComputeJacobian(const double* x, double* jacobian) const override; + bool MultiplyByJacobian(const double* x, + const int num_cols, + const double* global_matrix, + double* local_matrix) const override; + int GlobalSize() const override { return size_; } + int LocalSize() const override { return size_; } + + private: + const int size_; +}; + +// Hold a subset of the parameters inside a parameter block constant. +class CERES_DEPRECATED_WITH_MSG("Use SubsetManifold instead.") + CERES_EXPORT SubsetParameterization : public LocalParameterization { + public: + explicit SubsetParameterization(int size, + const std::vector& constant_parameters); + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool ComputeJacobian(const double* x, double* jacobian) const override; + bool MultiplyByJacobian(const double* x, + const int num_cols, + const double* global_matrix, + double* local_matrix) const override; + int GlobalSize() const override { + return static_cast(constancy_mask_.size()); + } + int LocalSize() const override { return local_size_; } + + private: + const int local_size_; + std::vector constancy_mask_; +}; + +// Plus(x, delta) = [cos(|delta|), sin(|delta|) delta / |delta|] * x +// with * being the quaternion multiplication operator. Here we assume +// that the first element of the quaternion vector is the real (cos +// theta) part. 
+class CERES_DEPRECATED_WITH_MSG("Use QuaternionManifold instead.") + CERES_EXPORT QuaternionParameterization : public LocalParameterization { + public: + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool ComputeJacobian(const double* x, double* jacobian) const override; + int GlobalSize() const override { return 4; } + int LocalSize() const override { return 3; } +}; + +// Implements the quaternion local parameterization for Eigen's representation +// of the quaternion. Eigen uses a different internal memory layout for the +// elements of the quaternion than what is commonly used. Specifically, Eigen +// stores the elements in memory as [x, y, z, w] where the real part is last +// whereas it is typically stored first. Note, when creating an Eigen quaternion +// through the constructor the elements are accepted in w, x, y, z order. Since +// Ceres operates on parameter blocks which are raw double pointers this +// difference is important and requires a different parameterization. +// +// Plus(x, delta) = [sin(|delta|) delta / |delta|, cos(|delta|)] * x +// with * being the quaternion multiplication operator. +class CERES_DEPRECATED_WITH_MSG("Use EigenQuaternionManifold instead.") + CERES_EXPORT EigenQuaternionParameterization + : public ceres::LocalParameterization { + public: + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool ComputeJacobian(const double* x, double* jacobian) const override; + int GlobalSize() const override { return 4; } + int LocalSize() const override { return 3; } +}; + +// This provides a parameterization for homogeneous vectors which are commonly +// used in Structure from Motion problems. One example where they are used is +// in representing points whose triangulation is ill-conditioned. Here it is +// advantageous to use an over-parameterization since homogeneous vectors can +// represent points at infinity. 
+// +// The plus operator is defined as +// Plus(x, delta) = +// [sin(0.5 * |delta|) * delta / |delta|, cos(0.5 * |delta|)] * x +// +// with * defined as an operator which applies the update orthogonal to x to +// remain on the sphere. We assume that the last element of x is the scalar +// component. The size of the homogeneous vector is required to be greater than +// 1. +class CERES_DEPRECATED_WITH_MSG("Use SphereManifold instead.") CERES_EXPORT + HomogeneousVectorParameterization : public LocalParameterization { + public: + explicit HomogeneousVectorParameterization(int size); + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool ComputeJacobian(const double* x, double* jacobian) const override; + int GlobalSize() const override { return size_; } + int LocalSize() const override { return size_ - 1; } + + private: + const int size_; +}; + +// This provides a parameterization for lines, where the line is +// over-parameterized by an origin point and a direction vector. So the +// parameter vector size needs to be two times the ambient space dimension, +// where the first half is interpreted as the origin point and the second half +// as the direction. +// +// The plus operator for the line direction is the same as for the +// HomogeneousVectorParameterization. The update of the origin point is +// perpendicular to the line direction before the update. +// +// This local parameterization is a special case of the affine Grassmannian +// manifold (see https://en.wikipedia.org/wiki/Affine_Grassmannian_(manifold)) +// for the case Graff_1(R^n). 
+template +class CERES_DEPRECATED_WITH_MSG("Use LineManifold instead.") + LineParameterization : public LocalParameterization { + public: + static_assert(AmbientSpaceDimension >= 2, + "The ambient space must be at least 2"); + + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool ComputeJacobian(const double* x, double* jacobian) const override; + int GlobalSize() const override { return 2 * AmbientSpaceDimension; } + int LocalSize() const override { return 2 * (AmbientSpaceDimension - 1); } +}; + +// Construct a local parameterization by taking the Cartesian product +// of a number of other local parameterizations. This is useful, when +// a parameter block is the cartesian product of two or more +// manifolds. For example the parameters of a camera consist of a +// rotation and a translation, i.e., SO(3) x R^3. +// +// Example usage: +// +// ProductParameterization product_param(new QuaterionionParameterization(), +// new IdentityParameterization(3)); +// +// is the local parameterization for a rigid transformation, where the +// rotation is represented using a quaternion. +// +class CERES_DEPRECATED_WITH_MSG("Use ProductManifold instead.") + CERES_EXPORT ProductParameterization : public LocalParameterization { + public: + ProductParameterization(const ProductParameterization&) = delete; + ProductParameterization& operator=(const ProductParameterization&) = delete; + // + // NOTE: The constructor takes ownership of the input local + // parameterizations. + // + template + explicit ProductParameterization(LocalParams*... local_params) + : local_params_(sizeof...(LocalParams)) { + constexpr int kNumLocalParams = sizeof...(LocalParams); + static_assert(kNumLocalParams >= 2, + "At least two local parameterizations must be specified."); + + using LocalParameterizationPtr = std::unique_ptr; + + // Wrap all raw pointers into std::unique_ptr for exception safety. 
+ std::array local_params_array{ + LocalParameterizationPtr(local_params)...}; + + // Initialize internal state. + for (int i = 0; i < kNumLocalParams; ++i) { + LocalParameterizationPtr& param = local_params_[i]; + param = std::move(local_params_array[i]); + + buffer_size_ = + std::max(buffer_size_, param->LocalSize() * param->GlobalSize()); + global_size_ += param->GlobalSize(); + local_size_ += param->LocalSize(); + } + } + + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool ComputeJacobian(const double* x, double* jacobian) const override; + int GlobalSize() const override { return global_size_; } + int LocalSize() const override { return local_size_; } + + private: + std::vector> local_params_; + int local_size_{0}; + int global_size_{0}; + int buffer_size_{0}; +}; + +} // namespace ceres + +// clang-format off +#include "ceres/internal/reenable_warnings.h" +// clang-format on + +#include "ceres/internal/line_parameterization.h" + +#endif // CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_ diff --git a/ceres-v2/include/loss_function.h b/ceres-v2/include/loss_function.h new file mode 100644 index 0000000000000000000000000000000000000000..8a5a37ff665d83d23ab76bb724fec3e7627a5aec --- /dev/null +++ b/ceres-v2/include/loss_function.h @@ -0,0 +1,433 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. 
nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// The LossFunction interface is the way users describe how residuals +// are converted to cost terms for the overall problem cost function. +// For the exact manner in which loss functions are converted to the +// overall cost for a problem, see problem.h. +// +// For least squares problem where there are no outliers and standard +// squared loss is expected, it is not necessary to create a loss +// function; instead passing a nullptr to the problem when adding +// residuals implies a standard squared loss. +// +// For least squares problems where the minimization may encounter +// input terms that contain outliers, that is, completely bogus +// measurements, it is important to use a loss function that reduces +// their associated penalty. +// +// Consider a structure from motion problem. The unknowns are 3D +// points and camera parameters, and the measurements are image +// coordinates describing the expected reprojected position for a +// point in a camera. 
For example, we want to model the geometry of a +// street scene with fire hydrants and cars, observed by a moving +// camera with unknown parameters, and the only 3D points we care +// about are the pointy tippy-tops of the fire hydrants. Our magic +// image processing algorithm, which is responsible for producing the +// measurements that are input to Ceres, has found and matched all +// such tippy-tops in all image frames, except that in one of the +// frame it mistook a car's headlight for a hydrant. If we didn't do +// anything special (i.e. if we used a basic quadratic loss), the +// residual for the erroneous measurement will result in extreme error +// due to the quadratic nature of squared loss. This results in the +// entire solution getting pulled away from the optimum to reduce +// the large error that would otherwise be attributed to the wrong +// measurement. +// +// Using a robust loss function, the cost for large residuals is +// reduced. In the example above, this leads to outlier terms getting +// downweighted so they do not overly influence the final solution. +// +// What cost function is best? +// +// In general, there isn't a principled way to select a robust loss +// function. The authors suggest starting with a non-robust cost, then +// only experimenting with robust loss functions if standard squared +// loss doesn't work. 
+ +#ifndef CERES_PUBLIC_LOSS_FUNCTION_H_ +#define CERES_PUBLIC_LOSS_FUNCTION_H_ + +#include + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { + +class CERES_EXPORT LossFunction { + public: + virtual ~LossFunction(); + + // For a residual vector with squared 2-norm 'sq_norm', this method + // is required to fill in the value and derivatives of the loss + // function (rho in this example): + // + // out[0] = rho(sq_norm), + // out[1] = rho'(sq_norm), + // out[2] = rho''(sq_norm), + // + // Here the convention is that the contribution of a term to the + // cost function is given by 1/2 rho(s), where + // + // s = ||residuals||^2. + // + // Calling the method with a negative value of 's' is an error and + // the implementations are not required to handle that case. + // + // Most sane choices of rho() satisfy: + // + // rho(0) = 0, + // rho'(0) = 1, + // rho'(s) < 1 in outlier region, + // rho''(s) < 0 in outlier region, + // + // so that they mimic the least squares cost for small residuals. + virtual void Evaluate(double sq_norm, double out[3]) const = 0; +}; + +// Some common implementations follow below. +// +// Note: in the region of interest (i.e. s < 3) we have: +// TrivialLoss >= HuberLoss >= SoftLOneLoss >= CauchyLoss + +// This corresponds to no robustification. +// +// rho(s) = s +// +// At s = 0: rho = [0, 1, 0]. +// +// It is not normally necessary to use this, as passing nullptr for the +// loss function when building the problem accomplishes the same +// thing. +class CERES_EXPORT TrivialLoss final : public LossFunction { + public: + void Evaluate(double, double*) const override; +}; + +// Scaling +// ------- +// Given one robustifier +// s -> rho(s) +// one can change the length scale at which robustification takes +// place, by adding a scale factor 'a' as follows: +// +// s -> a^2 rho(s / a^2). 
+//
+// The first and second derivatives are:
+//
+//   s -> rho'(s / a^2),
+//   s -> (1 / a^2) rho''(s / a^2),
+//
+// but the behaviour near s = 0 is the same as the original function,
+// i.e.
+//
+//   rho(s) = s + higher order terms,
+//   a^2 rho(s / a^2) = s + higher order terms.
+//
+// The scalar 'a' should be positive.
+//
+// The reason for the appearance of squaring is that 'a' is in the
+// units of the residual vector norm whereas 's' is a squared
+// norm. For applications it is more convenient to specify 'a' than
+// its square. The commonly used robustifiers below are described in
+// un-scaled format (a = 1) but their implementations work for any
+// non-zero value of 'a'.
+
+// Huber.
+//
+//   rho(s) = s                for s <= 1,
+//   rho(s) = 2 sqrt(s) - 1    for s >= 1.
+//
+// At s = 0: rho = [0, 1, 0].
+//
+// The scaling parameter 'a' corresponds to 'delta' on this page:
+//   http://en.wikipedia.org/wiki/Huber_Loss_Function
+class CERES_EXPORT HuberLoss final : public LossFunction {
+ public:
+  explicit HuberLoss(double a) : a_(a), b_(a * a) {}
+  void Evaluate(double, double*) const override;
+
+ private:
+  const double a_;
+  // b = a^2.
+  const double b_;
+};
+
+// Soft L1, similar to Huber but smooth.
+//
+//   rho(s) = 2 (sqrt(1 + s) - 1).
+//
+// At s = 0: rho = [0, 1, -1 / (2 * a^2)].
+class CERES_EXPORT SoftLOneLoss final : public LossFunction {
+ public:
+  explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) {}
+  void Evaluate(double, double*) const override;
+
+ private:
+  // b = a^2.
+  const double b_;
+  // c = 1 / a^2.
+  const double c_;
+};
+
+// Inspired by the Cauchy distribution
+//
+//   rho(s) = log(1 + s).
+//
+// At s = 0: rho = [0, 1, -1 / a^2].
+class CERES_EXPORT CauchyLoss final : public LossFunction {
+ public:
+  explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) {}
+  void Evaluate(double, double*) const override;
+
+ private:
+  // b = a^2.
+  const double b_;
+  // c = 1 / a^2.
+  const double c_;
+};
+
+// Loss that is capped beyond a certain level using the arc-tangent function.
+// The scaling parameter 'a' determines the level where falloff occurs.
+// For costs much smaller than 'a', the loss function is linear and behaves like
+// TrivialLoss, and for values much larger than 'a' the value asymptotically
+// approaches the constant value of a * PI / 2.
+//
+//   rho(s) = a atan(s / a).
+//
+// At s = 0: rho = [0, 1, 0].
+class CERES_EXPORT ArctanLoss final : public LossFunction {
+ public:
+  explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) {}
+  void Evaluate(double, double*) const override;
+
+ private:
+  const double a_;
+  // b = 1 / a^2.
+  const double b_;
+};
+
+// Loss function that maps to approximately zero cost in a range around the
+// origin, and reverts to linear in error (quadratic in cost) beyond this range.
+// The tolerance parameter 'a' sets the nominal point at which the
+// transition occurs, and the transition size parameter 'b' sets the nominal
+// distance over which most of the transition occurs. Both a and b must be
+// greater than zero, and typically b will be set to a fraction of a.
+// The slope rho'[s] varies smoothly from about 0 at s <= a - b to
+// about 1 at s >= a + b.
+//
+// The term is computed as:
+//
+//   rho(s) = b log(1 + exp((s - a) / b)) - c0.
+//
+// where c0 is chosen so that rho(0) == 0
+//
+//   c0 = b log(1 + exp(-a / b))
+//
+// This has the following useful properties:
+//
+//   rho(s) == 0               for s = 0
+//   rho'(s) ~= 0              for s << a - b
+//   rho'(s) ~= 1              for s >> a + b
+//   rho''(s) > 0              for all s
+//
+// In addition, all derivatives are continuous, and the curvature is
+// concentrated in the range a - b to a + b.
+//
+// At s = 0: rho = [0, ~0, ~0].
+class CERES_EXPORT TolerantLoss final : public LossFunction {
+ public:
+  explicit TolerantLoss(double a, double b);
+  void Evaluate(double, double*) const override;
+
+ private:
+  const double a_, b_, c_;
+};
+
+// This is the Tukey biweight loss function which aggressively
+// attempts to suppress large errors.
+//
+// The term is computed as follows where the equations are scaled by a
+// factor of 2 because the cost function is given by 1/2 rho(s):
+//
+//   rho(s) = a^2 / 3 * (1 - (1 - s / a^2)^3 )   for s <= a^2,
+//   rho(s) = a^2 / 3                            for s >  a^2.
+//
+// At s = 0: rho = [0, 1, -2 / a^2]
+class CERES_EXPORT TukeyLoss final : public ceres::LossFunction {
+ public:
+  explicit TukeyLoss(double a) : a_squared_(a * a) {}
+  void Evaluate(double, double*) const override;
+
+ private:
+  // a^2, the squared scale parameter; also the value of s beyond which
+  // rho(s) is constant.
+  const double a_squared_;
+};
+
+// Composition of two loss functions. The error is the result of first
+// evaluating g followed by f to yield the composition f(g(s)).
+// The loss functions must not be nullptr.
+class CERES_EXPORT ComposedLoss final : public LossFunction {
+ public:
+  explicit ComposedLoss(const LossFunction* f,
+                        Ownership ownership_f,
+                        const LossFunction* g,
+                        Ownership ownership_g);
+  ~ComposedLoss() override;
+  void Evaluate(double, double*) const override;
+
+ private:
+  std::unique_ptr<const LossFunction> f_, g_;
+  const Ownership ownership_f_, ownership_g_;
+};
+
+// The discussion above has to do with length scaling: it affects the space
+// in which s is measured. Sometimes you want to simply scale the output
+// value of the robustifier. For example, you might want to weight
+// different error terms differently (e.g., weight pixel reprojection
+// errors differently from terrain errors).
+//
+// If rho is the wrapped robustifier, then this simply outputs
+//   s -> a * rho(s)
+//
+// The first and second derivatives are, not surprisingly
+//   s -> a * rho'(s)
+//   s -> a * rho''(s)
+//
+// Since we treat the a nullptr Loss function as the Identity loss
+// function, rho = nullptr is a valid input and will result in the input
+// being scaled by a. This provides a simple way of implementing a
+// scaled ResidualBlock.
+class CERES_EXPORT ScaledLoss final : public LossFunction {
+ public:
+  // Constructs a ScaledLoss wrapping another loss function. Takes
+  // ownership of the wrapped loss function or not depending on the
+  // ownership parameter.
+  ScaledLoss(const LossFunction* rho, double a, Ownership ownership)
+      : rho_(rho), a_(a), ownership_(ownership) {}
+  ScaledLoss(const ScaledLoss&) = delete;
+  void operator=(const ScaledLoss&) = delete;
+
+  ~ScaledLoss() override {
+    if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
+      // Caller retains ownership; give the pointer back untouched.
+      rho_.release();
+    }
+  }
+  void Evaluate(double, double*) const override;
+
+ private:
+  std::unique_ptr<const LossFunction> rho_;
+  const double a_;
+  const Ownership ownership_;
+};
+
+// Sometimes after the optimization problem has been constructed, we
+// wish to mutate the scale of the loss function. For example, when
+// performing estimation from data which has substantial outliers,
+// convergence can be improved by starting out with a large scale,
+// optimizing the problem and then reducing the scale. This can have
+// better convergence behaviour than just using a loss function with a
+// small scale.
+//
+// This templated class allows the user to implement a loss function
+// whose scale can be mutated after an optimization problem has been
+// constructed.
+//
+// Since we treat the a nullptr Loss function as the Identity loss
+// function, rho = nullptr is a valid input.
+//
+// Example usage
+//
+//  Problem problem;
+//
+//  // Add parameter blocks
+//
+//  CostFunction* cost_function =
+//    new AutoDiffCostFunction < UW_Camera_Mapper, 2, 9, 3>(
+//      new UW_Camera_Mapper(feature_x, feature_y));
+//
+//  LossFunctionWrapper* loss_function = new LossFunctionWrapper(
+//    new HuberLoss(1.0), TAKE_OWNERSHIP);
+//
+//  problem.AddResidualBlock(cost_function, loss_function, parameters);
+//
+//  Solver::Options options;
+//  Solver::Summary summary;
+//
+//  Solve(options, &problem, &summary)
+//
+//  loss_function->Reset(new HuberLoss(1.0), TAKE_OWNERSHIP);
+//
+//  Solve(options, &problem, &summary)
+//
+class CERES_EXPORT LossFunctionWrapper final : public LossFunction {
+ public:
+  LossFunctionWrapper(LossFunction* rho, Ownership ownership)
+      : rho_(rho), ownership_(ownership) {}
+
+  LossFunctionWrapper(const LossFunctionWrapper&) = delete;
+  void operator=(const LossFunctionWrapper&) = delete;
+
+  ~LossFunctionWrapper() override {
+    if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
+      // Caller retains ownership; give the pointer back untouched.
+      rho_.release();
+    }
+  }
+
+  void Evaluate(double sq_norm, double out[3]) const override {
+    // A null wrapped loss is treated as the identity: rho(s) = s.
+    if (rho_.get() == nullptr) {
+      out[0] = sq_norm;
+      out[1] = 1.0;
+      out[2] = 0.0;
+    } else {
+      rho_->Evaluate(sq_norm, out);
+    }
+  }
+
+  void Reset(LossFunction* rho, Ownership ownership) {
+    if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
+      rho_.release();
+    }
+    rho_.reset(rho);
+    ownership_ = ownership;
+  }
+
+ private:
+  std::unique_ptr<const LossFunction> rho_;
+  Ownership ownership_;
+};
+
+}  // namespace ceres
+
+#include "ceres/internal/reenable_warnings.h"
+
+#endif  // CERES_PUBLIC_LOSS_FUNCTION_H_
diff --git a/ceres-v2/include/manifold.h b/ceres-v2/include/manifold.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d6e9fa0f5933c1dbf567f7bfb2d07d50d9cb8c1
--- /dev/null
+++ b/ceres-v2/include/manifold.h
@@ -0,0 +1,411 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2022 Google Inc. All rights reserved.
+// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_MANIFOLD_H_ +#define CERES_PUBLIC_MANIFOLD_H_ + +#include +#include +#include +#include +#include +#include + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { + +// In sensor fusion problems, often we have to model quantities that live in +// spaces known as Manifolds, for example the rotation/orientation of a sensor +// that is represented by a quaternion. +// +// Manifolds are spaces which locally look like Euclidean spaces. More +// precisely, at each point on the manifold there is a linear space that is +// tangent to the manifold. It has dimension equal to the intrinsic dimension of +// the manifold itself, which is less than or equal to the ambient space in +// which the manifold is embedded. +// +// For example, the tangent space to a point on a sphere in three dimensions is +// the two dimensional plane that is tangent to the sphere at that point. There +// are two reasons tangent spaces are interesting: +// +// 1. They are Eucliean spaces so the usual vector space operations apply there, +// which makes numerical operations easy. +// 2. Movement in the tangent space translate into movements along the manifold. +// Movements perpendicular to the tangent space do not translate into +// movements on the manifold. +// +// Returning to our sphere example, moving in the 2 dimensional plane +// tangent to the sphere and projecting back onto the sphere will move you away +// from the point you started from but moving along the normal at the same point +// and the projecting back onto the sphere brings you back to the point. +// +// The Manifold interface defines two operations (and their derivatives) +// involving the tangent space, allowing filtering and optimization to be +// performed on said manifold: +// +// 1. x_plus_delta = Plus(x, delta) +// 2. 
delta = Minus(x_plus_delta, x) +// +// "Plus" computes the result of moving along delta in the tangent space at x, +// and then projecting back onto the manifold that x belongs to. In Differential +// Geometry this is known as a "Retraction". It is a generalization of vector +// addition in Euclidean spaces. +// +// Given two points on the manifold, "Minus" computes the change delta to x in +// the tangent space at x, that will take it to x_plus_delta. +// +// Let us now consider two examples. +// +// The Euclidean space R^n is the simplest example of a manifold. It has +// dimension n (and so does its tangent space) and Plus and Minus are the +// familiar vector sum and difference operations. +// +// Plus(x, delta) = x + delta = y, +// Minus(y, x) = y - x = delta. +// +// A more interesting case is SO(3), the special orthogonal group in three +// dimensions - the space of 3x3 rotation matrices. SO(3) is a three dimensional +// manifold embedded in R^9 or R^(3x3). So points on SO(3) are represented using +// 9 dimensional vectors or 3x3 matrices, and points in its tangent spaces are +// represented by 3 dimensional vectors. +// +// Defining Plus and Minus are defined in terms of the matrix Exp and Log +// operations as follows: +// +// Let Exp(p, q, r) = [cos(theta) + cp^2, -sr + cpq , sq + cpr ] +// [sr + cpq , cos(theta) + cq^2, -sp + cqr ] +// [-sq + cpr , sp + cqr , cos(theta) + cr^2] +// +// where: theta = sqrt(p^2 + q^2 + r^2) +// s = sinc(theta) +// c = (1 - cos(theta))/theta^2 +// +// and Log(x) = 1/(2 sinc(theta))[x_32 - x_23, x_13 - x_31, x_21 - x_12] +// +// where: theta = acos((Trace(x) - 1)/2) +// +// Then, +// +// Plus(x, delta) = x Exp(delta) +// Minus(y, x) = Log(x^T y) +// +// For Plus and Minus to be mathematically consistent, the following identities +// must be satisfied at all points x on the manifold: +// +// 1. Plus(x, 0) = x. +// 2. For all y, Plus(x, Minus(y, x)) = y. +// 3. For all delta, Minus(Plus(x, delta), x) = delta. +// 4. 
For all delta_1, delta_2 +// |Minus(Plus(x, delta_1), Plus(x, delta_2)) <= |delta_1 - delta_2| +// +// Briefly: +// (1) Ensures that the tangent space is "centered" at x, and the zero vector is +// the identity element. +// (2) Ensures that any y can be reached from x. +// (3) Ensures that Plus is an injective (one-to-one) map. +// (4) Allows us to define a metric on the manifold. +// +// Additionally we require that Plus and Minus be sufficiently smooth. In +// particular they need to be differentiable everywhere on the manifold. +// +// For more details, please see +// +// "Integrating Generic Sensor Fusion Algorithms with Sound State +// Representations through Encapsulation of Manifolds" +// By C. Hertzberg, R. Wagner, U. Frese and L. Schroder +// https://arxiv.org/pdf/1107.1119.pdf +class CERES_EXPORT Manifold { + public: + virtual ~Manifold(); + + // Dimension of the ambient space in which the manifold is embedded. + virtual int AmbientSize() const = 0; + + // Dimension of the manifold/tangent space. + virtual int TangentSize() const = 0; + + // x_plus_delta = Plus(x, delta), + // + // A generalization of vector addition in Euclidean space, Plus computes the + // result of moving along delta in the tangent space at x, and then projecting + // back onto the manifold that x belongs to. + // + // x and x_plus_delta are AmbientSize() vectors. + // delta is a TangentSize() vector. + // + // Return value indicates if the operation was successful or not. + virtual bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const = 0; + + // Compute the derivative of Plus(x, delta) w.r.t delta at delta = 0, i.e. + // + // (D_2 Plus)(x, 0) + // + // jacobian is a row-major AmbientSize() x TangentSize() matrix. + // + // Return value indicates whether the operation was successful or not. 
+ virtual bool PlusJacobian(const double* x, double* jacobian) const = 0; + + // tangent_matrix = ambient_matrix * (D_2 Plus)(x, 0) + // + // ambient_matrix is a row-major num_rows x AmbientSize() matrix. + // tangent_matrix is a row-major num_rows x TangentSize() matrix. + // + // Return value indicates whether the operation was successful or not. + // + // This function is only used by the GradientProblemSolver, where the + // dimension of the parameter block can be large and it may be more efficient + // to compute this product directly rather than first evaluating the Jacobian + // into a matrix and then doing a matrix vector product. + // + // Because this is not an often used function, we provide a default + // implementation for convenience. If performance becomes an issue then the + // user should consider implementing a specialization. + virtual bool RightMultiplyByPlusJacobian(const double* x, + const int num_rows, + const double* ambient_matrix, + double* tangent_matrix) const; + + // y_minus_x = Minus(y, x) + // + // Given two points on the manifold, Minus computes the change to x in the + // tangent space at x, that will take it to y. + // + // x and y are AmbientSize() vectors. + // y_minus_x is a TangentSize() vector. + // + // Return value indicates if the operation was successful or not. + virtual bool Minus(const double* y, + const double* x, + double* y_minus_x) const = 0; + + // Compute the derivative of Minus(y, x) w.r.t y at y = x, i.e + // + // (D_1 Minus) (x, x) + // + // Jacobian is a row-major TangentSize() x AmbientSize() matrix. + // + // Return value indicates whether the operation was successful or not. + virtual bool MinusJacobian(const double* x, double* jacobian) const = 0; +}; + +// The Euclidean manifold is another name for the ordinary vector space R^size, +// where the plus and minus operations are the usual vector addition and +// subtraction: +// Plus(x, delta) = x + delta +// Minus(y, x) = y - x. 
+//
+// The class works with dynamic and static ambient space dimensions. If the
+// ambient space dimensions is know at compile time use
+//
+//    EuclideanManifold<3> manifold;
+//
+// If the ambient space dimensions is not known at compile time the template
+// parameter needs to be set to ceres::DYNAMIC and the actual dimension needs
+// to be provided as a constructor argument:
+//
+//    EuclideanManifold<ceres::DYNAMIC> manifold(ambient_dim);
+template <int Size>
+class EuclideanManifold final : public Manifold {
+ public:
+  static_assert(Size == ceres::DYNAMIC || Size >= 0,
+                "The size of the manifold needs to be non-negative.");
+  static_assert(ceres::DYNAMIC == Eigen::Dynamic,
+                "ceres::DYNAMIC needs to be the same as Eigen::Dynamic.");
+
+  EuclideanManifold() : size_{Size} {
+    static_assert(
+        Size != ceres::DYNAMIC,
+        "The size is set to dynamic. Please call the constructor with a size.");
+  }
+
+  explicit EuclideanManifold(int size) : size_(size) {
+    if (Size != ceres::DYNAMIC) {
+      CHECK_EQ(Size, size)
+          << "Specified size by template parameter differs from the supplied "
+             "one.";
+    } else {
+      CHECK_GE(size_, 0)
+          << "The size of the manifold needs to be non-negative.";
+    }
+  }
+
+  int AmbientSize() const override { return size_; }
+  int TangentSize() const override { return size_; }
+
+  // Plus(x, delta) = x + delta.
+  bool Plus(const double* x_ptr,
+            const double* delta_ptr,
+            double* x_plus_delta_ptr) const override {
+    Eigen::Map<const AmbientVector> x(x_ptr, size_);
+    Eigen::Map<const AmbientVector> delta(delta_ptr, size_);
+    Eigen::Map<AmbientVector> x_plus_delta(x_plus_delta_ptr, size_);
+    x_plus_delta = x + delta;
+    return true;
+  }
+
+  // The plus Jacobian is the identity.
+  bool PlusJacobian(const double* x_ptr, double* jacobian_ptr) const override {
+    Eigen::Map<MatrixJacobian> jacobian(jacobian_ptr, size_, size_);
+    jacobian.setIdentity();
+    return true;
+  }
+
+  // Since the plus Jacobian is the identity, the product is just a copy.
+  bool RightMultiplyByPlusJacobian(const double* x,
+                                   const int num_rows,
+                                   const double* ambient_matrix,
+                                   double* tangent_matrix) const override {
+    std::copy_n(ambient_matrix, num_rows * size_, tangent_matrix);
+    return true;
+  }
+
+  // Minus(y, x) = y - x.
+  bool Minus(const double* y_ptr,
+             const double* x_ptr,
+             double* y_minus_x_ptr) const override {
+    Eigen::Map<const AmbientVector> x(x_ptr, size_);
+    Eigen::Map<const AmbientVector> y(y_ptr, size_);
+    Eigen::Map<AmbientVector> y_minus_x(y_minus_x_ptr, size_);
+    y_minus_x = y - x;
+    return true;
+  }
+
+  // The minus Jacobian is the identity.
+  bool MinusJacobian(const double* x_ptr, double* jacobian_ptr) const override {
+    Eigen::Map<MatrixJacobian> jacobian(jacobian_ptr, size_, size_);
+    jacobian.setIdentity();
+    return true;
+  }
+
+ private:
+  static constexpr bool IsDynamic = (Size == ceres::DYNAMIC);
+  using AmbientVector = Eigen::Matrix<double, Size, 1>;
+  using MatrixJacobian = Eigen::Matrix<double, Size, Size, Eigen::RowMajor>;
+
+  int size_{};
+};
+
+// Hold a subset of the parameters inside a parameter block constant.
+class CERES_EXPORT SubsetManifold final : public Manifold {
+ public:
+  SubsetManifold(int size, const std::vector<int>& constant_parameters);
+  int AmbientSize() const override;
+  int TangentSize() const override;
+
+  bool Plus(const double* x,
+            const double* delta,
+            double* x_plus_delta) const override;
+  bool PlusJacobian(const double* x, double* jacobian) const override;
+  bool RightMultiplyByPlusJacobian(const double* x,
+                                   const int num_rows,
+                                   const double* ambient_matrix,
+                                   double* tangent_matrix) const override;
+  bool Minus(const double* y,
+             const double* x,
+             double* y_minus_x) const override;
+  bool MinusJacobian(const double* x, double* jacobian) const override;
+
+ private:
+  const int tangent_size_ = 0;
+  std::vector<bool> constancy_mask_;
+};
+
+// Implements the manifold for a Hamilton quaternion as defined in
+// https://en.wikipedia.org/wiki/Quaternion. Quaternions are represented as
+// unit norm 4-vectors, i.e.
+//
+// q = [q0; q1; q2; q3], |q| = 1
+//
+// is the ambient space representation.
+//
+//    q0  scalar part.
+//    q1  coefficient of i.
+//    q2  coefficient of j.
+//    q3  coefficient of k.
+//
+// where: i*i = j*j = k*k = -1 and i*j = k, j*k = i, k*i = j.
+//
+// The tangent space is R^3, which relates to the ambient space through the
+// Plus and Minus operations defined as:
+//
+//   Plus(x, delta) = [cos(|delta|); sin(|delta|) * delta / |delta|] * x
+//   Minus(y, x) = to_delta(y * x^{-1})
+//
+// where "*" is the quaternion product and because q is a unit quaternion
+// (|q|=1), q^-1 = [q0; -q1; -q2; -q3]
+//
+// and to_delta( [q0; u_{3x1}] ) = u / |u| * atan2(|u|, q0)
+class CERES_EXPORT QuaternionManifold final : public Manifold {
+ public:
+  int AmbientSize() const override { return 4; }  // [q0; q1; q2; q3], scalar first.
+  int TangentSize() const override { return 3; }  // tangent space is R^3.
+
+  bool Plus(const double* x,
+            const double* delta,
+            double* x_plus_delta) const override;
+  bool PlusJacobian(const double* x, double* jacobian) const override;
+  bool Minus(const double* y,
+             const double* x,
+             double* y_minus_x) const override;
+  bool MinusJacobian(const double* x, double* jacobian) const override;
+};
+
+// Implements the quaternion manifold for Eigen's representation of the
+// Hamilton quaternion. Geometrically it is exactly the same as the
+// QuaternionManifold defined above. However, Eigen uses a different internal
+// memory layout for the elements of the quaternion than what is commonly
+// used. It stores the quaternion in memory as [q1, q2, q3, q0] or
+// [x, y, z, w] where the real (scalar) part is last.
+//
+// Since Ceres operates on parameter blocks which are raw double pointers this
+// difference is important and requires a different manifold.
+class CERES_EXPORT EigenQuaternionManifold final : public Manifold {
+ public:
+  int AmbientSize() const override { return 4; }  // Eigen layout [x, y, z, w], scalar last.
+  int TangentSize() const override { return 3; }  // tangent space is R^3.
+
+  bool Plus(const double* x,
+            const double* delta,
+            double* x_plus_delta) const override;
+  bool PlusJacobian(const double* x, double* jacobian) const override;
+  bool Minus(const double* y,
+             const double* x,
+             double* y_minus_x) const override;
+  bool MinusJacobian(const double* x, double* jacobian) const override;
+};
+
+}  // namespace ceres
+
+// clang-format off
+#include "ceres/internal/reenable_warnings.h"
+// clang-format on
+
+#endif  // CERES_PUBLIC_MANIFOLD_H_
diff --git a/ceres-v2/include/manifold_test_utils.h b/ceres-v2/include/manifold_test_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f9fb21e8f34747f23948aab3359f683f41f59b4
--- /dev/null
+++ b/ceres-v2/include/manifold_test_utils.h
@@ -0,0 +1,328 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2022 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+//   this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+//   used to endorse or promote products derived from this software without
+//   specific prior written permission.
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#include +#include +#include + +#include "ceres/dynamic_numeric_diff_cost_function.h" +#include "ceres/internal/eigen.h" +#include "ceres/manifold.h" +#include "ceres/numeric_diff_options.h" +#include "ceres/types.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace ceres { + +// Matchers and macros for help with testing Manifold objects. +// +// Testing a Manifold has two parts. +// +// 1. Checking that Manifold::Plus is correctly defined. This requires per +// manifold tests. +// +// 2. The other methods of the manifold have mathematical properties that make +// it compatible with Plus, as described in: +// +// "Integrating Generic Sensor Fusion Algorithms with Sound State +// Representations through Encapsulation of Manifolds" +// By C. Hertzberg, R. Wagner, U. Frese and L. Schroder +// https://arxiv.org/pdf/1107.1119.pdf +// +// These tests are implemented using generic matchers defined below which can +// all be called by the macro EXPECT_THAT_MANIFOLD_INVARIANTS_HOLD(manifold, x, +// delta, y, tolerance). See manifold_test.cc for example usage. 
+
+// Checks that the invariant Plus(x, 0) == x holds.
+MATCHER_P2(XPlusZeroIsXAt, x, tolerance, "") {
+  const int ambient_size = arg.AmbientSize();
+  const int tangent_size = arg.TangentSize();
+
+  Vector actual = Vector::Zero(ambient_size);
+  Vector zero = Vector::Zero(tangent_size);
+  EXPECT_TRUE(arg.Plus(x.data(), zero.data(), actual.data()));
+  const double n = (actual - x).norm();
+  const double d = x.norm();
+  const double diffnorm = (d == 0.0) ? n : (n / d);  // relative error; absolute when x == 0.
+  if (diffnorm > tolerance) {
+    *result_listener << "\nexpected (x): " << x.transpose()
+                     << "\nactual: " << actual.transpose()
+                     << "\ndiffnorm: " << diffnorm;
+    return false;
+  }
+  return true;
+}
+
+// Checks that the invariant Minus(x, x) == 0 holds.
+MATCHER_P2(XMinusXIsZeroAt, x, tolerance, "") {
+  const int tangent_size = arg.TangentSize();
+  Vector actual = Vector::Zero(tangent_size);
+  EXPECT_TRUE(arg.Minus(x.data(), x.data(), actual.data()));
+  const double diffnorm = actual.norm();  // expected value is exactly zero.
+  if (diffnorm > tolerance) {
+    *result_listener << "\nx: " << x.transpose()  //
+                     << "\nexpected: 0 0 0"
+                     << "\nactual: " << actual.transpose()
+                     << "\ndiffnorm: " << diffnorm;
+    return false;
+  }
+  return true;
+}
+
+// Helper struct to curry Plus(x, .) so that it can be numerically
+// differentiated.
+struct PlusFunctor {
+  PlusFunctor(const Manifold& manifold, const double* x)
+      : manifold(manifold), x(x) {}
+  bool operator()(double const* const* parameters, double* x_plus_delta) const {
+    return manifold.Plus(x, parameters[0], x_plus_delta);
+  }
+
+  const Manifold& manifold;  // not owned; must outlive the functor.
+  const double* x;           // not owned; must outlive the functor.
+};
+
+// Checks that the output of PlusJacobian matches the one obtained by
+// numerically evaluating D_2 Plus(x,0).
+MATCHER_P2(HasCorrectPlusJacobianAt, x, tolerance, "") {
+  const int ambient_size = arg.AmbientSize();
+  const int tangent_size = arg.TangentSize();
+
+  NumericDiffOptions options;
+  options.ridders_relative_initial_step_size = 1e-4;
+
+  // Numerically differentiate Plus(x, .) at delta = 0 using Ridders' method.
+  DynamicNumericDiffCostFunction<PlusFunctor, RIDDERS> cost_function(
+      new PlusFunctor(arg, x.data()), TAKE_OWNERSHIP, options);
+  cost_function.AddParameterBlock(tangent_size);
+  cost_function.SetNumResiduals(ambient_size);
+
+  Vector zero = Vector::Zero(tangent_size);
+  double* parameters[1] = {zero.data()};
+
+  Vector x_plus_zero = Vector::Zero(ambient_size);
+  Matrix expected = Matrix::Zero(ambient_size, tangent_size);
+  double* jacobians[1] = {expected.data()};
+
+  EXPECT_TRUE(
+      cost_function.Evaluate(parameters, x_plus_zero.data(), jacobians));
+
+  // Initialized with Random so that a PlusJacobian that fails to write its
+  // output cannot accidentally match.
+  Matrix actual = Matrix::Random(ambient_size, tangent_size);
+  EXPECT_TRUE(arg.PlusJacobian(x.data(), actual.data()));
+
+  const double n = (actual - expected).norm();
+  const double d = expected.norm();
+  const double diffnorm = (d == 0.0) ? n : n / d;
+  if (diffnorm > tolerance) {
+    *result_listener << "\nx: " << x.transpose() << "\nexpected: \n"
+                     << expected << "\nactual:\n"
+                     << actual << "\ndiff:\n"
+                     << expected - actual << "\ndiffnorm : " << diffnorm;
+    return false;
+  }
+  return true;
+}
+
+// Checks that the invariant Minus(Plus(x, delta), x) == delta holds.
+MATCHER_P3(MinusPlusIsIdentityAt, x, delta, tolerance, "") {
+  const int ambient_size = arg.AmbientSize();
+  const int tangent_size = arg.TangentSize();
+  Vector x_plus_delta = Vector::Zero(ambient_size);
+  EXPECT_TRUE(arg.Plus(x.data(), delta.data(), x_plus_delta.data()));
+  Vector actual = Vector::Zero(tangent_size);
+  EXPECT_TRUE(arg.Minus(x_plus_delta.data(), x.data(), actual.data()));
+
+  const double n = (actual - delta).norm();
+  const double d = delta.norm();
+  const double diffnorm = (d == 0.0) ? n : (n / d);
+  if (diffnorm > tolerance) {
+    *result_listener << "\nx: " << x.transpose()
+                     << "\nexpected: " << delta.transpose()
+                     << "\nactual:" << actual.transpose()
+                     << "\ndiff:" << (delta - actual).transpose()
+                     << "\ndiffnorm: " << diffnorm;
+    return false;
+  }
+  return true;
+}
+
+// Checks that the invariant Plus(Minus(y, x), x) == y holds.
+MATCHER_P3(PlusMinusIsIdentityAt, x, y, tolerance, "") {
+  const int ambient_size = arg.AmbientSize();
+  const int tangent_size = arg.TangentSize();
+
+  Vector y_minus_x = Vector::Zero(tangent_size);
+  EXPECT_TRUE(arg.Minus(y.data(), x.data(), y_minus_x.data()));
+
+  Vector actual = Vector::Zero(ambient_size);
+  EXPECT_TRUE(arg.Plus(x.data(), y_minus_x.data(), actual.data()));
+
+  const double n = (actual - y).norm();
+  const double d = y.norm();
+  const double diffnorm = (d == 0.0) ? n : (n / d);
+  if (diffnorm > tolerance) {
+    *result_listener << "\nx: " << x.transpose()
+                     << "\nexpected: " << y.transpose()
+                     << "\nactual:" << actual.transpose()
+                     << "\ndiff:" << (y - actual).transpose()
+                     << "\ndiffnorm: " << diffnorm;
+    return false;
+  }
+  return true;
+}
+
+// Helper struct to curry Minus(., x) so that it can be numerically
+// differentiated.
+struct MinusFunctor {
+  MinusFunctor(const Manifold& manifold, const double* x)
+      : manifold(manifold), x(x) {}
+  bool operator()(double const* const* parameters, double* y_minus_x) const {
+    return manifold.Minus(parameters[0], x, y_minus_x);
+  }
+
+  const Manifold& manifold;  // not owned; must outlive the functor.
+  const double* x;           // not owned; must outlive the functor.
+};
+
+// Checks that the output of MinusJacobian matches the one obtained by
+// numerically evaluating D_1 Minus(x,x).
+MATCHER_P2(HasCorrectMinusJacobianAt, x, tolerance, "") {
+  const int ambient_size = arg.AmbientSize();
+  const int tangent_size = arg.TangentSize();
+
+  Vector y = x;
+  Vector y_minus_x = Vector::Zero(tangent_size);
+
+  NumericDiffOptions options;
+  options.ridders_relative_initial_step_size = 1e-4;
+  // Numerically differentiate Minus(., x) at y = x using Ridders' method.
+  DynamicNumericDiffCostFunction<MinusFunctor, RIDDERS> cost_function(
+      new MinusFunctor(arg, x.data()), TAKE_OWNERSHIP, options);
+  cost_function.AddParameterBlock(ambient_size);
+  cost_function.SetNumResiduals(tangent_size);
+
+  double* parameters[1] = {y.data()};
+
+  Matrix expected = Matrix::Zero(tangent_size, ambient_size);
+  double* jacobians[1] = {expected.data()};
+
+  EXPECT_TRUE(cost_function.Evaluate(parameters, y_minus_x.data(), jacobians));
+
+  // Initialized with Random so that a MinusJacobian that fails to write its
+  // output cannot accidentally match.
+  Matrix actual = Matrix::Random(tangent_size, ambient_size);
+  EXPECT_TRUE(arg.MinusJacobian(x.data(), actual.data()));
+
+  const double n = (actual - expected).norm();
+  const double d = expected.norm();
+  const double diffnorm = (d == 0.0) ? n : (n / d);
+  if (diffnorm > tolerance) {
+    *result_listener << "\nx: " << x.transpose() << "\nexpected: \n"
+                     << expected << "\nactual:\n"
+                     << actual << "\ndiff:\n"
+                     << expected - actual << "\ndiffnorm: " << diffnorm;
+    return false;
+  }
+  return true;
+}
+
+// Checks that D_delta Minus(Plus(x, delta), x) at delta = 0 is an identity
+// matrix.
+MATCHER_P2(MinusPlusJacobianIsIdentityAt, x, tolerance, "") { + const int ambient_size = arg.AmbientSize(); + const int tangent_size = arg.TangentSize(); + + Matrix plus_jacobian(ambient_size, tangent_size); + EXPECT_TRUE(arg.PlusJacobian(x.data(), plus_jacobian.data())); + Matrix minus_jacobian(tangent_size, ambient_size); + EXPECT_TRUE(arg.MinusJacobian(x.data(), minus_jacobian.data())); + + const Matrix actual = minus_jacobian * plus_jacobian; + const Matrix expected = Matrix::Identity(tangent_size, tangent_size); + + const double n = (actual - expected).norm(); + const double d = expected.norm(); + const double diffnorm = n / d; + if (diffnorm > tolerance) { + *result_listener << "\nx: " << x.transpose() << "\nexpected: \n" + << expected << "\nactual:\n" + << actual << "\ndiff:\n" + << expected - actual << "\ndiffnorm: " << diffnorm; + + return false; + } + return true; +} + +// Verify that the output of RightMultiplyByPlusJacobian is ambient_matrix * +// plus_jacobian. +MATCHER_P2(HasCorrectRightMultiplyByPlusJacobianAt, x, tolerance, "") { + const int ambient_size = arg.AmbientSize(); + const int tangent_size = arg.TangentSize(); + + constexpr int kMinNumRows = 0; + constexpr int kMaxNumRows = 3; + for (int num_rows = kMinNumRows; num_rows <= kMaxNumRows; ++num_rows) { + Matrix plus_jacobian = Matrix::Random(ambient_size, tangent_size); + EXPECT_TRUE(arg.PlusJacobian(x.data(), plus_jacobian.data())); + + Matrix ambient_matrix = Matrix::Random(num_rows, ambient_size); + Matrix expected = ambient_matrix * plus_jacobian; + + Matrix actual = Matrix::Random(num_rows, tangent_size); + EXPECT_TRUE(arg.RightMultiplyByPlusJacobian( + x.data(), num_rows, ambient_matrix.data(), actual.data())); + const double n = (actual - expected).norm(); + const double d = expected.norm(); + const double diffnorm = (d == 0.0) ? 
n : (n / d); + if (diffnorm > tolerance) { + *result_listener << "\nx: " << x.transpose() << "\nambient_matrix : \n" + << ambient_matrix << "\nplus_jacobian : \n" + << plus_jacobian << "\nexpected: \n" + << expected << "\nactual:\n" + << actual << "\ndiff:\n" + << expected - actual << "\ndiffnorm : " << diffnorm; + return false; + } + } + return true; +} + +#define EXPECT_THAT_MANIFOLD_INVARIANTS_HOLD(manifold, x, delta, y, tolerance) \ + Vector zero_tangent = Vector::Zero(manifold.TangentSize()); \ + EXPECT_THAT(manifold, XPlusZeroIsXAt(x, tolerance)); \ + EXPECT_THAT(manifold, XMinusXIsZeroAt(x, tolerance)); \ + EXPECT_THAT(manifold, MinusPlusIsIdentityAt(x, delta, tolerance)); \ + EXPECT_THAT(manifold, MinusPlusIsIdentityAt(x, zero_tangent, tolerance)); \ + EXPECT_THAT(manifold, PlusMinusIsIdentityAt(x, x, tolerance)); \ + EXPECT_THAT(manifold, PlusMinusIsIdentityAt(x, y, tolerance)); \ + EXPECT_THAT(manifold, HasCorrectPlusJacobianAt(x, tolerance)); \ + EXPECT_THAT(manifold, HasCorrectMinusJacobianAt(x, tolerance)); \ + EXPECT_THAT(manifold, MinusPlusJacobianIsIdentityAt(x, tolerance)); \ + EXPECT_THAT(manifold, HasCorrectRightMultiplyByPlusJacobianAt(x, tolerance)); + +} // namespace ceres diff --git a/ceres-v2/include/normal_prior.h b/ceres-v2/include/normal_prior.h new file mode 100644 index 0000000000000000000000000000000000000000..c5c7f3e623efaaa247b3e3e67c9cf32cb11725bf --- /dev/null +++ b/ceres-v2/include/normal_prior.h @@ -0,0 +1,78 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Cost term that implements a prior on a parameter block using a +// normal distribution. + +#ifndef CERES_PUBLIC_NORMAL_PRIOR_H_ +#define CERES_PUBLIC_NORMAL_PRIOR_H_ + +#include "ceres/cost_function.h" +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/eigen.h" + +namespace ceres { + +// Implements a cost function of the form +// +// cost(x) = ||A(x - b)||^2 +// +// where, the matrix A and the vector b are fixed and x is the +// variable. 
In case the user is interested in implementing a cost +// function of the form +// +// cost(x) = (x - mu)^T S^{-1} (x - mu) +// +// where, mu is a vector and S is a covariance matrix, then, A = +// S^{-1/2}, i.e the matrix A is the square root of the inverse of the +// covariance, also known as the stiffness matrix. There are however +// no restrictions on the shape of A. It is free to be rectangular, +// which would be the case if the covariance matrix S is rank +// deficient. + +class CERES_EXPORT NormalPrior final : public CostFunction { + public: + // Check that the number of rows in the vector b are the same as the + // number of columns in the matrix A, crash otherwise. + NormalPrior(const Matrix& A, const Vector& b); + bool Evaluate(double const* const* parameters, + double* residuals, + double** jacobians) const override; + + private: + Matrix A_; + Vector b_; +}; + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_NORMAL_PRIOR_H_ diff --git a/ceres-v2/include/numeric_diff_cost_function.h b/ceres-v2/include/numeric_diff_cost_function.h new file mode 100644 index 0000000000000000000000000000000000000000..6ec53175030c1618c88deb19a3acf5a6665480e1 --- /dev/null +++ b/ceres-v2/include/numeric_diff_cost_function.h @@ -0,0 +1,260 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. 
nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: keir@google.com (Keir Mierle) +// sameeragarwal@google.com (Sameer Agarwal) +// +// Create CostFunctions as needed by the least squares framework with jacobians +// computed via numeric (a.k.a. finite) differentiation. For more details see +// http://en.wikipedia.org/wiki/Numerical_differentiation. +// +// To get an numerically differentiated cost function, you must define +// a class with a operator() (a functor) that computes the residuals. +// +// The function must write the computed value in the last argument +// (the only non-const one) and return true to indicate success. +// Please see cost_function.h for details on how the return value +// maybe used to impose simple constraints on the parameter block. +// +// For example, consider a scalar error e = k - x'y, where both x and y are +// two-dimensional column vector parameters, the prime sign indicates +// transposition, and k is a constant. 
The form of this error, which is the +// difference between a constant and an expression, is a common pattern in least +// squares problems. For example, the value x'y might be the model expectation +// for a series of measurements, where there is an instance of the cost function +// for each measurement k. +// +// The actual cost added to the total problem is e^2, or (k - x'k)^2; however, +// the squaring is implicitly done by the optimization framework. +// +// To write an numerically-differentiable cost function for the above model, +// first define the object +// +// class MyScalarCostFunctor { +// explicit MyScalarCostFunctor(double k): k_(k) {} +// +// bool operator()(const double* const x, +// const double* const y, +// double* residuals) const { +// residuals[0] = k_ - x[0] * y[0] - x[1] * y[1]; +// return true; +// } +// +// private: +// double k_; +// }; +// +// Note that in the declaration of operator() the input parameters x +// and y come first, and are passed as const pointers to arrays of +// doubles. If there were three input parameters, then the third input +// parameter would come after y. The output is always the last +// parameter, and is also a pointer to an array. In the example above, +// the residual is a scalar, so only residuals[0] is set. +// +// Then given this class definition, the numerically differentiated +// cost function with central differences used for computing the +// derivative can be constructed as follows. +// +// CostFunction* cost_function +// = new NumericDiffCostFunction( +// new MyScalarCostFunctor(1.0)); ^ ^ ^ ^ +// | | | | +// Finite Differencing Scheme -+ | | | +// Dimension of residual ------------+ | | +// Dimension of x ----------------------+ | +// Dimension of y -------------------------+ +// +// In this example, there is usually an instance for each measurement of k. 
+// +// In the instantiation above, the template parameters following +// "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing +// a 1-dimensional output from two arguments, both 2-dimensional. +// +// NumericDiffCostFunction also supports cost functions with a +// runtime-determined number of residuals. For example: +// +// clang-format off +// +// CostFunction* cost_function +// = new NumericDiffCostFunction( +// new CostFunctorWithDynamicNumResiduals(1.0), ^ ^ ^ +// TAKE_OWNERSHIP, | | | +// runtime_number_of_residuals); <----+ | | | +// | | | | +// | | | | +// Actual number of residuals ------+ | | | +// Indicate dynamic number of residuals --------------------+ | | +// Dimension of x ------------------------------------------------+ | +// Dimension of y ---------------------------------------------------+ +// clang-format on +// +// +// The central difference method is considerably more accurate at the cost of +// twice as many function evaluations than forward difference. Consider using +// central differences begin with, and only after that works, trying forward +// difference to improve performance. +// +// WARNING #1: A common beginner's error when first using +// NumericDiffCostFunction is to get the sizing wrong. In particular, +// there is a tendency to set the template parameters to (dimension of +// residual, number of parameters) instead of passing a dimension +// parameter for *every parameter*. In the example above, that would +// be , which is missing the last '2' +// argument. Please be careful when setting the size parameters. +// +//////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////// +// +// ALTERNATE INTERFACE +// +// For a variety of reasons, including compatibility with legacy code, +// NumericDiffCostFunction can also take CostFunction objects as +// input. The following describes how. 
+// +// To get a numerically differentiated cost function, define a +// subclass of CostFunction such that the Evaluate() function ignores +// the jacobian parameter. The numeric differentiation wrapper will +// fill in the jacobian parameter if necessary by repeatedly calling +// the Evaluate() function with small changes to the appropriate +// parameters, and computing the slope. For performance, the numeric +// differentiation wrapper class is templated on the concrete cost +// function, even though it could be implemented only in terms of the +// virtual CostFunction interface. +// +// The numerically differentiated version of a cost function for a cost function +// can be constructed as follows: +// +// CostFunction* cost_function +// = new NumericDiffCostFunction( +// new MyCostFunction(...), TAKE_OWNERSHIP); +// +// where MyCostFunction has 1 residual and 2 parameter blocks with sizes 4 and 8 +// respectively. Look at the tests for a more detailed example. +// +// TODO(keir): Characterize accuracy; mention pitfalls; provide alternatives. + +#ifndef CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_ +#define CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_ + +#include +#include + +#include "Eigen/Dense" +#include "ceres/cost_function.h" +#include "ceres/internal/numeric_diff.h" +#include "ceres/internal/parameter_dims.h" +#include "ceres/numeric_diff_options.h" +#include "ceres/sized_cost_function.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { + +template // Parameters dimensions for each block. 
+class NumericDiffCostFunction final + : public SizedCostFunction { + public: + explicit NumericDiffCostFunction( + CostFunctor* functor, + Ownership ownership = TAKE_OWNERSHIP, + int num_residuals = kNumResiduals, + const NumericDiffOptions& options = NumericDiffOptions()) + : functor_(functor), ownership_(ownership), options_(options) { + if (kNumResiduals == DYNAMIC) { + SizedCostFunction::set_num_residuals(num_residuals); + } + } + + NumericDiffCostFunction(NumericDiffCostFunction&& other) + : functor_(std::move(other.functor_)), ownership_(other.ownership_) {} + + virtual ~NumericDiffCostFunction() { + if (ownership_ != TAKE_OWNERSHIP) { + functor_.release(); + } + } + + bool Evaluate(double const* const* parameters, + double* residuals, + double** jacobians) const override { + using internal::FixedArray; + using internal::NumericDiff; + + using ParameterDims = + typename SizedCostFunction::ParameterDims; + + constexpr int kNumParameters = ParameterDims::kNumParameters; + constexpr int kNumParameterBlocks = ParameterDims::kNumParameterBlocks; + + // Get the function value (residuals) at the the point to evaluate. + if (!internal::VariadicEvaluate( + *functor_, parameters, residuals)) { + return false; + } + + if (jacobians == nullptr) { + return true; + } + + // Create a copy of the parameters which will get mutated. 
+ FixedArray parameters_copy(kNumParameters); + std::array parameters_reference_copy = + ParameterDims::GetUnpackedParameters(parameters_copy.data()); + + for (int block = 0; block < kNumParameterBlocks; ++block) { + memcpy(parameters_reference_copy[block], + parameters[block], + sizeof(double) * ParameterDims::GetDim(block)); + } + + internal::EvaluateJacobianForParameterBlocks:: + template Apply( + functor_.get(), + residuals, + options_, + SizedCostFunction::num_residuals(), + parameters_reference_copy.data(), + jacobians); + + return true; + } + + const CostFunctor& functor() const { return *functor_; } + + private: + std::unique_ptr functor_; + Ownership ownership_; + NumericDiffOptions options_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_ diff --git a/ceres-v2/include/numeric_diff_first_order_function.h b/ceres-v2/include/numeric_diff_first_order_function.h new file mode 100644 index 0000000000000000000000000000000000000000..f5bb005be582a23328bb0b397bfbff27a2136998 --- /dev/null +++ b/ceres-v2/include/numeric_diff_first_order_function.h @@ -0,0 +1,163 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_NUMERIC_DIFF_FIRST_ORDER_FUNCTION_H_ +#define CERES_PUBLIC_NUMERIC_DIFF_FIRST_ORDER_FUNCTION_H_ + +#include +#include + +#include "ceres/first_order_function.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/fixed_array.h" +#include "ceres/internal/numeric_diff.h" +#include "ceres/internal/parameter_dims.h" +#include "ceres/internal/variadic_evaluate.h" +#include "ceres/numeric_diff_options.h" +#include "ceres/types.h" + +namespace ceres { + +// Creates FirstOrderFunctions as needed by the GradientProblem +// framework, with gradients computed via numeric differentiation. For +// more information on numeric differentiation, see the wikipedia +// article at https://en.wikipedia.org/wiki/Numerical_differentiation +// +// To get an numerically differentiated cost function, you must define +// a class with an operator() (a functor) that computes the cost. +// +// The function must write the computed value in the last argument +// (the only non-const one) and return true to indicate success. 
+// +// For example, consider a scalar error e = x'y - a, where both x and y are +// two-dimensional column vector parameters, the prime sign indicates +// transposition, and a is a constant. +// +// To write an numerically-differentiable cost function for the above model, +// first define the object +// +// class QuadraticCostFunctor { +// public: +// explicit QuadraticCostFunctor(double a) : a_(a) {} +// bool operator()(const double* const xy, double* cost) const { +// constexpr int kInputVectorLength = 2; +// const double* const x = xy; +// const double* const y = xy + kInputVectorLength; +// *cost = x[0] * y[0] + x[1] * y[1] - a_; +// return true; +// } +// +// private: +// double a_; +// }; +// +// +// Note that in the declaration of operator() the input parameters xy +// come first, and are passed as const pointers to array of +// doubles. The output cost is the last parameter. +// +// Then given this class definition, the numerically differentiated +// first order function with central differences used for computing the +// derivative can be constructed as follows. +// +// FirstOrderFunction* function +// = new NumericDiffFirstOrderFunction( +// new QuadraticCostFunctor(1.0)); ^ ^ ^ +// | | | +// Finite Differencing Scheme -+ | | +// Dimension of xy ------------------------+ +// +// +// In the instantiation above, the template parameters following +// "QuadraticCostFunctor", "CENTRAL, 4", describe the finite +// differencing scheme as "central differencing" and the functor as +// computing its cost from a 4 dimensional input. 
+template +class NumericDiffFirstOrderFunction final : public FirstOrderFunction { + public: + explicit NumericDiffFirstOrderFunction( + FirstOrderFunctor* functor, + Ownership ownership = TAKE_OWNERSHIP, + const NumericDiffOptions& options = NumericDiffOptions()) + : functor_(functor), ownership_(ownership), options_(options) { + static_assert(kNumParameters > 0, "kNumParameters must be positive"); + } + + ~NumericDiffFirstOrderFunction() override { + if (ownership_ != TAKE_OWNERSHIP) { + functor_.release(); + } + } + + bool Evaluate(const double* const parameters, + double* cost, + double* gradient) const override { + using ParameterDims = internal::StaticParameterDims; + constexpr int kNumResiduals = 1; + + // Get the function value (cost) at the the point to evaluate. + if (!internal::VariadicEvaluate( + *functor_, ¶meters, cost)) { + return false; + } + + if (gradient == nullptr) { + return true; + } + + // Create a copy of the parameters which will get mutated. + internal::FixedArray parameters_copy(kNumParameters); + std::copy_n(parameters, kNumParameters, parameters_copy.data()); + double* parameters_ptr = parameters_copy.data(); + internal::EvaluateJacobianForParameterBlocks< + ParameterDims>::template Apply(functor_.get(), + cost, + options_, + kNumResiduals, + ¶meters_ptr, + &gradient); + return true; + } + + int NumParameters() const override { return kNumParameters; } + + const FirstOrderFunctor& functor() const { return *functor_; } + + private: + std::unique_ptr functor_; + Ownership ownership_; + NumericDiffOptions options_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_NUMERIC_DIFF_FIRST_ORDER_FUNCTION_H_ diff --git a/ceres-v2/include/numeric_diff_options.h b/ceres-v2/include/numeric_diff_options.h new file mode 100644 index 0000000000000000000000000000000000000000..b025b51d938d6220e5f44c5725d2acc0f7dc9c4b --- /dev/null +++ b/ceres-v2/include/numeric_diff_options.h @@ -0,0 +1,76 @@ +// Ceres Solver - A fast non-linear least squares 
minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: tbennun@gmail.com (Tal Ben-Nun) +// + +#ifndef CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_ +#define CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_ + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" + +namespace ceres { + +// Options pertaining to numeric differentiation (e.g., convergence criteria, +// step sizes). 
+struct CERES_EXPORT NumericDiffOptions { + // Numeric differentiation step size (multiplied by parameter block's + // order of magnitude). If parameters are close to zero, the step size + // is set to sqrt(machine_epsilon). + double relative_step_size = 1e-6; + + // Initial step size for Ridders adaptive numeric differentiation (multiplied + // by parameter block's order of magnitude). + // If parameters are close to zero, Ridders' method sets the step size + // directly to this value. This parameter is separate from + // "relative_step_size" in order to set a different default value. + // + // Note: For Ridders' method to converge, the step size should be initialized + // to a value that is large enough to produce a significant change in the + // function. As the derivative is estimated, the step size decreases. + double ridders_relative_initial_step_size = 1e-2; + + // Maximal number of adaptive extrapolations (sampling) in Ridders' method. + int max_num_ridders_extrapolations = 10; + + // Convergence criterion on extrapolation error for Ridders adaptive + // differentiation. The available error estimation methods are defined in + // NumericDiffErrorType and set in the "ridders_error_method" field. + double ridders_epsilon = 1e-12; + + // The factor in which to shrink the step size with each extrapolation in + // Ridders' method. + double ridders_step_shrink_factor = 2.0; +}; + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_ diff --git a/ceres-v2/include/ordered_groups.h b/ceres-v2/include/ordered_groups.h new file mode 100644 index 0000000000000000000000000000000000000000..c1531cce65f79dc9a84b874f664ea37292c285f1 --- /dev/null +++ b/ceres-v2/include/ordered_groups.h @@ -0,0 +1,197 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. 
+// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_ORDERED_GROUPS_H_ +#define CERES_PUBLIC_ORDERED_GROUPS_H_ + +#include +#include +#include +#include + +#include "ceres/internal/export.h" +#include "glog/logging.h" + +namespace ceres { + +// A class for storing and manipulating an ordered collection of +// groups/sets with the following semantics: +// +// Group ids are non-negative integer values. 
Elements are any type +// that can serve as a key in a map or an element of a set. +// +// An element can only belong to one group at a time. A group may +// contain an arbitrary number of elements. +// +// Groups are ordered by their group id. +template +class OrderedGroups { + public: + // Add an element to a group. If a group with this id does not + // exist, one is created. This method can be called any number of + // times for the same element. Group ids should be non-negative + // numbers. + // + // Return value indicates if adding the element was a success. + bool AddElementToGroup(const T element, const int group) { + if (group < 0) { + return false; + } + + auto it = element_to_group_.find(element); + if (it != element_to_group_.end()) { + if (it->second == group) { + // Element is already in the right group, nothing to do. + return true; + } + + group_to_elements_[it->second].erase(element); + if (group_to_elements_[it->second].size() == 0) { + group_to_elements_.erase(it->second); + } + } + + element_to_group_[element] = group; + group_to_elements_[group].insert(element); + return true; + } + + void Clear() { + group_to_elements_.clear(); + element_to_group_.clear(); + } + + // Remove the element, no matter what group it is in. Return value + // indicates if the element was actually removed. + bool Remove(const T element) { + const int current_group = GroupId(element); + if (current_group < 0) { + return false; + } + + group_to_elements_[current_group].erase(element); + + if (group_to_elements_[current_group].size() == 0) { + // If the group is empty, then get rid of it. + group_to_elements_.erase(current_group); + } + + element_to_group_.erase(element); + return true; + } + + // Bulk remove elements. The return value indicates the number of + // elements successfully removed. 
+ int Remove(const std::vector& elements) { + if (NumElements() == 0 || elements.size() == 0) { + return 0; + } + + int num_removed = 0; + for (int i = 0; i < elements.size(); ++i) { + num_removed += Remove(elements[i]); + } + return num_removed; + } + + // Reverse the order of the groups in place. + void Reverse() { + if (NumGroups() == 0) { + return; + } + + auto it = group_to_elements_.rbegin(); + std::map> new_group_to_elements; + new_group_to_elements[it->first] = it->second; + + int new_group_id = it->first + 1; + for (++it; it != group_to_elements_.rend(); ++it) { + for (const auto& element : it->second) { + element_to_group_[element] = new_group_id; + } + new_group_to_elements[new_group_id] = it->second; + new_group_id++; + } + + group_to_elements_.swap(new_group_to_elements); + } + + // Return the group id for the element. If the element is not a + // member of any group, return -1. + int GroupId(const T element) const { + auto it = element_to_group_.find(element); + if (it == element_to_group_.end()) { + return -1; + } + return it->second; + } + + bool IsMember(const T element) const { + auto it = element_to_group_.find(element); + return (it != element_to_group_.end()); + } + + // This function always succeeds, i.e., implicitly there exists a + // group for every integer. + int GroupSize(const int group) const { + auto it = group_to_elements_.find(group); + return (it == group_to_elements_.end()) ? 0 : it->second.size(); + } + + int NumElements() const { return element_to_group_.size(); } + + // Number of groups with one or more elements. + int NumGroups() const { return group_to_elements_.size(); } + + // The first group with one or more elements. Calling this when + // there are no groups with non-zero elements will result in a + // crash. 
+ int MinNonZeroGroup() const { + CHECK_NE(NumGroups(), 0); + return group_to_elements_.begin()->first; + } + + const std::map>& group_to_elements() const { + return group_to_elements_; + } + + const std::map& element_to_group() const { return element_to_group_; } + + private: + std::map> group_to_elements_; + std::unordered_map element_to_group_; +}; + +// Typedef for the most commonly used version of OrderedGroups. +using ParameterBlockOrdering = OrderedGroups; + +} // namespace ceres + +#endif // CERES_PUBLIC_ORDERED_GROUP_H_ diff --git a/ceres-v2/include/problem.h b/ceres-v2/include/problem.h new file mode 100644 index 0000000000000000000000000000000000000000..819fa454b212a336fd02d843e336751b1f3f1133 --- /dev/null +++ b/ceres-v2/include/problem.h @@ -0,0 +1,685 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2021 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// keir@google.com (Keir Mierle) +// +// The Problem object is used to build and hold least squares problems. + +#ifndef CERES_PUBLIC_PROBLEM_H_ +#define CERES_PUBLIC_PROBLEM_H_ + +#include +#include +#include +#include +#include +#include + +#include "ceres/context.h" +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/internal/port.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { + +class CostFunction; +class EvaluationCallback; +class LossFunction; +class LocalParameterization; +class Manifold; +class Solver; +struct CRSMatrix; + +namespace internal { +class Preprocessor; +class ProblemImpl; +class ParameterBlock; +class ResidualBlock; +} // namespace internal + +// A ResidualBlockId is an opaque handle clients can use to remove residual +// blocks from a Problem after adding them. +using ResidualBlockId = internal::ResidualBlock*; + +// A class to represent non-linear least squares problems. Such +// problems have a cost function that is a sum of error terms (known +// as "residuals"), where each residual is a function of some subset +// of the parameters. The cost function takes the form +// +// N 1 +// SUM --- loss( || r_i1, r_i2,..., r_ik ||^2 ), +// i=1 2 +// +// where +// +// r_ij is residual number i, component j; the residual is a function of some +// subset of the parameters x1...xk. 
For example, in a structure from +// motion problem a residual might be the difference between a measured +// point in an image and the reprojected position for the matching +// camera, point pair. The residual would have two components, error in x +// and error in y. +// +// loss(y) is the loss function; for example, squared error or Huber L1 +// loss. If loss(y) = y, then the cost function is non-robustified +// least squares. +// +// This class is specifically designed to address the important subset of +// "sparse" least squares problems, where each component of the residual depends +// only on a small number number of parameters, even though the total number of +// residuals and parameters may be very large. This property affords tremendous +// gains in scale, allowing efficient solving of large problems that are +// otherwise inaccessible. +// +// The canonical example of a sparse least squares problem is +// "structure-from-motion" (SFM), where the parameters are points and cameras, +// and residuals are reprojection errors. Typically a single residual will +// depend only on 9 parameters (3 for the point, 6 for the camera). +// +// To create a least squares problem, use the AddResidualBlock() and +// AddParameterBlock() methods, documented below. Here is an example least +// squares problem containing 3 parameter blocks of sizes 3, 4 and 5 +// respectively and two residual terms of size 2 and 6: +// +// double x1[] = { 1.0, 2.0, 3.0 }; +// double x2[] = { 1.0, 2.0, 3.0, 5.0 }; +// double x3[] = { 1.0, 2.0, 3.0, 6.0, 7.0 }; +// +// Problem problem; +// +// problem.AddResidualBlock(new MyUnaryCostFunction(...), nullptr, x1); +// problem.AddResidualBlock(new MyBinaryCostFunction(...), nullptr, x2, x3); +// +// Please see cost_function.h for details of the CostFunction object. +// +// NOTE: We are currently in the process of transitioning from +// LocalParameterization to Manifolds in the Ceres API. 
During this period, +// Problem will support using both Manifold and LocalParameterization objects +// interchangably. In particular, adding a LocalParameterization to a parameter +// block is the same as adding a Manifold to that parameter block. For methods +// in the API affected by this change, see their documentation below. +class CERES_EXPORT Problem { + public: + struct CERES_EXPORT Options { + // These flags control whether the Problem object owns the CostFunctions, + // LossFunctions, LocalParameterizations, and Manifolds passed into the + // Problem. + // + // If set to TAKE_OWNERSHIP, then the problem object will delete the + // corresponding object on destruction. The destructor is careful to delete + // the pointers only once, since sharing objects is allowed. + Ownership cost_function_ownership = TAKE_OWNERSHIP; + Ownership loss_function_ownership = TAKE_OWNERSHIP; + CERES_DEPRECATED_WITH_MSG( + "Local Parameterizations are deprecated. Use Manifold and " + "manifold_ownership instead.") + Ownership local_parameterization_ownership = TAKE_OWNERSHIP; + Ownership manifold_ownership = TAKE_OWNERSHIP; + + // If true, trades memory for faster RemoveResidualBlock() and + // RemoveParameterBlock() operations. + // + // By default, RemoveParameterBlock() and RemoveResidualBlock() take time + // proportional to the size of the entire problem. If you only ever remove + // parameters or residuals from the problem occasionally, this might be + // acceptable. However, if you have memory to spare, enable this option to + // make RemoveParameterBlock() take time proportional to the number of + // residual blocks that depend on it, and RemoveResidualBlock() take (on + // average) constant time. + // + // The increase in memory usage is two-fold: an additional hash set per + // parameter block containing all the residuals that depend on the parameter + // block; and a hash set in the problem containing all residuals. 
+ bool enable_fast_removal = false; + + // By default, Ceres performs a variety of safety checks when constructing + // the problem. There is a small but measurable performance penalty to these + // checks, typically around 5% of construction time. If you are sure your + // problem construction is correct, and 5% of the problem construction time + // is truly an overhead you want to avoid, then you can set + // disable_all_safety_checks to true. + // + // WARNING: Do not set this to true, unless you are absolutely sure of what + // you are doing. + bool disable_all_safety_checks = false; + + // A Ceres global context to use for solving this problem. This may help to + // reduce computation time as Ceres can reuse expensive objects to create. + // The context object can be nullptr, in which case Ceres may create one. + // + // Ceres does NOT take ownership of the pointer. + Context* context = nullptr; + + // Using this callback interface, Ceres can notify you when it is about to + // evaluate the residuals or jacobians. With the callback, you can share + // computation between residual blocks by doing the shared computation in + // EvaluationCallback::PrepareForEvaluation() before Ceres calls + // CostFunction::Evaluate(). It also enables caching results between a pure + // residual evaluation and a residual & jacobian evaluation. + // + // Problem DOES NOT take ownership of the callback. + // + // NOTE: Evaluation callbacks are incompatible with inner iterations. So + // calling Solve with Solver::Options::use_inner_iterations = true on a + // Problem with a non-null evaluation callback is an error. + EvaluationCallback* evaluation_callback = nullptr; + }; + + // The default constructor is equivalent to the invocation + // Problem(Problem::Options()). 
+ Problem(); + explicit Problem(const Options& options); + Problem(Problem&&); + Problem& operator=(Problem&&); + + Problem(const Problem&) = delete; + Problem& operator=(const Problem&) = delete; + + ~Problem(); + + // Add a residual block to the overall cost function. The cost function + // carries with its information about the sizes of the parameter blocks it + // expects. The function checks that these match the sizes of the parameter + // blocks listed in parameter_blocks. The program aborts if a mismatch is + // detected. loss_function can be nullptr, in which case the cost of the term + // is just the squared norm of the residuals. + // + // The user has the option of explicitly adding the parameter blocks using + // AddParameterBlock. This causes additional correctness checking; however, + // AddResidualBlock implicitly adds the parameter blocks if they are not + // present, so calling AddParameterBlock explicitly is not required. + // + // The Problem object by default takes ownership of the cost_function and + // loss_function pointers (See Problem::Options to override this behaviour). + // These objects remain live for the life of the Problem object. If the user + // wishes to keep control over the destruction of these objects, then they can + // do this by setting the corresponding enums in the Options struct. + // + // Note: Even though the Problem takes ownership of cost_function and + // loss_function, it does not preclude the user from re-using them in another + // residual block. The destructor takes care to call delete on each + // cost_function or loss_function pointer only once, regardless of how many + // residual blocks refer to them. 
+ // + // Example usage: + // + // double x1[] = {1.0, 2.0, 3.0}; + // double x2[] = {1.0, 2.0, 5.0, 6.0}; + // double x3[] = {3.0, 6.0, 2.0, 5.0, 1.0}; + // + // Problem problem; + // + // problem.AddResidualBlock(new MyUnaryCostFunction(...), nullptr, x1); + // problem.AddResidualBlock(new MyBinaryCostFunction(...), nullptr, x2, x1); + // + // Add a residual block by listing the parameter block pointers directly + // instead of wapping them in a container. + template + ResidualBlockId AddResidualBlock(CostFunction* cost_function, + LossFunction* loss_function, + double* x0, + Ts*... xs) { + const std::array parameter_blocks{{x0, xs...}}; + return AddResidualBlock(cost_function, + loss_function, + parameter_blocks.data(), + static_cast(parameter_blocks.size())); + } + + // Add a residual block by providing a vector of parameter blocks. + ResidualBlockId AddResidualBlock( + CostFunction* cost_function, + LossFunction* loss_function, + const std::vector& parameter_blocks); + + // Add a residual block by providing a pointer to the parameter block array + // and the number of parameter blocks. + ResidualBlockId AddResidualBlock(CostFunction* cost_function, + LossFunction* loss_function, + double* const* const parameter_blocks, + int num_parameter_blocks); + + // Add a parameter block with appropriate size to the problem. Repeated calls + // with the same arguments are ignored. Repeated calls with the same double + // pointer but a different size will result in a crash. + void AddParameterBlock(double* values, int size); + + // Add a parameter block with appropriate size and parameterization to the + // problem. It is okay for local_parameterization to be nullptr. + // + // Repeated calls with the same arguments are ignored. Repeated calls + // with the same double pointer but a different size results in a crash + // (unless Solver::Options::diable_all_safety_checks is set to true). 
+ // + // Repeated calls with the same double pointer and size but different + // LocalParameterization is equivalent to calling + // SetParameterization(local_parameterization), i.e., any previously + // associated LocalParameterization or Manifold object will be replaced with + // the local_parameterization. + // + // NOTE: + // ---- + // + // This method is deprecated and will be removed in the next public + // release of Ceres Solver. Please move to using the Manifold based version of + // AddParameterBlock. + // + // During the transition from LocalParameterization to Manifold, internally + // the LocalParameterization is treated as a Manifold by wrapping it using a + // ManifoldAdapter object. So HasManifold() will return true, GetManifold() + // will return the wrapped object and ParameterBlockTangentSize() will return + // the LocalSize of the LocalParameterization. + CERES_DEPRECATED_WITH_MSG( + "LocalParameterizations are deprecated. Use the version with Manifolds " + "instead.") + void AddParameterBlock(double* values, + int size, + LocalParameterization* local_parameterization); + + // Add a parameter block with appropriate size and Manifold to the + // problem. It is okay for manifold to be nullptr. + // + // Repeated calls with the same arguments are ignored. Repeated calls + // with the same double pointer but a different size results in a crash + // (unless Solver::Options::diable_all_safety_checks is set to true). + // + // Repeated calls with the same double pointer and size but different Manifold + // is equivalent to calling SetManifold(manifold), i.e., any previously + // associated LocalParameterization or Manifold object will be replaced with + // the manifold. + // + // Note: + // ---- + // + // During the transition from LocalParameterization to Manifold, calling + // AddParameterBlock with a Manifold when a LocalParameterization is already + // associated with the parameter block is okay. 
It is equivalent to calling + // SetManifold(manifold), i.e., any previously associated + // LocalParameterization or Manifold object will be replaced with the + // manifold. + void AddParameterBlock(double* values, int size, Manifold* manifold); + + // Remove a parameter block from the problem. The LocalParameterization or + // Manifold of the parameter block, if it exists, will persist until the + // deletion of the problem (similar to cost/loss functions in residual block + // removal). Any residual blocks that depend on the parameter are also + // removed, as described above in RemoveResidualBlock(). + // + // If Problem::Options::enable_fast_removal is true, then the removal is fast + // (almost constant time). Otherwise, removing a parameter block will incur a + // scan of the entire Problem object. + // + // WARNING: Removing a residual or parameter block will destroy the implicit + // ordering, rendering the jacobian or residuals returned from the solver + // uninterpretable. If you depend on the evaluated jacobian, do not use + // remove! This may change in a future release. + void RemoveParameterBlock(const double* values); + + // Remove a residual block from the problem. Any parameters that the residual + // block depends on are not removed. The cost and loss functions for the + // residual block will not get deleted immediately; won't happen until the + // problem itself is deleted. + // + // WARNING: Removing a residual or parameter block will destroy the implicit + // ordering, rendering the jacobian or residuals returned from the solver + // uninterpretable. If you depend on the evaluated jacobian, do not use + // remove! This may change in a future release. + void RemoveResidualBlock(ResidualBlockId residual_block); + + // Hold the indicated parameter block constant during optimization. + void SetParameterBlockConstant(const double* values); + + // Allow the indicated parameter block to vary during optimization. 
+ void SetParameterBlockVariable(double* values); + + // Returns true if a parameter block is set constant, and false otherwise. A + // parameter block may be set constant in two ways: either by calling + // SetParameterBlockConstant or by associating a LocalParameterization or + // Manifold with a zero dimensional tangent space with it. + bool IsParameterBlockConstant(const double* values) const; + + // Set the LocalParameterization for the parameter block. Calling + // SetParameterization with nullptr will clear any previously set + // LocalParameterization or Manifold for the parameter block. + // + // Repeated calls will cause any previously associated LocalParameterization + // or Manifold object to be replaced with the local_parameterization. + // + // The local_parameterization is owned by the Problem by default (See + // Problem::Options to override this behaviour). + // + // It is acceptable to set the same LocalParameterization for multiple + // parameter blocks; the destructor is careful to delete + // LocalParamaterizations only once. + // + // NOTE: + // ---- + // + // This method is deprecated and will be removed in the next public + // release of Ceres Solver. Please move to using the SetManifold instead. + // + // During the transition from LocalParameterization to Manifold, internally + // the LocalParameterization is treated as a Manifold by wrapping it using a + // ManifoldAdapter object. So HasManifold() will return true, GetManifold() + // will return the wrapped object and ParameterBlockTangentSize will return + // the same value of ParameterBlockLocalSize. + CERES_DEPRECATED_WITH_MSG( + "LocalParameterizations are deprecated. Use SetManifold instead.") + void SetParameterization(double* values, + LocalParameterization* local_parameterization); + + // Get the LocalParameterization object associated with this parameter block. + // If there is no LocalParameterization associated then nullptr is returned. 
+ // + // NOTE: This method is deprecated and will be removed in the next public + // release of Ceres Solver. Use GetManifold instead. + // + // Note also that if a LocalParameterization is associated with a parameter + // block, HasManifold will return true and GetManifold will return the + // LocalParameterization wrapped in a ManifoldAdapter. + // + // The converse is NOT true, i.e., if a Manifold is associated with a + // parameter block, HasParameterization will return false and + // GetParameterization will return a nullptr. + CERES_DEPRECATED_WITH_MSG( + "LocalParameterizations are deprecated. Use GetManifold " + "instead.") + const LocalParameterization* GetParameterization(const double* values) const; + + // Returns true if a LocalParameterization is associated with this parameter + // block, false otherwise. + // + // NOTE: This method is deprecated and will be removed in the next public + // release of Ceres Solver. Use HasManifold instead. + // + // Note also that if a Manifold is associated with the parameter block, this + // method will return false. + CERES_DEPRECATED_WITH_MSG( + "LocalParameterizations are deprecated. Use HasManifold instead.") + bool HasParameterization(const double* values) const; + + // Set the Manifold for the parameter block. Calling SetManifold with nullptr + // will clear any previously set LocalParameterization or Manifold for the + // parameter block. + // + // Repeated calls will result in any previously associated + // LocalParameterization or Manifold object to be replaced with the manifold. + // + // The manifold is owned by the Problem by default (See Problem::Options to + // override this behaviour). + // + // It is acceptable to set the same Manifold for multiple parameter blocks. + void SetManifold(double* values, Manifold* manifold); + + // Get the Manifold object associated with this parameter block. + // + // If there is no Manifold Or LocalParameterization object associated then + // nullptr is returned. 
+ // + // NOTE: During the transition from LocalParameterization to Manifold, + // internally the LocalParameterization is treated as a Manifold by wrapping + // it using a ManifoldAdapter object. So calling GetManifold on a parameter + // block with a LocalParameterization associated with it will return the + // LocalParameterization wrapped in a ManifoldAdapter + const Manifold* GetManifold(const double* values) const; + + // Returns true if a Manifold or a LocalParameterization is associated with + // this parameter block, false otherwise. + bool HasManifold(const double* values) const; + + // Set the lower/upper bound for the parameter at position "index". + void SetParameterLowerBound(double* values, int index, double lower_bound); + void SetParameterUpperBound(double* values, int index, double upper_bound); + + // Get the lower/upper bound for the parameter at position "index". If the + // parameter is not bounded by the user, then its lower bound is + // -std::numeric_limits::max() and upper bound is + // std::numeric_limits::max(). + double GetParameterLowerBound(const double* values, int index) const; + double GetParameterUpperBound(const double* values, int index) const; + + // Number of parameter blocks in the problem. Always equals + // parameter_blocks().size() and parameter_block_sizes().size(). + int NumParameterBlocks() const; + + // The size of the parameter vector obtained by summing over the sizes of all + // the parameter blocks. + int NumParameters() const; + + // Number of residual blocks in the problem. Always equals + // residual_blocks().size(). + int NumResidualBlocks() const; + + // The size of the residual vector obtained by summing over the sizes of all + // of the residual blocks. + int NumResiduals() const; + + // The size of the parameter block. + int ParameterBlockSize(const double* values) const; + + // The dimension of the tangent space of the LocalParameterization or Manifold + // for the parameter block. 
If there is no LocalParameterization or Manifold + // associated with this parameter block, then ParameterBlockLocalSize = + // ParameterBlockSize. + CERES_DEPRECATED_WITH_MSG( + "LocalParameterizations are deprecated. Use ParameterBlockTangentSize " + "instead.") + int ParameterBlockLocalSize(const double* values) const; + + // The dimenion of the tangent space of the LocalParameterization or Manifold + // for the parameter block. If there is no LocalParameterization or Manifold + // associated with this parameter block, then ParameterBlockTangentSize = + // ParameterBlockSize. + int ParameterBlockTangentSize(const double* values) const; + + // Is the given parameter block present in this problem or not? + bool HasParameterBlock(const double* values) const; + + // Fills the passed parameter_blocks vector with pointers to the parameter + // blocks currently in the problem. After this call, parameter_block.size() == + // NumParameterBlocks. + void GetParameterBlocks(std::vector* parameter_blocks) const; + + // Fills the passed residual_blocks vector with pointers to the residual + // blocks currently in the problem. After this call, residual_blocks.size() == + // NumResidualBlocks. + void GetResidualBlocks(std::vector* residual_blocks) const; + + // Get all the parameter blocks that depend on the given residual block. + void GetParameterBlocksForResidualBlock( + const ResidualBlockId residual_block, + std::vector* parameter_blocks) const; + + // Get the CostFunction for the given residual block. + const CostFunction* GetCostFunctionForResidualBlock( + const ResidualBlockId residual_block) const; + + // Get the LossFunction for the given residual block. Returns nullptr + // if no loss function is associated with this residual block. + const LossFunction* GetLossFunctionForResidualBlock( + const ResidualBlockId residual_block) const; + + // Get all the residual blocks that depend on the given parameter block. 
+ // + // If Problem::Options::enable_fast_removal is true, then getting the residual + // blocks is fast and depends only on the number of residual + // blocks. Otherwise, getting the residual blocks for a parameter block will + // incur a scan of the entire Problem object. + void GetResidualBlocksForParameterBlock( + const double* values, + std::vector* residual_blocks) const; + + // Options struct to control Problem::Evaluate. + struct EvaluateOptions { + // The set of parameter blocks for which evaluation should be + // performed. This vector determines the order that parameter blocks occur + // in the gradient vector and in the columns of the jacobian matrix. If + // parameter_blocks is empty, then it is assumed to be equal to vector + // containing ALL the parameter blocks. Generally speaking the parameter + // blocks will occur in the order in which they were added to the + // problem. But, this may change if the user removes any parameter blocks + // from the problem. + // + // NOTE: This vector should contain the same pointers as the ones used to + // add parameter blocks to the Problem. These parameter block should NOT + // point to new memory locations. Bad things will happen otherwise. + std::vector parameter_blocks; + + // The set of residual blocks to evaluate. This vector determines the order + // in which the residuals occur, and how the rows of the jacobian are + // ordered. If residual_blocks is empty, then it is assumed to be equal to + // the vector containing ALL the residual blocks. Generally speaking the + // residual blocks will occur in the order in which they were added to the + // problem. But, this may change if the user removes any residual blocks + // from the problem. + std::vector residual_blocks; + + // Even though the residual blocks in the problem may contain loss + // functions, setting apply_loss_function to false will turn off the + // application of the loss function to the output of the cost function. 
This + // is of use for example if the user wishes to analyse the solution quality + // by studying the distribution of residuals before and after the solve. + bool apply_loss_function = true; + + int num_threads = 1; + }; + + // Evaluate Problem. Any of the output pointers can be nullptr. Which residual + // blocks and parameter blocks are used is controlled by the EvaluateOptions + // struct above. + // + // Note 1: The evaluation will use the values stored in the memory locations + // pointed to by the parameter block pointers used at the time of the + // construction of the problem. i.e., + // + // Problem problem; + // double x = 1; + // problem.AddResidualBlock(new MyCostFunction, nullptr, &x); + // + // double cost = 0.0; + // problem.Evaluate(Problem::EvaluateOptions(), &cost, + // nullptr, nullptr, nullptr); + // + // The cost is evaluated at x = 1. If you wish to evaluate the problem at x = + // 2, then + // + // x = 2; + // problem.Evaluate(Problem::EvaluateOptions(), &cost, + // nullptr, nullptr, nullptr); + // + // is the way to do so. + // + // Note 2: If no LocalParameterizations or Manifolds are used, then the size + // of the gradient vector (and the number of columns in the jacobian) is the + // sum of the sizes of all the parameter blocks. If a parameter block has a + // LocalParameterization or Manifold, then it contributes "TangentSize" + // entries to the gradient vector (and the number of columns in the jacobian). + // + // Note 3: This function cannot be called while the problem is being solved, + // for example it cannot be called from an IterationCallback at the end of an + // iteration during a solve. + // + // Note 4: If an EvaluationCallback is associated with the problem, then its + // PrepareForEvaluation method will be called every time this method is called + // with new_point = true. 
+ bool Evaluate(const EvaluateOptions& options, + double* cost, + std::vector* residuals, + std::vector* gradient, + CRSMatrix* jacobian); + + // Evaluates the residual block, storing the scalar cost in *cost, the + // residual components in *residuals, and the jacobians between the parameters + // and residuals in jacobians[i], in row-major order. + // + // If residuals is nullptr, the residuals are not computed. + // + // If jacobians is nullptr, no Jacobians are computed. If jacobians[i] is + // nullptr, then the Jacobian for that parameter block is not computed. + // + // It is not okay to request the Jacobian w.r.t a parameter block that is + // constant. + // + // The return value indicates the success or failure. Even if the function + // returns false, the caller should expect the output memory locations to have + // been modified. + // + // The returned cost and jacobians have had robustification and + // LocalParameterization/Manifold applied already; for example, the jacobian + // for a 4-dimensional quaternion parameter using the + // "QuaternionParameterization" is num_residuals by 3 instead of num_residuals + // by 4. + // + // apply_loss_function as the name implies allows the user to switch the + // application of the loss function on and off. + // + // If an EvaluationCallback is associated with the problem, then its + // PrepareForEvaluation method will be called every time this method is called + // with new_point = true. This conservatively assumes that the user may have + // changed the parameter values since the previous call to evaluate / solve. + // For improved efficiency, and only if you know that the parameter values + // have not changed between calls, see + // EvaluateResidualBlockAssumingParametersUnchanged(). 
+ bool EvaluateResidualBlock(ResidualBlockId residual_block_id, + bool apply_loss_function, + double* cost, + double* residuals, + double** jacobians) const; + + // Same as EvaluateResidualBlock except that if an EvaluationCallback is + // associated with the problem, then its PrepareForEvaluation method will be + // called every time this method is called with new_point = false. + // + // This means, if an EvaluationCallback is associated with the problem then it + // is the user's responsibility to call PrepareForEvaluation before calling + // this method if necessary, i.e. iff the parameter values have been changed + // since the last call to evaluate / solve.' + // + // This is because, as the name implies, we assume that the parameter blocks + // did not change since the last time PrepareForEvaluation was called (via + // Solve, Evaluate or EvaluateResidualBlock). + bool EvaluateResidualBlockAssumingParametersUnchanged( + ResidualBlockId residual_block_id, + bool apply_loss_function, + double* cost, + double* residuals, + double** jacobians) const; + + private: + friend class Solver; + friend class Covariance; + std::unique_ptr impl_; +}; + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_PROBLEM_H_ diff --git a/ceres-v2/include/product_manifold.h b/ceres-v2/include/product_manifold.h new file mode 100644 index 0000000000000000000000000000000000000000..33f046da24e685e3cf50233250469a0cd141a388 --- /dev/null +++ b/ceres-v2/include/product_manifold.h @@ -0,0 +1,328 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//         sergiu.deitsch@gmail.com (Sergiu Deitsch)
//

#ifndef CERES_PUBLIC_PRODUCT_MANIFOLD_H_
#define CERES_PUBLIC_PRODUCT_MANIFOLD_H_

// NOTE(review): the targets of the standard-library includes below were lost
// during extraction (bare "#include" lines). Restored to cover every std name
// used in this header: std::max / std::array / assert / std::size_t /
// std::unique_ptr (doc example) / std::accumulate / std::tuple /
// enable_if_t, is_constructible, decay_t / index_sequence, forward, declval.
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <memory>
#include <numeric>
#include <tuple>
#include <type_traits>
#include <utility>

#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/port.h"
#include "ceres/manifold.h"

namespace ceres {

// Construct a manifold by taking the Cartesian product of a number of other
// manifolds. This is useful, when a parameter block is the Cartesian product
// of two or more manifolds. For example the parameters of a camera consist of
// a rotation and a translation, i.e., SO(3) x R^3.
+// +// Example usage: +// +// ProductManifold> se3; +// +// is the manifold for a rigid transformation, where the rotation is +// represented using a quaternion. +// +// Manifolds can be copied and moved to ProductManifold: +// +// SubsetManifold manifold1(5, {2}); +// SubsetManifold manifold2(3, {0, 1}); +// ProductManifold manifold(manifold1, +// manifold2); +// +// In advanced use cases, manifolds can be dynamically allocated and passed as +// (smart) pointers: +// +// ProductManifold, EuclideanManifold<3>> +// se3{std::make_unique(), EuclideanManifold<3>{}}; +// +// In C++17, the template parameters can be left out as they are automatically +// deduced making the initialization much simpler: +// +// ProductManifold se3{QuaternionManifold{}, EuclideanManifold<3>{}}; +// +// The manifold implementations must be either default constructible, copyable +// or moveable to be usable in a ProductManifold. +template +class ProductManifold final : public Manifold { + public: + // ProductManifold constructor perfect forwards arguments to store manifolds. + // + // Either use default construction or if you need to copy or move-construct a + // manifold instance, you need to pass an instance as an argument for all + // types given as class template parameters. + template , + Args...>::value>* = nullptr> + explicit ProductManifold(Args&&... 
manifolds) + : ProductManifold{std::make_index_sequence{}, + std::forward(manifolds)...} {} + + int AmbientSize() const override { return ambient_size_; } + int TangentSize() const override { return tangent_size_; } + + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override { + return PlusImpl( + x, delta, x_plus_delta, std::make_index_sequence{}); + } + + bool Minus(const double* y, + const double* x, + double* y_minus_x) const override { + return MinusImpl( + y, x, y_minus_x, std::make_index_sequence{}); + } + + bool PlusJacobian(const double* x, double* jacobian_ptr) const override { + MatrixRef jacobian(jacobian_ptr, AmbientSize(), TangentSize()); + jacobian.setZero(); + internal::FixedArray buffer(buffer_size_); + + return PlusJacobianImpl( + x, jacobian, buffer, std::make_index_sequence{}); + } + + bool MinusJacobian(const double* x, double* jacobian_ptr) const override { + MatrixRef jacobian(jacobian_ptr, TangentSize(), AmbientSize()); + jacobian.setZero(); + internal::FixedArray buffer(buffer_size_); + + return MinusJacobianImpl( + x, jacobian, buffer, std::make_index_sequence{}); + } + + private: + static constexpr std::size_t kNumManifolds = 2 + sizeof...(ManifoldN); + + template + explicit ProductManifold(std::index_sequence, Args&&... 
manifolds) + : manifolds_{std::forward(manifolds)...}, + buffer_size_{(std::max)( + {(Dereference(std::get(manifolds_)).TangentSize() * + Dereference(std::get(manifolds_)).AmbientSize())...})}, + ambient_sizes_{ + Dereference(std::get(manifolds_)).AmbientSize()...}, + tangent_sizes_{ + Dereference(std::get(manifolds_)).TangentSize()...}, + ambient_offsets_{ExclusiveScan(ambient_sizes_)}, + tangent_offsets_{ExclusiveScan(tangent_sizes_)}, + ambient_size_{ + std::accumulate(ambient_sizes_.begin(), ambient_sizes_.end(), 0)}, + tangent_size_{ + std::accumulate(tangent_sizes_.begin(), tangent_sizes_.end(), 0)} {} + + template + bool PlusImpl(const double* x, + const double* delta, + double* x_plus_delta, + std::index_sequence) const { + if (!Dereference(std::get(manifolds_)) + .Plus(x + ambient_offsets_[Index0], + delta + tangent_offsets_[Index0], + x_plus_delta + ambient_offsets_[Index0])) { + return false; + } + + return PlusImpl(x, delta, x_plus_delta, std::index_sequence{}); + } + + static constexpr bool PlusImpl(const double* /*x*/, + const double* /*delta*/, + double* /*x_plus_delta*/, + std::index_sequence<>) noexcept { + return true; + } + + template + bool MinusImpl(const double* y, + const double* x, + double* y_minus_x, + std::index_sequence) const { + if (!Dereference(std::get(manifolds_)) + .Minus(y + ambient_offsets_[Index0], + x + ambient_offsets_[Index0], + y_minus_x + tangent_offsets_[Index0])) { + return false; + } + + return MinusImpl(y, x, y_minus_x, std::index_sequence{}); + } + + static constexpr bool MinusImpl(const double* /*y*/, + const double* /*x*/, + double* /*y_minus_x*/, + std::index_sequence<>) noexcept { + return true; + } + + template + bool PlusJacobianImpl(const double* x, + MatrixRef& jacobian, + internal::FixedArray& buffer, + std::index_sequence) const { + if (!Dereference(std::get(manifolds_)) + .PlusJacobian(x + ambient_offsets_[Index0], buffer.data())) { + return false; + } + + jacobian.block(ambient_offsets_[Index0], + 
tangent_offsets_[Index0], + ambient_sizes_[Index0], + tangent_sizes_[Index0]) = + MatrixRef( + buffer.data(), ambient_sizes_[Index0], tangent_sizes_[Index0]); + + return PlusJacobianImpl( + x, jacobian, buffer, std::index_sequence{}); + } + + static constexpr bool PlusJacobianImpl( + const double* /*x*/, + MatrixRef& /*jacobian*/, + internal::FixedArray& /*buffer*/, + std::index_sequence<>) noexcept { + return true; + } + + template + bool MinusJacobianImpl(const double* x, + MatrixRef& jacobian, + internal::FixedArray& buffer, + std::index_sequence) const { + if (!Dereference(std::get(manifolds_)) + .MinusJacobian(x + ambient_offsets_[Index0], buffer.data())) { + return false; + } + + jacobian.block(tangent_offsets_[Index0], + ambient_offsets_[Index0], + tangent_sizes_[Index0], + ambient_sizes_[Index0]) = + MatrixRef( + buffer.data(), tangent_sizes_[Index0], ambient_sizes_[Index0]); + + return MinusJacobianImpl( + x, jacobian, buffer, std::index_sequence{}); + } + + static constexpr bool MinusJacobianImpl( + const double* /*x*/, + MatrixRef& /*jacobian*/, + internal::FixedArray& /*buffer*/, + std::index_sequence<>) noexcept { + return true; + } + + template + static std::array ExclusiveScan(const std::array& values) { + std::array result; + T init = 0; + + // TODO Replace by std::exclusive_scan once C++17 is available + for (std::size_t i = 0; i != N; ++i) { + result[i] = init; + init += values[i]; + } + + return result; + } + + // TODO Replace by std::void_t once C++17 is available + template + struct Void { + using type = void; + }; + + template + struct IsDereferenceable : std::false_type {}; + + template + struct IsDereferenceable())>::type> + : std::true_type {}; + + template ::value>* = nullptr> + static constexpr decltype(auto) Dereference(T& value) { + return value; + } + + // Support dereferenceable types such as std::unique_ptr, std::shared_ptr, raw + // pointers etc. 
+ template ::value>* = nullptr> + static constexpr decltype(auto) Dereference(T& value) { + return *value; + } + + template + static constexpr decltype(auto) Dereference(T* p) { + assert(p != nullptr); + return *p; + } + + std::tuple manifolds_; + int buffer_size_; + std::array ambient_sizes_; + std::array tangent_sizes_; + std::array ambient_offsets_; + std::array tangent_offsets_; + int ambient_size_; + int tangent_size_; +}; + +#ifdef CERES_HAS_CPP17 +// C++17 deduction guide that allows the user to avoid explicitly specifying +// the template parameters of ProductManifold. The class can instead be +// instantiated as follows: +// +// ProductManifold manifold{QuaternionManifold{}, EuclideanManifold<3>{}}; +// +template +ProductManifold(Manifold0&&, Manifold1&&, Manifolds&&...) + -> ProductManifold; +#endif + +} // namespace ceres + +#endif // CERES_PUBLIC_PRODUCT_MANIFOLD_H_ diff --git a/ceres-v2/include/rotation.h b/ceres-v2/include/rotation.h new file mode 100644 index 0000000000000000000000000000000000000000..51079901aaf9774eafd4ea7dd404a5aef37e4c7f --- /dev/null +++ b/ceres-v2/include/rotation.h @@ -0,0 +1,655 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: keir@google.com (Keir Mierle) +// sameeragarwal@google.com (Sameer Agarwal) +// +// Templated functions for manipulating rotations. The templated +// functions are useful when implementing functors for automatic +// differentiation. +// +// In the following, the Quaternions are laid out as 4-vectors, thus: +// +// q[0] scalar part. +// q[1] coefficient of i. +// q[2] coefficient of j. +// q[3] coefficient of k. +// +// where: i*i = j*j = k*k = -1 and i*j = k, j*k = i, k*i = j. + +#ifndef CERES_PUBLIC_ROTATION_H_ +#define CERES_PUBLIC_ROTATION_H_ + +#include +#include +#include + +#include "glog/logging.h" + +namespace ceres { + +// Trivial wrapper to index linear arrays as matrices, given a fixed +// column and row stride. When an array "T* array" is wrapped by a +// +// (const) MatrixAdapter M" +// +// the expression M(i, j) is equivalent to +// +// arrary[i * row_stride + j * col_stride] +// +// Conversion functions to and from rotation matrices accept +// MatrixAdapters to permit using row-major and column-major layouts, +// and rotation matrices embedded in larger matrices (such as a 3x4 +// projection matrix). 
+template +struct MatrixAdapter; + +// Convenience functions to create a MatrixAdapter that treats the +// array pointed to by "pointer" as a 3x3 (contiguous) column-major or +// row-major matrix. +template +MatrixAdapter ColumnMajorAdapter3x3(T* pointer); + +template +MatrixAdapter RowMajorAdapter3x3(T* pointer); + +// Convert a value in combined axis-angle representation to a quaternion. +// The value angle_axis is a triple whose norm is an angle in radians, +// and whose direction is aligned with the axis of rotation, +// and quaternion is a 4-tuple that will contain the resulting quaternion. +// The implementation may be used with auto-differentiation up to the first +// derivative, higher derivatives may have unexpected results near the origin. +template +void AngleAxisToQuaternion(const T* angle_axis, T* quaternion); + +// Convert a quaternion to the equivalent combined axis-angle representation. +// The value quaternion must be a unit quaternion - it is not normalized first, +// and angle_axis will be filled with a value whose norm is the angle of +// rotation in radians, and whose direction is the axis of rotation. +// The implementation may be used with auto-differentiation up to the first +// derivative, higher derivatives may have unexpected results near the origin. +template +void QuaternionToAngleAxis(const T* quaternion, T* angle_axis); + +// Conversions between 3x3 rotation matrix (in column major order) and +// quaternion rotation representations. Templated for use with +// autodifferentiation. +template +void RotationMatrixToQuaternion(const T* R, T* quaternion); + +template +void RotationMatrixToQuaternion( + const MatrixAdapter& R, T* quaternion); + +// Conversions between 3x3 rotation matrix (in column major order) and +// axis-angle rotation representations. Templated for use with +// autodifferentiation. 
+template +void RotationMatrixToAngleAxis(const T* R, T* angle_axis); + +template +void RotationMatrixToAngleAxis( + const MatrixAdapter& R, T* angle_axis); + +template +void AngleAxisToRotationMatrix(const T* angle_axis, T* R); + +template +void AngleAxisToRotationMatrix( + const T* angle_axis, const MatrixAdapter& R); + +// Conversions between 3x3 rotation matrix (in row major order) and +// Euler angle (in degrees) rotation representations. +// +// The {pitch,roll,yaw} Euler angles are rotations around the {x,y,z} +// axes, respectively. They are applied in that same order, so the +// total rotation R is Rz * Ry * Rx. +template +void EulerAnglesToRotationMatrix(const T* euler, int row_stride, T* R); + +template +void EulerAnglesToRotationMatrix( + const T* euler, const MatrixAdapter& R); + +// Convert a 4-vector to a 3x3 scaled rotation matrix. +// +// The choice of rotation is such that the quaternion [1 0 0 0] goes to an +// identity matrix and for small a, b, c the quaternion [1 a b c] goes to +// the matrix +// +// [ 0 -c b ] +// I + 2 [ c 0 -a ] + higher order terms +// [ -b a 0 ] +// +// which corresponds to a Rodrigues approximation, the last matrix being +// the cross-product matrix of [a b c]. Together with the property that +// R(q1 * q2) = R(q1) * R(q2) this uniquely defines the mapping from q to R. +// +// No normalization of the quaternion is performed, i.e. +// R = ||q||^2 * Q, where Q is an orthonormal matrix +// such that det(Q) = 1 and Q*Q' = I +// +// WARNING: The rotation matrix is ROW MAJOR +template +inline void QuaternionToScaledRotation(const T q[4], T R[3 * 3]); + +template +inline void QuaternionToScaledRotation( + const T q[4], const MatrixAdapter& R); + +// Same as above except that the rotation matrix is normalized by the +// Frobenius norm, so that R * R' = I (and det(R) = 1). 
+// +// WARNING: The rotation matrix is ROW MAJOR +template +inline void QuaternionToRotation(const T q[4], T R[3 * 3]); + +template +inline void QuaternionToRotation( + const T q[4], const MatrixAdapter& R); + +// Rotates a point pt by a quaternion q: +// +// result = R(q) * pt +// +// Assumes the quaternion is unit norm. This assumption allows us to +// write the transform as (something)*pt + pt, as is clear from the +// formula below. If you pass in a quaternion with |q|^2 = 2 then you +// WILL NOT get back 2 times the result you get for a unit quaternion. +// +// Inplace rotation is not supported. pt and result must point to different +// memory locations, otherwise the result will be undefined. +template +inline void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]); + +// With this function you do not need to assume that q has unit norm. +// It does assume that the norm is non-zero. +// +// Inplace rotation is not supported. pt and result must point to different +// memory locations, otherwise the result will be undefined. +template +inline void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]); + +// zw = z * w, where * is the Quaternion product between 4 vectors. +// +// Inplace quaternion product is not supported. The resulting quaternion zw must +// not share the memory with the input quaternion z and w, otherwise the result +// will be undefined. +template +inline void QuaternionProduct(const T z[4], const T w[4], T zw[4]); + +// xy = x cross y; +// +// Inplace cross product is not supported. The resulting vector x_cross_y must +// not share the memory with the input vectors x and y, otherwise the result +// will be undefined. +template +inline void CrossProduct(const T x[3], const T y[3], T x_cross_y[3]); + +template +inline T DotProduct(const T x[3], const T y[3]); + +// y = R(angle_axis) * x; +// +// Inplace rotation is not supported. 
// pt and result must point to different
// memory locations, otherwise the result will be undefined.
template <typename T>
inline void AngleAxisRotatePoint(const T angle_axis[3],
                                 const T pt[3],
                                 T result[3]);

// --- IMPLEMENTATION

// NOTE(review): the template parameter lists below were stripped to bare
// "template" by extraction; restored to match the declarations above.
template <typename T, int row_stride, int col_stride>
struct MatrixAdapter {
  T* pointer_;
  explicit MatrixAdapter(T* pointer) : pointer_(pointer) {}

  // M(r, c) == pointer_[r * row_stride + c * col_stride].
  T& operator()(int r, int c) const {
    return pointer_[r * row_stride + c * col_stride];
  }
};

template <typename T>
MatrixAdapter<T, 1, 3> ColumnMajorAdapter3x3(T* pointer) {
  return MatrixAdapter<T, 1, 3>(pointer);
}

template <typename T>
MatrixAdapter<T, 3, 1> RowMajorAdapter3x3(T* pointer) {
  return MatrixAdapter<T, 3, 1>(pointer);
}

// q = [cos(theta/2), sin(theta/2) * axis] where theta = |angle_axis| and
// axis = angle_axis / theta. Calls to sqrt/sin/cos are unqualified on purpose
// so that argument-dependent lookup finds the Jet overloads under autodiff.
template <typename T>
inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion) {
  const T& a0 = angle_axis[0];
  const T& a1 = angle_axis[1];
  const T& a2 = angle_axis[2];
  const T theta_squared = a0 * a0 + a1 * a1 + a2 * a2;

  // For points not at the origin, the full conversion is numerically stable.
  if (theta_squared > T(0.0)) {
    const T theta = sqrt(theta_squared);
    const T half_theta = theta * T(0.5);
    const T k = sin(half_theta) / theta;
    quaternion[0] = cos(half_theta);
    quaternion[1] = a0 * k;
    quaternion[2] = a1 * k;
    quaternion[3] = a2 * k;
  } else {
    // At the origin, sqrt() will produce NaN in the derivative since
    // the argument is zero. By approximating with a Taylor series,
    // and truncating at one term, the value and first derivatives will be
    // computed correctly when Jets are used.
    const T k(0.5);
    quaternion[0] = T(1.0);
    quaternion[1] = a0 * k;
    quaternion[2] = a1 * k;
    quaternion[3] = a2 * k;
  }
}

template <typename T>
inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
  const T& q1 = quaternion[1];
  const T& q2 = quaternion[2];
  const T& q3 = quaternion[3];
  const T sin_squared_theta = q1 * q1 + q2 * q2 + q3 * q3;

  // For quaternions representing non-zero rotation, the conversion
  // is numerically stable.
  if (sin_squared_theta > T(0.0)) {
    const T sin_theta = sqrt(sin_squared_theta);
    const T& cos_theta = quaternion[0];

    // If cos_theta is negative, theta is greater than pi/2, which
    // means that angle for the angle_axis vector which is 2 * theta
    // would be greater than pi.
    //
    // While this will result in the correct rotation, it does not
    // result in a normalized angle-axis vector.
    //
    // In that case we observe that 2 * theta ~ 2 * theta - 2 * pi,
    // which is equivalent saying
    //
    //   theta - pi = atan(sin(theta - pi), cos(theta - pi))
    //              = atan(-sin(theta), -cos(theta))
    //
    const T two_theta =
        T(2.0) * ((cos_theta < T(0.0)) ? atan2(-sin_theta, -cos_theta)
                                       : atan2(sin_theta, cos_theta));
    const T k = two_theta / sin_theta;
    angle_axis[0] = q1 * k;
    angle_axis[1] = q2 * k;
    angle_axis[2] = q3 * k;
  } else {
    // For zero rotation, sqrt() will produce NaN in the derivative since
    // the argument is zero. By approximating with a Taylor series,
    // and truncating at one term, the value and first derivatives will be
    // computed correctly when Jets are used.
    const T k(2.0);
    angle_axis[0] = q1 * k;
    angle_axis[1] = q2 * k;
    angle_axis[2] = q3 * k;
  }
}

template <typename T>
void RotationMatrixToQuaternion(const T* R, T* quaternion) {
  RotationMatrixToQuaternion(ColumnMajorAdapter3x3(R), quaternion);
}

// This algorithm comes from "Quaternion Calculus and Fast Animation",
// Ken Shoemake, 1987 SIGGRAPH course notes
template <typename T, int row_stride, int col_stride>
void RotationMatrixToQuaternion(
    const MatrixAdapter<const T, row_stride, col_stride>& R, T* quaternion) {
  const T trace = R(0, 0) + R(1, 1) + R(2, 2);
  if (trace >= 0.0) {
    T t = sqrt(trace + T(1.0));
    quaternion[0] = T(0.5) * t;
    t = T(0.5) / t;
    quaternion[1] = (R(2, 1) - R(1, 2)) * t;
    quaternion[2] = (R(0, 2) - R(2, 0)) * t;
    quaternion[3] = (R(1, 0) - R(0, 1)) * t;
  } else {
    // Pick i as the index of the largest diagonal entry to keep the
    // square-root argument well away from zero.
    int i = 0;
    if (R(1, 1) > R(0, 0)) {
      i = 1;
    }

    if (R(2, 2) > R(i, i)) {
      i = 2;
    }

    const int j = (i + 1) % 3;
    const int k = (j + 1) % 3;
    T t = sqrt(R(i, i) - R(j, j) - R(k, k) + T(1.0));
    quaternion[i + 1] = T(0.5) * t;
    t = T(0.5) / t;
    quaternion[0] = (R(k, j) - R(j, k)) * t;
    quaternion[j + 1] = (R(j, i) + R(i, j)) * t;
    quaternion[k + 1] = (R(k, i) + R(i, k)) * t;
  }
}

// The conversion of a rotation matrix to the angle-axis form is
// numerically problematic when the rotation angle is close to zero
// or to Pi. The following implementation detects when these two cases
// occur and deals with them by taking code paths that are guaranteed
// to not perform division by a small number.
+template +inline void RotationMatrixToAngleAxis(const T* R, T* angle_axis) { + RotationMatrixToAngleAxis(ColumnMajorAdapter3x3(R), angle_axis); +} + +template +void RotationMatrixToAngleAxis( + const MatrixAdapter& R, T* angle_axis) { + T quaternion[4]; + RotationMatrixToQuaternion(R, quaternion); + QuaternionToAngleAxis(quaternion, angle_axis); + return; +} + +template +inline void AngleAxisToRotationMatrix(const T* angle_axis, T* R) { + AngleAxisToRotationMatrix(angle_axis, ColumnMajorAdapter3x3(R)); +} + +template +void AngleAxisToRotationMatrix( + const T* angle_axis, const MatrixAdapter& R) { + static const T kOne = T(1.0); + const T theta2 = DotProduct(angle_axis, angle_axis); + if (theta2 > T(std::numeric_limits::epsilon())) { + // We want to be careful to only evaluate the square root if the + // norm of the angle_axis vector is greater than zero. Otherwise + // we get a division by zero. + const T theta = sqrt(theta2); + const T wx = angle_axis[0] / theta; + const T wy = angle_axis[1] / theta; + const T wz = angle_axis[2] / theta; + + const T costheta = cos(theta); + const T sintheta = sin(theta); + + // clang-format off + R(0, 0) = costheta + wx*wx*(kOne - costheta); + R(1, 0) = wz*sintheta + wx*wy*(kOne - costheta); + R(2, 0) = -wy*sintheta + wx*wz*(kOne - costheta); + R(0, 1) = wx*wy*(kOne - costheta) - wz*sintheta; + R(1, 1) = costheta + wy*wy*(kOne - costheta); + R(2, 1) = wx*sintheta + wy*wz*(kOne - costheta); + R(0, 2) = wy*sintheta + wx*wz*(kOne - costheta); + R(1, 2) = -wx*sintheta + wy*wz*(kOne - costheta); + R(2, 2) = costheta + wz*wz*(kOne - costheta); + // clang-format on + } else { + // Near zero, we switch to using the first order Taylor expansion. 
+ R(0, 0) = kOne; + R(1, 0) = angle_axis[2]; + R(2, 0) = -angle_axis[1]; + R(0, 1) = -angle_axis[2]; + R(1, 1) = kOne; + R(2, 1) = angle_axis[0]; + R(0, 2) = angle_axis[1]; + R(1, 2) = -angle_axis[0]; + R(2, 2) = kOne; + } +} + +template +inline void EulerAnglesToRotationMatrix(const T* euler, + const int row_stride_parameter, + T* R) { + EulerAnglesToRotationMatrix(euler, RowMajorAdapter3x3(R)); +} + +template +void EulerAnglesToRotationMatrix( + const T* euler, const MatrixAdapter& R) { + const double kPi = 3.14159265358979323846; + const T degrees_to_radians(kPi / 180.0); + + const T pitch(euler[0] * degrees_to_radians); + const T roll(euler[1] * degrees_to_radians); + const T yaw(euler[2] * degrees_to_radians); + + const T c1 = cos(yaw); + const T s1 = sin(yaw); + const T c2 = cos(roll); + const T s2 = sin(roll); + const T c3 = cos(pitch); + const T s3 = sin(pitch); + + R(0, 0) = c1 * c2; + R(0, 1) = -s1 * c3 + c1 * s2 * s3; + R(0, 2) = s1 * s3 + c1 * s2 * c3; + + R(1, 0) = s1 * c2; + R(1, 1) = c1 * c3 + s1 * s2 * s3; + R(1, 2) = -c1 * s3 + s1 * s2 * c3; + + R(2, 0) = -s2; + R(2, 1) = c2 * s3; + R(2, 2) = c2 * c3; +} + +template +inline void QuaternionToScaledRotation(const T q[4], T R[3 * 3]) { + QuaternionToScaledRotation(q, RowMajorAdapter3x3(R)); +} + +template +inline void QuaternionToScaledRotation( + const T q[4], const MatrixAdapter& R) { + // Make convenient names for elements of q. + T a = q[0]; + T b = q[1]; + T c = q[2]; + T d = q[3]; + // This is not to eliminate common sub-expression, but to + // make the lines shorter so that they fit in 80 columns! 
+ T aa = a * a; + T ab = a * b; + T ac = a * c; + T ad = a * d; + T bb = b * b; + T bc = b * c; + T bd = b * d; + T cc = c * c; + T cd = c * d; + T dd = d * d; + + // clang-format off + R(0, 0) = aa + bb - cc - dd; R(0, 1) = T(2) * (bc - ad); R(0, 2) = T(2) * (ac + bd); + R(1, 0) = T(2) * (ad + bc); R(1, 1) = aa - bb + cc - dd; R(1, 2) = T(2) * (cd - ab); + R(2, 0) = T(2) * (bd - ac); R(2, 1) = T(2) * (ab + cd); R(2, 2) = aa - bb - cc + dd; + // clang-format on +} + +template +inline void QuaternionToRotation(const T q[4], T R[3 * 3]) { + QuaternionToRotation(q, RowMajorAdapter3x3(R)); +} + +template +inline void QuaternionToRotation( + const T q[4], const MatrixAdapter& R) { + QuaternionToScaledRotation(q, R); + + T normalizer = q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]; + normalizer = T(1) / normalizer; + + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 3; ++j) { + R(i, j) *= normalizer; + } + } +} + +template +inline void UnitQuaternionRotatePoint(const T q[4], + const T pt[3], + T result[3]) { + DCHECK_NE(pt, result) << "Inplace rotation is not supported."; + + // clang-format off + T uv0 = q[2] * pt[2] - q[3] * pt[1]; + T uv1 = q[3] * pt[0] - q[1] * pt[2]; + T uv2 = q[1] * pt[1] - q[2] * pt[0]; + uv0 += uv0; + uv1 += uv1; + uv2 += uv2; + result[0] = pt[0] + q[0] * uv0; + result[1] = pt[1] + q[0] * uv1; + result[2] = pt[2] + q[0] * uv2; + result[0] += q[2] * uv2 - q[3] * uv1; + result[1] += q[3] * uv0 - q[1] * uv2; + result[2] += q[1] * uv1 - q[2] * uv0; + // clang-format on +} + +template +inline void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) { + DCHECK_NE(pt, result) << "Inplace rotation is not supported."; + + // 'scale' is 1 / norm(q). + const T scale = + T(1) / sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]); + + // Make unit-norm version of q. 
+ const T unit[4] = { + scale * q[0], + scale * q[1], + scale * q[2], + scale * q[3], + }; + + UnitQuaternionRotatePoint(unit, pt, result); +} + +template +inline void QuaternionProduct(const T z[4], const T w[4], T zw[4]) { + DCHECK_NE(z, zw) << "Inplace quaternion product is not supported."; + DCHECK_NE(w, zw) << "Inplace quaternion product is not supported."; + + // clang-format off + zw[0] = z[0] * w[0] - z[1] * w[1] - z[2] * w[2] - z[3] * w[3]; + zw[1] = z[0] * w[1] + z[1] * w[0] + z[2] * w[3] - z[3] * w[2]; + zw[2] = z[0] * w[2] - z[1] * w[3] + z[2] * w[0] + z[3] * w[1]; + zw[3] = z[0] * w[3] + z[1] * w[2] - z[2] * w[1] + z[3] * w[0]; + // clang-format on +} + +// xy = x cross y; +template +inline void CrossProduct(const T x[3], const T y[3], T x_cross_y[3]) { + DCHECK_NE(x, x_cross_y) << "Inplace cross product is not supported."; + DCHECK_NE(y, x_cross_y) << "Inplace cross product is not supported."; + + x_cross_y[0] = x[1] * y[2] - x[2] * y[1]; + x_cross_y[1] = x[2] * y[0] - x[0] * y[2]; + x_cross_y[2] = x[0] * y[1] - x[1] * y[0]; +} + +template +inline T DotProduct(const T x[3], const T y[3]) { + return (x[0] * y[0] + x[1] * y[1] + x[2] * y[2]); +} + +template +inline void AngleAxisRotatePoint(const T angle_axis[3], + const T pt[3], + T result[3]) { + DCHECK_NE(pt, result) << "Inplace rotation is not supported."; + + const T theta2 = DotProduct(angle_axis, angle_axis); + if (theta2 > T(std::numeric_limits::epsilon())) { + // Away from zero, use the rodriguez formula + // + // result = pt costheta + + // (w x pt) * sintheta + + // w (w . pt) (1 - costheta) + // + // We want to be careful to only evaluate the square root if the + // norm of the angle_axis vector is greater than zero. Otherwise + // we get a division by zero. 
+ // + const T theta = sqrt(theta2); + const T costheta = cos(theta); + const T sintheta = sin(theta); + const T theta_inverse = T(1.0) / theta; + + const T w[3] = {angle_axis[0] * theta_inverse, + angle_axis[1] * theta_inverse, + angle_axis[2] * theta_inverse}; + + // Explicitly inlined evaluation of the cross product for + // performance reasons. + const T w_cross_pt[3] = {w[1] * pt[2] - w[2] * pt[1], + w[2] * pt[0] - w[0] * pt[2], + w[0] * pt[1] - w[1] * pt[0]}; + const T tmp = + (w[0] * pt[0] + w[1] * pt[1] + w[2] * pt[2]) * (T(1.0) - costheta); + + result[0] = pt[0] * costheta + w_cross_pt[0] * sintheta + w[0] * tmp; + result[1] = pt[1] * costheta + w_cross_pt[1] * sintheta + w[1] * tmp; + result[2] = pt[2] * costheta + w_cross_pt[2] * sintheta + w[2] * tmp; + } else { + // Near zero, the first order Taylor approximation of the rotation + // matrix R corresponding to a vector w and angle theta is + // + // R = I + hat(w) * sin(theta) + // + // But sintheta ~ theta and theta * w = angle_axis, which gives us + // + // R = I + hat(angle_axis) + // + // and actually performing multiplication with the point pt, gives us + // R * pt = pt + angle_axis x pt. + // + // Switching to the Taylor expansion near zero provides meaningful + // derivatives when evaluated using Jets. + // + // Explicitly inlined evaluation of the cross product for + // performance reasons. 
+ const T w_cross_pt[3] = {angle_axis[1] * pt[2] - angle_axis[2] * pt[1], + angle_axis[2] * pt[0] - angle_axis[0] * pt[2], + angle_axis[0] * pt[1] - angle_axis[1] * pt[0]}; + + result[0] = pt[0] + w_cross_pt[0]; + result[1] = pt[1] + w_cross_pt[1]; + result[2] = pt[2] + w_cross_pt[2]; + } +} + +} // namespace ceres + +#endif // CERES_PUBLIC_ROTATION_H_ diff --git a/ceres-v2/include/sized_cost_function.h b/ceres-v2/include/sized_cost_function.h new file mode 100644 index 0000000000000000000000000000000000000000..d76b5c26b4cf45260db662e76ab41c52716f5e8c --- /dev/null +++ b/ceres-v2/include/sized_cost_function.h @@ -0,0 +1,69 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A convenience class for cost functions which are statically sized.
+// Compared to the dynamically-sized base class, this reduces boilerplate.
+//
+// The kNumResiduals template parameter can be a constant such as 2 or 5, or it
+// can be ceres::DYNAMIC. If kNumResiduals is ceres::DYNAMIC, then subclasses
+// are responsible for calling set_num_residuals() at runtime.
+
+#ifndef CERES_PUBLIC_SIZED_COST_FUNCTION_H_
+#define CERES_PUBLIC_SIZED_COST_FUNCTION_H_
+
+#include "ceres/cost_function.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+#include "internal/parameter_dims.h"
+
+namespace ceres {
+
+template <int kNumResiduals, int... Ns>
+class SizedCostFunction : public CostFunction {
+ public:
+  static_assert(kNumResiduals > 0 || kNumResiduals == DYNAMIC,
+                "Cost functions must have at least one residual block.");
+  static_assert(internal::StaticParameterDims<Ns...>::kIsValid,
+                "Invalid parameter block dimension detected. Each parameter "
+                "block dimension must be bigger than zero.");
+
+  using ParameterDims = internal::StaticParameterDims<Ns...>;
+
+  SizedCostFunction() {
+    set_num_residuals(kNumResiduals);
+    *mutable_parameter_block_sizes() = std::vector<int32_t>{Ns...};
+  }
+
+  // Subclasses must implement Evaluate().
+}; + +} // namespace ceres + +#endif // CERES_PUBLIC_SIZED_COST_FUNCTION_H_ diff --git a/ceres-v2/include/solver.h b/ceres-v2/include/solver.h new file mode 100644 index 0000000000000000000000000000000000000000..026fc1c08306888a01870fdcc3eaddfb00cf2277 --- /dev/null +++ b/ceres-v2/include/solver.h @@ -0,0 +1,1066 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_PUBLIC_SOLVER_H_
+#define CERES_PUBLIC_SOLVER_H_
+
+#include <cmath>
+#include <memory>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+#include "ceres/crs_matrix.h"
+#include "ceres/internal/config.h"
+#include "ceres/internal/disable_warnings.h"
+#include "ceres/internal/export.h"
+#include "ceres/iteration_callback.h"
+#include "ceres/ordered_groups.h"
+#include "ceres/problem.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+// Interface for non-linear least squares solvers.
+class CERES_EXPORT Solver {
+ public:
+  virtual ~Solver();
+
+  // The options structure contains, not surprisingly, options that control how
+  // the solver operates. The defaults should be suitable for a wide range of
+  // problems; however, better performance is often obtainable with tweaking.
+  //
+  // The constants are defined inside types.h
+  struct CERES_EXPORT Options {
+    // Returns true if the options struct has a valid
+    // configuration. Returns false otherwise, and fills in *error
+    // with a message describing the problem.
+    bool IsValid(std::string* error) const;
+
+    // Minimizer options ----------------------------------------
+
+    // Ceres supports the two major families of optimization strategies -
+    // Trust Region and Line Search.
+    //
+    // 1. The line search approach first finds a descent direction
+    // along which the objective function will be reduced and then
+    // computes a step size that decides how far should move along
+    // that direction. The descent direction can be computed by
+    // various methods, such as gradient descent, Newton's method and
+    // Quasi-Newton method. The step size can be determined either
+    // exactly or inexactly.
+    //
+    // 2. The trust region approach approximates the objective
+    // function using a model function (often a quadratic) over
+    // a subset of the search space known as the trust region.
If the + // model function succeeds in minimizing the true objective + // function the trust region is expanded; conversely, otherwise it + // is contracted and the model optimization problem is solved + // again. + // + // Trust region methods are in some sense dual to line search methods: + // trust region methods first choose a step size (the size of the + // trust region) and then a step direction while line search methods + // first choose a step direction and then a step size. + MinimizerType minimizer_type = TRUST_REGION; + + LineSearchDirectionType line_search_direction_type = LBFGS; + LineSearchType line_search_type = WOLFE; + NonlinearConjugateGradientType nonlinear_conjugate_gradient_type = + FLETCHER_REEVES; + + // The LBFGS hessian approximation is a low rank approximation to + // the inverse of the Hessian matrix. The rank of the + // approximation determines (linearly) the space and time + // complexity of using the approximation. Higher the rank, the + // better is the quality of the approximation. The increase in + // quality is however is bounded for a number of reasons. + // + // 1. The method only uses secant information and not actual + // derivatives. + // + // 2. The Hessian approximation is constrained to be positive + // definite. + // + // So increasing this rank to a large number will cost time and + // space complexity without the corresponding increase in solution + // quality. There are no hard and fast rules for choosing the + // maximum rank. The best choice usually requires some problem + // specific experimentation. + // + // For more theoretical and implementation details of the LBFGS + // method, please see: + // + // Nocedal, J. (1980). "Updating Quasi-Newton Matrices with + // Limited Storage". Mathematics of Computation 35 (151): 773-782. + int max_lbfgs_rank = 20; + + // As part of the (L)BFGS update step (BFGS) / right-multiply step (L-BFGS), + // the initial inverse Hessian approximation is taken to be the Identity. 
+ // However, Oren showed that using instead I * \gamma, where \gamma is + // chosen to approximate an eigenvalue of the true inverse Hessian can + // result in improved convergence in a wide variety of cases. Setting + // use_approximate_eigenvalue_bfgs_scaling to true enables this scaling. + // + // It is important to note that approximate eigenvalue scaling does not + // always improve convergence, and that it can in fact significantly degrade + // performance for certain classes of problem, which is why it is disabled + // by default. In particular it can degrade performance when the + // sensitivity of the problem to different parameters varies significantly, + // as in this case a single scalar factor fails to capture this variation + // and detrimentally downscales parts of the jacobian approximation which + // correspond to low-sensitivity parameters. It can also reduce the + // robustness of the solution to errors in the jacobians. + // + // Oren S.S., Self-scaling variable metric (SSVM) algorithms + // Part II: Implementation and experiments, Management Science, + // 20(5), 863-874, 1974. + bool use_approximate_eigenvalue_bfgs_scaling = false; + + // Degree of the polynomial used to approximate the objective + // function. Valid values are BISECTION, QUADRATIC and CUBIC. + // + // BISECTION corresponds to pure backtracking search with no + // interpolation. + LineSearchInterpolationType line_search_interpolation_type = CUBIC; + + // If during the line search, the step_size falls below this + // value, it is truncated to zero. + double min_line_search_step_size = 1e-9; + + // Line search parameters. + + // Solving the line search problem exactly is computationally + // prohibitive. Fortunately, line search based optimization + // algorithms can still guarantee convergence if instead of an + // exact solution, the line search algorithm returns a solution + // which decreases the value of the objective function + // sufficiently. 
More precisely, we are looking for a step_size + // s.t. + // + // f(step_size) <= f(0) + sufficient_decrease * f'(0) * step_size + // + double line_search_sufficient_function_decrease = 1e-4; + + // In each iteration of the line search, + // + // new_step_size >= max_line_search_step_contraction * step_size + // + // Note that by definition, for contraction: + // + // 0 < max_step_contraction < min_step_contraction < 1 + // + double max_line_search_step_contraction = 1e-3; + + // In each iteration of the line search, + // + // new_step_size <= min_line_search_step_contraction * step_size + // + // Note that by definition, for contraction: + // + // 0 < max_step_contraction < min_step_contraction < 1 + // + double min_line_search_step_contraction = 0.6; + + // Maximum number of trial step size iterations during each line + // search, if a step size satisfying the search conditions cannot + // be found within this number of trials, the line search will + // terminate. + + // The minimum allowed value is 0 for trust region minimizer and 1 + // otherwise. If 0 is specified for the trust region minimizer, + // then line search will not be used when solving constrained + // optimization problems. + int max_num_line_search_step_size_iterations = 20; + + // Maximum number of restarts of the line search direction algorithm before + // terminating the optimization. Restarts of the line search direction + // algorithm occur when the current algorithm fails to produce a new descent + // direction. This typically indicates a numerical failure, or a breakdown + // in the validity of the approximations used. + int max_num_line_search_direction_restarts = 5; + + // The strong Wolfe conditions consist of the Armijo sufficient + // decrease condition, and an additional requirement that the + // step-size be chosen s.t. the _magnitude_ ('strong' Wolfe + // conditions) of the gradient along the search direction + // decreases sufficiently. 
Precisely, this second condition + // is that we seek a step_size s.t. + // + // |f'(step_size)| <= sufficient_curvature_decrease * |f'(0)| + // + // Where f() is the line search objective and f'() is the derivative + // of f w.r.t step_size (d f / d step_size). + double line_search_sufficient_curvature_decrease = 0.9; + + // During the bracketing phase of the Wolfe search, the step size is + // increased until either a point satisfying the Wolfe conditions is + // found, or an upper bound for a bracket containing a point satisfying + // the conditions is found. Precisely, at each iteration of the + // expansion: + // + // new_step_size <= max_step_expansion * step_size. + // + // By definition for expansion, max_step_expansion > 1.0. + double max_line_search_step_expansion = 10.0; + + TrustRegionStrategyType trust_region_strategy_type = LEVENBERG_MARQUARDT; + + // Type of dogleg strategy to use. + DoglegType dogleg_type = TRADITIONAL_DOGLEG; + + // The classical trust region methods are descent methods, in that + // they only accept a point if it strictly reduces the value of + // the objective function. + // + // Relaxing this requirement allows the algorithm to be more + // efficient in the long term at the cost of some local increase + // in the value of the objective function. + // + // This is because allowing for non-decreasing objective function + // values in a principled manner allows the algorithm to "jump over + // boulders" as the method is not restricted to move into narrow + // valleys while preserving its convergence properties. + // + // Setting use_nonmonotonic_steps to true enables the + // non-monotonic trust region algorithm as described by Conn, + // Gould & Toint in "Trust Region Methods", Section 10.1. + // + // The parameter max_consecutive_nonmonotonic_steps controls the + // window size used by the step selection algorithm to accept + // non-monotonic steps. 
+ // + // Even though the value of the objective function may be larger + // than the minimum value encountered over the course of the + // optimization, the final parameters returned to the user are the + // ones corresponding to the minimum cost over all iterations. + bool use_nonmonotonic_steps = false; + int max_consecutive_nonmonotonic_steps = 5; + + // Maximum number of iterations for the minimizer to run for. + int max_num_iterations = 50; + + // Maximum time for which the minimizer should run for. + double max_solver_time_in_seconds = 1e9; + + // Number of threads used by Ceres for evaluating the cost and + // jacobians. + int num_threads = 1; + + // Trust region minimizer settings. + double initial_trust_region_radius = 1e4; + double max_trust_region_radius = 1e16; + + // Minimizer terminates when the trust region radius becomes + // smaller than this value. + double min_trust_region_radius = 1e-32; + + // Lower bound for the relative decrease before a step is + // accepted. + double min_relative_decrease = 1e-3; + + // For the Levenberg-Marquadt algorithm, the scaled diagonal of + // the normal equations J'J is used to control the size of the + // trust region. Extremely small and large values along the + // diagonal can make this regularization scheme + // fail. max_lm_diagonal and min_lm_diagonal, clamp the values of + // diag(J'J) from above and below. In the normal course of + // operation, the user should not have to modify these parameters. + double min_lm_diagonal = 1e-6; + double max_lm_diagonal = 1e32; + + // Sometimes due to numerical conditioning problems or linear + // solver flakiness, the trust region strategy may return a + // numerically invalid step that can be fixed by reducing the + // trust region size. So the TrustRegionMinimizer allows for a few + // successive invalid steps before it declares NUMERICAL_FAILURE. 
+ int max_num_consecutive_invalid_steps = 5; + + // Minimizer terminates when + // + // (new_cost - old_cost) < function_tolerance * old_cost; + // + double function_tolerance = 1e-6; + + // Minimizer terminates when + // + // max_i |x - Project(Plus(x, -g(x))| < gradient_tolerance + // + // This value should typically be 1e-4 * function_tolerance. + double gradient_tolerance = 1e-10; + + // Minimizer terminates when + // + // |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance) + // + double parameter_tolerance = 1e-8; + + // Linear least squares solver options ------------------------------------- + + LinearSolverType linear_solver_type = +#if defined(CERES_NO_SPARSE) + DENSE_QR; +#else + SPARSE_NORMAL_CHOLESKY; +#endif + + // Type of preconditioner to use with the iterative linear solvers. + PreconditionerType preconditioner_type = JACOBI; + + // Type of clustering algorithm to use for visibility based + // preconditioning. This option is used only when the + // preconditioner_type is CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL. + VisibilityClusteringType visibility_clustering_type = CANONICAL_VIEWS; + + // Subset preconditioner is a preconditioner for problems with + // general sparsity. Given a subset of residual blocks of a + // problem, it uses the corresponding subset of the rows of the + // Jacobian to construct a preconditioner. + // + // Suppose the Jacobian J has been horizontally partitioned as + // + // J = [P] + // [Q] + // + // Where, Q is the set of rows corresponding to the residual + // blocks in residual_blocks_for_subset_preconditioner. + // + // The preconditioner is the inverse of the matrix Q'Q. + // + // Obviously, the efficacy of the preconditioner depends on how + // well the matrix Q approximates J'J, or how well the chosen + // residual blocks approximate the non-linear least squares + // problem. + // + // If Solver::Options::preconditioner_type == SUBSET, then + // residual_blocks_for_subset_preconditioner must be non-empty. 
+    std::unordered_set<ResidualBlockId>
+        residual_blocks_for_subset_preconditioner;
+
+    // Ceres supports using multiple dense linear algebra libraries for dense
+    // matrix factorizations. Currently EIGEN, LAPACK and CUDA are the valid
+    // choices. EIGEN is always available, LAPACK refers to the system BLAS +
+    // LAPACK library which may or may not be available. CUDA refers to Nvidia's
+    // GPU based dense linear algebra library, which may or may not be
+    // available.
+    //
+    // This setting affects the DENSE_QR, DENSE_NORMAL_CHOLESKY and DENSE_SCHUR
+    // solvers. For small to moderate sized problem EIGEN is a fine choice but
+    // for large problems, an optimized LAPACK + BLAS or CUDA implementation can
+    // make a substantial difference in performance.
+    DenseLinearAlgebraLibraryType dense_linear_algebra_library_type = EIGEN;
+
+    // Ceres supports using multiple sparse linear algebra libraries for sparse
+    // matrix ordering and factorizations. Currently, SUITE_SPARSE and CX_SPARSE
+    // are the valid choices, depending on whether they are linked into Ceres at
+    // build time.
+    SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
+#if !defined(CERES_NO_SUITESPARSE)
+        SUITE_SPARSE;
+#elif defined(CERES_USE_EIGEN_SPARSE)
+        EIGEN_SPARSE;
+#elif !defined(CERES_NO_CXSPARSE)
+        CX_SPARSE;
+#elif !defined(CERES_NO_ACCELERATE_SPARSE)
+        ACCELERATE_SPARSE;
+#else
+        NO_SPARSE;
+#endif
+
+    // The order in which variables are eliminated in a linear solver
+    // can have a significant impact on the efficiency and accuracy
+    // of the method. e.g., when doing sparse Cholesky factorization,
+    // there are matrices for which a good ordering will give a
+    // Cholesky factor with O(n) storage, whereas a bad ordering will
+    // result in a completely dense factor.
+    //
+    // Ceres allows the user to provide varying amounts of hints to
+    // the solver about the variable elimination ordering to use.
This + // can range from no hints, where the solver is free to decide the + // best possible ordering based on the user's choices like the + // linear solver being used, to an exact order in which the + // variables should be eliminated, and a variety of possibilities + // in between. + // + // Instances of the ParameterBlockOrdering class are used to + // communicate this information to Ceres. + // + // Formally an ordering is an ordered partitioning of the + // parameter blocks, i.e, each parameter block belongs to exactly + // one group, and each group has a unique non-negative integer + // associated with it, that determines its order in the set of + // groups. + // + // Given such an ordering, Ceres ensures that the parameter blocks in + // the lowest numbered group are eliminated first, and then the + // parameter blocks in the next lowest numbered group and so on. Within + // each group, Ceres is free to order the parameter blocks as it + // chooses. + // + // If nullptr, then all parameter blocks are assumed to be in the + // same group and the solver is free to decide the best + // ordering. + // + // e.g. Consider the linear system + // + // x + y = 3 + // 2x + 3y = 7 + // + // There are two ways in which it can be solved. First eliminating x + // from the two equations, solving for y and then back substituting + // for x, or first eliminating y, solving for x and back substituting + // for y. The user can construct three orderings here. + // + // {0: x}, {1: y} - eliminate x first. + // {0: y}, {1: x} - eliminate y first. + // {0: x, y} - Solver gets to decide the elimination order. + // + // Thus, to have Ceres determine the ordering automatically using + // heuristics, put all the variables in group 0 and to control the + // ordering for every variable, create groups 0..N-1, one per + // variable, in the desired order. 
+    //
+    // Bundle Adjustment
+    // -----------------
+    //
+    // A particular case of interest is bundle adjustment, where the user
+    // has two options. The default is to not specify an ordering at all,
+    // the solver will see that the user wants to use a Schur type solver
+    // and figure out the right elimination ordering.
+    //
+    // But if the user already knows what parameter blocks are points and
+    // what are cameras, they can save preprocessing time by partitioning
+    // the parameter blocks into two groups, one for the points and one
+    // for the cameras, where the group containing the points has an id
+    // smaller than the group containing cameras.
+    std::shared_ptr<ParameterBlockOrdering> linear_solver_ordering;
+
+    // Use an explicitly computed Schur complement matrix with
+    // ITERATIVE_SCHUR.
+    //
+    // By default this option is disabled and ITERATIVE_SCHUR
+    // evaluates matrix-vector products between the Schur
+    // complement and a vector implicitly by exploiting the algebraic
+    // expression for the Schur complement.
+    //
+    // The cost of this evaluation scales with the number of non-zeros
+    // in the Jacobian.
+    //
+    // For small to medium sized problems there is a sweet spot where
+    // computing the Schur complement is cheap enough that it is much
+    // more efficient to explicitly compute it and use it for evaluating
+    // the matrix-vector products.
+    //
+    // Enabling this option tells ITERATIVE_SCHUR to use an explicitly
+    // computed Schur complement.
+    //
+    // NOTE: This option can only be used with the SCHUR_JACOBI
+    // preconditioner.
+    bool use_explicit_schur_complement = false;
+
+    // Sparse Cholesky factorization algorithms use a fill-reducing
+    // ordering to permute the columns of the Jacobian matrix. There
+    // are two ways of doing this.
+
+    // 1. Compute the Jacobian matrix in some order and then have the
+    // factorization algorithm permute the columns of the Jacobian.
+
+    // 2. Compute the Jacobian with its columns already permuted.
+ + // The first option incurs a significant memory penalty. The + // factorization algorithm has to make a copy of the permuted + // Jacobian matrix, thus Ceres pre-permutes the columns of the + // Jacobian matrix and generally speaking, there is no performance + // penalty for doing so. + + // In some rare cases, it is worth using a more complicated + // reordering algorithm which has slightly better runtime + // performance at the expense of an extra copy of the Jacobian + // matrix. Setting use_postordering to true enables this tradeoff. + bool use_postordering = false; + + // Some non-linear least squares problems are symbolically dense but + // numerically sparse. i.e. at any given state only a small number + // of jacobian entries are non-zero, but the position and number of + // non-zeros is different depending on the state. For these problems + // it can be useful to factorize the sparse jacobian at each solver + // iteration instead of including all of the zero entries in a single + // general factorization. + // + // If your problem does not have this property (or you do not know), + // then it is probably best to keep this false, otherwise it will + // likely lead to worse performance. + + // This settings only affects the SPARSE_NORMAL_CHOLESKY solver. + bool dynamic_sparsity = false; + + // TODO(sameeragarwal): Further expand the documentation for the + // following two options. + + // NOTE1: EXPERIMENTAL FEATURE, UNDER DEVELOPMENT, USE AT YOUR OWN RISK. + // + // If use_mixed_precision_solves is true, the Gauss-Newton matrix + // is computed in double precision, but its factorization is + // computed in single precision. This can result in significant + // time and memory savings at the cost of some accuracy in the + // Gauss-Newton step. Iterative refinement is used to recover some + // of this accuracy back. + // + // If use_mixed_precision_solves is true, we recommend setting + // max_num_refinement_iterations to 2-3. 
+ // + // NOTE2: The following two options are currently only applicable + // if sparse_linear_algebra_library_type is EIGEN_SPARSE or + // ACCELERATE_SPARSE, and linear_solver_type is SPARSE_NORMAL_CHOLESKY + // or SPARSE_SCHUR. + bool use_mixed_precision_solves = false; + + // Number steps of the iterative refinement process to run when + // computing the Gauss-Newton step. + int max_num_refinement_iterations = 0; + + // Some non-linear least squares problems have additional + // structure in the way the parameter blocks interact that it is + // beneficial to modify the way the trust region step is computed. + // + // e.g., consider the following regression problem + // + // y = a_1 exp(b_1 x) + a_2 exp(b_3 x^2 + c_1) + // + // Given a set of pairs{(x_i, y_i)}, the user wishes to estimate + // a_1, a_2, b_1, b_2, and c_1. + // + // Notice here that the expression on the left is linear in a_1 + // and a_2, and given any value for b_1, b_2 and c_1, it is + // possible to use linear regression to estimate the optimal + // values of a_1 and a_2. Indeed, its possible to analytically + // eliminate the variables a_1 and a_2 from the problem all + // together. Problems like these are known as separable least + // squares problem and the most famous algorithm for solving them + // is the Variable Projection algorithm invented by Golub & + // Pereyra. + // + // Similar structure can be found in the matrix factorization with + // missing data problem. There the corresponding algorithm is + // known as Wiberg's algorithm. + // + // Ruhe & Wedin (Algorithms for Separable Nonlinear Least Squares + // Problems, SIAM Reviews, 22(3), 1980) present an analysis of + // various algorithms for solving separable non-linear least + // squares problems and refer to "Variable Projection" as + // Algorithm I in their paper. 
+ // + // Implementing Variable Projection is tedious and expensive, and + // they present a simpler algorithm, which they refer to as + // Algorithm II, where once the Newton/Trust Region step has been + // computed for the whole problem (a_1, a_2, b_1, b_2, c_1) and + // additional optimization step is performed to estimate a_1 and + // a_2 exactly. + // + // This idea can be generalized to cases where the residual is not + // linear in a_1 and a_2, i.e., Solve for the trust region step + // for the full problem, and then use it as the starting point to + // further optimize just a_1 and a_2. For the linear case, this + // amounts to doing a single linear least squares solve. For + // non-linear problems, any method for solving the a_1 and a_2 + // optimization problems will do. The only constraint on a_1 and + // a_2 is that they do not co-occur in any residual block. + // + // This idea can be further generalized, by not just optimizing + // (a_1, a_2), but decomposing the graph corresponding to the + // Hessian matrix's sparsity structure in a collection of + // non-overlapping independent sets and optimizing each of them. + // + // Setting "use_inner_iterations" to true enables the use of this + // non-linear generalization of Ruhe & Wedin's Algorithm II. This + // version of Ceres has a higher iteration complexity, but also + // displays better convergence behaviour per iteration. Setting + // Solver::Options::num_threads to the maximum number possible is + // highly recommended. + bool use_inner_iterations = false; + + // If inner_iterations is true, then the user has two choices. + // + // 1. Let the solver heuristically decide which parameter blocks + // to optimize in each inner iteration. To do this leave + // Solver::Options::inner_iteration_ordering untouched. + // + // 2. Specify a collection of of ordered independent sets. Where + // the lower numbered groups are optimized before the higher + // number groups. Each group must be an independent set. 
Not
+    // all parameter blocks need to be present in the ordering.
+    std::shared_ptr<ParameterBlockOrdering> inner_iteration_ordering;
+
+    // Generally speaking, inner iterations make significant progress
+    // in the early stages of the solve and then their contribution
+    // drops down sharply, at which point the time spent doing inner
+    // iterations is not worth it.
+    //
+    // Once the relative decrease in the objective function due to
+    // inner iterations drops below inner_iteration_tolerance, the use
+    // of inner iterations in subsequent trust region minimizer
+    // iterations is disabled.
+    double inner_iteration_tolerance = 1e-3;
+
+    // Minimum number of iterations for which the linear solver should
+    // run, even if the convergence criterion is satisfied.
+    int min_linear_solver_iterations = 0;
+
+    // Maximum number of iterations for which the linear solver should
+    // run. If the solver does not converge in less than
+    // max_linear_solver_iterations, then it returns MAX_ITERATIONS,
+    // as its termination type.
+    int max_linear_solver_iterations = 500;
+
+    // Forcing sequence parameter. The truncated Newton solver uses
+    // this number to control the relative accuracy with which the
+    // Newton step is computed.
+    //
+    // This constant is passed to ConjugateGradientsSolver which uses
+    // it to terminate the iterations when
+    //
+    //  (Q_i - Q_{i-1})/Q_i < eta/i
+    double eta = 1e-1;
+
+    // Normalize the jacobian using Jacobi scaling before calling
+    // the linear least squares solver.
+    bool jacobi_scaling = true;
+
+    // Logging options ---------------------------------------------------------
+
+    LoggingType logging_type = PER_MINIMIZER_ITERATION;
+
+    // By default the Minimizer progress is logged to VLOG(1), which
+    // is sent to STDERR depending on the vlog level. If this flag is
+    // set to true, and logging_type is not SILENT, the logging output
+    // is sent to STDOUT.
+    bool minimizer_progress_to_stdout = false;
+
+    // List of iterations at which the minimizer should dump the trust
+    // region problem. Useful for testing and benchmarking. If empty
+    // (default), no problems are dumped.
+    std::vector<int> trust_region_minimizer_iterations_to_dump;
+
+    // Directory to which the problems should be written to. Should be
+    // non-empty if trust_region_minimizer_iterations_to_dump is
+    // non-empty and trust_region_problem_dump_format_type is not
+    // CONSOLE.
+    std::string trust_region_problem_dump_directory = "/tmp";
+    DumpFormatType trust_region_problem_dump_format_type = TEXTFILE;
+
+    // Finite differences options ----------------------------------------------
+
+    // Check all jacobians computed by each residual block with finite
+    // differences. This is expensive since it involves computing the
+    // derivative by normal means (e.g. user specified, autodiff,
+    // etc), then also computing it using finite differences. The
+    // results are compared, and if they differ substantially, details
+    // are printed to the log.
+    bool check_gradients = false;
+
+    // Relative precision to check for in the gradient checker. If the
+    // relative difference between an element in a jacobian exceeds
+    // this number, then the jacobian for that cost term is dumped.
+    double gradient_check_relative_precision = 1e-8;
+
+    // WARNING: This option only applies to the numeric
+    // differentiation used for checking the user provided derivatives
+    // when Solver::Options::check_gradients is true. If you are
+    // using NumericDiffCostFunction and are interested in changing
+    // the step size for numeric differentiation in your cost
+    // function, please have a look at
+    // include/ceres/numeric_diff_options.h.
+    //
+    // Relative shift used for taking numeric derivatives when
+    // Solver::Options::check_gradients is true.
+ // + // For finite differencing, each dimension is evaluated at + // slightly shifted values; for the case of central difference, + // this is what gets evaluated: + // + // delta = gradient_check_numeric_derivative_relative_step_size; + // f_initial = f(x) + // f_forward = f((1 + delta) * x) + // f_backward = f((1 - delta) * x) + // + // The finite differencing is done along each dimension. The + // reason to use a relative (rather than absolute) step size is + // that this way, numeric differentiation works for functions where + // the arguments are typically large (e.g. 1e9) and when the + // values are small (e.g. 1e-5). It is possible to construct + // "torture cases" which break this finite difference heuristic, + // but they do not come up often in practice. + // + // TODO(keir): Pick a smarter number than the default above! In + // theory a good choice is sqrt(eps) * x, which for doubles means + // about 1e-8 * x. However, I have found this number too + // optimistic. This number should be exposed for users to change. + double gradient_check_numeric_derivative_relative_step_size = 1e-6; + + // If update_state_every_iteration is true, then Ceres Solver will + // guarantee that at the end of every iteration and before any + // user provided IterationCallback is called, the parameter blocks + // are updated to the current best solution found by the + // solver. Thus the IterationCallback can inspect the values of + // the parameter blocks for purposes of computation, visualization + // or termination. + + // If update_state_every_iteration is false then there is no such + // guarantee, and user provided IterationCallbacks should not + // expect to look at the parameter blocks and interpret their + // values. + bool update_state_every_iteration = false; + + // Callbacks that are executed at the end of each iteration of the + // Minimizer. 
An iteration may terminate midway, either due to + // numerical failures or because one of the convergence tests has + // been satisfied. In this case none of the callbacks are + // executed. + + // Callbacks are executed in the order that they are specified in + // this vector. By default, parameter blocks are updated only at the + // end of the optimization, i.e when the Minimizer terminates. This + // behaviour is controlled by update_state_every_iteration. If the + // user wishes to have access to the updated parameter blocks when + // his/her callbacks are executed, then set + // update_state_every_iteration to true. + // + // The solver does NOT take ownership of these pointers. + std::vector callbacks; + }; + + struct CERES_EXPORT Summary { + // A brief one line description of the state of the solver after + // termination. + std::string BriefReport() const; + + // A full multiline description of the state of the solver after + // termination. + std::string FullReport() const; + + bool IsSolutionUsable() const; + + // Minimizer summary ------------------------------------------------- + MinimizerType minimizer_type = TRUST_REGION; + + TerminationType termination_type = FAILURE; + + // Reason why the solver terminated. + std::string message = "ceres::Solve was not called."; + + // Cost of the problem (value of the objective function) before + // the optimization. + double initial_cost = -1.0; + + // Cost of the problem (value of the objective function) after the + // optimization. + double final_cost = -1.0; + + // The part of the total cost that comes from residual blocks that + // were held fixed by the preprocessor because all the parameter + // blocks that they depend on were fixed. + double fixed_cost = -1.0; + + // IterationSummary for each minimizer iteration in order. + std::vector iterations; + + // Number of minimizer iterations in which the step was + // accepted. 
Unless use_non_monotonic_steps is true this is also + // the number of steps in which the objective function value/cost + // went down. + int num_successful_steps = -1; + + // Number of minimizer iterations in which the step was rejected + // either because it did not reduce the cost enough or the step + // was not numerically valid. + int num_unsuccessful_steps = -1; + + // Number of times inner iterations were performed. + int num_inner_iteration_steps = -1; + + // Total number of iterations inside the line search algorithm + // across all invocations. We call these iterations "steps" to + // distinguish them from the outer iterations of the line search + // and trust region minimizer algorithms which call the line + // search algorithm as a subroutine. + int num_line_search_steps = -1; + + // All times reported below are wall times. + + // When the user calls Solve, before the actual optimization + // occurs, Ceres performs a number of preprocessing steps. These + // include error checks, memory allocations, and reorderings. This + // time is accounted for as preprocessing time. + double preprocessor_time_in_seconds = -1.0; + + // Time spent in the TrustRegionMinimizer. + double minimizer_time_in_seconds = -1.0; + + // After the Minimizer is finished, some time is spent in + // re-evaluating residuals etc. This time is accounted for in the + // postprocessor time. + double postprocessor_time_in_seconds = -1.0; + + // Some total of all time spent inside Ceres when Solve is called. + double total_time_in_seconds = -1.0; + + // Time (in seconds) spent in the linear solver computing the + // trust region step. + double linear_solver_time_in_seconds = -1.0; + + // Number of times the Newton step was computed by solving a + // linear system. This does not include linear solves used by + // inner iterations. + int num_linear_solves = -1; + + // Time (in seconds) spent evaluating the residual vector. 
+ double residual_evaluation_time_in_seconds = -1.0; + + // Number of residual only evaluations. + int num_residual_evaluations = -1; + + // Time (in seconds) spent evaluating the jacobian matrix. + double jacobian_evaluation_time_in_seconds = -1.0; + + // Number of Jacobian (and residual) evaluations. + int num_jacobian_evaluations = -1; + + // Time (in seconds) spent doing inner iterations. + double inner_iteration_time_in_seconds = -1.0; + + // Cumulative timing information for line searches performed as part of the + // solve. Note that in addition to the case when the Line Search minimizer + // is used, the Trust Region minimizer also uses a line search when + // solving a constrained problem. + + // Time (in seconds) spent evaluating the univariate cost function as part + // of a line search. + double line_search_cost_evaluation_time_in_seconds = -1.0; + + // Time (in seconds) spent evaluating the gradient of the univariate cost + // function as part of a line search. + double line_search_gradient_evaluation_time_in_seconds = -1.0; + + // Time (in seconds) spent minimizing the interpolating polynomial + // to compute the next candidate step size as part of a line search. + double line_search_polynomial_minimization_time_in_seconds = -1.0; + + // Total time (in seconds) spent performing line searches. + double line_search_total_time_in_seconds = -1.0; + + // Number of parameter blocks in the problem. + int num_parameter_blocks = -1; + + // Number of parameters in the problem. + int num_parameters = -1; + + // Dimension of the tangent space of the problem (or the number of + // columns in the Jacobian for the problem). This is different + // from num_parameters if a parameter block is associated with a + // LocalParameterization/Manifold. + int num_effective_parameters = -1; + + // Number of residual blocks in the problem. + int num_residual_blocks = -1; + + // Number of residuals in the problem. 
+ int num_residuals = -1; + + // Number of parameter blocks in the problem after the inactive + // and constant parameter blocks have been removed. A parameter + // block is inactive if no residual block refers to it. + int num_parameter_blocks_reduced = -1; + + // Number of parameters in the reduced problem. + int num_parameters_reduced = -1; + + // Dimension of the tangent space of the reduced problem (or the + // number of columns in the Jacobian for the reduced + // problem). This is different from num_parameters_reduced if a + // parameter block in the reduced problem is associated with a + // LocalParameterization/Manifold. + int num_effective_parameters_reduced = -1; + + // Number of residual blocks in the reduced problem. + int num_residual_blocks_reduced = -1; + + // Number of residuals in the reduced problem. + int num_residuals_reduced = -1; + + // Is the reduced problem bounds constrained. + bool is_constrained = false; + + // Number of threads specified by the user for Jacobian and + // residual evaluation. + int num_threads_given = -1; + + // Number of threads actually used by the solver for Jacobian and + // residual evaluation. This number is not equal to + // num_threads_given if OpenMP is not available. + int num_threads_used = -1; + + // Type of the linear solver requested by the user. + LinearSolverType linear_solver_type_given = +#if defined(CERES_NO_SPARSE) + DENSE_QR; +#else + SPARSE_NORMAL_CHOLESKY; +#endif + // Type of the linear solver actually used. This may be different + // from linear_solver_type_given if Ceres determines that the + // problem structure is not compatible with the linear solver + // requested or if the linear solver requested by the user is not + // available, e.g. The user requested SPARSE_NORMAL_CHOLESKY but + // no sparse linear algebra library was available. 
+ LinearSolverType linear_solver_type_used = +#if defined(CERES_NO_SPARSE) + DENSE_QR; +#else + SPARSE_NORMAL_CHOLESKY; +#endif + + // Size of the elimination groups given by the user as hints to + // the linear solver. + std::vector linear_solver_ordering_given; + + // Size of the parameter groups used by the solver when ordering + // the columns of the Jacobian. This maybe different from + // linear_solver_ordering_given if the user left + // linear_solver_ordering_given blank and asked for an automatic + // ordering, or if the problem contains some constant or inactive + // parameter blocks. + std::vector linear_solver_ordering_used; + + // For Schur type linear solvers, this string describes the + // template specialization which was detected in the problem and + // should be used. + std::string schur_structure_given; + + // This is the Schur template specialization that was actually + // instantiated and used. The reason this will be different from + // schur_structure_given is because the corresponding template + // specialization does not exist. + // + // Template specializations can be added to ceres by editing + // internal/ceres/generate_template_specializations.py + std::string schur_structure_used; + + // True if the user asked for inner iterations to be used as part + // of the optimization. + bool inner_iterations_given = false; + + // True if the user asked for inner iterations to be used as part + // of the optimization and the problem structure was such that + // they were actually performed. e.g., in a problem with just one + // parameter block, inner iterations are not performed. + bool inner_iterations_used = false; + + // Size of the parameter groups given by the user for performing + // inner iterations. + std::vector inner_iteration_ordering_given; + + // Size of the parameter groups given used by the solver for + // performing inner iterations. 
This maybe different from + // inner_iteration_ordering_given if the user left + // inner_iteration_ordering_given blank and asked for an automatic + // ordering, or if the problem contains some constant or inactive + // parameter blocks. + std::vector inner_iteration_ordering_used; + + // Type of the preconditioner requested by the user. + PreconditionerType preconditioner_type_given = IDENTITY; + + // Type of the preconditioner actually used. This may be different + // from linear_solver_type_given if Ceres determines that the + // problem structure is not compatible with the linear solver + // requested or if the linear solver requested by the user is not + // available. + PreconditionerType preconditioner_type_used = IDENTITY; + + // Type of clustering algorithm used for visibility based + // preconditioning. Only meaningful when the preconditioner_type + // is CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL. + VisibilityClusteringType visibility_clustering_type = CANONICAL_VIEWS; + + // Type of trust region strategy. + TrustRegionStrategyType trust_region_strategy_type = LEVENBERG_MARQUARDT; + + // Type of dogleg strategy used for solving the trust region + // problem. + DoglegType dogleg_type = TRADITIONAL_DOGLEG; + + // Type of the dense linear algebra library used. + DenseLinearAlgebraLibraryType dense_linear_algebra_library_type = EIGEN; + + // Type of the sparse linear algebra library used. + SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type = + NO_SPARSE; + + // Type of line search direction used. + LineSearchDirectionType line_search_direction_type = LBFGS; + + // Type of the line search algorithm used. + LineSearchType line_search_type = WOLFE; + + // When performing line search, the degree of the polynomial used + // to approximate the objective function. 
+ LineSearchInterpolationType line_search_interpolation_type = CUBIC; + + // If the line search direction is NONLINEAR_CONJUGATE_GRADIENT, + // then this indicates the particular variant of non-linear + // conjugate gradient used. + NonlinearConjugateGradientType nonlinear_conjugate_gradient_type = + FLETCHER_REEVES; + + // If the type of the line search direction is LBFGS, then this + // indicates the rank of the Hessian approximation. + int max_lbfgs_rank = -1; + }; + + // Once a least squares problem has been built, this function takes + // the problem and optimizes it based on the values of the options + // parameters. Upon return, a detailed summary of the work performed + // by the preprocessor, the non-linear minimizer and the linear + // solver are reported in the summary object. + virtual void Solve(const Options& options, + Problem* problem, + Solver::Summary* summary); +}; + +// Helper function which avoids going through the interface. +CERES_EXPORT void Solve(const Solver::Options& options, + Problem* problem, + Solver::Summary* summary); + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_SOLVER_H_ diff --git a/ceres-v2/include/sphere_manifold.h b/ceres-v2/include/sphere_manifold.h new file mode 100644 index 0000000000000000000000000000000000000000..5d71cbbca9a6d4dffdcaf4c16f642ae7a5e8bea9 --- /dev/null +++ b/ceres-v2/include/sphere_manifold.h @@ -0,0 +1,231 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: vitus@google.com (Mike Vitus) +// jodebo_beck@gmx.de (Johannes Beck) + +#ifndef CERES_PUBLIC_SPHERE_MANIFOLD_H_ +#define CERES_PUBLIC_SPHERE_MANIFOLD_H_ + +#include +#include +#include +#include +#include + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/internal/householder_vector.h" +#include "ceres/internal/sphere_manifold_functions.h" +#include "ceres/manifold.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { + +// This provides a manifold on a sphere meaning that the norm of the vector +// stays the same. Such cases often arises in Structure for Motion +// problems. One example where they are used is in representing points whose +// triangulation is ill-conditioned. 
Here it is advantageous to use an +// over-parameterization since homogeneous vectors can represent points at +// infinity. +// +// The plus operator is defined as +// Plus(x, delta) = +// [sin(0.5 * |delta|) * delta / |delta|, cos(0.5 * |delta|)] * x +// +// The minus operator is defined as +// Minus(x, y) = 2 atan2(nhy, y[-1]) / nhy * hy[0 : size_ - 1] +// with nhy = norm(hy[0 : size_ - 1]) +// +// with * defined as an operator which applies the update orthogonal to x to +// remain on the sphere. The ambient space dimension is required to be greater +// than 1. +// +// The class works with dynamic and static ambient space dimensions. If the +// ambient space dimensions is known at compile time use +// +// SphereManifold<3> manifold; +// +// If the ambient space dimensions is not known at compile time the template +// parameter needs to be set to ceres::DYNAMIC and the actual dimension needs +// to be provided as a constructor argument: +// +// SphereManifold manifold(ambient_dim); +// +// See section B.2 (p.25) in "Integrating Generic Sensor Fusion Algorithms +// with Sound State Representations through Encapsulation of Manifolds" by C. +// Hertzberg, R. Wagner, U. Frese and L. Schroder for more details +// (https://arxiv.org/pdf/1107.1119.pdf) +template +class SphereManifold final : public Manifold { + public: + static_assert( + AmbientSpaceDimension == ceres::DYNAMIC || AmbientSpaceDimension > 1, + "The size of the homogeneous vector needs to be greater than 1."); + static_assert(ceres::DYNAMIC == Eigen::Dynamic, + "ceres::DYNAMIC needs to be the same as Eigen::Dynamic."); + + SphereManifold(); + explicit SphereManifold(int size); + + int AmbientSize() const override { + return AmbientSpaceDimension == ceres::DYNAMIC ? 
size_ + : AmbientSpaceDimension; + } + int TangentSize() const override { return AmbientSize() - 1; } + + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool PlusJacobian(const double* x, double* jacobian) const override; + + bool Minus(const double* y, + const double* x, + double* y_minus_x) const override; + bool MinusJacobian(const double* x, double* jacobian) const override; + + private: + static constexpr int TangentSpaceDimension = + AmbientSpaceDimension > 0 ? AmbientSpaceDimension - 1 : Eigen::Dynamic; + + using AmbientVector = Eigen::Matrix; + using TangentVector = Eigen::Matrix; + using MatrixPlusJacobian = Eigen::Matrix; + using MatrixMinusJacobian = Eigen::Matrix; + + const int size_{}; +}; + +template +SphereManifold::SphereManifold() + : size_{AmbientSpaceDimension} { + static_assert( + AmbientSpaceDimension != Eigen::Dynamic, + "The size is set to dynamic. Please call the constructor with a size."); +} + +template +SphereManifold::SphereManifold(int size) : size_{size} { + if (AmbientSpaceDimension != Eigen::Dynamic) { + CHECK_EQ(AmbientSpaceDimension, size) + << "Specified size by template parameter differs from the supplied " + "one."; + } else { + CHECK_GT(size_, 1) + << "The size of the manifold needs to be greater than 1."; + } +} + +template +bool SphereManifold::Plus( + const double* x_ptr, + const double* delta_ptr, + double* x_plus_delta_ptr) const { + Eigen::Map x(x_ptr, size_); + Eigen::Map delta(delta_ptr, size_ - 1); + Eigen::Map x_plus_delta(x_plus_delta_ptr, size_); + + const double norm_delta = delta.norm(); + + if (norm_delta == 0.0) { + x_plus_delta = x; + return true; + } + + AmbientVector v(size_); + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. 
+ internal::ComputeHouseholderVector, + double, + AmbientSpaceDimension>(x, &v, &beta); + + internal::ComputeSphereManifoldPlus( + v, beta, x, delta, norm_delta, &x_plus_delta); + + return true; +} + +template +bool SphereManifold::PlusJacobian( + const double* x_ptr, double* jacobian_ptr) const { + Eigen::Map x(x_ptr, size_); + Eigen::Map jacobian(jacobian_ptr, size_, size_ - 1); + internal::ComputeSphereManifoldPlusJacobian(x, &jacobian); + + return true; +} + +template +bool SphereManifold::Minus(const double* y_ptr, + const double* x_ptr, + double* y_minus_x_ptr) const { + AmbientVector y = Eigen::Map(y_ptr, size_); + Eigen::Map x(x_ptr, size_); + Eigen::Map y_minus_x(y_minus_x_ptr, size_ - 1); + + // Apply hoseholder transformation. + AmbientVector v(size_); + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. + internal::ComputeHouseholderVector, + double, + AmbientSpaceDimension>(x, &v, &beta); + internal::ComputeSphereManifoldMinus(v, beta, x, y, &y_minus_x); + return true; +} + +template +bool SphereManifold::MinusJacobian( + const double* x_ptr, double* jacobian_ptr) const { + Eigen::Map x(x_ptr, size_); + Eigen::Map jacobian(jacobian_ptr, size_ - 1, size_); + + internal::ComputeSphereManifoldMinusJacobian(x, &jacobian); + return true; +} + +} // namespace ceres + +// clang-format off +#include "ceres/internal/reenable_warnings.h" +// clang-format on + +#endif // CERES_PUBLIC_SPHERE_MANIFOLD_H_ diff --git a/ceres-v2/include/tiny_solver.h b/ceres-v2/include/tiny_solver.h new file mode 100644 index 0000000000000000000000000000000000000000..86a655dc07d25bb60899505d5378c4db76a3f022 --- /dev/null +++ b/ceres-v2/include/tiny_solver.h @@ -0,0 +1,400 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2021 Google Inc. All rights reserved. 
+// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: mierle@gmail.com (Keir Mierle) +// +// WARNING WARNING WARNING +// WARNING WARNING WARNING Tiny solver is experimental and will change. +// WARNING WARNING WARNING +// +// A tiny least squares solver using Levenberg-Marquardt, intended for solving +// small dense problems with low latency and low overhead. The implementation +// takes care to do all allocation up front, so that no memory is allocated +// during solving. 
This is especially useful when solving many similar problems; +// for example, inverse pixel distortion for every pixel on a grid. +// +// Note: This code has no dependencies beyond Eigen, including on other parts of +// Ceres, so it is possible to take this file alone and put it in another +// project without the rest of Ceres. +// +// Algorithm based off of: +// +// [1] K. Madsen, H. Nielsen, O. Tingleoff. +// Methods for Non-linear Least Squares Problems. +// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf + +#ifndef CERES_PUBLIC_TINY_SOLVER_H_ +#define CERES_PUBLIC_TINY_SOLVER_H_ + +#include +#include + +#include "Eigen/Dense" + +namespace ceres { + +// To use tiny solver, create a class or struct that allows computing the cost +// function (described below). This is similar to a ceres::CostFunction, but is +// different to enable statically allocating all memory for the solver +// (specifically, enum sizes). Key parts are the Scalar typedef, the enums to +// describe problem sizes (needed to remove all heap allocations), and the +// operator() overload to evaluate the cost and (optionally) jacobians. +// +// struct TinySolverCostFunctionTraits { +// typedef double Scalar; +// enum { +// NUM_RESIDUALS = OR Eigen::Dynamic, +// NUM_PARAMETERS = OR Eigen::Dynamic, +// }; +// bool operator()(const double* parameters, +// double* residuals, +// double* jacobian) const; +// +// int NumResiduals() const; -- Needed if NUM_RESIDUALS == Eigen::Dynamic. +// int NumParameters() const; -- Needed if NUM_PARAMETERS == Eigen::Dynamic. +// }; +// +// For operator(), the size of the objects is: +// +// double* parameters -- NUM_PARAMETERS or NumParameters() +// double* residuals -- NUM_RESIDUALS or NumResiduals() +// double* jacobian -- NUM_RESIDUALS * NUM_PARAMETERS in column-major format +// (Eigen's default); or nullptr if no jacobian +// requested. 
+// +// An example (fully statically sized): +// +// struct MyCostFunctionExample { +// typedef double Scalar; +// enum { +// NUM_RESIDUALS = 2, +// NUM_PARAMETERS = 3, +// }; +// bool operator()(const double* parameters, +// double* residuals, +// double* jacobian) const { +// residuals[0] = x + 2*y + 4*z; +// residuals[1] = y * z; +// if (jacobian) { +// jacobian[0 * 2 + 0] = 1; // First column (x). +// jacobian[0 * 2 + 1] = 0; +// +// jacobian[1 * 2 + 0] = 2; // Second column (y). +// jacobian[1 * 2 + 1] = z; +// +// jacobian[2 * 2 + 0] = 4; // Third column (z). +// jacobian[2 * 2 + 1] = y; +// } +// return true; +// } +// }; +// +// The solver supports either statically or dynamically sized cost +// functions. If the number of residuals is dynamic then the Function +// must define: +// +// int NumResiduals() const; +// +// If the number of parameters is dynamic then the Function must +// define: +// +// int NumParameters() const; +// +template >> +class TinySolver { + public: + // This class needs to have an Eigen aligned operator new as it contains + // fixed-size Eigen types. + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + enum { + NUM_RESIDUALS = Function::NUM_RESIDUALS, + NUM_PARAMETERS = Function::NUM_PARAMETERS + }; + using Scalar = typename Function::Scalar; + using Parameters = typename Eigen::Matrix; + + enum Status { + // max_norm |J'(x) * f(x)| < gradient_tolerance + GRADIENT_TOO_SMALL, + // ||dx|| <= parameter_tolerance * (||x|| + parameter_tolerance) + RELATIVE_STEP_SIZE_TOO_SMALL, + // cost_threshold > ||f(x)||^2 / 2 + COST_TOO_SMALL, + // num_iterations >= max_num_iterations + HIT_MAX_ITERATIONS, + // (new_cost - old_cost) < function_tolerance * old_cost + COST_CHANGE_TOO_SMALL, + + // TODO(sameeragarwal): Deal with numerical failures. 
+ }; + + struct Options { + int max_num_iterations = 50; + + // max_norm |J'(x) * f(x)| < gradient_tolerance + Scalar gradient_tolerance = 1e-10; + + // ||dx|| <= parameter_tolerance * (||x|| + parameter_tolerance) + Scalar parameter_tolerance = 1e-8; + + // (new_cost - old_cost) < function_tolerance * old_cost + Scalar function_tolerance = 1e-6; + + // cost_threshold > ||f(x)||^2 / 2 + Scalar cost_threshold = std::numeric_limits::epsilon(); + + Scalar initial_trust_region_radius = 1e4; + }; + + struct Summary { + // 1/2 ||f(x_0)||^2 + Scalar initial_cost = -1; + // 1/2 ||f(x)||^2 + Scalar final_cost = -1; + // max_norm(J'f(x)) + Scalar gradient_max_norm = -1; + int iterations = -1; + Status status = HIT_MAX_ITERATIONS; + }; + + bool Update(const Function& function, const Parameters& x) { + if (!function(x.data(), residuals_.data(), jacobian_.data())) { + return false; + } + + residuals_ = -residuals_; + + // On the first iteration, compute a diagonal (Jacobi) scaling + // matrix, which we store as a vector. + if (summary.iterations == 0) { + // jacobi_scaling = 1 / (1 + diagonal(J'J)) + // + // 1 is added to the denominator to regularize small diagonal + // entries. + jacobi_scaling_ = 1.0 / (1.0 + jacobian_.colwise().norm().array()); + } + + // This explicitly computes the normal equations, which is numerically + // unstable. Nevertheless, it is often good enough and is fast. + // + // TODO(sameeragarwal): Refactor this to allow for DenseQR + // factorization. + jacobian_ = jacobian_ * jacobi_scaling_.asDiagonal(); + jtj_ = jacobian_.transpose() * jacobian_; + g_ = jacobian_.transpose() * residuals_; + summary.gradient_max_norm = g_.array().abs().maxCoeff(); + cost_ = residuals_.squaredNorm() / 2; + return true; + } + + const Summary& Solve(const Function& function, Parameters* x_and_min) { + Initialize(function); + assert(x_and_min); + Parameters& x = *x_and_min; + summary = Summary(); + summary.iterations = 0; + + // TODO(sameeragarwal): Deal with failure here. 
+ Update(function, x); + summary.initial_cost = cost_; + summary.final_cost = cost_; + + if (summary.gradient_max_norm < options.gradient_tolerance) { + summary.status = GRADIENT_TOO_SMALL; + return summary; + } + + if (cost_ < options.cost_threshold) { + summary.status = COST_TOO_SMALL; + return summary; + } + + Scalar u = 1.0 / options.initial_trust_region_radius; + Scalar v = 2; + + for (summary.iterations = 1; + summary.iterations < options.max_num_iterations; + summary.iterations++) { + jtj_regularized_ = jtj_; + const Scalar min_diagonal = 1e-6; + const Scalar max_diagonal = 1e32; + for (int i = 0; i < lm_diagonal_.rows(); ++i) { + lm_diagonal_[i] = std::sqrt( + u * (std::min)((std::max)(jtj_(i, i), min_diagonal), max_diagonal)); + jtj_regularized_(i, i) += lm_diagonal_[i] * lm_diagonal_[i]; + } + + // TODO(sameeragarwal): Check for failure and deal with it. + linear_solver_.compute(jtj_regularized_); + lm_step_ = linear_solver_.solve(g_); + dx_ = jacobi_scaling_.asDiagonal() * lm_step_; + + // Adding parameter_tolerance to x.norm() ensures that this + // works if x is near zero. + const Scalar parameter_tolerance = + options.parameter_tolerance * + (x.norm() + options.parameter_tolerance); + if (dx_.norm() < parameter_tolerance) { + summary.status = RELATIVE_STEP_SIZE_TOO_SMALL; + break; + } + x_new_ = x + dx_; + + // TODO(keir): Add proper handling of errors from user eval of cost + // functions. + function(&x_new_[0], &f_x_new_[0], nullptr); + + const Scalar cost_change = (2 * cost_ - f_x_new_.squaredNorm()); + // TODO(sameeragarwal): Better more numerically stable evaluation. + const Scalar model_cost_change = lm_step_.dot(2 * g_ - jtj_ * lm_step_); + + // rho is the ratio of the actual reduction in error to the reduction + // in error that would be obtained if the problem was linear. See [1] + // for details. + Scalar rho(cost_change / model_cost_change); + if (rho > 0) { + // Accept the Levenberg-Marquardt step because the linear + // model fits well. 
+ x = x_new_; + + if (std::abs(cost_change) < options.function_tolerance) { + cost_ = f_x_new_.squaredNorm() / 2; + summary.status = COST_CHANGE_TOO_SMALL; + break; + } + + // TODO(sameeragarwal): Deal with failure. + Update(function, x); + if (summary.gradient_max_norm < options.gradient_tolerance) { + summary.status = GRADIENT_TOO_SMALL; + break; + } + + if (cost_ < options.cost_threshold) { + summary.status = COST_TOO_SMALL; + break; + } + + Scalar tmp = Scalar(2 * rho - 1); + u = u * (std::max)(Scalar(1 / 3.), Scalar(1) - tmp * tmp * tmp); + v = 2; + + } else { + // Reject the update because either the normal equations failed to solve + // or the local linear model was not good (rho < 0). + + // Additionally if the cost change is too small, then terminate. + if (std::abs(cost_change) < options.function_tolerance) { + // Terminate + summary.status = COST_CHANGE_TOO_SMALL; + break; + } + + // Reduce the size of the trust region. + u *= v; + v *= 2; + } + } + + summary.final_cost = cost_; + return summary; + } + + Options options; + Summary summary; + + private: + // Preallocate everything, including temporary storage needed for solving the + // linear system. This allows reusing the intermediate storage across solves. + LinearSolver linear_solver_; + Scalar cost_; + Parameters dx_, x_new_, g_, jacobi_scaling_, lm_diagonal_, lm_step_; + Eigen::Matrix residuals_, f_x_new_; + Eigen::Matrix jacobian_; + Eigen::Matrix jtj_, jtj_regularized_; + + // The following definitions are needed for template metaprogramming. + template + struct enable_if; + + template + struct enable_if { + using type = T; + }; + + // The number of parameters and residuals are dynamically sized. 
+ template + typename enable_if<(R == Eigen::Dynamic && P == Eigen::Dynamic), void>::type + Initialize(const Function& function) { + Initialize(function.NumResiduals(), function.NumParameters()); + } + + // The number of parameters is dynamically sized and the number of + // residuals is statically sized. + template + typename enable_if<(R == Eigen::Dynamic && P != Eigen::Dynamic), void>::type + Initialize(const Function& function) { + Initialize(function.NumResiduals(), P); + } + + // The number of parameters is statically sized and the number of + // residuals is dynamically sized. + template + typename enable_if<(R != Eigen::Dynamic && P == Eigen::Dynamic), void>::type + Initialize(const Function& function) { + Initialize(R, function.NumParameters()); + } + + // The number of parameters and residuals are statically sized. + template + typename enable_if<(R != Eigen::Dynamic && P != Eigen::Dynamic), void>::type + Initialize(const Function& /* function */) {} + + void Initialize(int num_residuals, int num_parameters) { + dx_.resize(num_parameters); + x_new_.resize(num_parameters); + g_.resize(num_parameters); + jacobi_scaling_.resize(num_parameters); + lm_diagonal_.resize(num_parameters); + lm_step_.resize(num_parameters); + residuals_.resize(num_residuals); + f_x_new_.resize(num_residuals); + jacobian_.resize(num_residuals, num_parameters); + jtj_.resize(num_parameters, num_parameters); + jtj_regularized_.resize(num_parameters, num_parameters); + } +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_TINY_SOLVER_H_ diff --git a/ceres-v2/include/tiny_solver_autodiff_function.h b/ceres-v2/include/tiny_solver_autodiff_function.h new file mode 100644 index 0000000000000000000000000000000000000000..3e3675ff07068be175af1c35de464b8c6c297821 --- /dev/null +++ b/ceres-v2/include/tiny_solver_autodiff_function.h @@ -0,0 +1,206 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. 
+// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: mierle@gmail.com (Keir Mierle) +// +// WARNING WARNING WARNING +// WARNING WARNING WARNING Tiny solver is experimental and will change. +// WARNING WARNING WARNING + +#ifndef CERES_PUBLIC_TINY_SOLVER_AUTODIFF_FUNCTION_H_ +#define CERES_PUBLIC_TINY_SOLVER_AUTODIFF_FUNCTION_H_ + +#include +#include + +#include "Eigen/Core" +#include "ceres/jet.h" +#include "ceres/types.h" // For kImpossibleValue. 
+ +namespace ceres { + +// An adapter around autodiff-style CostFunctors to enable easier use of +// TinySolver. See the example below showing how to use it: +// +// // Example for cost functor with static residual size. +// // Same as an autodiff cost functor, but taking only 1 parameter. +// struct MyFunctor { +// template +// bool operator()(const T* const parameters, T* residuals) const { +// const T& x = parameters[0]; +// const T& y = parameters[1]; +// const T& z = parameters[2]; +// residuals[0] = x + 2.*y + 4.*z; +// residuals[1] = y * z; +// return true; +// } +// }; +// +// typedef TinySolverAutoDiffFunction +// AutoDiffFunction; +// +// MyFunctor my_functor; +// AutoDiffFunction f(my_functor); +// +// Vec3 x = ...; +// TinySolver solver; +// solver.Solve(f, &x); +// +// // Example for cost functor with dynamic residual size. +// // NumResiduals() supplies dynamic size of residuals. +// // Same functionality as in tiny_solver.h but with autodiff. +// struct MyFunctorWithDynamicResiduals { +// int NumResiduals() const { +// return 2; +// } +// +// template +// bool operator()(const T* const parameters, T* residuals) const { +// const T& x = parameters[0]; +// const T& y = parameters[1]; +// const T& z = parameters[2]; +// residuals[0] = x + static_cast(2.)*y + static_cast(4.)*z; +// residuals[1] = y * z; +// return true; +// } +// }; +// +// typedef TinySolverAutoDiffFunction +// AutoDiffFunctionWithDynamicResiduals; +// +// MyFunctorWithDynamicResiduals my_functor_dyn; +// AutoDiffFunctionWithDynamicResiduals f(my_functor_dyn); +// +// Vec3 x = ...; +// TinySolver solver; +// solver.Solve(f, &x); +// +// WARNING: The cost function adapter is not thread safe. +template +class TinySolverAutoDiffFunction { + public: + // This class needs to have an Eigen aligned operator new as it contains + // as a member a Jet type, which itself has a fixed-size Eigen type as member. 
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + explicit TinySolverAutoDiffFunction(const CostFunctor& cost_functor) + : cost_functor_(cost_functor) { + Initialize(cost_functor); + } + + using Scalar = T; + enum { + NUM_PARAMETERS = kNumParameters, + NUM_RESIDUALS = kNumResiduals, + }; + + // This is similar to AutoDifferentiate(), but since there is only one + // parameter block it is easier to inline to avoid overhead. + bool operator()(const T* parameters, T* residuals, T* jacobian) const { + if (jacobian == nullptr) { + // No jacobian requested, so just directly call the cost function with + // doubles, skipping jets and derivatives. + return cost_functor_(parameters, residuals); + } + // Initialize the input jets with passed parameters. + for (int i = 0; i < kNumParameters; ++i) { + jet_parameters_[i].a = parameters[i]; // Scalar part. + jet_parameters_[i].v.setZero(); // Derivative part. + jet_parameters_[i].v[i] = T(1.0); + } + + // Initialize the output jets such that we can detect user errors. + for (int i = 0; i < num_residuals_; ++i) { + jet_residuals_[i].a = kImpossibleValue; + jet_residuals_[i].v.setConstant(kImpossibleValue); + } + + // Execute the cost function, but with jets to find the derivative. + if (!cost_functor_(jet_parameters_, jet_residuals_.data())) { + return false; + } + + // Copy the jacobian out of the derivative part of the residual jets. + Eigen::Map> jacobian_matrix( + jacobian, num_residuals_, kNumParameters); + for (int r = 0; r < num_residuals_; ++r) { + residuals[r] = jet_residuals_[r].a; + // Note that while this looks like a fast vectorized write, in practice it + // unfortunately thrashes the cache since the writes to the column-major + // jacobian are strided (e.g. rows are non-contiguous). + jacobian_matrix.row(r) = jet_residuals_[r].v; + } + return true; + } + + int NumResiduals() const { + return num_residuals_; // Set by Initialize. + } + + private: + const CostFunctor& cost_functor_; + + // The number of residuals at runtime. 
+ // This will be overriden if NUM_RESIDUALS == Eigen::Dynamic. + int num_residuals_ = kNumResiduals; + + // To evaluate the cost function with jets, temporary storage is needed. These + // are the buffers that are used during evaluation; parameters for the input, + // and jet_residuals_ are where the final cost and derivatives end up. + // + // Since this buffer is used for evaluation, the adapter is not thread safe. + using JetType = Jet; + mutable JetType jet_parameters_[kNumParameters]; + // Eigen::Matrix serves as static or dynamic container. + mutable Eigen::Matrix jet_residuals_; + + // The number of residuals is dynamically sized and the number of + // parameters is statically sized. + template + typename std::enable_if<(R == Eigen::Dynamic), void>::type Initialize( + const CostFunctor& function) { + jet_residuals_.resize(function.NumResiduals()); + num_residuals_ = function.NumResiduals(); + } + + // The number of parameters and residuals are statically sized. + template + typename std::enable_if<(R != Eigen::Dynamic), void>::type Initialize( + const CostFunctor& /* function */) { + num_residuals_ = kNumResiduals; + } +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_TINY_SOLVER_AUTODIFF_FUNCTION_H_ diff --git a/ceres-v2/include/tiny_solver_cost_function_adapter.h b/ceres-v2/include/tiny_solver_cost_function_adapter.h new file mode 100644 index 0000000000000000000000000000000000000000..cc5ca16af5d3ebb95d43625bea19d6da491f0c47 --- /dev/null +++ b/ceres-v2/include/tiny_solver_cost_function_adapter.h @@ -0,0 +1,142 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_TINY_SOLVER_COST_FUNCTION_ADAPTER_H_ +#define CERES_PUBLIC_TINY_SOLVER_COST_FUNCTION_ADAPTER_H_ + +#include "Eigen/Core" +#include "ceres/cost_function.h" +#include "glog/logging.h" + +namespace ceres { + +// An adapter class that lets users of TinySolver use +// ceres::CostFunction objects that have exactly one parameter block. +// +// The adapter allows for the number of residuals and the size of the +// parameter block to be specified at compile or run-time. +// +// WARNING: This object is not thread-safe. +// +// Example usage: +// +// CostFunction* cost_function = ... 
+// +// Number of residuals and parameter block size known at compile time: +// +// TinySolverCostFunctionAdapter +// cost_function_adapter(*cost_function); +// +// Number of residuals known at compile time and the parameter block +// size not known at compile time. +// +// TinySolverCostFunctionAdapter +// cost_function_adapter(*cost_function); +// +// Number of residuals not known at compile time and the parameter +// block size known at compile time. +// +// TinySolverCostFunctionAdapter +// cost_function_adapter(*cost_function); +// +// Number of residuals not known at compile time and the parameter +// block size not known at compile time. +// +// TinySolverCostFunctionAdapter cost_function_adapter(*cost_function); +// +template +class TinySolverCostFunctionAdapter { + public: + using Scalar = double; + enum ComponentSizeType { + NUM_PARAMETERS = kNumParameters, + NUM_RESIDUALS = kNumResiduals + }; + + // This struct needs to have an Eigen aligned operator new as it contains + // fixed-size Eigen types. 
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + explicit TinySolverCostFunctionAdapter(const CostFunction& cost_function) + : cost_function_(cost_function) { + CHECK_EQ(cost_function_.parameter_block_sizes().size(), 1) + << "Only CostFunctions with exactly one parameter blocks are allowed."; + + const int parameter_block_size = cost_function_.parameter_block_sizes()[0]; + if (NUM_PARAMETERS == Eigen::Dynamic || NUM_RESIDUALS == Eigen::Dynamic) { + if (NUM_RESIDUALS != Eigen::Dynamic) { + CHECK_EQ(cost_function_.num_residuals(), NUM_RESIDUALS); + } + if (NUM_PARAMETERS != Eigen::Dynamic) { + CHECK_EQ(parameter_block_size, NUM_PARAMETERS); + } + + row_major_jacobian_.resize(cost_function_.num_residuals(), + parameter_block_size); + } + } + + bool operator()(const double* parameters, + double* residuals, + double* jacobian) const { + if (!jacobian) { + return cost_function_.Evaluate(¶meters, residuals, nullptr); + } + + double* jacobians[1] = {row_major_jacobian_.data()}; + if (!cost_function_.Evaluate(¶meters, residuals, jacobians)) { + return false; + } + + // The Function object used by TinySolver takes its Jacobian in a + // column-major layout, and the CostFunction objects use row-major + // Jacobian matrices. So the following bit of code does the + // conversion from row-major Jacobians to column-major Jacobians. 
+ Eigen::Map> + col_major_jacobian(jacobian, NumResiduals(), NumParameters()); + col_major_jacobian = row_major_jacobian_; + return true; + } + + int NumResiduals() const { return cost_function_.num_residuals(); } + int NumParameters() const { + return cost_function_.parameter_block_sizes()[0]; + } + + private: + const CostFunction& cost_function_; + mutable Eigen::Matrix + row_major_jacobian_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_TINY_SOLVER_COST_FUNCTION_ADAPTER_H_ diff --git a/ceres-v2/include/types.h b/ceres-v2/include/types.h new file mode 100644 index 0000000000000000000000000000000000000000..e5224238129dc48dd8798d7f02d14dbc6343e0f7 --- /dev/null +++ b/ceres-v2/include/types.h @@ -0,0 +1,535 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Enums and other top level class definitions. +// +// Note: internal/types.cc defines stringification routines for some +// of these enums. Please update those routines if you extend or +// remove enums from here. + +#ifndef CERES_PUBLIC_TYPES_H_ +#define CERES_PUBLIC_TYPES_H_ + +#include + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" + +namespace ceres { + +// Argument type used in interfaces that can optionally take ownership +// of a passed in argument. If TAKE_OWNERSHIP is passed, the called +// object takes ownership of the pointer argument, and will call +// delete on it upon completion. +enum Ownership { + DO_NOT_TAKE_OWNERSHIP, + TAKE_OWNERSHIP, +}; + +// TODO(keir): Considerably expand the explanations of each solver type. +enum LinearSolverType { + // These solvers are for general rectangular systems formed from the + // normal equations A'A x = A'b. They are direct solvers and do not + // assume any special problem structure. + + // Solve the normal equations using a dense Cholesky solver; based + // on Eigen. + DENSE_NORMAL_CHOLESKY, + + // Solve the normal equations using a dense QR solver; based on + // Eigen. + DENSE_QR, + + // Solve the normal equations using a sparse cholesky solver; requires + // SuiteSparse or CXSparse. 
+ SPARSE_NORMAL_CHOLESKY, + + // Specialized solvers, specific to problems with a generalized + // bi-partitite structure. + + // Solves the reduced linear system using a dense Cholesky solver; + // based on Eigen. + DENSE_SCHUR, + + // Solves the reduced linear system using a sparse Cholesky solver; + // based on CHOLMOD. + SPARSE_SCHUR, + + // Solves the reduced linear system using Conjugate Gradients, based + // on a new Ceres implementation. Suitable for large scale + // problems. + ITERATIVE_SCHUR, + + // Conjugate gradients on the normal equations. + CGNR +}; + +enum PreconditionerType { + // Trivial preconditioner - the identity matrix. + IDENTITY, + + // Block diagonal of the Gauss-Newton Hessian. + JACOBI, + + // Note: The following three preconditioners can only be used with + // the ITERATIVE_SCHUR solver. They are well suited for Structure + // from Motion problems. + + // Block diagonal of the Schur complement. This preconditioner may + // only be used with the ITERATIVE_SCHUR solver. + SCHUR_JACOBI, + + // Visibility clustering based preconditioners. + // + // The following two preconditioners use the visibility structure of + // the scene to determine the sparsity structure of the + // preconditioner. This is done using a clustering algorithm. The + // available visibility clustering algorithms are described below. + CLUSTER_JACOBI, + CLUSTER_TRIDIAGONAL, + + // Subset preconditioner is a general purpose preconditioner + // linear least squares problems. Given a set of residual blocks, + // it uses the corresponding subset of the rows of the Jacobian to + // construct a preconditioner. + // + // Suppose the Jacobian J has been horizontally partitioned as + // + // J = [P] + // [Q] + // + // Where, Q is the set of rows corresponding to the residual + // blocks in residual_blocks_for_subset_preconditioner. + // + // The preconditioner is the inverse of the matrix Q'Q. 
+ // + // Obviously, the efficacy of the preconditioner depends on how + // well the matrix Q approximates J'J, or how well the chosen + // residual blocks approximate the non-linear least squares + // problem. + SUBSET, +}; + +enum VisibilityClusteringType { + // Canonical views algorithm as described in + // + // "Scene Summarization for Online Image Collections", Ian Simon, Noah + // Snavely, Steven M. Seitz, ICCV 2007. + // + // This clustering algorithm can be quite slow, but gives high + // quality clusters. The original visibility based clustering paper + // used this algorithm. + CANONICAL_VIEWS, + + // The classic single linkage algorithm. It is extremely fast as + // compared to CANONICAL_VIEWS, but can give slightly poorer + // results. For problems with large number of cameras though, this + // is generally a pretty good option. + // + // If you are using SCHUR_JACOBI preconditioner and have SuiteSparse + // available, CLUSTER_JACOBI and CLUSTER_TRIDIAGONAL in combination + // with the SINGLE_LINKAGE algorithm will generally give better + // results. + SINGLE_LINKAGE +}; + +enum SparseLinearAlgebraLibraryType { + // High performance sparse Cholesky factorization and approximate + // minimum degree ordering. + SUITE_SPARSE, + + // A lightweight replacement for SuiteSparse, which does not require + // a LAPACK/BLAS implementation. Consequently, its performance is + // also a bit lower than SuiteSparse. + CX_SPARSE, + + // Eigen's sparse linear algebra routines. In particular Ceres uses + // the Simplicial LDLT routines. + EIGEN_SPARSE, + + // Apple's Accelerate framework sparse linear algebra routines. + ACCELERATE_SPARSE, + + // No sparse linear solver should be used. This does not necessarily + // imply that Ceres was built without any sparse library, although that + // is the likely use case, merely that one should not be used. 
+ NO_SPARSE +}; + +enum DenseLinearAlgebraLibraryType { + EIGEN, + LAPACK, + CUDA, +}; + +// Logging options +// The options get progressively noisier. +enum LoggingType { + SILENT, + PER_MINIMIZER_ITERATION, +}; + +enum MinimizerType { + LINE_SEARCH, + TRUST_REGION, +}; + +enum LineSearchDirectionType { + // Negative of the gradient. + STEEPEST_DESCENT, + + // A generalization of the Conjugate Gradient method to non-linear + // functions. The generalization can be performed in a number of + // different ways, resulting in a variety of search directions. The + // precise choice of the non-linear conjugate gradient algorithm + // used is determined by NonlinerConjuateGradientType. + NONLINEAR_CONJUGATE_GRADIENT, + + // BFGS, and it's limited memory approximation L-BFGS, are quasi-Newton + // algorithms that approximate the Hessian matrix by iteratively refining + // an initial estimate with rank-one updates using the gradient at each + // iteration. They are a generalisation of the Secant method and satisfy + // the Secant equation. The Secant equation has an infinium of solutions + // in multiple dimensions, as there are N*(N+1)/2 degrees of freedom in a + // symmetric matrix but only N conditions are specified by the Secant + // equation. The requirement that the Hessian approximation be positive + // definite imposes another N additional constraints, but that still leaves + // remaining degrees-of-freedom. (L)BFGS methods uniquely determine the + // approximate Hessian by imposing the additional constraints that the + // approximation at the next iteration must be the 'closest' to the current + // approximation (the nature of how this proximity is measured is actually + // the defining difference between a family of quasi-Newton methods including + // (L)BFGS & DFP). (L)BFGS is currently regarded as being the best known + // general quasi-Newton method. 
+ // + // The principal difference between BFGS and L-BFGS is that whilst BFGS + // maintains a full, dense approximation to the (inverse) Hessian, L-BFGS + // maintains only a window of the last M observations of the parameters and + // gradients. Using this observation history, the calculation of the next + // search direction can be computed without requiring the construction of the + // full dense inverse Hessian approximation. This is particularly important + // for problems with a large number of parameters, where storage of an N-by-N + // matrix in memory would be prohibitive. + // + // For more details on BFGS see: + // + // Broyden, C.G., "The Convergence of a Class of Double-rank Minimization + // Algorithms,"; J. Inst. Maths. Applics., Vol. 6, pp 76-90, 1970. + // + // Fletcher, R., "A New Approach to Variable Metric Algorithms," + // Computer Journal, Vol. 13, pp 317-322, 1970. + // + // Goldfarb, D., "A Family of Variable Metric Updates Derived by Variational + // Means," Mathematics of Computing, Vol. 24, pp 23-26, 1970. + // + // Shanno, D.F., "Conditioning of Quasi-Newton Methods for Function + // Minimization," Mathematics of Computing, Vol. 24, pp 647-656, 1970. + // + // For more details on L-BFGS see: + // + // Nocedal, J. (1980). "Updating Quasi-Newton Matrices with Limited + // Storage". Mathematics of Computation 35 (151): 773-782. + // + // Byrd, R. H.; Nocedal, J.; Schnabel, R. B. (1994). + // "Representations of Quasi-Newton Matrices and their use in + // Limited Memory Methods". Mathematical Programming 63 (4): + // 129-156. + // + // A general reference for both methods: + // + // Nocedal J., Wright S., Numerical Optimization, 2nd Ed. Springer, 1999. + LBFGS, + BFGS, +}; + +// Nonlinear conjugate gradient methods are a generalization of the +// method of Conjugate Gradients for linear systems. 
The +// generalization can be carried out in a number of different ways +// leading to number of different rules for computing the search +// direction. Ceres provides a number of different variants. For more +// details see Numerical Optimization by Nocedal & Wright. +enum NonlinearConjugateGradientType { + FLETCHER_REEVES, + POLAK_RIBIERE, + HESTENES_STIEFEL, +}; + +enum LineSearchType { + // Backtracking line search with polynomial interpolation or + // bisection. + ARMIJO, + WOLFE, +}; + +// Ceres supports different strategies for computing the trust region +// step. +enum TrustRegionStrategyType { + // The default trust region strategy is to use the step computation + // used in the Levenberg-Marquardt algorithm. For more details see + // levenberg_marquardt_strategy.h + LEVENBERG_MARQUARDT, + + // Powell's dogleg algorithm interpolates between the Cauchy point + // and the Gauss-Newton step. It is particularly useful if the + // LEVENBERG_MARQUARDT algorithm is making a large number of + // unsuccessful steps. For more details see dogleg_strategy.h. + // + // NOTES: + // + // 1. This strategy has not been experimented with or tested as + // extensively as LEVENBERG_MARQUARDT, and therefore it should be + // considered EXPERIMENTAL for now. + // + // 2. For now this strategy should only be used with exact + // factorization based linear solvers, i.e., SPARSE_SCHUR, + // DENSE_SCHUR, DENSE_QR and SPARSE_NORMAL_CHOLESKY. + DOGLEG +}; + +// Ceres supports two different dogleg strategies. +// The "traditional" dogleg method by Powell and the +// "subspace" method described in +// R. H. Byrd, R. B. Schnabel, and G. A. Shultz, +// "Approximate solution of the trust region problem by minimization +// over two-dimensional subspaces", Mathematical Programming, +// 40 (1988), pp. 
247--263 +enum DoglegType { + // The traditional approach constructs a dogleg path + // consisting of two line segments and finds the furthest + // point on that path that is still inside the trust region. + TRADITIONAL_DOGLEG, + + // The subspace approach finds the exact minimum of the model + // constrained to the subspace spanned by the dogleg path. + SUBSPACE_DOGLEG +}; + +enum TerminationType { + // Minimizer terminated because one of the convergence criterion set + // by the user was satisfied. + // + // 1. (new_cost - old_cost) < function_tolerance * old_cost; + // 2. max_i |gradient_i| < gradient_tolerance + // 3. |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance) + // + // The user's parameter blocks will be updated with the solution. + CONVERGENCE, + + // The solver ran for maximum number of iterations or maximum amount + // of time specified by the user, but none of the convergence + // criterion specified by the user were met. The user's parameter + // blocks will be updated with the solution found so far. + NO_CONVERGENCE, + + // The minimizer terminated because of an error. The user's + // parameter blocks will not be updated. + FAILURE, + + // Using an IterationCallback object, user code can control the + // minimizer. The following enums indicate that the user code was + // responsible for termination. + // + // Minimizer terminated successfully because a user + // IterationCallback returned SOLVER_TERMINATE_SUCCESSFULLY. + // + // The user's parameter blocks will be updated with the solution. + USER_SUCCESS, + + // Minimizer terminated because because a user IterationCallback + // returned SOLVER_ABORT. + // + // The user's parameter blocks will not be updated. + USER_FAILURE +}; + +// Enums used by the IterationCallback instances to indicate to the +// solver whether it should continue solving, the user detected an +// error or the solution is good enough and the solver should +// terminate. 
+enum CallbackReturnType { + // Continue solving to next iteration. + SOLVER_CONTINUE, + + // Terminate solver, and do not update the parameter blocks upon + // return. Unless the user has set + // Solver:Options:::update_state_every_iteration, in which case the + // state would have been updated every iteration + // anyways. Solver::Summary::termination_type is set to USER_ABORT. + SOLVER_ABORT, + + // Terminate solver, update state and + // return. Solver::Summary::termination_type is set to USER_SUCCESS. + SOLVER_TERMINATE_SUCCESSFULLY +}; + +// The format in which linear least squares problems should be logged +// when Solver::Options::lsqp_iterations_to_dump is non-empty. +enum DumpFormatType { + // Print the linear least squares problem in a human readable format + // to stderr. The Jacobian is printed as a dense matrix. The vectors + // D, x and f are printed as dense vectors. This should only be used + // for small problems. + CONSOLE, + + // Write out the linear least squares problem to the directory + // pointed to by Solver::Options::lsqp_dump_directory as text files + // which can be read into MATLAB/Octave. The Jacobian is dumped as a + // text file containing (i,j,s) triplets, the vectors D, x and f are + // dumped as text files containing a list of their values. + // + // A MATLAB/octave script called lm_iteration_???.m is also output, + // which can be used to parse and load the problem into memory. + TEXTFILE +}; + +// For SizedCostFunction and AutoDiffCostFunction, DYNAMIC can be +// specified for the number of residuals. If specified, then the +// number of residuas for that cost function can vary at runtime. +enum DimensionType { + DYNAMIC = -1, +}; + +// The differentiation method used to compute numerical derivatives in +// NumericDiffCostFunction and DynamicNumericDiffCostFunction. +enum NumericDiffMethodType { + // Compute central finite difference: f'(x) ~ (f(x+h) - f(x-h)) / 2h. 
+ CENTRAL, + + // Compute forward finite difference: f'(x) ~ (f(x+h) - f(x)) / h. + FORWARD, + + // Adaptive numerical differentiation using Ridders' method. Provides more + // accurate and robust derivatives at the expense of additional cost + // function evaluations. + RIDDERS +}; + +enum LineSearchInterpolationType { + BISECTION, + QUADRATIC, + CUBIC, +}; + +enum CovarianceAlgorithmType { + DENSE_SVD, + SPARSE_QR, +}; + +// It is a near impossibility that user code generates this exact +// value in normal operation, thus we will use it to fill arrays +// before passing them to user code. If on return an element of the +// array still contains this value, we will assume that the user code +// did not write to that memory location. +const double kImpossibleValue = 1e302; + +CERES_EXPORT const char* LinearSolverTypeToString(LinearSolverType type); +CERES_EXPORT bool StringToLinearSolverType(std::string value, + LinearSolverType* type); + +CERES_EXPORT const char* PreconditionerTypeToString(PreconditionerType type); +CERES_EXPORT bool StringToPreconditionerType(std::string value, + PreconditionerType* type); + +CERES_EXPORT const char* VisibilityClusteringTypeToString( + VisibilityClusteringType type); +CERES_EXPORT bool StringToVisibilityClusteringType( + std::string value, VisibilityClusteringType* type); + +CERES_EXPORT const char* SparseLinearAlgebraLibraryTypeToString( + SparseLinearAlgebraLibraryType type); +CERES_EXPORT bool StringToSparseLinearAlgebraLibraryType( + std::string value, SparseLinearAlgebraLibraryType* type); + +CERES_EXPORT const char* DenseLinearAlgebraLibraryTypeToString( + DenseLinearAlgebraLibraryType type); +CERES_EXPORT bool StringToDenseLinearAlgebraLibraryType( + std::string value, DenseLinearAlgebraLibraryType* type); + +CERES_EXPORT const char* TrustRegionStrategyTypeToString( + TrustRegionStrategyType type); +CERES_EXPORT bool StringToTrustRegionStrategyType( + std::string value, TrustRegionStrategyType* type); + +CERES_EXPORT const 
char* DoglegTypeToString(DoglegType type);
+CERES_EXPORT bool StringToDoglegType(std::string value, DoglegType* type);
+
+CERES_EXPORT const char* MinimizerTypeToString(MinimizerType type);
+CERES_EXPORT bool StringToMinimizerType(std::string value, MinimizerType* type);
+
+CERES_EXPORT const char* LineSearchDirectionTypeToString(
+    LineSearchDirectionType type);
+CERES_EXPORT bool StringToLineSearchDirectionType(
+    std::string value, LineSearchDirectionType* type);
+
+CERES_EXPORT const char* LineSearchTypeToString(LineSearchType type);
+CERES_EXPORT bool StringToLineSearchType(std::string value,
+                                         LineSearchType* type);
+
+CERES_EXPORT const char* NonlinearConjugateGradientTypeToString(
+    NonlinearConjugateGradientType type);
+CERES_EXPORT bool StringToNonlinearConjugateGradientType(
+    std::string value, NonlinearConjugateGradientType* type);
+
+CERES_EXPORT const char* LineSearchInterpolationTypeToString(
+    LineSearchInterpolationType type);
+CERES_EXPORT bool StringToLineSearchInterpolationType(
+    std::string value, LineSearchInterpolationType* type);
+
+CERES_EXPORT const char* CovarianceAlgorithmTypeToString(
+    CovarianceAlgorithmType type);
+CERES_EXPORT bool StringToCovarianceAlgorithmType(
+    std::string value, CovarianceAlgorithmType* type);
+
+CERES_EXPORT const char* NumericDiffMethodTypeToString(
+    NumericDiffMethodType type);
+CERES_EXPORT bool StringToNumericDiffMethodType(std::string value,
+                                                NumericDiffMethodType* type);
+
+CERES_EXPORT const char* LoggingTypeToString(LoggingType type);
+CERES_EXPORT bool StringtoLoggingType(std::string value, LoggingType* type);
+
+// NOTE: a spurious second StringtoDumpFormatType overload taking a
+// LoggingType* out-parameter was removed here: it does not exist in the
+// Ceres public API, paired the DumpFormat parser with the wrong enum, and
+// declared a function the library never defines.
+CERES_EXPORT const char* DumpFormatTypeToString(DumpFormatType type);
+CERES_EXPORT bool StringtoDumpFormatType(std::string value,
+                                         DumpFormatType* type);
+
+CERES_EXPORT const char* TerminationTypeToString(TerminationType type);
+
+CERES_EXPORT bool IsSchurType(LinearSolverType type);
+CERES_EXPORT bool 
IsSparseLinearAlgebraLibraryTypeAvailable( + SparseLinearAlgebraLibraryType type); +CERES_EXPORT bool IsDenseLinearAlgebraLibraryTypeAvailable( + DenseLinearAlgebraLibraryType type); + +} // namespace ceres + +#include "ceres/internal/reenable_warnings.h" + +#endif // CERES_PUBLIC_TYPES_H_ diff --git a/ceres-v2/include/version.h b/ceres-v2/include/version.h new file mode 100644 index 0000000000000000000000000000000000000000..e0d61972896676af940c85dec6525e6b5342246b --- /dev/null +++ b/ceres-v2/include/version.h @@ -0,0 +1,49 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2021 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: mierle@gmail.com (Keir Mierle) + +#ifndef CERES_PUBLIC_VERSION_H_ +#define CERES_PUBLIC_VERSION_H_ + +#define CERES_VERSION_MAJOR 2 +#define CERES_VERSION_MINOR 1 +#define CERES_VERSION_REVISION 0 + +// Classic CPP stringifcation; the extra level of indirection allows the +// preprocessor to expand the macro before being converted to a string. +#define CERES_TO_STRING_HELPER(x) #x +#define CERES_TO_STRING(x) CERES_TO_STRING_HELPER(x) + +// The Ceres version as a string; for example "1.9.0". +#define CERES_VERSION_STRING \ + CERES_TO_STRING(CERES_VERSION_MAJOR) \ + "." CERES_TO_STRING(CERES_VERSION_MINOR) "." CERES_TO_STRING( \ + CERES_VERSION_REVISION) + +#endif // CERES_PUBLIC_VERSION_H_ diff --git a/ceres-v2/lib/CeresConfig.cmake b/ceres-v2/lib/CeresConfig.cmake new file mode 100644 index 0000000000000000000000000000000000000000..b79ab7b19c31077f66d8e48bec03ede0e6b31577 --- /dev/null +++ b/ceres-v2/lib/CeresConfig.cmake @@ -0,0 +1,340 @@ +# Ceres Solver - A fast non-linear least squares minimizer +# Copyright 2015 Google Inc. All rights reserved. +# http://ceres-solver.org/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Google Inc. nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# Authors: pablo.speciale@gmail.com (Pablo Speciale) +# alexs.mac@gmail.com (Alex Stewart) +# + +# Config file for Ceres Solver - Find Ceres & dependencies. +# +# This file is used by CMake when find_package(Ceres) is invoked and either +# the directory containing this file either is present in CMAKE_MODULE_PATH +# (if Ceres was installed), or exists in the local CMake package registry if +# the Ceres build directory was exported. +# +# This module defines the following variables: +# +# Ceres_FOUND / CERES_FOUND: True if Ceres has been successfully +# found. 
Both variables are set as although +# FindPackage() only references Ceres_FOUND +# in Config mode, given the conventions for +# _FOUND when FindPackage() is +# called in Module mode, users could +# reasonably expect to use CERES_FOUND +# instead. +# +# CERES_VERSION: Version of Ceres found. +# +# CERES_LIBRARIES: Libraries for Ceres and all +# dependencies against which Ceres was +# compiled. This will not include any optional +# dependencies that were disabled when Ceres was +# compiled. +# +# NOTE: There is no equivalent of CERES_INCLUDE_DIRS as the exported +# CMake target already includes the definition of its public +# include directories. + +include(CMakeFindDependencyMacro) + +# Called if we failed to find Ceres or any of its required dependencies, +# unsets all public (designed to be used externally) variables and reports +# error message at priority depending upon [REQUIRED/QUIET/] argument. +macro(CERES_REPORT_NOT_FOUND REASON_MSG) + # FindPackage() only references Ceres_FOUND, and requires it to be + # explicitly set FALSE to denote not found (not merely undefined). + set(Ceres_FOUND FALSE) + set(CERES_FOUND FALSE) + unset(CERES_INCLUDE_DIR) + unset(CERES_INCLUDE_DIRS) + unset(CERES_LIBRARIES) + + # Reset the CMake module path to its state when this script was called. + set(CMAKE_MODULE_PATH ${CALLERS_CMAKE_MODULE_PATH}) + + # Note _FIND_[REQUIRED/QUIETLY] variables defined by + # FindPackage() use the camelcase library name, not uppercase. + if (Ceres_FIND_QUIETLY) + message(STATUS "Failed to find Ceres - " ${REASON_MSG} ${ARGN}) + elseif (Ceres_FIND_REQUIRED) + message(FATAL_ERROR "Failed to find Ceres - " ${REASON_MSG} ${ARGN}) + else() + # Neither QUIETLY nor REQUIRED, use SEND_ERROR which emits an error + # that prevents generation, but continues configuration. 
+ message(SEND_ERROR "Failed to find Ceres - " ${REASON_MSG} ${ARGN}) + endif () + return() +endmacro(CERES_REPORT_NOT_FOUND) + + +# ceres_message([mode] "message text") +# +# Wraps the standard cmake 'message' command, but suppresses output +# if the QUIET flag was passed to the find_package(Ceres ...) call. +function(ceres_message) + if (NOT Ceres_FIND_QUIETLY) + message(${ARGN}) + endif() +endfunction() + + +# ceres_pretty_print_cmake_list( OUTPUT_VAR [item1 [item2 ... ]] ) +# +# Sets ${OUTPUT_VAR} in the caller's scope to a human-readable string +# representation of the list passed as the remaining arguments formed +# as: "[item1, item2, ..., itemN]". +function(ceres_pretty_print_cmake_list OUTPUT_VAR) + string(REPLACE ";" ", " PRETTY_LIST_STRING "[${ARGN}]") + set(${OUTPUT_VAR} "${PRETTY_LIST_STRING}" PARENT_SCOPE) +endfunction() + +# The list of (optional) components this version of Ceres was compiled with. +set(CERES_COMPILED_COMPONENTS "EigenSparse;SparseLinearAlgebraLibrary;LAPACK;SuiteSparse;CXSparse;SchurSpecializations;Multithreading") + +# If Ceres was not installed, then by definition it was exported +# from a build directory. +set(CERES_WAS_INSTALLED TRUE) + +# Record the state of the CMake module path when this script was +# called so that we can ensure that we leave it in the same state on +# exit as it was on entry, but modify it locally. +set(CALLERS_CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}) + +# Get the (current, i.e. installed) directory containing this file. +get_filename_component(CERES_CURRENT_CONFIG_DIR + "${CMAKE_CURRENT_LIST_FILE}" PATH) + +if (CERES_WAS_INSTALLED) + # Reset CMake module path to the installation directory of this + # script, thus we will use the FindPackage() scripts shipped with + # Ceres to find Ceres' dependencies, even if the user has equivalently + # named FindPackage() scripts in their project. 
+ set(CMAKE_MODULE_PATH ${CERES_CURRENT_CONFIG_DIR}) + + # Build the absolute root install directory as a relative path + # (determined when Ceres was configured & built) from the current + # install directory for this this file. This allows for the install + # tree to be relocated, after Ceres was built, outside of CMake. + get_filename_component(CURRENT_ROOT_INSTALL_DIR + "${CERES_CURRENT_CONFIG_DIR}/../../../" + ABSOLUTE) + if (NOT EXISTS ${CURRENT_ROOT_INSTALL_DIR}) + ceres_report_not_found( + "Ceres install root: ${CURRENT_ROOT_INSTALL_DIR}, " + "determined from relative path from CeresConfig.cmake install location: " + "${CERES_CURRENT_CONFIG_DIR}, does not exist. Either the install " + "directory was deleted, or the install tree was only partially relocated " + "outside of CMake after Ceres was built.") + endif (NOT EXISTS ${CURRENT_ROOT_INSTALL_DIR}) + +else(CERES_WAS_INSTALLED) + # Ceres was exported from the build tree. + set(CERES_EXPORTED_BUILD_DIR ${CERES_CURRENT_CONFIG_DIR}) + get_filename_component(CERES_EXPORTED_SOURCE_DIR + "${CERES_EXPORTED_BUILD_DIR}/../../../" + ABSOLUTE) + if (NOT EXISTS ${CERES_EXPORTED_SOURCE_DIR}) + ceres_report_not_found( + "Ceres exported source directory: ${CERES_EXPORTED_SOURCE_DIR}, " + "determined from relative path from CeresConfig.cmake exported build " + "directory: ${CERES_EXPORTED_BUILD_DIR} does not exist.") + endif() + + # Reset CMake module path to the cmake directory in the Ceres source + # tree which was exported, thus we will use the FindPackage() scripts shipped + # with Ceres to find Ceres' dependencies, even if the user has equivalently + # named FindPackage() scripts in their project. + set(CMAKE_MODULE_PATH ${CERES_EXPORTED_SOURCE_DIR}/cmake) +endif(CERES_WAS_INSTALLED) + +# Set the version. 
+set(CERES_VERSION 2.1.0) + +include(CMakeFindDependencyMacro) +find_dependency(Threads) + +# Optional dependencies +find_dependency(CXSparse 3.2.0) +find_dependency(SuiteSparse 5.7.1) + +# As imported CMake targets are not re-exported when a dependent target is +# exported, we must invoke find_package(XXX) here to reload the definition +# of their targets. Without this, the dependency target names (e.g. +# 'gflags-shared') which will be present in the ceres target would not be +# defined, and so CMake will assume that they refer to a library name and +# fail to link correctly. + +# Eigen. +# Flag set during configuration and build of Ceres. +set(CERES_EIGEN_VERSION 3.3.7) +# Search quietly to control the timing of the error message if not found. The +# search should be for an exact match, but for usability reasons do a soft +# match and reject with an explanation below. +find_package(Eigen3 ${CERES_EIGEN_VERSION} QUIET) +if (Eigen3_FOUND) + if (NOT Eigen3_VERSION VERSION_EQUAL CERES_EIGEN_VERSION) + # CMake's VERSION check in FIND_PACKAGE() will accept any version >= the + # specified version. However, only version = is supported. Improve + # usability by explaining why we don't accept non-exact version matching. + ceres_report_not_found("Found Eigen dependency, but the version of Eigen " + "found (${Eigen3_VERSION}) does not exactly match the version of Eigen " + "Ceres was compiled with (${CERES_EIGEN_VERSION}). This can cause subtle " + "bugs by triggering violations of the One Definition Rule. See the " + "Wikipedia article http://en.wikipedia.org/wiki/One_Definition_Rule " + "for more details") + endif () + ceres_message(STATUS "Found required Ceres dependency: " + "Eigen version ${CERES_EIGEN_VERSION} in ${Eigen3_DIR}") +else (Eigen3_FOUND) + ceres_report_not_found("Missing required Ceres " + "dependency: Eigen version ${CERES_EIGEN_VERSION}, please set " + "Eigen3_DIR.") +endif (Eigen3_FOUND) + +# glog (and maybe gflags). 
+# +# Flags set during configuration and build of Ceres. +set(CERES_USES_MINIGLOG OFF) +set(CERES_GLOG_VERSION ) +set(CERES_GLOG_WAS_BUILT_WITH_CMAKE 0) + +set(CERES_USES_GFLAGS ON) +set(CERES_GFLAGS_VERSION 2.2.2) + +if (CERES_USES_MINIGLOG) + # Output message at standard log level (not the lower STATUS) so that + # the message is output in GUI during configuration to warn user. + ceres_message("-- Found Ceres compiled with miniglog substitute " + "for glog, beware this will likely cause problems if glog is later linked.") +else(CERES_USES_MINIGLOG) + if (CERES_GLOG_WAS_BUILT_WITH_CMAKE) + find_package(glog ${CERES_GLOG_VERSION} CONFIG QUIET) + set(GLOG_FOUND ${glog_FOUND}) + else() + # Version of glog against which Ceres was built was not built with CMake, + # use the exported glog find_package() module from Ceres to find it again. + # Append the locations of glog when Ceres was built to the search path hints. + list(APPEND GLOG_INCLUDE_DIR_HINTS "/usr/include") + get_filename_component(CERES_BUILD_GLOG_LIBRARY_DIR "/usr/lib/x86_64-linux-gnu/libglog.so" PATH) + list(APPEND GLOG_LIBRARY_DIR_HINTS ${CERES_BUILD_GLOG_LIBRARY_DIR}) + + # Search quietly s/t we control the timing of the error message if not found. + find_package(Glog QUIET) + endif() + + if (GLOG_FOUND) + ceres_message(STATUS "Found required Ceres dependency: glog") + else() + ceres_report_not_found("Missing required Ceres dependency: glog.") + endif() + + # gflags is only a public dependency of Ceres via glog, thus is not required + # if Ceres was built with MINIGLOG. + if (CERES_USES_GFLAGS) + # Search quietly s/t we control the timing of the error message if not found. 
+ find_package(gflags ${CERES_GFLAGS_VERSION} QUIET) + if (gflags_FOUND AND TARGET gflags) + ceres_message(STATUS "Found required Ceres dependency: gflags") + else() + ceres_report_not_found("Missing required Ceres " + "dependency: gflags (not found, or not found as exported CMake target).") + endif() + endif() +endif(CERES_USES_MINIGLOG) + +# Import exported Ceres targets, if they have not already been imported. +if (NOT TARGET ceres AND NOT Ceres_BINARY_DIR) + include(${CERES_CURRENT_CONFIG_DIR}/CeresTargets.cmake) +endif (NOT TARGET ceres AND NOT Ceres_BINARY_DIR) +# Set the expected XX_LIBRARIES variable for FindPackage(). +set(CERES_LIBRARIES Ceres::ceres) + +# Reset CMake module path to its state when this script was called. +set(CMAKE_MODULE_PATH ${CALLERS_CMAKE_MODULE_PATH}) + +# Build the detected Ceres version string to correctly capture whether it +# was installed, or exported. +ceres_pretty_print_cmake_list(CERES_COMPILED_COMPONENTS_STRING + ${CERES_COMPILED_COMPONENTS}) +if (CERES_WAS_INSTALLED) + set(CERES_DETECTED_VERSION_STRING "Ceres version: ${CERES_VERSION} " + "installed in: ${CURRENT_ROOT_INSTALL_DIR} with components: " + "${CERES_COMPILED_COMPONENTS_STRING}") +else (CERES_WAS_INSTALLED) + set(CERES_DETECTED_VERSION_STRING "Ceres version: ${CERES_VERSION} " + "exported from build directory: ${CERES_EXPORTED_BUILD_DIR} with " + "components: ${CERES_COMPILED_COMPONENTS_STRING}") +endif() + +# If the user called this script through find_package() whilst specifying +# particular Ceres components that should be found via: +# find_package(Ceres COMPONENTS XXX YYY), check the requested components against +# those with which Ceres was compiled. In this case, we should only report +# Ceres as found if all the requested components have been found. +if (Ceres_FIND_COMPONENTS) + foreach (REQUESTED_COMPONENT ${Ceres_FIND_COMPONENTS}) + list(FIND CERES_COMPILED_COMPONENTS ${REQUESTED_COMPONENT} HAVE_REQUESTED_COMPONENT) + # list(FIND ..) 
returns -1 if the element was not in the list, but CMake + # interprets if (VAR) to be true if VAR is any non-zero number, even + # negative ones, hence we have to explicitly check for >= 0. + if (HAVE_REQUESTED_COMPONENT EQUAL -1) + # Check for the presence of all requested components before reporting + # not found, such that we report all of the missing components rather + # than just the first. + list(APPEND MISSING_CERES_COMPONENTS ${REQUESTED_COMPONENT}) + endif() + endforeach() + if (MISSING_CERES_COMPONENTS) + ceres_pretty_print_cmake_list(REQUESTED_CERES_COMPONENTS_STRING + ${Ceres_FIND_COMPONENTS}) + ceres_pretty_print_cmake_list(MISSING_CERES_COMPONENTS_STRING + ${MISSING_CERES_COMPONENTS}) + ceres_report_not_found("Missing requested Ceres components: " + "${MISSING_CERES_COMPONENTS_STRING} (components requested: " + "${REQUESTED_CERES_COMPONENTS_STRING}). Detected " + "${CERES_DETECTED_VERSION_STRING}.") + endif() +endif() + +# As we use CERES_REPORT_NOT_FOUND() to abort, if we reach this point we have +# found Ceres and all required dependencies. +ceres_message(STATUS "Found " ${CERES_DETECTED_VERSION_STRING}) + +# Set CERES_FOUND to be equivalent to Ceres_FOUND, which is set to +# TRUE by FindPackage() if this file is found and run, and after which +# Ceres_FOUND is not (explicitly, i.e. undefined does not count) set +# to FALSE. +set(CERES_FOUND TRUE) + +if (NOT TARGET ceres) + # For backwards compatibility, create a local 'alias' target with the + # non-namespace-qualified Ceres target name. Note that this is not a + # true ALIAS library in CMake terms as they cannot point to imported targets. 
+ add_library(ceres INTERFACE IMPORTED) + set_target_properties(ceres PROPERTIES INTERFACE_LINK_LIBRARIES Ceres::ceres) +endif() diff --git a/ceres-v2/lib/CeresConfigVersion.cmake b/ceres-v2/lib/CeresConfigVersion.cmake new file mode 100644 index 0000000000000000000000000000000000000000..f0004769aee3a15efc6248d2bd6d404f79c8c388 --- /dev/null +++ b/ceres-v2/lib/CeresConfigVersion.cmake @@ -0,0 +1,70 @@ +# This is a basic version file for the Config-mode of find_package(). +# It is used by write_basic_package_version_file() as input file for configure_file() +# to create a version-file which can be installed along a config.cmake file. +# +# The created file sets PACKAGE_VERSION_EXACT if the current version string and +# the requested version string are exactly the same and it sets +# PACKAGE_VERSION_COMPATIBLE if the current version is >= requested version, +# but only if the requested major version is the same as the current one. +# The variable CVF_VERSION must be set before calling configure_file(). 
+ + +set(PACKAGE_VERSION "2.1.0") + +if(PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION) + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + + if("2.1.0" MATCHES "^([0-9]+)\\.") + set(CVF_VERSION_MAJOR "${CMAKE_MATCH_1}") + if(NOT CVF_VERSION_MAJOR VERSION_EQUAL 0) + string(REGEX REPLACE "^0+" "" CVF_VERSION_MAJOR "${CVF_VERSION_MAJOR}") + endif() + else() + set(CVF_VERSION_MAJOR "2.1.0") + endif() + + if(PACKAGE_FIND_VERSION_RANGE) + # both endpoints of the range must have the expected major version + math (EXPR CVF_VERSION_MAJOR_NEXT "${CVF_VERSION_MAJOR} + 1") + if (NOT PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR + OR ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX_MAJOR STREQUAL CVF_VERSION_MAJOR) + OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX VERSION_LESS_EQUAL CVF_VERSION_MAJOR_NEXT))) + set(PACKAGE_VERSION_COMPATIBLE FALSE) + elseif(PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR + AND ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND PACKAGE_VERSION VERSION_LESS_EQUAL PACKAGE_FIND_VERSION_MAX) + OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION_MAX))) + set(PACKAGE_VERSION_COMPATIBLE TRUE) + else() + set(PACKAGE_VERSION_COMPATIBLE FALSE) + endif() + else() + if(PACKAGE_FIND_VERSION_MAJOR STREQUAL CVF_VERSION_MAJOR) + set(PACKAGE_VERSION_COMPATIBLE TRUE) + else() + set(PACKAGE_VERSION_COMPATIBLE FALSE) + endif() + + if(PACKAGE_FIND_VERSION STREQUAL PACKAGE_VERSION) + set(PACKAGE_VERSION_EXACT TRUE) + endif() + endif() +endif() + + +# if the installed project requested no architecture check, don't perform the check +if("FALSE") + return() +endif() + +# if the installed or the using project don't have CMAKE_SIZEOF_VOID_P set, ignore it: +if("${CMAKE_SIZEOF_VOID_P}" STREQUAL "" OR "8" STREQUAL "") + return() +endif() + +# check that the installed version has the same 32/64bit-ness as the one which 
is currently searching: +if(NOT CMAKE_SIZEOF_VOID_P STREQUAL "8") + math(EXPR installedBits "8 * 8") + set(PACKAGE_VERSION "${PACKAGE_VERSION} (${installedBits}bit)") + set(PACKAGE_VERSION_UNSUITABLE TRUE) +endif() diff --git a/ceres-v2/lib/CeresTargets-release.cmake b/ceres-v2/lib/CeresTargets-release.cmake new file mode 100644 index 0000000000000000000000000000000000000000..511e92084c0315ec2fe6aac728b4f8fa1ad1d6e6 --- /dev/null +++ b/ceres-v2/lib/CeresTargets-release.cmake @@ -0,0 +1,19 @@ +#---------------------------------------------------------------- +# Generated CMake target import file for configuration "Release". +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "Ceres::ceres" for configuration "Release" +set_property(TARGET Ceres::ceres APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(Ceres::ceres PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/libceres.a" + ) + +list(APPEND _cmake_import_check_targets Ceres::ceres ) +list(APPEND _cmake_import_check_files_for_Ceres::ceres "${_IMPORT_PREFIX}/lib/libceres.a" ) + +# Commands beyond this point should not need to know the version. 
+set(CMAKE_IMPORT_FILE_VERSION) diff --git a/ceres-v2/lib/CeresTargets.cmake b/ceres-v2/lib/CeresTargets.cmake new file mode 100644 index 0000000000000000000000000000000000000000..4a20a553464c40491b6f87f39dd80c1d239c46ba --- /dev/null +++ b/ceres-v2/lib/CeresTargets.cmake @@ -0,0 +1,108 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8) + message(FATAL_ERROR "CMake >= 2.8.0 required") +endif() +if(CMAKE_VERSION VERSION_LESS "2.8.3") + message(FATAL_ERROR "CMake >= 2.8.3 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.8.3...3.23) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. +set(_cmake_targets_defined "") +set(_cmake_targets_not_defined "") +set(_cmake_expected_targets "") +foreach(_cmake_expected_target IN ITEMS Ceres::ceres) + list(APPEND _cmake_expected_targets "${_cmake_expected_target}") + if(TARGET "${_cmake_expected_target}") + list(APPEND _cmake_targets_defined "${_cmake_expected_target}") + else() + list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}") + endif() +endforeach() +unset(_cmake_expected_target) +if(_cmake_targets_defined STREQUAL _cmake_expected_targets) + unset(_cmake_targets_defined) + unset(_cmake_targets_not_defined) + unset(_cmake_expected_targets) + unset(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT _cmake_targets_defined STREQUAL "") + string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}") + string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: 
${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n") +endif() +unset(_cmake_targets_defined) +unset(_cmake_targets_not_defined) +unset(_cmake_expected_targets) + + +# Compute the installation prefix relative to this file. +get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target Ceres::ceres +add_library(Ceres::ceres STATIC IMPORTED) + +set_target_properties(Ceres::ceres PROPERTIES + INTERFACE_COMPILE_FEATURES "cxx_std_14" + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include;/usr/include" + INTERFACE_LINK_LIBRARIES "Threads::Threads;/usr/lib/x86_64-linux-gnu/libglog.so;gflags;\$;\$;\$;/usr/local/cuda/lib64/libcudart_static.a;\$;\$;/usr/lib/x86_64-linux-gnu/librt.so;/usr/local/cuda/lib64/libcublas.so;/usr/local/cuda/lib64/libcusolver.so;/usr/local/cuda/lib64/libcusparse.so;/usr/local/lib/libmkl_intel_lp64.so;/usr/local/lib/libmkl_intel_thread.so;/usr/local/lib/libmkl_core.so;/usr/local/lib/libiomp5.so;\$;\$;\$;\$;\$;\$;Eigen3::Eigen" +) + +if(CMAKE_VERSION VERSION_LESS 2.8.12) + message(FATAL_ERROR "This file relies on consumers using CMake 2.8.12 or greater.") +endif() + +# Load information for each installed configuration. +file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/CeresTargets-*.cmake") +foreach(_cmake_config_file IN LISTS _cmake_config_files) + include("${_cmake_config_file}") +endforeach() +unset(_cmake_config_file) +unset(_cmake_config_files) + +# Cleanup temporary variables. 
+set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(_cmake_target IN LISTS _cmake_import_check_targets) + foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}") + if(NOT EXISTS "${_cmake_file}") + message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file + \"${_cmake_file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + unset(_cmake_file) + unset("_cmake_import_check_files_for_${_cmake_target}") +endforeach() +unset(_cmake_target) +unset(_cmake_import_check_targets) + +# This file does not depend on other imported targets which have +# been exported from the same project but in a separate export set. + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/ceres-v2/lib/FindCXSparse.cmake b/ceres-v2/lib/FindCXSparse.cmake new file mode 100644 index 0000000000000000000000000000000000000000..afd1ebf152bcdadb35c8b5e84f9a4f1e74589c67 --- /dev/null +++ b/ceres-v2/lib/FindCXSparse.cmake @@ -0,0 +1,240 @@ +# Ceres Solver - A fast non-linear least squares minimizer +# Copyright 2022 Google Inc. All rights reserved. +# http://ceres-solver.org/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Google Inc. nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# Author: alexs.mac@gmail.com (Alex Stewart) +# + +#[=======================================================================[.rst: +FindCXSparse +============ + +Find CXSparse and its dependencies. + +This module defines the following variables which should be referenced by the +caller to use the library. + +``CXSparse_FOUND`` + ``TRUE`` iff CXSparse and all dependencies have been found. + +``CXSparse_VERSION`` + Extracted from ``cs.h``. 
+ +``CXSparse_VERSION_MAJOR`` + Equal to 3 if ``CXSparse_VERSION`` = 3.1.2 + +``CXSparse_VERSION_MINOR`` + Equal to 1 if ``CXSparse_VERSION`` = 3.1.2 + +``CXSparse_VERSION_PATCH`` + Equal to 2 if ``CXSparse_VERSION`` = 3.1.2 + +The following variables control the behaviour of this module: + +``CXSparse_NO_CMAKE`` + Do not attempt to use the native CXSparse CMake package configuration. + +Targets +------- + +The following target defines CXSparse. + +``CXSparse::CXSparse`` + The main CXSparse to be linked against. + +The following variables are also defined by this module, but in line with CMake +recommended ``find_package`` module style should NOT be referenced directly by +callers (use the plural variables detailed above instead). These variables do +however affect the behaviour of the module via ``find_[path/library]()`` which +are NOT re-called (i.e., search for library is not repeated) if these variables +are set with valid values *in the CMake cache*. This means that if these +variables are set directly in the cache, either by the user in the CMake GUI, or +by the user passing ``-DVAR=VALUE`` directives to CMake when called (which +explicitly defines a cache variable), then they will be used verbatim, bypassing +the ``HINTS`` variables and other hard-coded search locations. + +``CXSparse_INCLUDE_DIR`` + Include directory for CXSparse, not including the include directory of any + dependencies. + +``CXSparse_LIBRARY`` + CXSparse library, not including the libraries of any dependencies. +]=======================================================================] + +if (NOT CXSparse_NO_CMAKE) + find_package (CXSparse NO_MODULE QUIET) +endif (NOT CXSparse_NO_CMAKE) + +if (CXSparse_FOUND) + return () +endif (CXSparse_FOUND) + +# Reset CALLERS_CMAKE_FIND_LIBRARY_PREFIXES to its value when +# FindCXSparse was invoked. 
+macro(CXSparse_RESET_FIND_LIBRARY_PREFIX) + if (MSVC) + set(CMAKE_FIND_LIBRARY_PREFIXES "${CALLERS_CMAKE_FIND_LIBRARY_PREFIXES}") + endif (MSVC) +endmacro(CXSparse_RESET_FIND_LIBRARY_PREFIX) + +# Called if we failed to find CXSparse or any of it's required dependencies, +# unsets all public (designed to be used externally) variables and reports +# error message at priority depending upon [REQUIRED/QUIET/] argument. +macro(CXSparse_REPORT_NOT_FOUND REASON_MSG) + # Make results of search visible in the CMake GUI if CXSparse has not + # been found so that user does not have to toggle to advanced view. + mark_as_advanced(CLEAR CXSparse_INCLUDE_DIR + CXSparse_LIBRARY) + + cxsparse_reset_find_library_prefix() + + # Note _FIND_[REQUIRED/QUIETLY] variables defined by FindPackage() + # use the camelcase library name, not uppercase. + if (CXSparse_FIND_QUIETLY) + message(STATUS "Failed to find CXSparse - " ${REASON_MSG} ${ARGN}) + elseif (CXSparse_FIND_REQUIRED) + message(FATAL_ERROR "Failed to find CXSparse - " ${REASON_MSG} ${ARGN}) + else() + # Neither QUIETLY nor REQUIRED, use no priority which emits a message + # but continues configuration and allows generation. + message("-- Failed to find CXSparse - " ${REASON_MSG} ${ARGN}) + endif () + return() +endmacro(CXSparse_REPORT_NOT_FOUND) + +# Handle possible presence of lib prefix for libraries on MSVC, see +# also CXSparse_RESET_FIND_LIBRARY_PREFIX(). +if (MSVC) + # Preserve the caller's original values for CMAKE_FIND_LIBRARY_PREFIXES + # s/t we can set it back before returning. + set(CALLERS_CMAKE_FIND_LIBRARY_PREFIXES "${CMAKE_FIND_LIBRARY_PREFIXES}") + # The empty string in this list is important, it represents the case when + # the libraries have no prefix (shared libraries / DLLs). + set(CMAKE_FIND_LIBRARY_PREFIXES "lib" "" "${CMAKE_FIND_LIBRARY_PREFIXES}") +endif (MSVC) + +# Additional suffixes to try appending to each search path. 
+list(APPEND CXSparse_CHECK_PATH_SUFFIXES + suitesparse) # Linux/Windows + +# Search supplied hint directories first if supplied. +find_path(CXSparse_INCLUDE_DIR + NAMES cs.h + PATH_SUFFIXES ${CXSparse_CHECK_PATH_SUFFIXES}) +if (NOT CXSparse_INCLUDE_DIR OR + NOT EXISTS ${CXSparse_INCLUDE_DIR}) + cxsparse_report_not_found( + "Could not find CXSparse include directory, set CXSparse_INCLUDE_DIR " + "to directory containing cs.h") +endif (NOT CXSparse_INCLUDE_DIR OR + NOT EXISTS ${CXSparse_INCLUDE_DIR}) + +find_library(CXSparse_LIBRARY NAMES cxsparse + PATH_SUFFIXES ${CXSparse_CHECK_PATH_SUFFIXES}) + +if (NOT CXSparse_LIBRARY OR + NOT EXISTS ${CXSparse_LIBRARY}) + cxsparse_report_not_found( + "Could not find CXSparse library, set CXSparse_LIBRARY " + "to full path to libcxsparse.") +endif (NOT CXSparse_LIBRARY OR + NOT EXISTS ${CXSparse_LIBRARY}) + +# Mark internally as found, then verify. CXSparse_REPORT_NOT_FOUND() unsets +# if called. +set(CXSparse_FOUND TRUE) + +# Extract CXSparse version from cs.h +if (CXSparse_INCLUDE_DIR) + set(CXSparse_VERSION_FILE ${CXSparse_INCLUDE_DIR}/cs.h) + if (NOT EXISTS ${CXSparse_VERSION_FILE}) + cxsparse_report_not_found( + "Could not find file: ${CXSparse_VERSION_FILE} " + "containing version information in CXSparse install located at: " + "${CXSparse_INCLUDE_DIR}.") + else (NOT EXISTS ${CXSparse_VERSION_FILE}) + file(READ ${CXSparse_INCLUDE_DIR}/cs.h CXSparse_VERSION_FILE_CONTENTS) + + string(REGEX MATCH "#define CS_VER [0-9]+" + CXSparse_VERSION_MAJOR "${CXSparse_VERSION_FILE_CONTENTS}") + string(REGEX REPLACE "#define CS_VER ([0-9]+)" "\\1" + CXSparse_VERSION_MAJOR "${CXSparse_VERSION_MAJOR}") + + string(REGEX MATCH "#define CS_SUBVER [0-9]+" + CXSparse_VERSION_MINOR "${CXSparse_VERSION_FILE_CONTENTS}") + string(REGEX REPLACE "#define CS_SUBVER ([0-9]+)" "\\1" + CXSparse_VERSION_MINOR "${CXSparse_VERSION_MINOR}") + + string(REGEX MATCH "#define CS_SUBSUB [0-9]+" + CXSparse_VERSION_PATCH "${CXSparse_VERSION_FILE_CONTENTS}") + 
string(REGEX REPLACE "#define CS_SUBSUB ([0-9]+)" "\\1" + CXSparse_VERSION_PATCH "${CXSparse_VERSION_PATCH}") + + # This is on a single line s/t CMake does not interpret it as a list of + # elements and insert ';' separators which would result in 3.;1.;2 nonsense. + set(CXSparse_VERSION "${CXSparse_VERSION_MAJOR}.${CXSparse_VERSION_MINOR}.${CXSparse_VERSION_PATCH}") + set(CXSparse_VERSION_COMPONENTS 3) + endif (NOT EXISTS ${CXSparse_VERSION_FILE}) +endif (CXSparse_INCLUDE_DIR) + +# Catch the case when the caller has set CXSparse_LIBRARY in the cache / GUI and +# thus FIND_LIBRARY was not called, but specified library is invalid, otherwise +# we would report CXSparse as found. +# TODO: This regex for CXSparse library is pretty primitive, we use lowercase +# for comparison to handle Windows using CamelCase library names, could +# this check be better? +string(TOLOWER "${CXSparse_LIBRARY}" LOWERCASE_CXSparse_LIBRARY) +if (CXSparse_LIBRARY AND + EXISTS ${CXSparse_LIBRARY} AND + NOT "${LOWERCASE_CXSparse_LIBRARY}" MATCHES ".*cxsparse[^/]*") + cxsparse_report_not_found( + "Caller defined CXSparse_LIBRARY: " + "${CXSparse_LIBRARY} does not match CXSparse.") +endif (CXSparse_LIBRARY AND + EXISTS ${CXSparse_LIBRARY} AND + NOT "${LOWERCASE_CXSparse_LIBRARY}" MATCHES ".*cxsparse[^/]*") + +cxsparse_reset_find_library_prefix() + +mark_as_advanced(CXSparse_INCLUDE_DIR CXSparse_LIBRARY) + +# Handle REQUIRED / QUIET optional arguments and version. 
+include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(CXSparse + REQUIRED_VARS CXSparse_INCLUDE_DIR CXSparse_LIBRARY + VERSION_VAR CXSparse_VERSION) + +if (CXSparse_INCLUDE_DIR AND CXSparse_LIBRARY) + if (NOT TARGET CXSparse::CXSparse) + add_library (CXSparse::CXSparse IMPORTED UNKNOWN) + endif (NOT TARGET CXSparse::CXSparse) + + set_property (TARGET CXSparse::CXSparse PROPERTY + IMPORTED_LOCATION ${CXSparse_LIBRARY}) + set_property (TARGET CXSparse::CXSparse PROPERTY + INTERFACE_INCLUDE_DIRECTORIES ${CXSparse_INCLUDE_DIR}) +endif (CXSparse_INCLUDE_DIR AND CXSparse_LIBRARY) diff --git a/ceres-v2/lib/FindGlog.cmake b/ceres-v2/lib/FindGlog.cmake new file mode 100644 index 0000000000000000000000000000000000000000..1a7b6c092ab6f36fd3d41c68c99518db624d0cb0 --- /dev/null +++ b/ceres-v2/lib/FindGlog.cmake @@ -0,0 +1,379 @@ +# Ceres Solver - A fast non-linear least squares minimizer +# Copyright 2015 Google Inc. All rights reserved. +# http://ceres-solver.org/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Google Inc. nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# Author: alexs.mac@gmail.com (Alex Stewart) +# + +# FindGlog.cmake - Find Google glog logging library. +# +# This module defines the following variables: +# +# GLOG_FOUND: TRUE iff glog is found. +# GLOG_INCLUDE_DIRS: Include directories for glog. +# GLOG_LIBRARIES: Libraries required to link glog. +# FOUND_INSTALLED_GLOG_CMAKE_CONFIGURATION: True iff the version of glog found +# was built & installed / exported +# as a CMake package. +# +# The following variables control the behaviour of this module: +# +# GLOG_PREFER_EXPORTED_GLOG_CMAKE_CONFIGURATION: TRUE/FALSE, iff TRUE then +# then prefer using an exported CMake configuration +# generated by glog > 0.3.4 over searching for the +# glog components manually. Otherwise (FALSE) +# ignore any exported glog CMake configurations and +# always perform a manual search for the components. +# Default: TRUE iff user does not define this variable +# before we are called, and does NOT specify either +# GLOG_INCLUDE_DIR_HINTS or GLOG_LIBRARY_DIR_HINTS +# otherwise FALSE. +# GLOG_INCLUDE_DIR_HINTS: List of additional directories in which to +# search for glog includes, e.g: /timbuktu/include. +# GLOG_LIBRARY_DIR_HINTS: List of additional directories in which to +# search for glog libraries, e.g: /timbuktu/lib. 
+# +# The following variables are also defined by this module, but in line with +# CMake recommended FindPackage() module style should NOT be referenced directly +# by callers (use the plural variables detailed above instead). These variables +# do however affect the behaviour of the module via FIND_[PATH/LIBRARY]() which +# are NOT re-called (i.e. search for library is not repeated) if these variables +# are set with valid values _in the CMake cache_. This means that if these +# variables are set directly in the cache, either by the user in the CMake GUI, +# or by the user passing -DVAR=VALUE directives to CMake when called (which +# explicitly defines a cache variable), then they will be used verbatim, +# bypassing the HINTS variables and other hard-coded search locations. +# +# GLOG_INCLUDE_DIR: Include directory for glog, not including the +# include directory of any dependencies. +# GLOG_LIBRARY: glog library, not including the libraries of any +# dependencies. + +# Reset CALLERS_CMAKE_FIND_LIBRARY_PREFIXES to its value when +# FindGlog was invoked. +macro(GLOG_RESET_FIND_LIBRARY_PREFIX) + if (MSVC AND CALLERS_CMAKE_FIND_LIBRARY_PREFIXES) + set(CMAKE_FIND_LIBRARY_PREFIXES "${CALLERS_CMAKE_FIND_LIBRARY_PREFIXES}") + endif() +endmacro(GLOG_RESET_FIND_LIBRARY_PREFIX) + +# Called if we failed to find glog or any of it's required dependencies, +# unsets all public (designed to be used externally) variables and reports +# error message at priority depending upon [REQUIRED/QUIET/] argument. +macro(GLOG_REPORT_NOT_FOUND REASON_MSG) + unset(GLOG_FOUND) + unset(GLOG_INCLUDE_DIRS) + unset(GLOG_LIBRARIES) + # Make results of search visible in the CMake GUI if glog has not + # been found so that user does not have to toggle to advanced view. + mark_as_advanced(CLEAR GLOG_INCLUDE_DIR + GLOG_LIBRARY) + + glog_reset_find_library_prefix() + + # Note _FIND_[REQUIRED/QUIETLY] variables defined by FindPackage() + # use the camelcase library name, not uppercase. 
+ if (Glog_FIND_QUIETLY) + message(STATUS "Failed to find glog - " ${REASON_MSG} ${ARGN}) + elseif (Glog_FIND_REQUIRED) + message(FATAL_ERROR "Failed to find glog - " ${REASON_MSG} ${ARGN}) + else() + # Neither QUIETLY nor REQUIRED, use no priority which emits a message + # but continues configuration and allows generation. + message("-- Failed to find glog - " ${REASON_MSG} ${ARGN}) + endif () + return() +endmacro(GLOG_REPORT_NOT_FOUND) + +# glog_message([mode] "message text") +# +# Wraps the standard cmake 'message' command, but suppresses output +# if the QUIET flag was passed to the find_package(Glog ...) call. +function(GLOG_MESSAGE) + if (NOT Glog_FIND_QUIETLY) + message(${ARGN}) + endif() +endfunction() + +# Protect against any alternative find_package scripts for this library having +# been called previously (in a client project) which set GLOG_FOUND, but not +# the other variables we require / set here which could cause the search logic +# here to fail. +unset(GLOG_FOUND) + +# ----------------------------------------------------------------- +# By default, if the user has expressed no preference for using an exported +# glog CMake configuration over performing a search for the installed +# components, and has not specified any hints for the search locations, then +# prefer a glog exported configuration if available. +if (NOT DEFINED GLOG_PREFER_EXPORTED_GLOG_CMAKE_CONFIGURATION + AND NOT GLOG_INCLUDE_DIR_HINTS + AND NOT GLOG_LIBRARY_DIR_HINTS) + glog_message(STATUS "No preference for use of exported glog CMake " + "configuration set, and no hints for include/library directories provided. " + "Defaulting to preferring an installed/exported glog CMake configuration " + "if available.") + set(GLOG_PREFER_EXPORTED_GLOG_CMAKE_CONFIGURATION TRUE) +endif() + +# On macOS, add the Homebrew prefix (with appropriate suffixes) to the +# respective HINTS directories (after any user-specified locations). 
This +# handles Homebrew installations into non-standard locations (not /usr/local). +# We do not use CMAKE_PREFIX_PATH for this as given the search ordering of +# find_xxx(), doing so would override any user-specified HINTS locations with +# the Homebrew version if it exists. +if (CMAKE_SYSTEM_NAME MATCHES "Darwin") + find_program(HOMEBREW_EXECUTABLE brew) + mark_as_advanced(FORCE HOMEBREW_EXECUTABLE) + if (HOMEBREW_EXECUTABLE) + # Detected a Homebrew install, query for its install prefix. + execute_process(COMMAND ${HOMEBREW_EXECUTABLE} --prefix + OUTPUT_VARIABLE HOMEBREW_INSTALL_PREFIX + OUTPUT_STRIP_TRAILING_WHITESPACE) + glog_message(STATUS "Detected Homebrew with install prefix: " + "${HOMEBREW_INSTALL_PREFIX}, adding to CMake search paths.") + list(APPEND GLOG_INCLUDE_DIR_HINTS "${HOMEBREW_INSTALL_PREFIX}/include") + list(APPEND GLOG_LIBRARY_DIR_HINTS "${HOMEBREW_INSTALL_PREFIX}/lib") + endif() +endif() + +if (GLOG_PREFER_EXPORTED_GLOG_CMAKE_CONFIGURATION) + # Try to find an exported CMake configuration for glog, as generated by + # glog versions > 0.3.4 + # + # We search twice, s/t we can invert the ordering of precedence used by + # find_package() for exported package build directories, and installed + # packages (found via CMAKE_SYSTEM_PREFIX_PATH), listed as items 6) and 7) + # respectively in [1]. + # + # By default, exported build directories are (in theory) detected first, and + # this is usually the case on Windows. However, on OS X & Linux, the install + # path (/usr/local) is typically present in the PATH environment variable + # which is checked in item 4) in [1] (i.e. before both of the above, unless + # NO_SYSTEM_ENVIRONMENT_PATH is passed). As such on those OSs installed + # packages are usually detected in preference to exported package build + # directories. + # + # To ensure a more consistent response across all OSs, and as users usually + # want to prefer an installed version of a package over a locally built one + # where both exist (esp. 
as the exported build directory might be removed + # after installation), we first search with NO_CMAKE_PACKAGE_REGISTRY which + # means any build directories exported by the user are ignored, and thus + # installed directories are preferred. If this fails to find the package + # we then research again, but without NO_CMAKE_PACKAGE_REGISTRY, so any + # exported build directories will now be detected. + # + # To prevent confusion on Windows, we also pass NO_CMAKE_BUILDS_PATH (which + # is item 5) in [1]), to not preferentially use projects that were built + # recently with the CMake GUI to ensure that we always prefer an installed + # version if available. + # + # NOTE: We use the NAMES option as glog erroneously uses 'google-glog' as its + # project name when built with CMake, but exports itself as just 'glog'. + # On Linux/OS X this does not break detection as the project name is + # not used as part of the install path for the CMake package files, + # e.g. /usr/local/lib/cmake/glog, where the suffix is hardcoded + # in glog's CMakeLists. However, on Windows the project name *is* + # part of the install prefix: C:/Program Files/google-glog/[include,lib]. + # However, by default CMake checks: + # C:/Program Files/ which does not + # exist and thus detection fails. Thus we use the NAMES to force the + # search to use both google-glog & glog. + # + # [1] http://www.cmake.org/cmake/help/v2.8.11/cmake.html#command:find_package + find_package(glog QUIET + NAMES google-glog glog + HINTS ${glog_DIR} ${HOMEBREW_INSTALL_PREFIX} + NO_MODULE + NO_CMAKE_PACKAGE_REGISTRY + NO_CMAKE_BUILDS_PATH) + if (glog_FOUND) + glog_message(STATUS "Found installed version of glog: ${glog_DIR}") + else() + # Failed to find an installed version of glog, repeat search allowing + # exported build directories. 
+ glog_message(STATUS "Failed to find installed glog CMake configuration, " + "searching for glog build directories exported with CMake.") + # Again pass NO_CMAKE_BUILDS_PATH, as we know that glog is exported and + # do not want to treat projects built with the CMake GUI preferentially. + find_package(glog QUIET + NAMES google-glog glog + NO_MODULE + NO_CMAKE_BUILDS_PATH) + if (glog_FOUND) + glog_message(STATUS "Found exported glog build directory: ${glog_DIR}") + endif(glog_FOUND) + endif(glog_FOUND) + + set(FOUND_INSTALLED_GLOG_CMAKE_CONFIGURATION ${glog_FOUND}) + + if (FOUND_INSTALLED_GLOG_CMAKE_CONFIGURATION) + glog_message(STATUS "Detected glog version: ${glog_VERSION}") + set(GLOG_FOUND ${glog_FOUND}) + # glog wraps the include directories into the exported glog::glog target. + set(GLOG_INCLUDE_DIR "") + set(GLOG_LIBRARY glog::glog) + else (FOUND_INSTALLED_GLOG_CMAKE_CONFIGURATION) + glog_message(STATUS "Failed to find an installed/exported CMake " + "configuration for glog, will perform search for installed glog " + "components.") + endif (FOUND_INSTALLED_GLOG_CMAKE_CONFIGURATION) +endif(GLOG_PREFER_EXPORTED_GLOG_CMAKE_CONFIGURATION) + +if (NOT GLOG_FOUND) + # Either failed to find an exported glog CMake configuration, or user + # told us not to use one. Perform a manual search for all glog components. + + # Handle possible presence of lib prefix for libraries on MSVC, see + # also GLOG_RESET_FIND_LIBRARY_PREFIX(). + if (MSVC) + # Preserve the caller's original values for CMAKE_FIND_LIBRARY_PREFIXES + # s/t we can set it back before returning. + set(CALLERS_CMAKE_FIND_LIBRARY_PREFIXES "${CMAKE_FIND_LIBRARY_PREFIXES}") + # The empty string in this list is important, it represents the case when + # the libraries have no prefix (shared libraries / DLLs). + set(CMAKE_FIND_LIBRARY_PREFIXES "lib" "" "${CMAKE_FIND_LIBRARY_PREFIXES}") + endif (MSVC) + + # Search user-installed locations first, so that we prefer user installs + # to system installs where both exist. 
+ list(APPEND GLOG_CHECK_INCLUDE_DIRS + /usr/local/include + /usr/local/homebrew/include # Mac OS X + /opt/local/var/macports/software # Mac OS X. + /opt/local/include + /usr/include) + # Windows (for C:/Program Files prefix). + list(APPEND GLOG_CHECK_PATH_SUFFIXES + glog/include + glog/Include + Glog/include + Glog/Include + google-glog/include # CMake installs with project name prefix. + google-glog/Include) + + list(APPEND GLOG_CHECK_LIBRARY_DIRS + /usr/local/lib + /usr/local/homebrew/lib # Mac OS X. + /opt/local/lib + /usr/lib) + # Windows (for C:/Program Files prefix). + list(APPEND GLOG_CHECK_LIBRARY_SUFFIXES + glog/lib + glog/Lib + Glog/lib + Glog/Lib + google-glog/lib # CMake installs with project name prefix. + google-glog/Lib) + + # Search supplied hint directories first if supplied. + find_path(GLOG_INCLUDE_DIR + NAMES glog/logging.h + HINTS ${GLOG_INCLUDE_DIR_HINTS} + PATHS ${GLOG_CHECK_INCLUDE_DIRS} + PATH_SUFFIXES ${GLOG_CHECK_PATH_SUFFIXES}) + if (NOT GLOG_INCLUDE_DIR OR + NOT EXISTS ${GLOG_INCLUDE_DIR}) + glog_report_not_found( + "Could not find glog include directory, set GLOG_INCLUDE_DIR " + "to directory containing glog/logging.h") + endif (NOT GLOG_INCLUDE_DIR OR + NOT EXISTS ${GLOG_INCLUDE_DIR}) + + find_library(GLOG_LIBRARY NAMES glog + HINTS ${GLOG_LIBRARY_DIR_HINTS} + PATHS ${GLOG_CHECK_LIBRARY_DIRS} + PATH_SUFFIXES ${GLOG_CHECK_LIBRARY_SUFFIXES}) + if (NOT GLOG_LIBRARY OR + NOT EXISTS ${GLOG_LIBRARY}) + glog_report_not_found( + "Could not find glog library, set GLOG_LIBRARY " + "to full path to libglog.") + endif (NOT GLOG_LIBRARY OR + NOT EXISTS ${GLOG_LIBRARY}) + + # Mark internally as found, then verify. GLOG_REPORT_NOT_FOUND() unsets + # if called. + set(GLOG_FOUND TRUE) + + # Glog does not seem to provide any record of the version in its + # source tree, thus cannot extract version. 
+ + # Catch case when caller has set GLOG_INCLUDE_DIR in the cache / GUI and + # thus FIND_[PATH/LIBRARY] are not called, but specified locations are + # invalid, otherwise we would report the library as found. + if (GLOG_INCLUDE_DIR AND + NOT EXISTS ${GLOG_INCLUDE_DIR}/glog/logging.h) + glog_report_not_found( + "Caller defined GLOG_INCLUDE_DIR:" + " ${GLOG_INCLUDE_DIR} does not contain glog/logging.h header.") + endif (GLOG_INCLUDE_DIR AND + NOT EXISTS ${GLOG_INCLUDE_DIR}/glog/logging.h) + # TODO: This regex for glog library is pretty primitive, we use lowercase + # for comparison to handle Windows using CamelCase library names, could + # this check be better? + string(TOLOWER "${GLOG_LIBRARY}" LOWERCASE_GLOG_LIBRARY) + if (GLOG_LIBRARY AND + NOT "${LOWERCASE_GLOG_LIBRARY}" MATCHES ".*glog[^/]*") + glog_report_not_found( + "Caller defined GLOG_LIBRARY: " + "${GLOG_LIBRARY} does not match glog.") + endif (GLOG_LIBRARY AND + NOT "${LOWERCASE_GLOG_LIBRARY}" MATCHES ".*glog[^/]*") + + glog_reset_find_library_prefix() + +endif(NOT GLOG_FOUND) + +# Set standard CMake FindPackage variables if found. +if (GLOG_FOUND) + set(GLOG_INCLUDE_DIRS ${GLOG_INCLUDE_DIR}) + set(GLOG_LIBRARIES ${GLOG_LIBRARY}) +endif (GLOG_FOUND) + +# If we are using an exported CMake glog target, the include directories are +# wrapped into the target itself, and do not have to be (and are not) +# separately specified. In which case, we should not add GLOG_INCLUDE_DIRS +# to the list of required variables in order that glog be reported as found. +if (FOUND_INSTALLED_GLOG_CMAKE_CONFIGURATION) + set(GLOG_REQUIRED_VARIABLES GLOG_LIBRARIES) +else() + set(GLOG_REQUIRED_VARIABLES GLOG_INCLUDE_DIRS GLOG_LIBRARIES) +endif() + +# Handle REQUIRED / QUIET optional arguments. 
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(Glog DEFAULT_MSG
+  ${GLOG_REQUIRED_VARIABLES})
+
+# Only mark internal variables as advanced if we found glog, otherwise
+# leave them visible in the standard GUI for the user to set manually.
+if (GLOG_FOUND)
+  mark_as_advanced(FORCE GLOG_INCLUDE_DIR
+                         GLOG_LIBRARY
+                         glog_DIR) # Autogenerated by find_package(glog)
+endif (GLOG_FOUND)
diff --git a/ceres-v2/lib/FindMETIS.cmake b/ceres-v2/lib/FindMETIS.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..5f41792d78ddda35d9ac793b7459c1cdf00aa003
--- /dev/null
+++ b/ceres-v2/lib/FindMETIS.cmake
@@ -0,0 +1,110 @@
+#
+# Copyright (c) 2022 Sergiu Deitsch
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+#[=======================================================================[.rst:
+Module for locating METIS
+=========================
+
+Read-only variables:
+
+``METIS_FOUND``
+  Indicates whether the library has been found.
+
+``METIS_VERSION``
+  Indicates library version.
+
+Targets
+-------
+
+``METIS::METIS``
+  Specifies targets that should be passed to target_link_libraries.
+]=======================================================================]
+
+include (FindPackageHandleStandardArgs)
+
+find_path (METIS_INCLUDE_DIR NAMES metis.h
+  PATH_SUFFIXES include
+  DOC "METIS include directory")
+find_library (METIS_LIBRARY_DEBUG NAMES metis
+  PATH_SUFFIXES Debug
+  DOC "METIS debug library")
+find_library (METIS_LIBRARY_RELEASE NAMES metis
+  PATH_SUFFIXES Release
+  DOC "METIS release library")
+
+if (METIS_LIBRARY_RELEASE)
+  if (METIS_LIBRARY_DEBUG)
+    set (METIS_LIBRARY debug ${METIS_LIBRARY_DEBUG} optimized
+      ${METIS_LIBRARY_RELEASE} CACHE STRING "METIS library")
+  else (METIS_LIBRARY_DEBUG)
+    set (METIS_LIBRARY ${METIS_LIBRARY_RELEASE} CACHE FILEPATH "METIS library")
+  endif (METIS_LIBRARY_DEBUG)
+elseif (METIS_LIBRARY_DEBUG)
+  set (METIS_LIBRARY ${METIS_LIBRARY_DEBUG} CACHE FILEPATH "METIS library")
+endif (METIS_LIBRARY_RELEASE)
+
+set (_METIS_VERSION_HEADER ${METIS_INCLUDE_DIR}/metis.h)
+
+if (EXISTS ${_METIS_VERSION_HEADER})
+  file (READ ${_METIS_VERSION_HEADER} _METIS_VERSION_CONTENTS)
+
+  string (REGEX REPLACE ".*#define METIS_VER_MAJOR[ \t]+([0-9]+).*" "\\1"
+    METIS_VERSION_MAJOR "${_METIS_VERSION_CONTENTS}")
+  string (REGEX REPLACE ".*#define METIS_VER_MINOR[ \t]+([0-9]+).*" "\\1"
+    METIS_VERSION_MINOR "${_METIS_VERSION_CONTENTS}")
+  string (REGEX REPLACE ".*#define METIS_VER_SUBMINOR[ \t]+([0-9]+).*" "\\1"
+    METIS_VERSION_PATCH "${_METIS_VERSION_CONTENTS}")
+
+  set (METIS_VERSION
+    ${METIS_VERSION_MAJOR}.${METIS_VERSION_MINOR}.${METIS_VERSION_PATCH})
+  set (METIS_VERSION_COMPONENTS 3)
+endif (EXISTS ${_METIS_VERSION_HEADER})
+
+mark_as_advanced 
(METIS_INCLUDE_DIR METIS_LIBRARY_DEBUG METIS_LIBRARY_RELEASE + METIS_LIBRARY) + +if (NOT TARGET METIS::METIS) + if (METIS_INCLUDE_DIR OR METIS_LIBRARY) + add_library (METIS::METIS IMPORTED UNKNOWN) + endif (METIS_INCLUDE_DIR OR METIS_LIBRARY) +endif (NOT TARGET METIS::METIS) + +if (METIS_INCLUDE_DIR) + set_property (TARGET METIS::METIS PROPERTY INTERFACE_INCLUDE_DIRECTORIES + ${METIS_INCLUDE_DIR}) +endif (METIS_INCLUDE_DIR) + +if (METIS_LIBRARY_RELEASE) + set_property (TARGET METIS::METIS PROPERTY IMPORTED_LOCATION_RELEASE + ${METIS_LIBRARY_RELEASE}) + set_property (TARGET METIS::METIS APPEND PROPERTY IMPORTED_CONFIGURATIONS + RELEASE) +endif (METIS_LIBRARY_RELEASE) + +if (METIS_LIBRARY_DEBUG) + set_property (TARGET METIS::METIS PROPERTY IMPORTED_LOCATION_DEBUG + ${METIS_LIBRARY_DEBUG}) + set_property (TARGET METIS::METIS APPEND PROPERTY IMPORTED_CONFIGURATIONS + DEBUG) +endif (METIS_LIBRARY_DEBUG) + +find_package_handle_standard_args (METIS REQUIRED_VARS + METIS_INCLUDE_DIR METIS_LIBRARY VERSION_VAR METIS_VERSION) diff --git a/ceres-v2/lib/FindSuiteSparse.cmake b/ceres-v2/lib/FindSuiteSparse.cmake new file mode 100644 index 0000000000000000000000000000000000000000..768b5df3a5f9bb23e6ed721b0f9b3cf575c3ad5a --- /dev/null +++ b/ceres-v2/lib/FindSuiteSparse.cmake @@ -0,0 +1,488 @@ +# Ceres Solver - A fast non-linear least squares minimizer +# Copyright 2022 Google Inc. All rights reserved. +# http://ceres-solver.org/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Google Inc. 
nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# Author: alexs.mac@gmail.com (Alex Stewart) +# + +#[=======================================================================[.rst: +FindSuiteSparse +=============== + +Module for locating SuiteSparse libraries and its dependencies. + +This module defines the following variables: + +``SuiteSparse_FOUND`` + ``TRUE`` iff SuiteSparse and all dependencies have been found. + +``SuiteSparse_VERSION`` + Extracted from ``SuiteSparse_config.h`` (>= v4). + +``SuiteSparse_VERSION_MAJOR`` + Equal to 4 if ``SuiteSparse_VERSION`` = 4.2.1 + +``SuiteSparse_VERSION_MINOR`` + Equal to 2 if ``SuiteSparse_VERSION`` = 4.2.1 + +``SuiteSparse_VERSION_PATCH`` + Equal to 1 if ``SuiteSparse_VERSION`` = 4.2.1 + +The following variables control the behaviour of this module: + +``SuiteSparse_NO_CMAKE`` + Do not attempt to use the native SuiteSparse CMake package configuration. + + +Targets +------- + +The following targets define the SuiteSparse components searched for. 
+ +``SuiteSparse::AMD`` + Symmetric Approximate Minimum Degree (AMD) + +``SuiteSparse::CAMD`` + Constrained Approximate Minimum Degree (CAMD) + +``SuiteSparse::COLAMD`` + Column Approximate Minimum Degree (COLAMD) + +``SuiteSparse::CCOLAMD`` + Constrained Column Approximate Minimum Degree (CCOLAMD) + +``SuiteSparse::CHOLMOD`` + Sparse Supernodal Cholesky Factorization and Update/Downdate (CHOLMOD) + +``SuiteSparse::SPQR`` + Multifrontal Sparse QR (SuiteSparseQR) + +``SuiteSparse::Config`` + Common configuration for all but CSparse (SuiteSparse version >= 4). + +Optional SuiteSparse dependencies: + +``METIS::METIS`` + Serial Graph Partitioning and Fill-reducing Matrix Ordering (METIS) +]=======================================================================] + +if (NOT SuiteSparse_NO_CMAKE) + find_package (SuiteSparse NO_MODULE QUIET) +endif (NOT SuiteSparse_NO_CMAKE) + +if (SuiteSparse_FOUND) + return () +endif (SuiteSparse_FOUND) + +# Push CMP0057 to enable support for IN_LIST, when cmake_minimum_required is +# set to <3.3. +cmake_policy (PUSH) +cmake_policy (SET CMP0057 NEW) + +if (NOT SuiteSparse_FIND_COMPONENTS) + set (SuiteSparse_FIND_COMPONENTS + AMD + CAMD + CCOLAMD + CHOLMOD + COLAMD + SPQR + ) + + foreach (component IN LISTS SuiteSparse_FIND_COMPONENTS) + set (SuiteSparse_FIND_REQUIRED_${component} TRUE) + endforeach (component IN LISTS SuiteSparse_FIND_COMPONENTS) +endif (NOT SuiteSparse_FIND_COMPONENTS) + +# Assume SuiteSparse was found and set it to false only if third-party +# dependencies could not be located. SuiteSparse components are handled by +# FindPackageHandleStandardArgs HANDLE_COMPONENTS option. +set (SuiteSparse_FOUND TRUE) + +include (CheckLibraryExists) + +# Config is a base component and thus always required +set (SuiteSparse_IMPLICIT_COMPONENTS Config) + +# CHOLMOD depends on AMD, CAMD, CCOLAMD, and COLAMD. 
+if (CHOLMOD IN_LIST SuiteSparse_FIND_COMPONENTS)
+  list (APPEND SuiteSparse_IMPLICIT_COMPONENTS AMD CAMD CCOLAMD COLAMD)
+endif (CHOLMOD IN_LIST SuiteSparse_FIND_COMPONENTS)
+
+# SPQR depends on CHOLMOD.
+if (SPQR IN_LIST SuiteSparse_FIND_COMPONENTS)
+  list (APPEND SuiteSparse_IMPLICIT_COMPONENTS CHOLMOD)
+endif (SPQR IN_LIST SuiteSparse_FIND_COMPONENTS)
+
+# Implicit components are always required
+foreach (component IN LISTS SuiteSparse_IMPLICIT_COMPONENTS)
+  set (SuiteSparse_FIND_REQUIRED_${component} TRUE)
+endforeach (component IN LISTS SuiteSparse_IMPLICIT_COMPONENTS)
+
+list (APPEND SuiteSparse_FIND_COMPONENTS ${SuiteSparse_IMPLICIT_COMPONENTS})
+
+# Do not list components multiple times.
+list (REMOVE_DUPLICATES SuiteSparse_FIND_COMPONENTS)
+
+# Reset CALLERS_CMAKE_FIND_LIBRARY_PREFIXES to its value when
+# FindSuiteSparse was invoked.
+macro(SuiteSparse_RESET_FIND_LIBRARY_PREFIX)
+  if (MSVC)
+    set(CMAKE_FIND_LIBRARY_PREFIXES "${CALLERS_CMAKE_FIND_LIBRARY_PREFIXES}")
+  endif (MSVC)
+endmacro(SuiteSparse_RESET_FIND_LIBRARY_PREFIX)
+
+# Called if we failed to find SuiteSparse or any of its required dependencies,
+# unsets all public (designed to be used externally) variables and reports
+# error message at priority depending upon [REQUIRED/QUIET/<none>] argument.
+macro(SuiteSparse_REPORT_NOT_FOUND REASON_MSG)
+  # Will be set to FALSE by find_package_handle_standard_args
+  unset (SuiteSparse_FOUND)
+
+  # Do NOT unset SuiteSparse_REQUIRED_VARS here, as it is used by
+  # FindPackageHandleStandardArgs() to generate the automatic error message on
+  # failure which highlights which components are missing.
+
+  suitesparse_reset_find_library_prefix()
+
+  # Note <package>_FIND_[REQUIRED/QUIETLY] variables defined by FindPackage()
+  # use the camelcase library name, not uppercase.
+  if (SuiteSparse_FIND_QUIETLY)
+    message(STATUS "Failed to find SuiteSparse - " ${REASON_MSG} ${ARGN})
+  elseif (SuiteSparse_FIND_REQUIRED)
+    message(FATAL_ERROR "Failed to find SuiteSparse - " ${REASON_MSG} ${ARGN})
+  else()
+    # Neither QUIETLY nor REQUIRED, use no priority which emits a message
+    # but continues configuration and allows generation.
+    message("-- Failed to find SuiteSparse - " ${REASON_MSG} ${ARGN})
+  endif (SuiteSparse_FIND_QUIETLY)
+
+  # Do not call return(), s/t we keep processing if not called with REQUIRED
+  # and report all missing components, rather than bailing after failing to find
+  # the first.
+endmacro(SuiteSparse_REPORT_NOT_FOUND)
+
+# Handle possible presence of lib prefix for libraries on MSVC, see
+# also SuiteSparse_RESET_FIND_LIBRARY_PREFIX().
+if (MSVC)
+  # Preserve the caller's original values for CMAKE_FIND_LIBRARY_PREFIXES
+  # s/t we can set it back before returning.
+  set(CALLERS_CMAKE_FIND_LIBRARY_PREFIXES "${CMAKE_FIND_LIBRARY_PREFIXES}")
+  # The empty string in this list is important, it represents the case when
+  # the libraries have no prefix (shared libraries / DLLs).
+  set(CMAKE_FIND_LIBRARY_PREFIXES "lib" "" "${CMAKE_FIND_LIBRARY_PREFIXES}")
+endif (MSVC)
+
+# Additional suffixes to try appending to each search path.
+list(APPEND SuiteSparse_CHECK_PATH_SUFFIXES
+  suitesparse) # Windows/Ubuntu
+
+# Wrappers to find_path/library that pass the SuiteSparse search hints/paths.
+#
+# suitesparse_find_component(<component> [FILES name1 [name2 ...]]
+#                            [LIBRARIES name1 [name2 ...]])
+macro(suitesparse_find_component COMPONENT)
+  include(CMakeParseArguments)
+  set(MULTI_VALUE_ARGS FILES LIBRARIES)
+  cmake_parse_arguments(SuiteSparse_FIND_COMPONENT_${COMPONENT}
+    "" "" "${MULTI_VALUE_ARGS}" ${ARGN})
+
+  set(SuiteSparse_${COMPONENT}_FOUND TRUE)
+  if (SuiteSparse_FIND_COMPONENT_${COMPONENT}_FILES)
+    find_path(SuiteSparse_${COMPONENT}_INCLUDE_DIR
+      NAMES ${SuiteSparse_FIND_COMPONENT_${COMPONENT}_FILES}
+      PATH_SUFFIXES ${SuiteSparse_CHECK_PATH_SUFFIXES})
+    if (SuiteSparse_${COMPONENT}_INCLUDE_DIR)
+      message(STATUS "Found ${COMPONENT} headers in: "
+        "${SuiteSparse_${COMPONENT}_INCLUDE_DIR}")
+      mark_as_advanced(SuiteSparse_${COMPONENT}_INCLUDE_DIR)
+    else()
+      # Specified headers not found.
+      set(SuiteSparse_${COMPONENT}_FOUND FALSE)
+      if (SuiteSparse_FIND_REQUIRED_${COMPONENT})
+        suitesparse_report_not_found(
+          "Did not find ${COMPONENT} header (required SuiteSparse component).")
+      else()
+        message(STATUS "Did not find ${COMPONENT} header (optional "
+          "SuiteSparse component).")
+        # Hide optional vars from CMake GUI even if not found.
+        mark_as_advanced(SuiteSparse_${COMPONENT}_INCLUDE_DIR)
+      endif()
+    endif()
+  endif()
+
+  if (SuiteSparse_FIND_COMPONENT_${COMPONENT}_LIBRARIES)
+    find_library(SuiteSparse_${COMPONENT}_LIBRARY
+      NAMES ${SuiteSparse_FIND_COMPONENT_${COMPONENT}_LIBRARIES}
+      PATH_SUFFIXES ${SuiteSparse_CHECK_PATH_SUFFIXES})
+    if (SuiteSparse_${COMPONENT}_LIBRARY)
+      message(STATUS "Found ${COMPONENT} library: ${SuiteSparse_${COMPONENT}_LIBRARY}")
+      mark_as_advanced(SuiteSparse_${COMPONENT}_LIBRARY)
+    else ()
+      # Specified libraries not found.
+      set(SuiteSparse_${COMPONENT}_FOUND FALSE)
+      if (SuiteSparse_FIND_REQUIRED_${COMPONENT})
+        suitesparse_report_not_found(
+          "Did not find ${COMPONENT} library (required SuiteSparse component).")
+      else()
+        message(STATUS "Did not find ${COMPONENT} library (optional SuiteSparse "
+          "dependency)")
+        # Hide optional vars from CMake GUI even if not found.
+        mark_as_advanced(SuiteSparse_${COMPONENT}_LIBRARY)
+      endif()
+    endif()
+  endif()
+
+  # A component can be optional (given to OPTIONAL_COMPONENTS). However, if the
+  # component is implicit (must be always present, such as the Config component)
+  # assume it to be required as well.
+  if (SuiteSparse_FIND_REQUIRED_${COMPONENT})
+    list (APPEND SuiteSparse_REQUIRED_VARS SuiteSparse_${COMPONENT}_INCLUDE_DIR)
+    list (APPEND SuiteSparse_REQUIRED_VARS SuiteSparse_${COMPONENT}_LIBRARY)
+  endif (SuiteSparse_FIND_REQUIRED_${COMPONENT})
+
+  # Define the target only if the include directory and the library were found
+  if (SuiteSparse_${COMPONENT}_INCLUDE_DIR AND SuiteSparse_${COMPONENT}_LIBRARY)
+    if (NOT TARGET SuiteSparse::${COMPONENT})
+      add_library(SuiteSparse::${COMPONENT} IMPORTED UNKNOWN)
+    endif (NOT TARGET SuiteSparse::${COMPONENT})
+
+    set_property(TARGET SuiteSparse::${COMPONENT} PROPERTY
+      INTERFACE_INCLUDE_DIRECTORIES ${SuiteSparse_${COMPONENT}_INCLUDE_DIR})
+    set_property(TARGET SuiteSparse::${COMPONENT} PROPERTY
+      IMPORTED_LOCATION ${SuiteSparse_${COMPONENT}_LIBRARY})
+  endif (SuiteSparse_${COMPONENT}_INCLUDE_DIR AND SuiteSparse_${COMPONENT}_LIBRARY)
+endmacro()
+
+# Given the number of components of SuiteSparse, and to ensure that the
+# automatic failure message generated by FindPackageHandleStandardArgs()
+# when not all required components are found is helpful, we maintain a list
+# of all variables that must be defined for SuiteSparse to be considered found.
+unset(SuiteSparse_REQUIRED_VARS)
+
+# BLAS.
+find_package(BLAS QUIET)
+if (NOT BLAS_FOUND)
+  suitesparse_report_not_found(
+    "Did not find BLAS library (required for SuiteSparse).")
+endif (NOT BLAS_FOUND)
+
+# LAPACK.
+find_package(LAPACK QUIET)
+if (NOT LAPACK_FOUND)
+  suitesparse_report_not_found(
+    "Did not find LAPACK library (required for SuiteSparse).")
+endif (NOT LAPACK_FOUND)
+
+foreach (component IN LISTS SuiteSparse_FIND_COMPONENTS)
+  string (TOLOWER ${component} component_library)
+
+  if (component STREQUAL "Config")
+    set (component_header SuiteSparse_config.h)
+    set (component_library suitesparseconfig)
+  elseif (component STREQUAL "SPQR")
+    set (component_header SuiteSparseQR.hpp)
+  else (component STREQUAL "SPQR")
+    set (component_header ${component_library}.h)
+  endif (component STREQUAL "Config")
+
+  suitesparse_find_component(${component}
+    FILES ${component_header}
+    LIBRARIES ${component_library})
+endforeach (component IN LISTS SuiteSparse_FIND_COMPONENTS)
+
+if (TARGET SuiteSparse::SPQR)
+  # SuiteSparseQR may be compiled with Intel Threading Building Blocks (TBB);
+  # we assume that if TBB is installed, SuiteSparseQR was compiled with
+  # support for it, this will do no harm if it wasn't.
+  find_package(TBB QUIET)
+  if (TBB_FOUND)
+    message(STATUS "Found Intel Thread Building Blocks (TBB) library "
+      "(${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR} / ${TBB_INTERFACE_VERSION}) "
+      "include location: ${TBB_INCLUDE_DIRS}. Assuming SuiteSparseQR was "
+      "compiled with TBB.")
+    # Add the TBB libraries to the SuiteSparseQR libraries (the only
+    # libraries to optionally depend on TBB).
+    if (TARGET TBB::tbb)
+      # Native TBB package configuration provides an imported target. Use it if
+      # available.
+      set_property (TARGET SuiteSparse::SPQR APPEND PROPERTY
+        INTERFACE_LINK_LIBRARIES TBB::tbb)
+    else (TARGET TBB::tbb)
+      set_property (TARGET SuiteSparse::SPQR APPEND PROPERTY
+        INTERFACE_INCLUDE_DIRECTORIES ${TBB_INCLUDE_DIRS})
+      set_property (TARGET SuiteSparse::SPQR APPEND PROPERTY
+        INTERFACE_LINK_LIBRARIES ${TBB_LIBRARIES})
+    endif (TARGET TBB::tbb)
+  else (TBB_FOUND)
+    message(STATUS "Did not find Intel TBB library, assuming SuiteSparseQR was "
+      "not compiled with TBB.")
+  endif (TBB_FOUND)
+endif (TARGET SuiteSparse::SPQR)
+
+check_library_exists(rt shm_open "" HAVE_LIBRT)
+
+if (TARGET SuiteSparse::Config)
+  # SuiteSparse_config (SuiteSparse version >= 4) requires librt library for
+  # timing by default when compiled on Linux or Unix, but not on OSX (which
+  # does not have librt).
+  if (HAVE_LIBRT)
+    message(STATUS "Adding librt to "
+      "SuiteSparse_config libraries (required on Linux & Unix [not OSX] if "
+      "SuiteSparse is compiled with timing).")
+    set_property (TARGET SuiteSparse::Config APPEND PROPERTY
+      INTERFACE_LINK_LIBRARIES $<LINK_ONLY:rt>)
+  else (HAVE_LIBRT)
+    message(STATUS "Could not find librt, but found SuiteSparse_config, "
+      "assuming that SuiteSparse was compiled without timing.")
+  endif (HAVE_LIBRT)
+
+  # Add BLAS and LAPACK as dependencies of SuiteSparse::Config for convenience
+  # given that all components depend on it.
+  if (BLAS_FOUND)
+    if (TARGET BLAS::BLAS)
+      set_property (TARGET SuiteSparse::Config APPEND PROPERTY
+        INTERFACE_LINK_LIBRARIES $<LINK_ONLY:BLAS::BLAS>)
+    else (TARGET BLAS::BLAS)
+      set_property (TARGET SuiteSparse::Config APPEND PROPERTY
+        INTERFACE_LINK_LIBRARIES ${BLAS_LIBRARIES})
+    endif (TARGET BLAS::BLAS)
+  endif (BLAS_FOUND)
+
+  if (LAPACK_FOUND)
+    if (TARGET LAPACK::LAPACK)
+      set_property (TARGET SuiteSparse::Config APPEND PROPERTY
+        INTERFACE_LINK_LIBRARIES $<LINK_ONLY:LAPACK::LAPACK>)
+    else (TARGET LAPACK::LAPACK)
+      set_property (TARGET SuiteSparse::Config APPEND PROPERTY
+        INTERFACE_LINK_LIBRARIES ${LAPACK_LIBRARIES})
+    endif (TARGET LAPACK::LAPACK)
+  endif (LAPACK_FOUND)
+
+  # SuiteSparse version >= 4.
+  set(SuiteSparse_VERSION_FILE
+    ${SuiteSparse_Config_INCLUDE_DIR}/SuiteSparse_config.h)
+  if (NOT EXISTS ${SuiteSparse_VERSION_FILE})
+    suitesparse_report_not_found(
+      "Could not find file: ${SuiteSparse_VERSION_FILE} containing version "
+      "information for >= v4 SuiteSparse installs, but SuiteSparse_config was "
+      "found (only present in >= v4 installs).")
+  else (NOT EXISTS ${SuiteSparse_VERSION_FILE})
+    file(READ ${SuiteSparse_VERSION_FILE} Config_CONTENTS)
+
+    string(REGEX MATCH "#define SUITESPARSE_MAIN_VERSION [0-9]+"
+      SuiteSparse_VERSION_MAJOR "${Config_CONTENTS}")
+    string(REGEX REPLACE "#define SUITESPARSE_MAIN_VERSION ([0-9]+)" "\\1"
+      SuiteSparse_VERSION_MAJOR "${SuiteSparse_VERSION_MAJOR}")
+
+    string(REGEX MATCH "#define SUITESPARSE_SUB_VERSION [0-9]+"
+      SuiteSparse_VERSION_MINOR "${Config_CONTENTS}")
+    string(REGEX REPLACE "#define SUITESPARSE_SUB_VERSION ([0-9]+)" "\\1"
+      SuiteSparse_VERSION_MINOR "${SuiteSparse_VERSION_MINOR}")
+
+    string(REGEX MATCH "#define SUITESPARSE_SUBSUB_VERSION [0-9]+"
+      SuiteSparse_VERSION_PATCH "${Config_CONTENTS}")
+    string(REGEX REPLACE "#define SUITESPARSE_SUBSUB_VERSION ([0-9]+)" "\\1"
+      SuiteSparse_VERSION_PATCH "${SuiteSparse_VERSION_PATCH}")
+
+    # This is on a single line s/t CMake does not interpret it as a list of
+    # elements and insert ';' separators which would result in 4.;2.;1 nonsense.
+    set(SuiteSparse_VERSION
+      "${SuiteSparse_VERSION_MAJOR}.${SuiteSparse_VERSION_MINOR}.${SuiteSparse_VERSION_PATCH}")
+    set(SuiteSparse_VERSION_COMPONENTS 3)
+  endif (NOT EXISTS ${SuiteSparse_VERSION_FILE})
+endif (TARGET SuiteSparse::Config)
+
+# METIS (Optional dependency).
+find_package (METIS)
+
+# CHOLMOD requires AMD CAMD CCOLAMD COLAMD
+if (TARGET SuiteSparse::CHOLMOD)
+  # METIS is optional
+  if (TARGET METIS::METIS)
+    set_property (TARGET SuiteSparse::CHOLMOD APPEND PROPERTY
+      INTERFACE_LINK_LIBRARIES METIS::METIS)
+  endif (TARGET METIS::METIS)
+
+  foreach (component IN ITEMS AMD CAMD CCOLAMD COLAMD)
+    if (TARGET SuiteSparse::${component})
+      set_property (TARGET SuiteSparse::CHOLMOD APPEND PROPERTY
+        INTERFACE_LINK_LIBRARIES SuiteSparse::${component})
+    else (TARGET SuiteSparse::${component})
+      # Consider CHOLMOD not found if COLAMD cannot be found
+      set (SuiteSparse_CHOLMOD_FOUND FALSE)
+    endif (TARGET SuiteSparse::${component})
+  endforeach (component IN ITEMS AMD CAMD CCOLAMD COLAMD)
+endif (TARGET SuiteSparse::CHOLMOD)
+
+# SPQR requires CHOLMOD
+if (TARGET SuiteSparse::SPQR)
+  if (TARGET SuiteSparse::CHOLMOD)
+    set_property (TARGET SuiteSparse::SPQR APPEND PROPERTY
+      INTERFACE_LINK_LIBRARIES SuiteSparse::CHOLMOD)
+  else (TARGET SuiteSparse::CHOLMOD)
+    # Consider SPQR not found if CHOLMOD cannot be found
+    set (SuiteSparse_SPQR_FOUND FALSE)
+  endif (TARGET SuiteSparse::CHOLMOD)
+endif (TARGET SuiteSparse::SPQR)
+
+# Add SuiteSparse::Config as dependency to all components
+if (TARGET SuiteSparse::Config)
+  foreach (component IN LISTS SuiteSparse_FIND_COMPONENTS)
+    if (component STREQUAL Config)
+      continue ()
+    endif (component STREQUAL Config)
+
+    if (TARGET SuiteSparse::${component})
+      set_property (TARGET SuiteSparse::${component} APPEND PROPERTY
+        INTERFACE_LINK_LIBRARIES SuiteSparse::Config)
+    endif (TARGET SuiteSparse::${component})
+  endforeach (component IN LISTS SuiteSparse_FIND_COMPONENTS)
+endif (TARGET SuiteSparse::Config)
+
+suitesparse_reset_find_library_prefix()
+
+# Handle REQUIRED and QUIET arguments to find_package().
+include(FindPackageHandleStandardArgs)
+if (SuiteSparse_FOUND)
+  find_package_handle_standard_args(SuiteSparse
+    REQUIRED_VARS ${SuiteSparse_REQUIRED_VARS}
+    VERSION_VAR SuiteSparse_VERSION
+    FAIL_MESSAGE "Failed to find some/all required components of SuiteSparse."
+    HANDLE_COMPONENTS)
+else (SuiteSparse_FOUND)
+  # Do not pass VERSION_VAR to FindPackageHandleStandardArgs() if we failed to
+  # find SuiteSparse to avoid a confusing autogenerated failure message
+  # that states 'not found (missing: FOO) (found version: x.y.z)'.
+  find_package_handle_standard_args(SuiteSparse
+    REQUIRED_VARS ${SuiteSparse_REQUIRED_VARS}
+    FAIL_MESSAGE "Failed to find some/all required components of SuiteSparse."
+    HANDLE_COMPONENTS)
+endif (SuiteSparse_FOUND)
+
+# Pop CMP0057.
+cmake_policy (POP)
diff --git a/ceres-v2/lib/libceres.a b/ceres-v2/lib/libceres.a
new file mode 100644
index 0000000000000000000000000000000000000000..0dfd4f11440bdb772cccdcf73af2e54eb77e3d7a
--- /dev/null
+++ b/ceres-v2/lib/libceres.a
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9567fd04281af7ad3757eb3a284346a9bb22d2e4e5c818cc1e8c1f9d8166c31a
+size 11354694