diff --git a/stan/math/mix/functor/finite_diff_grad_hessian_auto.hpp b/stan/math/mix/functor/finite_diff_grad_hessian_auto.hpp
index 108b059149c..fd6cde303c8 100644
--- a/stan/math/mix/functor/finite_diff_grad_hessian_auto.hpp
+++ b/stan/math/mix/functor/finite_diff_grad_hessian_auto.hpp
@@ -2,7 +2,7 @@
 #define STAN_MATH_MIX_FUNCTOR_FINITE_DIFF_GRAD_HESSIAN_AUTO_HPP
 
 #include <...>
-#include <...>
+#include <...>
 #include <...>
 #include <...>
 #include <...>
diff --git a/stan/math/prim/functor/finite_diff_gradient_auto.hpp b/stan/math/prim/functor/finite_diff_gradient_auto.hpp
index 66bc59b4e23..56c6a90f40b 100644
--- a/stan/math/prim/functor/finite_diff_gradient_auto.hpp
+++ b/stan/math/prim/functor/finite_diff_gradient_auto.hpp
@@ -2,7 +2,8 @@
 #define STAN_MATH_PRIM_FUNCTOR_FINITE_DIFF_GRADIENT_AUTO_HPP
 
 #include <...>
-#include <stan/math/prim/fun/finite_diff_stepsize.hpp>
+#include <boost/math/differentiation/finite_difference.hpp>
+#include <...>
 #include <...>
 
 namespace stan {
@@ -10,35 +11,7 @@ namespace math {
 
 /**
  * Calculate the value and the gradient of the specified function
- * at the specified argument using finite difference.
- *
- * <p>The functor must implement
- *
- * <code>
- * double operator()(const Eigen::Matrix<double, Eigen::Dynamic, 1>&) const;
- * </code>
- *
- * <p>Error of derivative in dimension `i` should be on the
- * order of `epsilon(i)^6`, where `epsilon(i) = sqrt(delta) * abs(x(i))`
- * for input `x` at dimension `i`.
- *
- * The reference for this algorithm is:
- *
- * <br />Robert de Levie. 2009. An improved numerical approximation
- * for the first derivative. Journal of Chemical Sciences 121(5), page
- * 3.
- *
- * <p>The reference for automatically setting the difference is this
- * section of the Wikipedia,
- *
- * <br /><a
- * href="https://en.wikipedia.org/wiki/Numerical_differentiation#Practical_considerations_using_floating_point_arithmetic">Numerical
- * differentiation: practical considerations using floating point
- * arithmetic</a>.
- *
- * <p>Evaluating this function involves 6 calls to the function being
- * differentiated for each dimension in the input, plus one global
- * evaluation. All evaluations will be for double-precision inputs.
+ * at the specified argument using finite differences.
  *
  * @tparam F Type of function
 * @param[in] f function
@@ -50,36 +23,18 @@ template <typename F, typename VectorT,
           typename ScalarT = return_type_t<VectorT>>
 void finite_diff_gradient_auto(const F& f, const VectorT& x, ScalarT& fx,
                                VectorT& grad_fx) {
+  using boost::math::differentiation::finite_difference_derivative;
   VectorT x_temp(x);
   fx = f(x);
   grad_fx.resize(x.size());
 
-  for (int i = 0; i < x.size(); ++i) {
-    double h = finite_diff_stepsize(value_of_rec(x[i]));
-
-    ScalarT delta_f = 0;
-
-    x_temp[i] = x[i] + 3 * h;
-    delta_f += f(x_temp);
-
-    x_temp[i] = x[i] + 2 * h;
-    delta_f -= 9 * f(x_temp);
-
-    x_temp[i] = x[i] + h;
-    delta_f += 45 * f(x_temp);
-
-    x_temp[i] = x[i] + -3 * h;
-    delta_f -= f(x_temp);
-
-    x_temp[i] = x[i] + -2 * h;
-    delta_f += 9 * f(x_temp);
-
-    x_temp[i] = x[i] - h;
-    delta_f -= 45 * f(x_temp);
-
-    delta_f /= 60 * h;
+  for (Eigen::Index i = 0; i < x.size(); ++i) {
+    auto fun = [&i, &x_temp, &f](const auto& y) {
+      x_temp[i] = y;
+      return f(x_temp);
+    };
+    grad_fx[i] = finite_difference_derivative(fun, x[i]);
     x_temp[i] = x[i];
-    grad_fx[i] = delta_f;
   }
 }
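For reference, the loop deleted above implemented the sixth-order central-difference stencil from the de Levie (2009) reference cited in the removed documentation. A standalone sketch of that stencil follows; `stencil_derivative`, the fixed step `h`, and the test function are illustrative only and not part of either version of the library:

```cpp
#include <cmath>
#include <iostream>

// Sixth-order central difference, as the deleted loop computed it for
// each coordinate:
//   f'(x) ~= [f(x + 3h) - 9 f(x + 2h) + 45 f(x + h)
//             - f(x - 3h) + 9 f(x - 2h) - 45 f(x - h)] / (60 h)
// This costs six evaluations of f per coordinate, matching the removed
// doc comment, with error on the order of h^6.
template <typename F>
double stencil_derivative(const F& f, double x, double h) {
  double delta_f = f(x + 3 * h) - 9 * f(x + 2 * h) + 45 * f(x + h)
                   - f(x - 3 * h) + 9 * f(x - 2 * h) - 45 * f(x - h);
  return delta_f / (60 * h);
}

int main() {
  // d/dx exp(x) at x = 1 is e = 2.718281828...
  // The removed code picked h per coordinate via finite_diff_stepsize;
  // a fixed illustrative step is used here instead.
  double h = 1e-3;
  std::cout << stencil_derivative([](double x) { return std::exp(x); }, 1.0, h)
            << "\n";
}
```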
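The replacement hands both the stencil and the step-size selection to Boost.Math's `finite_difference_derivative`, which applies a sixth-order rule by default and derives its step size from the floating-point epsilon of the argument type; that is what makes the hand-rolled `finite_diff_stepsize` call unnecessary. Below is a minimal sketch of the pattern the new loop uses, assuming Eigen and Boost are available; the target function `f` and the evaluation point are made up for illustration:

```cpp
#include <boost/math/differentiation/finite_difference.hpp>
#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main() {
  using boost::math::differentiation::finite_difference_derivative;

  // Illustrative target: f(v) = v0^2 * sin(v1), so the gradient is
  // (2 v0 sin(v1), v0^2 cos(v1)).
  auto f = [](const Eigen::VectorXd& v) {
    return v[0] * v[0] * std::sin(v[1]);
  };

  Eigen::VectorXd x(2);
  x << 1.5, 0.25;

  // Same shape as the patched loop: freeze all coordinates but one,
  // hand the resulting 1-D slice to Boost, then restore the coordinate.
  Eigen::VectorXd x_temp = x;
  Eigen::VectorXd grad(x.size());
  for (Eigen::Index i = 0; i < x.size(); ++i) {
    auto slice = [&](double y) {
      x_temp[i] = y;
      return f(x_temp);
    };
    grad[i] = finite_difference_derivative(slice, x[i]);
    x_temp[i] = x[i];
  }

  std::cout << "gradient: " << grad.transpose() << "\n";
}
```

As in the patched loop, `x_temp[i]` must be restored after each coordinate so that later slices see the original point.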