diff --git a/src/_quarto.yml b/src/_quarto.yml
index a202fded4..f6aa243df 100644
--- a/src/_quarto.yml
+++ b/src/_quarto.yml
@@ -188,6 +188,7 @@ website:
- reference-manual/pathfinder.qmd
- reference-manual/variational.qmd
- reference-manual/laplace.qmd
+ - reference-manual/laplace_embedded.qmd
- reference-manual/diagnostics.qmd
- section: "Usage"
contents:
@@ -237,6 +238,7 @@ website:
- section: "Additional Distributions"
contents:
- functions-reference/hidden_markov_models.qmd
+ - functions-reference/embedded_laplace.qmd
- section: "Appendix"
contents:
- functions-reference/mathematical_functions.qmd
diff --git a/src/bibtex/all.bib b/src/bibtex/all.bib
index 9f8fabd3e..d59eca688 100644
--- a/src/bibtex/all.bib
+++ b/src/bibtex/all.bib
@@ -274,7 +274,7 @@ @article{turing:1936
volume={58}, number={345-363}, pages={5}, year={1936}
}
-
+
@article{kucukelbir:2015,
title={Automatic variational inference in Stan}, author={Kucukelbir, Alp and
Ranganath, Rajesh and Gelman, Andrew and Blei, David}, journal={Advances in
@@ -1273,7 +1273,7 @@ @article{Timonen+etal:2023:ODE-PSIS
title={An importance sampling approach for reliable and efficient inference in
{Bayesian} ordinary differential equation models}, author={Timonen, Juho and
Siccha, Nikolas and Bales, Ben and L{\"a}hdesm{\"a}ki, Harri and Vehtari,
- Aki}, journal={Stat}, year={2023}, volume = 12, number = 1, pages = {e614}
+ Aki}, journal={Stat}, year={2023}, volume = 12, number = 1, pages = {e614}
}
@article{Vehtari+etal:2024:PSIS,
@@ -1320,5 +1320,71 @@ @misc{seyboldt:2024
@article{lancaster:1965, ISSN = {00029890, 19300972}, URL =
{http://www.jstor.org/stable/2312989}, author = {H. O. Lancaster}, journal =
{The American Mathematical Monthly}, number = {1}, pages = {4--12}, publisher =
-{Mathematical Association of America}, title = {The {H}elmert Matrices}, urldate
-= {2025-05-07}, volume = {72}, year = {1965} }
+{Mathematical Association of America}, title = {The {H}elmert Matrices},
+urldate = {2025-05-07}, volume = {72}, year = {1965} }
+
+@article{Margossian:2020,
+ Author = {Margossian, Charles C and Vehtari, Aki and Simpson, Daniel
+ and Agrawal, Raj},
+ Title = {Hamiltonian Monte Carlo using an adjoint-differentiated Laplace approximation: Bayesian inference for latent Gaussian models and beyond},
+ journal = {Advances in Neural Information Processing Systems},
+  volume = {33},
+ Year = {2020}}
+
+@article{Kuss:2005,
+ author = {Kuss, Malte and Rasmussen, Carl E},
+ title = {Assessing Approximate Inference for Binary {Gaussian} Process Classification},
+ journal = {Journal of Machine Learning Research},
+ volume = {6},
+ pages = {1679 -- 1704},
+ year = {2005}}
+
+@article{Vanhatalo:2010,
+ author = {Jarno Vanhatalo and Ville Pietil\"{a}inen and Aki Vehtari},
+ title = {Approximate inference for disease mapping with sparse {Gaussian} processes},
+ journal = {Statistics in Medicine},
+ year = {2010},
+ volume = {29},
+ number = {15},
+ pages = {1580--1607}
+}
+
+@article{Cseke:2011,
+ author = {Botond Cseke and Heskes, Tom},
+ title = {Approximate marginals in latent {Gaussian} models},
+ journal = {Journal of Machine Learning Research},
+ volume = {12},
+ issue = {2},
+  pages = {417--454},
+ year = {2011}}
+
+@article{Vehtari:2016,
+ author = {Aki Vehtari and Tommi Mononen and Ville Tolvanen and Tuomas Sivula and Ole Winther},
+ title = {Bayesian Leave-One-Out Cross-Validation Approximations for {Gaussian} Latent Variable Models},
+ journal = {Journal of Machine Learning Research},
+ year = {2016},
+ volume = {17},
+ number = {103},
+ pages = {1--38},
+ url = {http://jmlr.org/papers/v17/14-540.html}
+}
+
+
+@article{Margossian:2023,
+ author = {Margossian, Charles C},
+ title = {General adjoint-differentiated Laplace approximation},
+ journal = {arXiv:2306.14976 },
+ year = {2023}}
+
+
+ @article{Rue:2009,
+ title={Approximate Bayesian inference for latent Gaussian models by using integrated nested Laplace approximations},
+ author={Rue, H{\aa}vard and Martino, Sara and Chopin, Nicolas},
+ journal={Journal of the Royal Statistical Society: Series B (Statistical Methodology)},
+ volume={71},
+ number={2},
+ pages={319--392},
+ year={2009},
+ publisher={Wiley Online Library},
+ doi={10.1111/j.1467-9868.2008.00700.x}
+}
diff --git a/src/functions-reference/_quarto.yml b/src/functions-reference/_quarto.yml
index bc6e83acb..9fd6f9a9a 100644
--- a/src/functions-reference/_quarto.yml
+++ b/src/functions-reference/_quarto.yml
@@ -72,6 +72,7 @@ book:
- part: "Additional Distributions"
chapters:
- hidden_markov_models.qmd
+ - embedded_laplace.qmd
- part: "Appendix"
chapters:
- mathematical_functions.qmd
diff --git a/src/functions-reference/embedded_laplace.qmd b/src/functions-reference/embedded_laplace.qmd
new file mode 100644
index 000000000..bf9e7f301
--- /dev/null
+++ b/src/functions-reference/embedded_laplace.qmd
@@ -0,0 +1,694 @@
+---
+pagetitle: Embedded Laplace Approximation
+---
+
+# Embedded Laplace Approximation
+
+The embedded Laplace approximation can be used to approximate certain
+marginal and conditional distributions that arise in latent Gaussian models.
+A latent Gaussian model observes the following hierarchical structure:
+$$
+ \phi \sim p(\phi), \\
+ \theta \sim \text{MultiNormal}(0, K(\phi)), \\
+ y \sim p(y \mid \theta, \phi).
+$$
+In this formulation, $y$ represents the
+observed data, and $p(y \mid \theta, \phi)$ is the likelihood function that
+specifies how observations are generated conditional on the latent variables
+$\theta$ and hyperparameters $\phi$.
+$\phi$ denotes the set of hyperparameters governing the model and
+$p(\phi)$ is the prior distribution placed over these hyperparameters.
+$K(\phi)$ denotes the prior covariance matrix for the latent Gaussian variables
+$\theta$ and is parameterized by $\phi$.
+The prior $p(\theta)$ is restricted to be a multivariate normal.
+To sample from the joint posterior $p(\phi, \theta \mid y)$, we can either
+use a standard method, such as Markov chain Monte Carlo, or we can follow
+a two-step procedure:
+
+1. sample from the *marginal posterior* $p(\phi \mid y)$,
+2. sample from the *conditional posterior* $p(\theta \mid y, \phi)$.
+
+In the above procedure, neither the marginal posterior nor the conditional posterior
+are typically available in closed form and so they must be approximated.
+The marginal posterior can be written as $p(\phi \mid y) \propto p(y \mid \phi) p(\phi)$,
+where $p(y \mid \phi) = \int p(y \mid \phi, \theta) p(\theta) \text{d}\theta$
+is called the marginal likelihood. The Laplace method approximates
+$p(y \mid \phi, \theta) p(\theta)$ with a normal distribution centered at
+$$
+ \theta^* = \underset{\theta}{\text{argmax}} \ \log p(\theta \mid y, \phi),
+$$
+and $\theta^*$ is obtained using a numerical optimizer.
+The resulting Gaussian integral can be evaluated analytically to obtain an
+approximation to the log marginal likelihood
+$\log \hat p(y \mid \phi) \approx \log p(y \mid \phi)$.
+Specifically:
+$$
+ \hat p(y \mid \phi) = \frac{p(\theta^* \mid \phi) p(y \mid \theta^*, \phi)}{\hat p (\theta^* \mid \phi, y)}.
+$$
+
+Combining this marginal likelihood with the prior in the `model`
+block, we can then sample from the marginal posterior $p(\phi \mid y)$
+using one of Stan's algorithms. The marginal posterior is lower
+dimensional and likely to have a simpler geometry leading to more
+efficient inference. On the other hand each marginal likelihood
+computation is more costly, and the combined change in efficiency
+depends on the case.
+
+To obtain posterior draws for $\theta$, we sample from the normal
+approximation to $p(\theta \mid y, \phi)$ in `generated quantities`.
+The process of iteratively sampling from $p(\phi \mid y)$ (say, with MCMC) and
+then $p(\theta \mid y, \phi)$ produces samples from the joint posterior
+$p(\theta, \phi \mid y)$.
+
+The Laplace approximation is especially useful if $p(\theta)$ is
+multivariate normal and $p(y \mid \phi, \theta)$ is
+log-concave. Stan's embedded Laplace approximation is restricted to the case
+where the prior $p(\theta)$ is multivariate normal.
+Furthermore, the likelihood $p(y \mid \phi, \theta)$ must be computed using
+only operations which support higher-order derivatives
+(see section [specifying the likelihood function](#laplace-likelihood_spec)).
+
+## Approximating the log marginal likelihood $\log p(y \mid \phi)$
+
+In the `model` block, we increment `target` with `laplace_marginal`, a function
+that approximates the log marginal likelihood $\log p(y \mid \phi)$.
+The signature of the function is:
+
+\index{{\tt \bfseries laplace\_marginal }!{\tt (function likelihood\_function, tuple(...) likelihood\_arguments, vector theta\_init, function covariance\_function, tuple(...) covariance\_arguments): real}|hyperpage}
+
+
+
+`real` **`laplace_marginal`**`(function likelihood_function, tuple(...) likelihood_arguments, vector theta_init, function covariance_function, tuple(...) covariance_arguments)`
+
+Returns an approximation to the log marginal likelihood $\log p(y \mid \phi)$.
+{{< since 2.37 >}}
+
+This function takes in the following arguments.
+
+1. `likelihood_function` - user-specified log likelihood whose first argument is the vector of latent Gaussian variables `theta`
+2. `likelihood_arguments` - A tuple of the log likelihood arguments whose internal members will be passed to the likelihood function
+3. `theta_init` - an initial guess for the optimization problem that underlies the Laplace approximation,
+4. `covariance_function` - Prior covariance function
+5. `covariance_arguments` - A tuple of the arguments whose internal members will be passed to the covariance function
+
+The size of $\theta_\text{init}$ must be consistent with the size of the $\theta$ argument
+passed to `likelihood_function`.
+
+Below we go over each argument in more detail.
+
+## Specifying the log likelihood function {#laplace-likelihood_spec}
+
+The first step to use the embedded Laplace approximation is to write down a
+function in the `functions` block which returns the log joint likelihood
+$\log p(y \mid \theta, \phi)$.
+
+There are a few constraints on this function:
+
+1. The function return type must be `real`
+
+2. The first argument must be the latent Gaussian variable $\theta$ and must
+have type `vector`.
+
+3. The operations in the function must support higher-order automatic
+differentiation (AD). Most functions in Stan support higher-order AD.
+The exceptions are functions with specialized calls for reverse-mode AD, and
+these are higher-order functions (algebraic solvers, differential equation
+solvers, and integrators), the marginalization function for hidden Markov
+models (HMM), and the embedded Laplace approximation itself.
+
+The base signature of the function is
+
+```stan
+real likelihood_function(vector theta, ...)
+```
+
+The `...` represents a set of optional variadic arguments. There are no type
+restrictions for the variadic arguments `...` and each argument can be passed
+as data or parameter.
+
+The tuple after `likelihood_function` contains the arguments that get passed
+to `likelihood_function` *excluding $\theta$*. For instance, if a user defined
+likelihood uses a real and a matrix the likelihood function's signature would
+first have a vector and then a real and matrix argument.
+
+```stan
+real likelihood_fun(vector theta, real a, matrix X)
+```
+
+The call to the laplace marginal would start with this likelihood and
+tuple holding the other likelihood arguments.
+
+```stan
+real val = laplace_marginal(likelihood_fun, (a, X), ...);
+```
+
+As always, users should use parameter arguments only when necessary in order to
+speed up differentiation.
+In general, we recommend marking data only arguments with the keyword `data`,
+for example,
+
+```stan
+real likelihood_function(vector theta, data vector x, ...)
+```
+
+## Specifying the covariance function
+
+The argument `covariance_function` returns the prior covariance matrix
+$K$. The signature for this function is the same as a standard Stan function.
+Its return type must be a matrix of size $n \times n$, where $n$ is the size of $\theta$.
+
+```stan
+matrix covariance_function(...)
+```
+
+
+
+The `...` represents a set of optional
+variadic arguments. There are no type restrictions for the variadic arguments
+`...` and each argument can be passed as data or parameter. The variable
+$\phi$ is implicitly defined as the collection of all non-data arguments passed
+to `likelihood_function` (excluding $\theta$) and `covariance_function`.
+
+The tuple after `covariance_function` contains the arguments that get passed
+to `covariance_function`. For instance, if a user defined covariance function
+uses two vectors
+```stan
+matrix cov_fun(real b, matrix Z)
+```
+the call to the Laplace marginal would include the covariance function and
+a tuple holding the covariance function arguments.
+
+```stan
+real val = laplace_marginal(likelihood_fun, (a, X), theta_init, cov_fun, (b, Z), ...);
+```
+
+## Control parameters
+
+It is also possible to specify control parameters, which can help improve the
+optimization that underlies the Laplace approximation, using `laplace_marginal_tol`
+with the following signature:
+
+\index{{\tt \bfseries laplace\_marginal\_tol }!{\tt (function likelihood\_function, tuple(...), vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): real}|hyperpage}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol }!{\tt (function likelihood\_function, tuple(...), vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): real}|hyperpage}
+
+`real` **`laplace_marginal_tol`**`(function likelihood_function, tuple(...), vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+and allows the user to tune the control parameters of the approximation.
+
+* `tol`: the tolerance $\epsilon$ of the optimizer. Specifically, the optimizer
+stops when $||\nabla \log p(\theta \mid y, \phi)|| \le \epsilon$. By default,
+the value is $\epsilon = 10^{-6}$.
+
+* `max_steps`: the maximum number of steps taken by the optimizer before
+it gives up (in which case the Metropolis proposal gets rejected). The default
+is 100 steps.
+
+* `hessian_block_size`: the size of the blocks, assuming the Hessian
+$\partial^2 \log p(y \mid \theta, \phi) / \partial \theta^2$ is block-diagonal.
+The structure of the Hessian is determined by the dependence structure of $y$
+on $\theta$. By default, the Hessian is treated as diagonal
+(`hessian_block_size=1`). If the Hessian is not block diagonal, then set
+`hessian_block_size=n`, where `n` is the size of $\theta$.
+
+* `solver`: choice of Newton solver. The optimizer used to compute the
+Laplace approximation does one of three matrix decompositions to compute a
+Newton step. The problem determines which decomposition is numerically stable.
+By default (`solver=1`), the solver makes a Cholesky decomposition of the
+negative Hessian, $- \partial^2 \log p(y \mid \theta, \phi) / \partial \theta^2$.
+If `solver=2`, the solver makes a Cholesky decomposition of the covariance
+matrix $K(\phi)$.
+If the Cholesky decomposition cannot be computed for either the negative
+Hessian or the covariance matrix, use `solver=3` which uses a more expensive
+but less specialized approach.
+
+* `max_steps_linesearch`: maximum number of steps in linesearch. The linesearch
+method tries to ensure that the Newton step leads to a decrease in the
+objective function. If the Newton step does not improve the objective function,
+the step is repeatedly halved until the objective function decreases or the
+maximum number of steps in the linesearch is reached. By default,
+`max_steps_linesearch=0`, meaning no linesearch is performed.
+
+{{< since 2.37 >}}
+
+## Sample from the approximate conditional $\hat{p}(\theta \mid y, \phi)$
+
+In `generated quantities`, it is possible to sample from the Laplace
+approximation of $p(\theta \mid \phi, y)$ using `laplace_latent_rng`.
+The signature for `laplace_latent_rng` follows closely
+the signature for `laplace_marginal`:
+
+
+\index{{\tt \bfseries laplace\_latent\_rng }!{\tt (function likelihood\_function, tuple(...), vector theta\_init, function covariance\_function, tuple(...)): vector}|hyperpage}
+
+`vector` **`laplace_latent_rng`**`(function likelihood_function, tuple(...), vector theta_init, function covariance_function, tuple(...))`
\newline
+
+Draws approximate samples from the conditional posterior $p(\theta \mid y, \phi)$.
+{{< since 2.37 >}}
+
+Once again, it is possible to specify control parameters:
+\index{{\tt \bfseries laplace\_latent\_tol\_rng }!{\tt (function likelihood\_function, tuple(...), vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): vector}|hyperpage}
+
+`vector` **`laplace_latent_tol_rng`**`(function likelihood_function, tuple(...), vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+Draws approximate samples from the conditional posterior $p(\theta \mid y, \phi)$
+and allows the user to tune the control parameters of the approximation.
+{{< since 2.37 >}}
+
+## Built-in Laplace marginal likelihood functions
+
+Stan provides convenient wrappers for the embedded Laplace approximation
+when applied to latent Gaussian models with certain likelihoods.
+With this wrapper, the likelihood is pre-specified and does not need to be
+specified by the user.
+The selection of supported likelihoods is currently
+narrow and expected to grow. The wrappers exist for the user's
+convenience but are not more computationally efficient than specifying log
+likelihoods in the `functions` block.
+
+### Poisson with log link
+
+Given count data, with each observed count $y_i$ associated with a group
+$g(i)$ and a corresponding latent variable $\theta_{g(i)}$, and Poisson model,
+the likelihood is
+$$
+p(y \mid \theta, \phi) = \prod_i\text{Poisson} (y_i \mid \exp(\theta_{g(i)})).
+$$
+The arguments required to compute this likelihood are:
+
+* `y`: an array of counts.
+* `y_index`: an array whose $i^\text{th}$ element indicates which
+group the $i^\text{th}$ observation belongs to.
+
+
+\index{{\tt \bfseries laplace\_marginal\_poisson\_log }!sampling statement|hyperpage}
+
+`y ~ ` **`laplace_marginal_poisson_log`**`(y_index, theta_init, covariance_function, (...))`
\newline
+
+Increment target log probability density with `laplace_marginal_poisson_log_lupmf(y | y_index, theta_init, covariance_function, (...))`.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol\_poisson\_log }!sampling statement|hyperpage}
+
+`y ~ ` **`laplace_marginal_tol_poisson_log`**`(y_index, theta_init, covariance_function, (...), tol, max_steps, hessian_block_size, solver, max_steps_linesearch)`
\newline
+
+Increment target log probability density with `laplace_marginal_tol_poisson_log_lupmf(y | y_index, theta_init, covariance_function, (...), tol, max_steps, hessian_block_size, solver, max_steps_linesearch)`.
+
+The signatures for the embedded Laplace approximation function with a Poisson
+likelihood are
+
+
+\index{{\tt \bfseries laplace\_marginal\_poisson\_log\_lpmf }!{\tt (array[] int y \textbar\ array[] int y\_index, vector theta\_init, function covariance\_function, tuple(...)): real}|hyperpage}
+`real` **`laplace_marginal_poisson_log_lpmf`**`(array[] int y | array[] int y_index, vector theta_init, function covariance_function, tuple(...))`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+in the special case where the likelihood $p(y \mid \theta)$ is a Poisson
+distribution with a log link.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol\_poisson\_log\_lpmf }!{\tt (array[] int y \textbar\ array[] int y\_index, vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): real}|hyperpage}
+`real` **`laplace_marginal_tol_poisson_log_lpmf`**`(array[] int y | array[] int y_index, vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+in the special case where the likelihood $p(y \mid \theta)$ is a Poisson
+distribution with a log link, and allows the user to tune the control
+parameters of the approximation.
+{{< since 2.37 >}}
+
+
+
+\index{{\tt \bfseries laplace\_marginal\_poisson\_log\_lupmf }!{\tt (array[] int y \textbar\ array[] int y\_index, vector theta\_init, function covariance\_function, tuple(...)): real}|hyperpage}
+`real` **`laplace_marginal_poisson_log_lupmf`**`(array[] int y | array[] int y_index, vector theta_init, function covariance_function, tuple(...))`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+in the special case where the likelihood $p(y \mid \theta)$ is a Poisson
+distribution with a log link.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol\_poisson\_log\_lupmf }!{\tt (array[] int y \textbar\ array[] int y\_index, vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): real}|hyperpage}
+
+`real` **`laplace_marginal_tol_poisson_log_lupmf`**`(array[] int y | array[] int y_index, vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+in the special case where the likelihood $p(y \mid \theta)$ is a Poisson
+distribution with a log link, and allows the user to tune the control
+parameters of the approximation.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_latent\_poisson\_log\_rng }!{\tt (array[] int y, array[] int y\_index, vector theta\_init, function covariance\_function, tuple(...)): vector}|hyperpage}
+`vector` **`laplace_latent_poisson_log_rng`**`(array[] int y, array[] int y_index, vector theta_init, function covariance_function, tuple(...))`
\newline
+Returns a draw from the Laplace approximation to the conditional posterior
+$p(\theta \mid y, \phi)$ in the special case where the likelihood
+$p(y \mid \theta)$ is a Poisson distribution with a log link.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_latent\_tol\_poisson\_log\_rng }!{\tt (array[] int y, array[] int y\_index, vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): vector}|hyperpage}
+
+`vector` **`laplace_latent_tol_poisson_log_rng`**`(array[] int y, array[] int y_index, vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns a draw from the Laplace approximation to the conditional posterior
+$p(\theta \mid y, \phi)$ in the special case where the likelihood
+$p(y \mid \theta)$ is a Poisson distribution with a log link
+and allows the user to tune the control parameters of the approximation.
+{{< since 2.37 >}}
+
+A similar built-in likelihood lets users specify an offset $x$,
+with $x_i > 0$, to the rate parameter of the Poisson. This is equivalent to
+specifying a prior mean $\log(x_i)$ for $\theta_i$. The likelihood is then
+$$
+p(y \mid \theta, \phi) = \prod_i\text{Poisson} (y_i \mid \exp(\theta_{g(i)}) x_i).
+$$
+
+
+\index{{\tt \bfseries laplace\_marginal\_poisson\_2\_log }!sampling statement|hyperpage}
+
+`y ~ ` **`laplace_marginal_poisson_2_log`**`(y_index, x, theta_init, covariance_function, (...))`
\newline
+
+Increment target log probability density with `laplace_marginal_poisson_2_log_lupmf(y | y_index, x, theta_init, covariance_function, (...))`.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol\_poisson\_2\_log }!sampling statement|hyperpage}
+
+`y ~ ` **`laplace_marginal_tol_poisson_2_log`**`(y_index, x, theta_init, covariance_function, (...), tol, max_steps, hessian_block_size, solver, max_steps_linesearch)`
\newline
+Increment target log probability density with `laplace_marginal_tol_poisson_2_log_lupmf(y | y_index, x, theta_init, covariance_function, (...), tol, max_steps, hessian_block_size, solver, max_steps_linesearch)`.
+{{< since 2.37 >}}
+
+The signatures for this function are:
+
+
+\index{{\tt \bfseries laplace\_marginal\_poisson\_2\_log\_lpmf }!{\tt (array[] int y \textbar\ array[] int y\_index, vector x, vector theta\_init, function covariance\_function, tuple(...)): real}|hyperpage}
+`real` **`laplace_marginal_poisson_2_log_lpmf`**`(array[] int y | array[] int y_index, vector x, vector theta_init, function covariance_function, tuple(...))`
\newline
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+in the special case where the likelihood $p(y \mid \theta)$ is a Poisson
+distribution with a log link and an offset.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol\_poisson\_2\_log\_lpmf }!{\tt (array[] int y \textbar\ array[] int y\_index, vector x, vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): real}|hyperpage}
+
+`real` **`laplace_marginal_tol_poisson_2_log_lpmf`**`(array[] int y | array[] int y_index, vector x, vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+in the special case where the likelihood $p(y \mid \theta)$ is a Poisson
+distribution with a log link and an offset
+and allows the user to tune the control parameters of the approximation.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_poisson\_2\_log\_lupmf }!{\tt (array[] int y \textbar\ array[] int y\_index, vector x, vector theta\_init, function covariance\_function, tuple(...)): real}|hyperpage}
+`real` **`laplace_marginal_poisson_2_log_lupmf`**`(array[] int y | array[] int y_index, vector x, vector theta_init, function covariance_function, tuple(...))`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+in the special case where the likelihood $p(y \mid \theta)$ is a Poisson
+distribution with a log link and an offset.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol\_poisson\_2\_log\_lupmf }!{\tt (array[] int y \textbar\ array[] int y\_index, vector x, vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): real}|hyperpage}
+
+`real` **`laplace_marginal_tol_poisson_2_log_lupmf`**`(array[] int y | array[] int y_index, vector x, vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+in the special case where the likelihood $p(y \mid \theta)$ is a Poisson
+distribution with a log link and an offset
+and allows the user to tune the control parameters of the approximation.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_latent\_poisson\_2\_log\_rng }!{\tt (array[] int y, array[] int y\_index, vector x, vector theta\_init, function covariance\_function, tuple(...)): vector}|hyperpage}
+
+`vector` **`laplace_latent_poisson_2_log_rng`**`(array[] int y, array[] int y_index, vector x, vector theta_init, function covariance_function, tuple(...))`
\newline
+
+Returns a draw from the Laplace approximation to the conditional posterior
+$p(\theta \mid y, \phi)$ in the special case where the likelihood
+$p(y \mid \theta)$ is a Poisson distribution with a log link and an offset.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_latent\_tol\_poisson\_2\_log\_rng }!{\tt (array[] int y, array[] int y\_index, vector x, vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): vector}|hyperpage}
+
+`vector` **`laplace_latent_tol_poisson_2_log_rng`**`(array[] int y, array[] int y_index, vector x, vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns a draw from the Laplace approximation to the conditional posterior
+$p(\theta \mid y, \phi)$ in the special case where the likelihood
+$p(y \mid \theta)$ is a Poisson distribution with a log link and an offset,
+and allows the user to tune the control parameters of the approximation.
+{{< since 2.37 >}}
+
+
+### Negative Binomial with log link
+
+The negative Binomial distribution generalizes the Poisson distribution by
+introducing the dispersion parameter $\eta$. The corresponding likelihood is then
+$$
+p(y \mid \theta, \phi) = \prod_i\text{NegBinomial2} (y_i \mid \exp(\theta_{g(i)}), \eta).
+$$
+Here we use the alternative parameterization implemented in Stan, meaning that
+$$
+\mathbb E(y_i) = \exp (\theta_{g(i)}), \\
+\text{Var}(y_i) = \mathbb E(y_i) + \frac{(\mathbb E(y_i))^2}{\eta}.
+$$
+The arguments for the likelihood function are:
+
+* `y`: the observed counts
+* `y_index`: an array whose $i^\text{th}$ element indicates which
+group the $i^\text{th}$ observation belongs to.
+* `eta`: the overdispersion parameter.
+
+
+\index{{\tt \bfseries laplace\_marginal\_neg\_binomial\_2\_log }!sampling statement|hyperpage}
+
+`y ~ ` **`laplace_marginal_neg_binomial_2_log`**`(y_index, eta, theta_init, covariance_function, (...))`
\newline
+
+Increment target log probability density with `laplace_marginal_neg_binomial_2_log_lupmf(y | y_index, eta, theta_init, covariance_function, (...))`.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol\_neg\_binomial\_2\_log }!sampling statement|hyperpage}
+
+`y ~ ` **`laplace_marginal_tol_neg_binomial_2_log`**`(y_index, eta, theta_init, covariance_function, (...), tol, max_steps, hessian_block_size, solver, max_steps_linesearch)`
\newline
+
+Increment target log probability density with `laplace_marginal_tol_neg_binomial_2_log_lupmf(y | y_index, eta, theta_init, covariance_function, (...), tol, max_steps, hessian_block_size, solver, max_steps_linesearch)`.
+{{< since 2.37 >}}
+
+
+The function signatures for the embedded Laplace approximation with a negative
+Binomial likelihood are
+
+
+\index{{\tt \bfseries laplace\_marginal\_neg\_binomial\_2\_log\_lpmf }!{\tt (array[] int y \textbar\ array[] int y\_index, real eta, vector theta\_init, function covariance\_function, tuple(...)): real}|hyperpage}
+
+`real` **`laplace_marginal_neg_binomial_2_log_lpmf`**`(array[] int y | array[] int y_index, real eta, vector theta_init, function covariance_function, tuple(...))`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi, \eta)$
+in the special case where the likelihood $p(y \mid \theta, \eta)$ is a Negative
+Binomial distribution with a log link.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol\_neg\_binomial\_2\_log\_lpmf }!{\tt (array[] int y \textbar\ array[] int y\_index, real eta, vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): real}|hyperpage}
+
+`real` **`laplace_marginal_tol_neg_binomial_2_log_lpmf`**`(array[] int y | array[] int y_index, real eta, vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi, \eta)$
+in the special case where the likelihood $p(y \mid \theta, \eta)$ is a Negative
+Binomial distribution with a log link, and allows the user to tune the control
+parameters of the approximation.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_neg\_binomial\_2\_log\_lupmf }!{\tt (array[] int y \textbar\ array[] int y\_index, real eta, vector theta\_init, function covariance\_function, tuple(...)): real}|hyperpage}
+
+`real` **`laplace_marginal_neg_binomial_2_log_lupmf`**`(array[] int y | array[] int y_index, real eta, vector theta_init, function covariance_function, tuple(...))`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi, \eta)$
+in the special case where the likelihood $p(y \mid \theta, \eta)$ is a Negative
+Binomial distribution with a log link.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol\_neg\_binomial\_2\_log\_lupmf }!{\tt (array[] int y \textbar\ array[] int y\_index, real eta, vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): real}|hyperpage}
+
+`real` **`laplace_marginal_tol_neg_binomial_2_log_lupmf`**`(array[] int y | array[] int y_index, real eta, vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi, \eta)$
+in the special case where the likelihood $p(y \mid \theta, \eta)$ is a Negative
+Binomial distribution with a log link, and allows the user to tune the control
+parameters of the approximation.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_latent\_neg\_binomial\_2\_log\_rng }!{\tt (array[] int y, array[] int y\_index, real eta, vector theta\_init, function covariance\_function, tuple(...)): vector}|hyperpage}
+
+`vector` **`laplace_latent_neg_binomial_2_log_rng`**`(array[] int y, array[] int y_index, real eta, vector theta_init, function covariance_function, tuple(...))`
\newline
+
+Returns a draw from the Laplace approximation to the conditional posterior
+$p(\theta \mid y, \phi, \eta)$ in the special case where the likelihood
+$p(y \mid \theta, \eta)$ is a Negative Binomial distribution with a log link.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_latent\_tol\_neg\_binomial\_2\_log\_rng }!{\tt (array[] int y, array[] int y\_index, real eta, vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): vector}|hyperpage}
+
+`vector` **`laplace_latent_tol_neg_binomial_2_log_rng`**`(array[] int y, array[] int y_index, real eta, vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns a draw from the Laplace approximation to the conditional posterior
+$p(\theta \mid y, \phi, \eta)$ in the special case where the likelihood
+$p(y \mid \theta, \eta)$ is a Negative Binomial distribution with a log link
+and allows the user to tune the control parameters of the approximation.
+{{< since 2.37 >}}
+
+### Bernoulli with logit link
+
+Given binary outcome $y_i \in \{0, 1\}$ and Bernoulli model, the likelihood is
+$$
+p(y \mid \theta, \phi) = \prod_i\text{Bernoulli} (y_i \mid \text{logit}^{-1}(\theta_{g(i)})).
+$$
+The arguments of the likelihood function are:
+
+* `y`: the observed binary outcomes
+* `y_index`: an array whose $i^\text{th}$ element indicates to which
+group the $i^\text{th}$ observation belongs.
+
+
+\index{{\tt \bfseries laplace\_marginal\_bernoulli\_logit }!sampling statement|hyperpage}
+
+`y ~ ` **`laplace_marginal_bernoulli_logit`**`(y_index, theta_init, covariance_function, (...))`
\newline
+
+Increment target log probability density with `laplace_marginal_bernoulli_logit_lupmf(y | y_index, theta_init, covariance_function, (...))`.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol\_bernoulli\_logit }!sampling statement|hyperpage}
+
+`y ~ ` **`laplace_marginal_tol_bernoulli_logit`**`(y_index, theta_init, covariance_function, (...), tol, max_steps, hessian_block_size, solver, max_steps_linesearch)`
\newline
+
+Increment target log probability density with `laplace_marginal_tol_bernoulli_logit_lupmf(y | y_index, theta_init, covariance_function, (...), tol, max_steps, hessian_block_size, solver, max_steps_linesearch)`.
+{{< since 2.37 >}}
+
+
+The function signatures for the embedded Laplace approximation with a Bernoulli likelihood are
+
+
+\index{{\tt \bfseries laplace\_marginal\_bernoulli\_logit\_lpmf }!{\tt (array[] int y \textbar\ array[] int y\_index, vector theta\_init, function covariance\_function, tuple(...)): real}|hyperpage}
+
+`real` **`laplace_marginal_bernoulli_logit_lpmf`**`(array[] int y | array[] int y_index, vector theta_init, function covariance_function, tuple(...))`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+in the special case where the likelihood $p(y \mid \theta)$ is a Bernoulli
+distribution with a logit link.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol\_bernoulli\_logit\_lpmf }!{\tt (array[] int y \textbar\ array[] int y\_index, vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): real}|hyperpage}
+
+`real` **`laplace_marginal_tol_bernoulli_logit_lpmf`**`(array[] int y | array[] int y_index, vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+in the special case where the likelihood $p(y \mid \theta)$ is a Bernoulli
+distribution with a logit link and allows the user to tune the control parameters.
+{{< since 2.37 >}}
+
+
+
+\index{{\tt \bfseries laplace\_marginal\_bernoulli\_logit\_lupmf }!{\tt (array[] int y \textbar\ array[] int y\_index, vector theta\_init, function covariance\_function, tuple(...)): real}|hyperpage}
+
+`real` **`laplace_marginal_bernoulli_logit_lupmf`**`(array[] int y | array[] int y_index, vector theta_init, function covariance_function, tuple(...))`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+in the special case where the likelihood $p(y \mid \theta)$ is a Bernoulli
+distribution with a logit link.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_marginal\_tol\_bernoulli\_logit\_lupmf }!{\tt (array[] int y \textbar\ array[] int y\_index, vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): real}|hyperpage}
+
+`real` **`laplace_marginal_tol_bernoulli_logit_lupmf`**`(array[] int y | array[] int y_index, vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns an approximation to the log marginal likelihood $p(y \mid \phi)$
+in the special case where the likelihood $p(y \mid \theta)$ is a Bernoulli
+distribution with a logit link and allows the user to tune the control parameters.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_latent\_bernoulli\_logit\_rng }!{\tt (array[] int y, array[] int y\_index, vector theta\_init, function covariance\_function, tuple(...)): vector}|hyperpage}
+
+`vector` **`laplace_latent_bernoulli_logit_rng`**`(array[] int y, array[] int y_index, vector theta_init, function covariance_function, tuple(...))`
\newline
+
+Returns a draw from the Laplace approximation to the conditional posterior
+$p(\theta \mid y, \phi)$ in the special case where the likelihood
+$p(y \mid \theta)$ is a Bernoulli distribution with a logit link.
+{{< since 2.37 >}}
+
+
+\index{{\tt \bfseries laplace\_latent\_tol\_bernoulli\_logit\_rng }!{\tt (array[] int y, array[] int y\_index, vector theta\_init, function covariance\_function, tuple(...), real tol, int max\_steps, int hessian\_block\_size, int solver, int max\_steps\_linesearch): vector}|hyperpage}
+
+`vector` **`laplace_latent_tol_bernoulli_logit_rng`**`(array[] int y, array[] int y_index, vector theta_init, function covariance_function, tuple(...), real tol, int max_steps, int hessian_block_size, int solver, int max_steps_linesearch)`
\newline
+
+Returns a draw from the Laplace approximation to the conditional posterior
+$p(\theta \mid y, \phi)$ in the special case where the likelihood
+$p(y \mid \theta)$ is a Bernoulli distribution with a logit link,
+and lets the user tune the control parameters of the approximation.
+{{< since 2.37 >}}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/functions-reference/functions_index.qmd b/src/functions-reference/functions_index.qmd
index 8bf2b8ad3..77ac2441b 100644
--- a/src/functions-reference/functions_index.qmd
+++ b/src/functions-reference/functions_index.qmd
@@ -1621,6 +1621,181 @@ pagetitle: Alphabetical Index
-