From 61dbc7145cb9b0883e3dff817399c983db178279 Mon Sep 17 00:00:00 2001
From: Andrija Paurevic <46359773+andrijapau@users.noreply.github.com>
Date: Wed, 22 Jan 2025 16:15:54 -0500
Subject: [PATCH] Deprecate `qml.gradients.hamiltonian_grad` (#6849)

**Context:** #6598 removes the need for `hamiltonian_grad` in our standard workflow.

**Description of the Change:**

_Source-Code_

Standard deprecation of the `hamiltonian_grad` function.

_Test suite_

The tests I removed from `test_parameter_shift.py` have improved duplicates in
`tests/workflow/interfaces` under the `TestHamiltonianWorkflows` test class, so I removed
all of them except `test_jax`. That test follows the outdated workflow that still hits the
branch in `parameter_shift.py::expval_param_shift` which raises the deprecation warning, so
I added a warning check and kept that test. It should be removed together with the
`hamiltonian_grad` function in the next release.

**Impact:** No other deprecated code was found, so the impact on the ecosystem should be minimal.

[sc-81526]
---
 doc/development/deprecations.rst              |   6 +
 doc/releases/changelog-dev.md                 |   4 +
 pennylane/gradients/hamiltonian_grad.py       |  11 +
 pennylane/gradients/parameter_shift.py        |   7 +
 .../core/test_hamiltonian_gradient.py         |  27 ++-
 .../parameter_shift/test_parameter_shift.py   | 159 +------------
 .../test_parameter_shift_shot_vec.py          | 215 ------------------
 7 files changed, 59 insertions(+), 370 deletions(-)

diff --git a/doc/development/deprecations.rst b/doc/development/deprecations.rst
index ff6048e18be..1f267b73629 100644
--- a/doc/development/deprecations.rst
+++ b/doc/development/deprecations.rst
@@ -9,6 +9,12 @@ deprecations are listed below.
 Pending deprecations
 --------------------
 
+* The `qml.gradients.hamiltonian_grad` function has been deprecated.
+  This gradient recipe is not required with the new operator arithmetic system.
+
+  - Deprecated in v0.41
+  - Will be removed in v0.42
+
 * The ``inner_transform_program`` and ``config`` keyword arguments in ``qml.execute`` have been deprecated.
   If more detailed control over the execution is required, use ``qml.workflow.run`` with these arguments instead.
 
diff --git a/doc/releases/changelog-dev.md b/doc/releases/changelog-dev.md
index 4eb878d6478..87499e7e903 100644
--- a/doc/releases/changelog-dev.md
+++ b/doc/releases/changelog-dev.md
@@ -84,6 +84,10 @@

Deprecations 👋

+* The `qml.gradients.hamiltonian_grad` function has been deprecated.
+  This gradient recipe is not required with the new operator arithmetic system.
+  [(#6849)](https://github.com/PennyLaneAI/pennylane/pull/6849)
+
 * The ``inner_transform_program`` and ``config`` keyword arguments in ``qml.execute`` have been deprecated.
   If more detailed control over the execution is required, use ``qml.workflow.run`` with these arguments instead.
   [(#6822)](https://github.com/PennyLaneAI/pennylane/pull/6822)
 
diff --git a/pennylane/gradients/hamiltonian_grad.py b/pennylane/gradients/hamiltonian_grad.py
index e83d942dcc9..95769bdc6d1 100644
--- a/pennylane/gradients/hamiltonian_grad.py
+++ b/pennylane/gradients/hamiltonian_grad.py
@@ -13,6 +13,8 @@
 # limitations under the License.
 """Contains a gradient recipe for the coefficients of Hamiltonians."""
 # pylint: disable=protected-access,unnecessary-lambda
+import warnings
+
 import pennylane as qml
 
 
@@ -20,10 +22,19 @@ def hamiltonian_grad(tape, idx):
     """Computes the tapes necessary to get the gradient of a tape with respect to
     a Hamiltonian observable's coefficients.
 
+    .. warning::
+        This function is deprecated and will be removed in v0.42. This gradient recipe is not
+        required for the new operator arithmetic of PennyLane.
+
     Args:
         tape (qml.tape.QuantumTape): tape with a single Hamiltonian expectation as measurement
         idx (int): index of parameter that we differentiate with respect to
     """
+    warnings.warn(
+        "The 'hamiltonian_grad' function is deprecated and will be removed in v0.42. "
+        "This gradient recipe is not required for the new operator arithmetic system.",
+        qml.PennyLaneDeprecationWarning,
+    )
     op, m_pos, p_idx = tape.get_operation(idx)
 
diff --git a/pennylane/gradients/parameter_shift.py b/pennylane/gradients/parameter_shift.py
index 6fcdd17df19..26a2b0e4a8b 100644
--- a/pennylane/gradients/parameter_shift.py
+++ b/pennylane/gradients/parameter_shift.py
@@ -15,6 +15,7 @@
 This module contains functions for computing the parameter-shift gradient
 of a qubit-based quantum tape.
 """
+import warnings
 from functools import partial
 
 import numpy as np
@@ -372,6 +373,12 @@ def expval_param_shift(
         op, op_idx, _ = tape.get_operation(idx)
 
         if op.name == "LinearCombination":
+            warnings.warn(
+                "Please use qml.gradients.split_to_single_terms so that the ML framework "
+                "can compute the gradients of the coefficients.",
+                UserWarning,
+            )
+
             # operation is a Hamiltonian
             if tape[op_idx].return_type is not qml.measurements.Expectation:
                 raise ValueError(
diff --git a/tests/gradients/core/test_hamiltonian_gradient.py b/tests/gradients/core/test_hamiltonian_gradient.py
index 1bcb4bfc4fe..9474faf39a0 100644
--- a/tests/gradients/core/test_hamiltonian_gradient.py
+++ b/tests/gradients/core/test_hamiltonian_gradient.py
@@ -12,10 +12,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
"""Tests for the gradients.hamiltonian module.""" +import pytest + import pennylane as qml from pennylane.gradients.hamiltonian_grad import hamiltonian_grad +def test_hamiltonian_grad_deprecation(): + with pytest.warns( + qml.PennyLaneDeprecationWarning, match="The 'hamiltonian_grad' function is deprecated" + ): + with qml.queuing.AnnotatedQueue() as q: + qml.RY(0.3, wires=0) + qml.RX(0.5, wires=1) + qml.CNOT(wires=[0, 1]) + qml.expval(qml.Hamiltonian([-1.5, 2.0], [qml.PauliZ(0), qml.PauliZ(1)])) + + tape = qml.tape.QuantumScript.from_queue(q) + tape.trainable_params = {2, 3} + hamiltonian_grad(tape, idx=0) + + def test_behaviour(): """Test that the function behaves as expected.""" @@ -29,10 +46,16 @@ def test_behaviour(): tape = qml.tape.QuantumScript.from_queue(q) tape.trainable_params = {2, 3} - tapes, processing_fn = hamiltonian_grad(tape, idx=0) + with pytest.warns( + qml.PennyLaneDeprecationWarning, match="The 'hamiltonian_grad' function is deprecated" + ): + tapes, processing_fn = hamiltonian_grad(tape, idx=0) res1 = processing_fn(dev.execute(tapes)) - tapes, processing_fn = hamiltonian_grad(tape, idx=1) + with pytest.warns( + qml.PennyLaneDeprecationWarning, match="The 'hamiltonian_grad' function is deprecated" + ): + tapes, processing_fn = hamiltonian_grad(tape, idx=1) res2 = processing_fn(dev.execute(tapes)) with qml.queuing.AnnotatedQueue() as q1: diff --git a/tests/gradients/parameter_shift/test_parameter_shift.py b/tests/gradients/parameter_shift/test_parameter_shift.py index f35a4b4fc95..4b741c5a66b 100644 --- a/tests/gradients/parameter_shift/test_parameter_shift.py +++ b/tests/gradients/parameter_shift/test_parameter_shift.py @@ -3473,58 +3473,6 @@ def test_trainable_coeffs(self, tol, broadcast): assert np.allclose(res[0], expected[0], atol=tol, rtol=0) assert np.allclose(res[1], expected[1], atol=tol, rtol=0) - def test_multiple_hamiltonians(self, tol, broadcast): - """Test multiple trainable Hamiltonian coefficients""" - dev = qml.device("default.qubit", wires=2) - - obs = [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliX(1), qml.PauliY(0)] - coeffs = np.array([0.1, 0.2, 0.3]) - a, b, c = coeffs - H1 = qml.Hamiltonian(coeffs, obs) - - obs = [qml.PauliZ(0)] - coeffs = np.array([0.7]) - d = coeffs[0] - H2 = qml.Hamiltonian(coeffs, obs) - - weights = np.array([0.4, 0.5]) - x, y = weights - - with qml.queuing.AnnotatedQueue() as q: - qml.RX(weights[0], wires=0) - qml.RY(weights[1], wires=1) - qml.CNOT(wires=[0, 1]) - qml.expval(H1) - qml.expval(H2) - - tape = qml.tape.QuantumScript.from_queue(q) - tape.trainable_params = {0, 1, 2, 4, 5} - - res = dev.execute([tape]) - expected = [-c * np.sin(x) * np.sin(y) + np.cos(x) * (a + b * np.sin(y)), d * np.cos(x)] - assert np.allclose(res, expected, atol=tol, rtol=0) - - tapes, fn = qml.gradients.param_shift(tape, broadcast=broadcast) - # two shifts per rotation gate (in one batched tape if broadcasting), - # one circuit per trainable H term - assert len(tapes) == 2 * (1 if broadcast else 2) - - res = fn(dev.execute(tapes)) - assert isinstance(res, tuple) - assert len(res) == 2 - assert len(res[0]) == 2 - assert len(res[1]) == 2 - - expected = [ - [ - -c * np.cos(x) * np.sin(y) - np.sin(x) * (a + b * np.sin(y)), - b * np.cos(x) * np.cos(y) - c * np.cos(y) * np.sin(x), - ], - [-d * np.sin(x), 0], - ] - - assert np.allclose(np.stack(res), expected, atol=tol, rtol=0) - @staticmethod def cost_fn(weights, coeffs1, coeffs2, dev=None, broadcast=False): """Cost function for gradient tests""" @@ -3547,95 +3495,8 @@ def cost_fn(weights, coeffs1, 
coeffs2, dev=None, broadcast=False): jac = fn(dev.execute(tapes)) return jac - @staticmethod - def cost_fn_expected(weights, coeffs1, coeffs2): - """Analytic jacobian of cost_fn above""" - a, b, c = coeffs1 - d = coeffs2[0] - x, y = weights - return [ - [ - -c * np.cos(x) * np.sin(y) - np.sin(x) * (a + b * np.sin(y)), - b * np.cos(x) * np.cos(y) - c * np.cos(y) * np.sin(x), - ], - [-d * np.sin(x), 0], - ] - - @pytest.mark.autograd - def test_autograd(self, tol, broadcast): - """Test gradient of multiple trainable Hamiltonian coefficients - using autograd""" - coeffs1 = np.array([0.1, 0.2, 0.3], requires_grad=True) - coeffs2 = np.array([0.7], requires_grad=True) - weights = np.array([0.4, 0.5], requires_grad=True) - dev = qml.device("default.qubit", wires=2) - - res = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast) - expected = self.cost_fn_expected(weights, coeffs1, coeffs2) - assert np.allclose(res, np.array(expected), atol=tol, rtol=0) - - # TODO: test when Hessians are supported with the new return types - # second derivative wrt to Hamiltonian coefficients should be zero - # --- - # res = qml.jacobian(self.cost_fn)(weights, coeffs1, coeffs2, dev=dev) - # assert np.allclose(res[1][:, 2:5], np.zeros([2, 3, 3]), atol=tol, rtol=0) - # assert np.allclose(res[2][:, -1], np.zeros([2, 1, 1]), atol=tol, rtol=0) - - @pytest.mark.tf - def test_tf(self, tol, broadcast): - """Test gradient of multiple trainable Hamiltonian coefficients - using tf""" - import tensorflow as tf - - coeffs1 = tf.Variable([0.1, 0.2, 0.3], dtype=tf.float64) - coeffs2 = tf.Variable([0.7], dtype=tf.float64) - weights = tf.Variable([0.4, 0.5], dtype=tf.float64) - - dev = qml.device("default.qubit", wires=2) - - with tf.GradientTape() as _: - jac = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast) - - expected = self.cost_fn_expected(weights.numpy(), coeffs1.numpy(), coeffs2.numpy()) - assert np.allclose(jac[0], np.array(expected)[0], atol=tol, rtol=0) - assert np.allclose(jac[1], np.array(expected)[1], atol=tol, rtol=0) - - # TODO: test when Hessians are supported with the new return types - # second derivative wrt to Hamiltonian coefficients should be zero. 
-        # When activating the following, rename the GradientTape above from _ to t
-        # ---
-        # hess = t.jacobian(jac, [coeffs1, coeffs2])
-        # assert np.allclose(hess[0][:, 2:5], np.zeros([2, 3, 3]), atol=tol, rtol=0)
-        # assert np.allclose(hess[1][:, -1], np.zeros([2, 1, 1]), atol=tol, rtol=0)
-
-    @pytest.mark.torch
-    def test_torch(self, tol, broadcast):
-        """Test gradient of multiple trainable Hamiltonian coefficients
-        using torch"""
-        import torch
-
-        coeffs1 = torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64, requires_grad=True)
-        coeffs2 = torch.tensor([0.7], dtype=torch.float64, requires_grad=True)
-        weights = torch.tensor([0.4, 0.5], dtype=torch.float64, requires_grad=True)
-
-        dev = qml.device("default.qubit", wires=2)
-
-        res = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)
-        expected = self.cost_fn_expected(
-            weights.detach().numpy(), coeffs1.detach().numpy(), coeffs2.detach().numpy()
-        )
-        res = tuple(tuple(_r.detach() for _r in r) for r in res)
-        assert np.allclose(res, expected, atol=tol, rtol=0)
-
-        # second derivative wrt to Hamiltonian coefficients should be zero
-        # hess = torch.autograd.functional.jacobian(
-        #     lambda *args: self.cost_fn(*args, dev, broadcast), (weights, coeffs1, coeffs2)
-        # )
-        # assert np.allclose(hess[1][:, 2:5], np.zeros([2, 3, 3]), atol=tol, rtol=0)
-        # assert np.allclose(hess[2][:, -1], np.zeros([2, 1, 1]), atol=tol, rtol=0)
-
     @pytest.mark.jax
-    def test_jax(self, tol, broadcast):
+    def test_jax(self, broadcast):
         """Test gradient of multiple trainable Hamiltonian coefficients
         using JAX"""
         import jax
@@ -3647,19 +3508,11 @@ def test_jax(self, tol, broadcast):
         weights = jnp.array([0.4, 0.5])
         dev = qml.device("default.qubit", wires=2)
 
-        res = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)
-        expected = self.cost_fn_expected(weights, coeffs1, coeffs2)
-        assert np.allclose(np.array(res)[:, :2], np.array(expected), atol=tol, rtol=0)
-
-        # TODO: test when Hessians are supported with the new return types
-        # second derivative wrt to Hamiltonian coefficients should be zero
-        # ---
-        # second derivative wrt to Hamiltonian coefficients should be zero
-        # res = jax.jacobian(self.cost_fn, argnums=1)(weights, coeffs1, coeffs2, dev, broadcast)
-        # assert np.allclose(res[:, 2:5], np.zeros([2, 3, 3]), atol=tol, rtol=0)
-
-        # res = jax.jacobian(self.cost_fn, argnums=1)(weights, coeffs1, coeffs2, dev, broadcast)
-        # assert np.allclose(res[:, -1], np.zeros([2, 1, 1]), atol=tol, rtol=0)
+        with pytest.warns(
+            qml.PennyLaneDeprecationWarning, match="The 'hamiltonian_grad' function is deprecated"
+        ):
+            with pytest.warns(UserWarning, match="Please use qml.gradients.split_to_single_terms"):
+                self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)
 
 
 @pytest.mark.autograd
diff --git a/tests/gradients/parameter_shift/test_parameter_shift_shot_vec.py b/tests/gradients/parameter_shift/test_parameter_shift_shot_vec.py
index 34478ccecd0..72e00eac0b0 100644
--- a/tests/gradients/parameter_shift/test_parameter_shift_shot_vec.py
+++ b/tests/gradients/parameter_shift/test_parameter_shift_shot_vec.py
@@ -2128,221 +2128,6 @@ def test_trainable_coeffs(self, broadcast, tol):
         for r in res:
             assert qml.math.allclose(r, expected, atol=shot_vec_tol)
 
-    @pytest.mark.xfail(reason="TODO")
-    def test_multiple_hamiltonians(self, mocker, broadcast, tol):
-        """Test multiple trainable Hamiltonian coefficients"""
-        shot_vec = many_shots_shot_vector
-        dev = qml.device("default.qubit", wires=2, shots=shot_vec)
-        spy = mocker.spy(qml.gradients, "hamiltonian_grad")
-
-        obs = [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliX(1), qml.PauliY(0)]
-        coeffs = np.array([0.1, 0.2, 0.3])
-        a, b, c = coeffs
-        H1 = qml.Hamiltonian(coeffs, obs)
-
-        obs = [qml.PauliZ(0)]
-        coeffs = np.array([0.7])
-        d = coeffs[0]
-        H2 = qml.Hamiltonian(coeffs, obs)
-
-        weights = np.array([0.4, 0.5])
-        x, y = weights
-
-        with qml.queuing.AnnotatedQueue() as q:
-            qml.RX(weights[0], wires=0)
-            qml.RY(weights[1], wires=1)
-            qml.CNOT(wires=[0, 1])
-            qml.expval(H1)
-            qml.expval(H2)
-
-        tape = qml.tape.QuantumScript.from_queue(q, shots=shot_vec)
-        tape.trainable_params = {0, 1, 2, 4, 5}
-
-        res = dev.execute([tape])
-        expected = [-c * np.sin(x) * np.sin(y) + np.cos(x) * (a + b * np.sin(y)), d * np.cos(x)]
-        assert np.allclose(res, expected, atol=tol, rtol=0)
-
-        if broadcast:
-            with pytest.raises(
-                NotImplementedError, match="Broadcasting with multiple measurements"
-            ):
-                qml.gradients.param_shift(tape, broadcast=broadcast)
-            return
-
-        tapes, fn = qml.gradients.param_shift(tape, broadcast=broadcast)
-        # two shifts per rotation gate, one circuit per trainable H term
-        assert len(tapes) == 2 * 2 + 3
-        spy.assert_called()
-
-        res = fn(dev.execute(tapes))
-        assert isinstance(res, tuple)
-        assert len(res) == 2
-        assert len(res[0]) == 5
-        assert len(res[1]) == 5
-
-        expected = [
-            [
-                -c * np.cos(x) * np.sin(y) - np.sin(x) * (a + b * np.sin(y)),
-                b * np.cos(x) * np.cos(y) - c * np.cos(y) * np.sin(x),
-                np.cos(x),
-                -(np.sin(x) * np.sin(y)),
-                0,
-            ],
-            [-d * np.sin(x), 0, 0, 0, np.cos(x)],
-        ]
-
-        assert np.allclose(np.stack(res), expected, atol=tol, rtol=0)
-
-    @staticmethod
-    def cost_fn(weights, coeffs1, coeffs2, dev=None, broadcast=False):
-        """Cost function for gradient tests"""
-        obs1 = [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliX(1), qml.PauliY(0)]
-        H1 = qml.Hamiltonian(coeffs1, obs1)
-
-        obs2 = [qml.PauliZ(0)]
-        H2 = qml.Hamiltonian(coeffs2, obs2)
-
-        with qml.queuing.AnnotatedQueue() as q:
-            qml.RX(weights[0], wires=0)
-            qml.RY(weights[1], wires=1)
-            qml.CNOT(wires=[0, 1])
-            qml.expval(H1)
-            qml.expval(H2)
-
-        tape = qml.tape.QuantumScript.from_queue(q, shots=dev.shots)
-        tape.trainable_params = {0, 1, 2, 3, 4, 5}
-        tapes, fn = qml.gradients.param_shift(tape, broadcast=broadcast)
-        return fn(dev.execute(tapes))
-
-    @staticmethod
-    def cost_fn_expected(weights, coeffs1, coeffs2):
-        """Analytic jacobian of cost_fn above"""
-        a, b, c = coeffs1
-        d = coeffs2[0]
-        x, y = weights
-        return [
-            [
-                -c * np.cos(x) * np.sin(y) - np.sin(x) * (a + b * np.sin(y)),
-                b * np.cos(x) * np.cos(y) - c * np.cos(y) * np.sin(x),
-                np.cos(x),
-                np.cos(x) * np.sin(y),
-                -(np.sin(x) * np.sin(y)),
-                0,
-            ],
-            [-d * np.sin(x), 0, 0, 0, 0, np.cos(x)],
-        ]
-
-    @pytest.mark.xfail(reason="TODO")
-    @pytest.mark.autograd
-    def test_autograd(self, broadcast, tol):
-        """Test gradient of multiple trainable Hamiltonian coefficients
-        using autograd"""
-        coeffs1 = np.array([0.1, 0.2, 0.3], requires_grad=True)
-        coeffs2 = np.array([0.7], requires_grad=True)
-        weights = np.array([0.4, 0.5], requires_grad=True)
-        shot_vec = many_shots_shot_vector
-        dev = qml.device("default.qubit", wires=2, shots=shot_vec)
-
-        if broadcast:
-            with pytest.raises(
-                NotImplementedError, match="Broadcasting with multiple measurements"
-            ):
-                res = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)
-            return
-        res = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)
-        expected = self.cost_fn_expected(weights, coeffs1, coeffs2)
-        assert np.allclose(res, np.array(expected), atol=tol, rtol=0)
-
-        # TODO: test when Hessians are supported with the new return types
-        # second derivative wrt to Hamiltonian coefficients should be zero
-        # ---
-        # res = qml.jacobian(self.cost_fn)(weights, coeffs1, coeffs2, dev=dev)
-        # assert np.allclose(res[1][:, 2:5], np.zeros([2, 3, 3]), atol=tol, rtol=0)
-        # assert np.allclose(res[2][:, -1], np.zeros([2, 1, 1]), atol=tol, rtol=0)
-
-    @pytest.mark.xfail(reason="TODO")
-    @pytest.mark.tf
-    def test_tf(self, broadcast, tol):
-        """Test gradient of multiple trainable Hamiltonian coefficients using tf"""
-        import tensorflow as tf
-
-        coeffs1 = tf.Variable([0.1, 0.2, 0.3], dtype=tf.float64)
-        coeffs2 = tf.Variable([0.7], dtype=tf.float64)
-        weights = tf.Variable([0.4, 0.5], dtype=tf.float64)
-
-        shot_vec = many_shots_shot_vector
-        dev = qml.device("default.qubit", wires=2, shots=shot_vec)
-
-        with tf.GradientTape() as _:
-            jac = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)
-
-        expected = self.cost_fn_expected(weights.numpy(), coeffs1.numpy(), coeffs2.numpy())
-        assert np.allclose(jac[0], np.array(expected)[0], atol=tol, rtol=0)
-        assert np.allclose(jac[1], np.array(expected)[1], atol=tol, rtol=0)
-
-        # TODO: test when Hessians are supported with the new return types
-        # second derivative wrt to Hamiltonian coefficients should be zero
-        # When activating the following, rename the GradientTape above from _ to t
-        # ---
-        # hess = t.jacobian(jac, [coeffs1, coeffs2])
-        # assert np.allclose(hess[0][:, 2:5], np.zeros([2, 3, 3]), atol=tol, rtol=0)
-        # assert np.allclose(hess[1][:, -1], np.zeros([2, 1, 1]), atol=tol, rtol=0)
-
-    @pytest.mark.torch
-    def test_torch(self, broadcast, tol):
-        """Test gradient of multiple trainable Hamiltonian coefficients
-        using torch"""
-        import torch
-
-        coeffs1 = torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64, requires_grad=True)
-        coeffs2 = torch.tensor([0.7], dtype=torch.float64, requires_grad=True)
-        weights = torch.tensor([0.4, 0.5], dtype=torch.float64, requires_grad=True)
-
-        dev = qml.device("default.qubit", wires=2)
-
-        res = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)
-        expected = self.cost_fn_expected(
-            weights.detach().numpy(), coeffs1.detach().numpy(), coeffs2.detach().numpy()
-        )
-        for actual, _expected in zip(res, expected):
-            for val, exp_val in zip(actual, _expected):
-                assert qml.math.allclose(val.detach(), exp_val, atol=tol, rtol=0)
-
-        # TODO: test when Hessians are supported with the new return types
-        # second derivative wrt to Hamiltonian coefficients should be zero
-        # hess = torch.autograd.functional.jacobian(
-        #     lambda *args: self.cost_fn(*args, dev, broadcast), (weights, coeffs1, coeffs2)
-        # )
-        # assert np.allclose(hess[1][:, 2:5], np.zeros([2, 3, 3]), atol=tol, rtol=0)
-        # assert np.allclose(hess[2][:, -1], np.zeros([2, 1, 1]), atol=tol, rtol=0)
-
-    @pytest.mark.jax
-    def test_jax(self, broadcast, tol):
-        """Test gradient of multiple trainable Hamiltonian coefficients
-        using JAX"""
-        import jax
-
-        jnp = jax.numpy
-
-        coeffs1 = jnp.array([0.1, 0.2, 0.3])
-        coeffs2 = jnp.array([0.7])
-        weights = jnp.array([0.4, 0.5])
-        dev = qml.device("default.qubit", wires=2)
-
-        res = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)
-        expected = self.cost_fn_expected(weights, coeffs1, coeffs2)
-        assert np.allclose(res, np.array(expected), atol=tol, rtol=0)
-
-        # TODO: test when Hessians are supported with the new return types
-        # second derivative wrt to Hamiltonian coefficients should be zero
-        # ---
-        # second derivative wrt to Hamiltonian coefficients should be zero
-        # res = jax.jacobian(self.cost_fn, argnums=1)(weights, coeffs1, coeffs2, dev, broadcast)
-        # assert np.allclose(res[:, 2:5], np.zeros([2, 3, 3]), atol=tol, rtol=0)
-
-        # res = jax.jacobian(self.cost_fn, argnums=1)(weights, coeffs1, coeffs2, dev, broadcast)
-        # assert np.allclose(res[:, -1], np.zeros([2, 1, 1]), atol=tol, rtol=0)
-
 
 pauliz = qml.PauliZ(wires=0)
 proj = qml.Projector([1], wires=0)
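
For reference, a minimal sketch of the workflow that makes `hamiltonian_grad` unnecessary (illustrative only; the circuit, values, and variable names below are assumptions, not part of this patch): with the operator arithmetic system, trainable `Hamiltonian` coefficients enter the expectation value linearly, so the ML framework differentiates them classically while the parameter-shift rule handles the gate parameters.

```python
import pennylane as qml
from pennylane import numpy as np  # autograd-wrapped NumPy

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev, diff_method="parameter-shift")
def circuit(weights, coeffs):
    qml.RX(weights[0], wires=0)
    qml.RY(weights[1], wires=1)
    qml.CNOT(wires=[0, 1])
    # The coefficients enter the expectation value linearly, so no
    # dedicated gradient recipe is required for them.
    H = qml.Hamiltonian(coeffs, [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliX(1)])
    return qml.expval(H)

weights = np.array([0.4, 0.5], requires_grad=True)
coeffs = np.array([0.1, 0.2], requires_grad=True)

# Gate-parameter derivatives are obtained via shifted executions on the
# device; coefficient derivatives are computed classically by autograd.
jac_weights, jac_coeffs = qml.jacobian(circuit, argnum=(0, 1))(weights, coeffs)
```

This is also why the new `UserWarning` points to `split_to_single_terms`: splitting a multi-term measurement leaves only single-term expectations for the shift rule, and the coefficient gradients fall out of the classical postprocessing.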