Deprecate qml.gradients.hamiltonian_grad (#6849)
**Context:**

#6598 removes the need for `hamiltonian_grad` in our standard workflow.
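
For reference, a minimal sketch (not part of this diff) of the standard workflow that
makes `hamiltonian_grad` unnecessary: with the new operator arithmetic, trainable
`Hamiltonian` coefficients flow directly into the expectation value and are
differentiated by the ML framework. The device, circuit, and values below are
illustrative.

```python
import pennylane as qml
from pennylane import numpy as np

dev = qml.device("default.qubit", wires=2)

@qml.qnode(dev, diff_method="parameter-shift")
def circuit(weights, coeffs):
    # The coefficients enter the expectation value directly, so no dedicated
    # gradient recipe (hamiltonian_grad) is needed to differentiate them.
    qml.RX(weights[0], wires=0)
    qml.RY(weights[1], wires=1)
    qml.CNOT(wires=[0, 1])
    H = qml.Hamiltonian(coeffs, [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliX(1)])
    return qml.expval(H)

weights = np.array([0.4, 0.5], requires_grad=True)
coeffs = np.array([0.1, 0.2], requires_grad=True)

# Gradients with respect to both the gate angles and the Hamiltonian coefficients.
dweights, dcoeffs = qml.grad(circuit)(weights, coeffs)
```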

**Description of the Change:**

_Source-Code_

Standard deprecation of `hamiltonian_grad` function.

_Test suite_

The tests removed from `test_parameter_shift.py` have improved duplicates in
`tests/workflow/interfaces` under the `TestHamiltonianWorkflows` test class, so
they were all removed except `test_jax`. That test still follows the outdated
workflow, which hits the branch in `parameter_shift.py::expval_param_shift` that
raises the deprecation warning, so I've added a `UserWarning` on that branch and
kept the test (see the sketch below). Both the warning and the test should be
removed together with the `hamiltonian_grad` function in the next release.
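
A minimal sketch of that outdated tape-level workflow (illustrative angles and
coefficients): a tape with a trainable multi-term `Hamiltonian` expectation is
passed straight to `qml.gradients.param_shift`, which is what `cost_fn`/`test_jax`
do and what now triggers both warnings.

```python
import numpy as np
import pennylane as qml

ops = [qml.RX(0.4, wires=0), qml.RY(0.5, wires=1), qml.CNOT(wires=[0, 1])]
H = qml.Hamiltonian(np.array([0.1, 0.2]), [qml.PauliZ(0), qml.PauliZ(1)])
tape = qml.tape.QuantumScript(ops, [qml.expval(H)])
tape.trainable_params = {0, 1, 2, 3}  # gate angles plus both Hamiltonian coefficients

# Calling the gradient transform on the tape directly (bypassing qml.execute / a QNode)
# hits the LinearCombination branch in expval_param_shift and, through hamiltonian_grad,
# emits the deprecation warning.
tapes, fn = qml.gradients.param_shift(tape)
dev = qml.device("default.qubit", wires=2)
jac = fn(dev.execute(tapes))
```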

**Impact:**

No other uses of the deprecated code were found, so the impact on the ecosystem
should be minimal.

[sc-81526]
andrijapau authored Jan 22, 2025
1 parent 3e1521b commit 61dbc71
Showing 7 changed files with 59 additions and 370 deletions.
6 changes: 6 additions & 0 deletions doc/development/deprecations.rst
@@ -9,6 +9,12 @@ deprecations are listed below.
Pending deprecations
--------------------

* The ``qml.gradients.hamiltonian_grad`` function has been deprecated.
  This gradient recipe is not required with the new operator arithmetic system.

  - Deprecated in v0.41
  - Will be removed in v0.42

* The ``inner_transform_program`` and ``config`` keyword arguments in ``qml.execute`` have been deprecated.
  If more detailed control over the execution is required, use ``qml.workflow.run`` with these arguments instead.

4 changes: 4 additions & 0 deletions doc/releases/changelog-dev.md
@@ -84,6 +84,10 @@

<h3>Deprecations 👋</h3>

* The `qml.gradients.hamiltonian_grad` function has been deprecated.
  This gradient recipe is not required with the new operator arithmetic system.
  [(#6849)](https://github.com/PennyLaneAI/pennylane/pull/6849)

* The ``inner_transform_program`` and ``config`` keyword arguments in ``qml.execute`` have been deprecated.
  If more detailed control over the execution is required, use ``qml.workflow.run`` with these arguments instead.
  [(#6822)](https://github.com/PennyLaneAI/pennylane/pull/6822)
11 changes: 11 additions & 0 deletions pennylane/gradients/hamiltonian_grad.py
@@ -13,17 +13,28 @@
# limitations under the License.
"""Contains a gradient recipe for the coefficients of Hamiltonians."""
# pylint: disable=protected-access,unnecessary-lambda
import warnings

import pennylane as qml


def hamiltonian_grad(tape, idx):
    """Computes the tapes necessary to get the gradient of a tape with respect to
    a Hamiltonian observable's coefficients.

    .. warning::
        This function is deprecated and will be removed in v0.42. This gradient recipe is not
        required for the new operator arithmetic of PennyLane.

    Args:
        tape (qml.tape.QuantumTape): tape with a single Hamiltonian expectation as measurement
        idx (int): index of parameter that we differentiate with respect to
    """
    warnings.warn(
        "The 'hamiltonian_grad' function is deprecated and will be removed in v0.42. "
        "This gradient recipe is not required for the new operator arithmetic system.",
        qml.PennyLaneDeprecationWarning,
    )

    op, m_pos, p_idx = tape.get_operation(idx)

7 changes: 7 additions & 0 deletions pennylane/gradients/parameter_shift.py
@@ -15,6 +15,7 @@
This module contains functions for computing the parameter-shift gradient
of a qubit-based quantum tape.
"""
import warnings
from functools import partial

import numpy as np
@@ -372,6 +373,12 @@ def expval_param_shift(
        op, op_idx, _ = tape.get_operation(idx)

        if op.name == "LinearCombination":
            warnings.warn(
                "Please use qml.gradients.split_to_single_terms so that the ML framework "
                "can compute the gradients of the coefficients.",
                UserWarning,
            )

            # operation is a Hamiltonian
            if tape[op_idx].return_type is not qml.measurements.Expectation:
                raise ValueError(
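
For context on the `UserWarning` added above, a rough sketch of the suggested
pre-processing at the tape level. This assumes the transform is available as
`qml.transforms.split_to_single_terms` (the exact import path may differ from the
name used in the warning text), and the tape below is illustrative.

```python
import numpy as np
import pennylane as qml

H = qml.Hamiltonian(np.array([0.1, 0.2]), [qml.PauliZ(0), qml.PauliZ(1)])
tape = qml.tape.QuantumScript(
    [qml.RX(0.4, wires=0), qml.RY(0.5, wires=1), qml.CNOT(wires=[0, 1])],
    [qml.expval(H)],
)

# Split the multi-term Hamiltonian into single-term expectation values first, so the
# parameter-shift transform never encounters a LinearCombination observable; the
# coefficients are recombined in the classical post-processing, which is where the
# ML framework can compute their gradients.
batch, postprocessing = qml.transforms.split_to_single_terms(tape)
gradient_batches = [qml.gradients.param_shift(t) for t in batch]
```
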
27 changes: 25 additions & 2 deletions tests/gradients/core/test_hamiltonian_gradient.py
@@ -12,10 +12,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the gradients.hamiltonian module."""
import pytest

import pennylane as qml
from pennylane.gradients.hamiltonian_grad import hamiltonian_grad


def test_hamiltonian_grad_deprecation():
    with pytest.warns(
        qml.PennyLaneDeprecationWarning, match="The 'hamiltonian_grad' function is deprecated"
    ):
        with qml.queuing.AnnotatedQueue() as q:
            qml.RY(0.3, wires=0)
            qml.RX(0.5, wires=1)
            qml.CNOT(wires=[0, 1])
            qml.expval(qml.Hamiltonian([-1.5, 2.0], [qml.PauliZ(0), qml.PauliZ(1)]))

        tape = qml.tape.QuantumScript.from_queue(q)
        tape.trainable_params = {2, 3}
        hamiltonian_grad(tape, idx=0)


def test_behaviour():
"""Test that the function behaves as expected."""

@@ -29,10 +46,16 @@ def test_behaviour():

    tape = qml.tape.QuantumScript.from_queue(q)
    tape.trainable_params = {2, 3}
    tapes, processing_fn = hamiltonian_grad(tape, idx=0)
    with pytest.warns(
        qml.PennyLaneDeprecationWarning, match="The 'hamiltonian_grad' function is deprecated"
    ):
        tapes, processing_fn = hamiltonian_grad(tape, idx=0)
    res1 = processing_fn(dev.execute(tapes))

    tapes, processing_fn = hamiltonian_grad(tape, idx=1)
    with pytest.warns(
        qml.PennyLaneDeprecationWarning, match="The 'hamiltonian_grad' function is deprecated"
    ):
        tapes, processing_fn = hamiltonian_grad(tape, idx=1)
    res2 = processing_fn(dev.execute(tapes))

    with qml.queuing.AnnotatedQueue() as q1:
159 changes: 6 additions & 153 deletions tests/gradients/parameter_shift/test_parameter_shift.py
@@ -3473,58 +3473,6 @@ def test_trainable_coeffs(self, tol, broadcast):
assert np.allclose(res[0], expected[0], atol=tol, rtol=0)
assert np.allclose(res[1], expected[1], atol=tol, rtol=0)

def test_multiple_hamiltonians(self, tol, broadcast):
"""Test multiple trainable Hamiltonian coefficients"""
dev = qml.device("default.qubit", wires=2)

obs = [qml.PauliZ(0), qml.PauliZ(0) @ qml.PauliX(1), qml.PauliY(0)]
coeffs = np.array([0.1, 0.2, 0.3])
a, b, c = coeffs
H1 = qml.Hamiltonian(coeffs, obs)

obs = [qml.PauliZ(0)]
coeffs = np.array([0.7])
d = coeffs[0]
H2 = qml.Hamiltonian(coeffs, obs)

weights = np.array([0.4, 0.5])
x, y = weights

with qml.queuing.AnnotatedQueue() as q:
qml.RX(weights[0], wires=0)
qml.RY(weights[1], wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(H1)
qml.expval(H2)

tape = qml.tape.QuantumScript.from_queue(q)
tape.trainable_params = {0, 1, 2, 4, 5}

res = dev.execute([tape])
expected = [-c * np.sin(x) * np.sin(y) + np.cos(x) * (a + b * np.sin(y)), d * np.cos(x)]
assert np.allclose(res, expected, atol=tol, rtol=0)

tapes, fn = qml.gradients.param_shift(tape, broadcast=broadcast)
# two shifts per rotation gate (in one batched tape if broadcasting),
# one circuit per trainable H term
assert len(tapes) == 2 * (1 if broadcast else 2)

res = fn(dev.execute(tapes))
assert isinstance(res, tuple)
assert len(res) == 2
assert len(res[0]) == 2
assert len(res[1]) == 2

expected = [
[
-c * np.cos(x) * np.sin(y) - np.sin(x) * (a + b * np.sin(y)),
b * np.cos(x) * np.cos(y) - c * np.cos(y) * np.sin(x),
],
[-d * np.sin(x), 0],
]

assert np.allclose(np.stack(res), expected, atol=tol, rtol=0)

@staticmethod
def cost_fn(weights, coeffs1, coeffs2, dev=None, broadcast=False):
"""Cost function for gradient tests"""
@@ -3547,95 +3495,8 @@ def cost_fn(weights, coeffs1, coeffs2, dev=None, broadcast=False):
jac = fn(dev.execute(tapes))
return jac

@staticmethod
def cost_fn_expected(weights, coeffs1, coeffs2):
"""Analytic jacobian of cost_fn above"""
a, b, c = coeffs1
d = coeffs2[0]
x, y = weights
return [
[
-c * np.cos(x) * np.sin(y) - np.sin(x) * (a + b * np.sin(y)),
b * np.cos(x) * np.cos(y) - c * np.cos(y) * np.sin(x),
],
[-d * np.sin(x), 0],
]

@pytest.mark.autograd
def test_autograd(self, tol, broadcast):
"""Test gradient of multiple trainable Hamiltonian coefficients
using autograd"""
coeffs1 = np.array([0.1, 0.2, 0.3], requires_grad=True)
coeffs2 = np.array([0.7], requires_grad=True)
weights = np.array([0.4, 0.5], requires_grad=True)
dev = qml.device("default.qubit", wires=2)

res = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)
expected = self.cost_fn_expected(weights, coeffs1, coeffs2)
assert np.allclose(res, np.array(expected), atol=tol, rtol=0)

# TODO: test when Hessians are supported with the new return types
# second derivative wrt to Hamiltonian coefficients should be zero
# ---
# res = qml.jacobian(self.cost_fn)(weights, coeffs1, coeffs2, dev=dev)
# assert np.allclose(res[1][:, 2:5], np.zeros([2, 3, 3]), atol=tol, rtol=0)
# assert np.allclose(res[2][:, -1], np.zeros([2, 1, 1]), atol=tol, rtol=0)

@pytest.mark.tf
def test_tf(self, tol, broadcast):
"""Test gradient of multiple trainable Hamiltonian coefficients
using tf"""
import tensorflow as tf

coeffs1 = tf.Variable([0.1, 0.2, 0.3], dtype=tf.float64)
coeffs2 = tf.Variable([0.7], dtype=tf.float64)
weights = tf.Variable([0.4, 0.5], dtype=tf.float64)

dev = qml.device("default.qubit", wires=2)

with tf.GradientTape() as _:
jac = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)

expected = self.cost_fn_expected(weights.numpy(), coeffs1.numpy(), coeffs2.numpy())
assert np.allclose(jac[0], np.array(expected)[0], atol=tol, rtol=0)
assert np.allclose(jac[1], np.array(expected)[1], atol=tol, rtol=0)

# TODO: test when Hessians are supported with the new return types
# second derivative wrt to Hamiltonian coefficients should be zero.
# When activating the following, rename the GradientTape above from _ to t
# ---
# hess = t.jacobian(jac, [coeffs1, coeffs2])
# assert np.allclose(hess[0][:, 2:5], np.zeros([2, 3, 3]), atol=tol, rtol=0)
# assert np.allclose(hess[1][:, -1], np.zeros([2, 1, 1]), atol=tol, rtol=0)

@pytest.mark.torch
def test_torch(self, tol, broadcast):
"""Test gradient of multiple trainable Hamiltonian coefficients
using torch"""
import torch

coeffs1 = torch.tensor([0.1, 0.2, 0.3], dtype=torch.float64, requires_grad=True)
coeffs2 = torch.tensor([0.7], dtype=torch.float64, requires_grad=True)
weights = torch.tensor([0.4, 0.5], dtype=torch.float64, requires_grad=True)

dev = qml.device("default.qubit", wires=2)

res = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)
expected = self.cost_fn_expected(
weights.detach().numpy(), coeffs1.detach().numpy(), coeffs2.detach().numpy()
)
res = tuple(tuple(_r.detach() for _r in r) for r in res)
assert np.allclose(res, expected, atol=tol, rtol=0)

# second derivative wrt to Hamiltonian coefficients should be zero
# hess = torch.autograd.functional.jacobian(
# lambda *args: self.cost_fn(*args, dev, broadcast), (weights, coeffs1, coeffs2)
# )
# assert np.allclose(hess[1][:, 2:5], np.zeros([2, 3, 3]), atol=tol, rtol=0)
# assert np.allclose(hess[2][:, -1], np.zeros([2, 1, 1]), atol=tol, rtol=0)

@pytest.mark.jax
def test_jax(self, tol, broadcast):
def test_jax(self, broadcast):
"""Test gradient of multiple trainable Hamiltonian coefficients
using JAX"""
import jax
@@ -3647,19 +3508,11 @@ def test_jax(self, tol, broadcast):
weights = jnp.array([0.4, 0.5])
dev = qml.device("default.qubit", wires=2)

res = self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)
expected = self.cost_fn_expected(weights, coeffs1, coeffs2)
assert np.allclose(np.array(res)[:, :2], np.array(expected), atol=tol, rtol=0)

# TODO: test when Hessians are supported with the new return types
# second derivative wrt to Hamiltonian coefficients should be zero
# ---
# second derivative wrt to Hamiltonian coefficients should be zero
# res = jax.jacobian(self.cost_fn, argnums=1)(weights, coeffs1, coeffs2, dev, broadcast)
# assert np.allclose(res[:, 2:5], np.zeros([2, 3, 3]), atol=tol, rtol=0)

# res = jax.jacobian(self.cost_fn, argnums=1)(weights, coeffs1, coeffs2, dev, broadcast)
# assert np.allclose(res[:, -1], np.zeros([2, 1, 1]), atol=tol, rtol=0)
with pytest.warns(
qml.PennyLaneDeprecationWarning, match="The 'hamiltonian_grad' function is deprecated"
):
with pytest.warns(UserWarning, match="Please use qml.gradients.split_to_single_terms"):
self.cost_fn(weights, coeffs1, coeffs2, dev, broadcast)


@pytest.mark.autograd
