diff --git a/tensornetwork/backends/pytorch/pytorch_backend.py b/tensornetwork/backends/pytorch/pytorch_backend.py index 8467fac39..fb3ae238a 100644 --- a/tensornetwork/backends/pytorch/pytorch_backend.py +++ b/tensornetwork/backends/pytorch/pytorch_backend.py @@ -475,3 +475,17 @@ def sign(self, tensor: Tensor) -> Tensor: def item(self, tensor): return tensor.item() + + def power(self, a: Tensor, b: Tensor) -> Tensor: + """ + Returns the power of tensor a to the value of b. + In the case b is a tensor, then the power is by element + with a as the base and b as the exponent. + In the case b is a scalar, then the power of each value in a + is raised to the exponent of b. + + Args: + a: The tensor that contains the base. + b: The tensor that contains the exponent or a single scalar. + """ + return a ** b diff --git a/tensornetwork/backends/pytorch/pytorch_backend_test.py b/tensornetwork/backends/pytorch/pytorch_backend_test.py index f44aed5c6..82a3e6356 100644 --- a/tensornetwork/backends/pytorch/pytorch_backend_test.py +++ b/tensornetwork/backends/pytorch/pytorch_backend_test.py @@ -566,6 +566,18 @@ def test_matmul(): np.testing.assert_allclose(expected, actual) +def test_power(): + np.random.seed(10) + backend = pytorch_backend.PyTorchBackend() + t1 = np.random.rand(10, 2, 3) + t2 = np.random.rand(10, 3, 4) + a = backend.convert_to_tensor(t1) + b = backend.convert_to_tensor(t2) + actual = backend.power(a, b) + expected = np.power(t1, t2) + np.testing.assert_allclose(expected, actual) + + @pytest.mark.parametrize("dtype", torch_randn_dtypes) @pytest.mark.parametrize("offset", range(-2, 2)) @pytest.mark.parametrize("axis1", [-2, 0]) diff --git a/tensornetwork/backends/symmetric/symmetric_backend.py b/tensornetwork/backends/symmetric/symmetric_backend.py index 9e55bc694..1b1c517e3 100644 --- a/tensornetwork/backends/symmetric/symmetric_backend.py +++ b/tensornetwork/backends/symmetric/symmetric_backend.py @@ -689,3 +689,17 @@ def matmul(self, tensor1: Tensor, tensor2: 
# tensornetwork/backends/symmetric/symmetric_backend.py  (method of SymmetricBackend)
def power(self, a: Tensor, b: Tensor) -> Tensor:
  """Raise `a` element-wise to the exponent(s) in `b`.

  When `b` is a tensor, exponentiation happens element by element, with `a`
  supplying the bases and `b` the exponents.  When `b` is a scalar, each
  entry of `a` is raised to that single exponent.

  Args:
    a: The tensor containing the bases.
    b: The tensor containing the exponents, or a single scalar.
  """
  # Two-argument built-in pow(a, b) dispatches to a.__pow__(b), i.e. `a ** b`.
  return pow(a, b)


# tensornetwork/backends/symmetric/symmetric_backend_test.py
@pytest.mark.parametrize("dtype", np_dtypes)
@pytest.mark.parametrize("num_charges", [1, 2])
def test_power(dtype, num_charges):
  # NOTE(review): this exercises `power` on the raw numpy `.data` buffers,
  # not on BlockSparseTensors themselves — presumably because `**` is not
  # defined for BlockSparseTensor; confirm before tightening the test to
  # go through the symmetric dispatch path.
  np.random.seed(10)
  rank = 4
  backend = symmetric_backend.SymmetricBackend()
  base = get_tensor(rank, num_charges, dtype)
  exponent = BlockSparseTensor.random(base.sparse_shape)
  reference = np.power(base.data, exponent.data)
  result = backend.power(base.data, exponent.data)
  np.testing.assert_allclose(reference, result)