From b31e41e2bc917d3e48b4c8e41d116c0b78eac6ec Mon Sep 17 00:00:00 2001
From: tostenzel
Date: Thu, 4 Jan 2024 13:52:13 +0100
Subject: [PATCH] Distribute tests to match package structure

---
 tests/test_autograd.py               | 128 +++++++++++
 tests/test_helpers.py                |  33 +++
 tests/test_mnist.py                  |   7 +-
 tests/test_tensor.py                 | 303 +--------------------------
 tests/test_tensor_combine_segment.py |  17 ++
 tests/test_tensor_create.py          |  90 ++++++++
 tests/test_tensor_reduce.py          |  27 +++
 tests/test_tensor_reshape.py         |  43 ++++
 8 files changed, 341 insertions(+), 307 deletions(-)
 create mode 100644 tests/test_autograd.py
 create mode 100644 tests/test_helpers.py
 create mode 100644 tests/test_tensor_combine_segment.py
 create mode 100644 tests/test_tensor_create.py
 create mode 100644 tests/test_tensor_reduce.py
 create mode 100644 tests/test_tensor_reshape.py

diff --git a/tests/test_autograd.py b/tests/test_autograd.py
new file mode 100644
index 0000000..69b449e
--- /dev/null
+++ b/tests/test_autograd.py
@@ -0,0 +1,128 @@
+import numpy as np
+import torch
+import unittest
+from edugrad import Tensor
+
+from tests.gradcheck import numerical_jacobian, jacobian, gradcheck
+
+x_init = np.random.randn(1, 3).astype(np.float32)
+U_init = np.random.randn(3, 3).astype(np.float32)
+V_init = np.random.randn(3, 3).astype(np.float32)
+W_init = np.random.randn(3, 3).astype(np.float32)
+m_init = np.random.randn(1, 3).astype(np.float32)
+
+
+class TestTinygrad(unittest.TestCase):
+    def test_backward_pass(self):
+        def test_tinygrad():
+            x = Tensor(x_init, requires_grad=True)
+            W = Tensor(W_init, requires_grad=True)
+            m = Tensor(m_init)
+            out = x.dot(W).relu()
+            out = out.log_softmax()
+            out = out.mul(m).add(m).sum()
+            out.backward()
+            return out.numpy(), x.grad.numpy(), W.grad.numpy()
+
+        def test_pytorch():
+            x = torch.tensor(x_init, requires_grad=True)
+            W = torch.tensor(W_init, requires_grad=True)
+            m = torch.tensor(m_init)
+            out = x.matmul(W).relu()
+            out = torch.nn.functional.log_softmax(out, dim=1)
+            out = out.mul(m).add(m).sum()
+            out.backward()
+            return out.detach().numpy(), x.grad, W.grad
+
+        for x, y in zip(test_tinygrad(), test_pytorch()):
+            np.testing.assert_allclose(x, y, atol=1e-5)
+
+    def test_backward_pass_diamond_model(self):
+        def test_tinygrad():
+            u = Tensor(U_init, requires_grad=True)
+            v = Tensor(V_init, requires_grad=True)
+            w = Tensor(W_init, requires_grad=True)
+            x = u.mul(v).relu()
+            y = u.mul(w).relu()
+            out = x.add(y).mul(y).relu()
+            out = out.log_softmax()
+            out = out.sum()
+            out.backward()
+            return out.numpy(), u.grad.numpy(), v.grad.numpy(), w.grad.numpy()
+
+        def test_pytorch():
+            u = torch.tensor(U_init, requires_grad=True)
+            v = torch.tensor(V_init, requires_grad=True)
+            w = torch.tensor(W_init, requires_grad=True)
+            x = u.mul(v).relu()
+            y = u.mul(w).relu()
+            out = x.add(y).mul(y).relu()
+            out = torch.nn.functional.log_softmax(out, dim=1)
+            out = out.sum()
+            out.backward()
+            return out.detach().numpy(), u.grad, v.grad, w.grad
+
+        for x, y in zip(test_tinygrad(), test_pytorch()):
+            np.testing.assert_allclose(x, y, atol=1e-5)
+
+    def test_nograd(self):
+        x = Tensor(x_init, requires_grad=False)
+        m = Tensor(m_init, requires_grad=False)
+        W = Tensor(W_init, requires_grad=True)
+        tmp = x.mul(m)
+        mm = tmp.matmul(W)
+        out = mm.relu()
+        out = out.sum()
+        out.backward()
+        assert x.grad is None
+        assert m.grad is None
+        assert tmp.grad is None
+        assert mm.grad is not None
+        assert W.grad is not None
+
+    def test_jacobian(self):
+        W = np.random.RandomState(42069).random((10, 5)).astype(np.float32)
+        x = np.random.RandomState(69420).random((1, 10)).astype(np.float32)
+
+        torch_x = torch.tensor(x, requires_grad=True)
+        torch_W = torch.tensor(W, requires_grad=True)
+
+        def torch_func(x):
+            return torch.nn.functional.log_softmax(x.matmul(torch_W).relu(), dim=1)
+
+        PJ = torch.autograd.functional.jacobian(torch_func, torch_x).squeeze().numpy()
+
+        tiny_x = Tensor(x, requires_grad=True)
+        tiny_W = Tensor(W, requires_grad=True)
+
+        def tiny_func(x):
+            return x.dot(tiny_W).relu().log_softmax()
+
+        J = jacobian(tiny_func, tiny_x)
+        NJ = numerical_jacobian(tiny_func, tiny_x)
+
+        np.testing.assert_allclose(PJ, J, atol=1e-5)
+        np.testing.assert_allclose(PJ, NJ, atol=1e-3)
+
+    def test_gradcheck(self):
+        W = np.random.RandomState(1337).random((10, 5)).astype(np.float32)
+        x = np.random.RandomState(7331).random((1, 10)).astype(np.float32)
+
+        tiny_x = Tensor(x, requires_grad=True)
+        tiny_W = Tensor(W, requires_grad=True)
+
+        def tiny_func(x):
+            return x.dot(tiny_W).relu().log_softmax()
+
+        self.assertTrue(gradcheck(tiny_func, tiny_x, eps=1e-3))
+
+        # coarse approx. since a "big" eps and the non-linearities of the model
+        self.assertFalse(gradcheck(tiny_func, tiny_x, eps=1e-5))
+
+
+    def test_deepwalk_ctx_check(self):
+        layer = Tensor.uniform(1, 1, requires_grad=True)
+        x = Tensor.randn(1, 1, 1)
+        x.dot(layer).mean().backward()
+        x = Tensor.randn(1, 1, 1)
+        x.dot(layer).mean().backward()
\ No newline at end of file
diff --git a/tests/test_helpers.py b/tests/test_helpers.py
new file mode 100644
index 0000000..edf9a80
--- /dev/null
+++ b/tests/test_helpers.py
@@ -0,0 +1,33 @@
+import unittest
+from edugrad import Tensor
+
+
+class TestTinygrad(unittest.TestCase):
+
+    def test_argfix(self):
+        self.assertEqual(Tensor.zeros().shape, ())
+        self.assertEqual(Tensor.ones().shape, ())
+
+        self.assertEqual(Tensor.zeros([]).shape, ())
+        self.assertEqual(Tensor.ones([]).shape, ())
+
+        self.assertEqual(Tensor.zeros(tuple()).shape, ())
+        self.assertEqual(Tensor.ones(tuple()).shape, ())
+
+        self.assertEqual(Tensor.zeros(1).shape, (1,))
+        self.assertEqual(Tensor.ones(1).shape, (1,))
+
+        self.assertEqual(Tensor.zeros(1, 10, 20).shape, (1, 10, 20))
+        self.assertEqual(Tensor.ones(1, 10, 20).shape, (1, 10, 20))
+
+        self.assertEqual(Tensor.zeros([1]).shape, (1,))
+        self.assertEqual(Tensor.ones([1]).shape, (1,))
+
+        self.assertEqual(Tensor.zeros([10, 20, 40]).shape, (10, 20, 40))
+        self.assertEqual(Tensor.ones([10, 20, 40]).shape, (10, 20, 40))
+
+        self.assertEqual(Tensor.rand(1, 10, 20).shape, (1, 10, 20))
+        self.assertEqual(Tensor.rand((10, 20, 40)).shape, (10, 20, 40))
+
+        self.assertEqual(Tensor.empty(1, 10, 20).shape, (1, 10, 20))
+        self.assertEqual(Tensor.empty((10, 20, 40)).shape, (10, 20, 40))
diff --git a/tests/test_mnist.py b/tests/test_mnist.py
index 24ccdb5..612415c 100644
--- a/tests/test_mnist.py
+++ b/tests/test_mnist.py
@@ -1,9 +1,6 @@
-import sys
-print(sys.path)
-
-import pytest
 from applications.learn_mnist import train_and_evaluate_mnist
 
+
 def test_mnist_accuracy():
     test_accuracy = train_and_evaluate_mnist()
-    assert test_accuracy > 0.93, f"Test accuracy too low: {test_accuracy}"
\ No newline at end of file
+    assert test_accuracy > 0.93, f"Test accuracy too low: {test_accuracy}"
diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index 913d308..4a68d51 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -1,189 +1,10 @@
 import numpy as np
-import torch
 import unittest, copy
 from edugrad import Tensor
 from edugrad.dtypes import dtypes
 
-from tests.gradcheck import numerical_jacobian, jacobian, gradcheck
-
-x_init = np.random.randn(1, 3).astype(np.float32)
-U_init = np.random.randn(3, 3).astype(np.float32)
-V_init = np.random.randn(3, 3).astype(np.float32)
-W_init = np.random.randn(3, 3).astype(np.float32)
-m_init = np.random.randn(1, 3).astype(np.float32)
-
 
 class TestTinygrad(unittest.TestCase):
-    def test_zerodim_initialization(self):
-        a = Tensor(55)
-        b = Tensor(3.14)
-
-        self.assertEqual(a.shape, ())
-        self.assertEqual(b.shape, ())
-
-    def test_plus_equals(self):
-        a = Tensor.randn(10, 10)
-        b = Tensor.randn(10, 10)
-        c = a + b
-        val1 = c.numpy()
-        a += b
-        val2 = a.numpy()
-        np.testing.assert_allclose(val1, val2)
-
-    def test_backward_pass(self):
-        def test_tinygrad():
-            x = Tensor(x_init, requires_grad=True)
-            W = Tensor(W_init, requires_grad=True)
-            m = Tensor(m_init)
-            out = x.dot(W).relu()
-            out = out.log_softmax()
-            out = out.mul(m).add(m).sum()
-            out.backward()
-            return out.numpy(), x.grad.numpy(), W.grad.numpy()
-
-        def test_pytorch():
-            x = torch.tensor(x_init, requires_grad=True)
-            W = torch.tensor(W_init, requires_grad=True)
-            m = torch.tensor(m_init)
-            out = x.matmul(W).relu()
-            out = torch.nn.functional.log_softmax(out, dim=1)
-            out = out.mul(m).add(m).sum()
-            out.backward()
-            return out.detach().numpy(), x.grad, W.grad
-
-        for x, y in zip(test_tinygrad(), test_pytorch()):
-            np.testing.assert_allclose(x, y, atol=1e-5)
-
-    # @unittest.skipIf(Device.DEFAULT == "WEBGPU", "this test uses more than 8 bufs which breaks webgpu") #TODO: remove after #1461
-    def test_backward_pass_diamond_model(self):
-        def test_tinygrad():
-            u = Tensor(U_init, requires_grad=True)
-            v = Tensor(V_init, requires_grad=True)
-            w = Tensor(W_init, requires_grad=True)
-            x = u.mul(v).relu()
-            y = u.mul(w).relu()
-            out = x.add(y).mul(y).relu()
-            out = out.log_softmax()
-            out = out.sum()
-            out.backward()
-            return out.numpy(), u.grad.numpy(), v.grad.numpy(), w.grad.numpy()
-
-        def test_pytorch():
-            u = torch.tensor(U_init, requires_grad=True)
-            v = torch.tensor(V_init, requires_grad=True)
-            w = torch.tensor(W_init, requires_grad=True)
-            x = u.mul(v).relu()
-            y = u.mul(w).relu()
-            out = x.add(y).mul(y).relu()
-            out = torch.nn.functional.log_softmax(out, dim=1)
-            out = out.sum()
-            out.backward()
-            return out.detach().numpy(), u.grad, v.grad, w.grad
-
-        for x, y in zip(test_tinygrad(), test_pytorch()):
-            np.testing.assert_allclose(x, y, atol=1e-5)
-
-    def test_nograd(self):
-        x = Tensor(x_init, requires_grad=False)
-        m = Tensor(m_init, requires_grad=False)
-        W = Tensor(W_init, requires_grad=True)
-        tmp = x.mul(m)
-        mm = tmp.matmul(W)
-        out = mm.relu()
-        out = out.sum()
-        out.backward()
-        assert x.grad is None
-        assert m.grad is None
-        assert tmp.grad is None
-        assert mm.grad is not None
-        assert W.grad is not None
-
-    def test_jacobian(self):
-        W = np.random.RandomState(42069).random((10, 5)).astype(np.float32)
-        x = np.random.RandomState(69420).random((1, 10)).astype(np.float32)
-
-        torch_x = torch.tensor(x, requires_grad=True)
-        torch_W = torch.tensor(W, requires_grad=True)
-
-        def torch_func(x):
-            return torch.nn.functional.log_softmax(x.matmul(torch_W).relu(), dim=1)
-
-        PJ = torch.autograd.functional.jacobian(torch_func, torch_x).squeeze().numpy()
-
-        tiny_x = Tensor(x, requires_grad=True)
-        tiny_W = Tensor(W, requires_grad=True)
-
-        def tiny_func(x):
-            return x.dot(tiny_W).relu().log_softmax()
-
-        J = jacobian(tiny_func, tiny_x)
-        NJ = numerical_jacobian(tiny_func, tiny_x)
-
-        np.testing.assert_allclose(PJ, J, atol=1e-5)
-        np.testing.assert_allclose(PJ, NJ, atol=1e-3)
-
-    def test_gradcheck(self):
-        W = np.random.RandomState(1337).random((10, 5)).astype(np.float32)
-        x = np.random.RandomState(7331).random((1, 10)).astype(np.float32)
-
-        tiny_x = Tensor(x, requires_grad=True)
-        tiny_W = Tensor(W, requires_grad=True)
-
-        def tiny_func(x):
-            return x.dot(tiny_W).relu().log_softmax()
-
-        self.assertTrue(gradcheck(tiny_func, tiny_x, eps=1e-3))
-
-        # coarse approx. since a "big" eps and the non-linearities of the model
-        self.assertFalse(gradcheck(tiny_func, tiny_x, eps=1e-5))
-
-    def test_random_fns_are_deterministic_with_seed(self):
-        for random_fn in [Tensor.randn, Tensor.normal, Tensor.uniform, Tensor.scaled_uniform]:
-            with self.subTest(msg=f"Tensor.{random_fn.__name__}"):
-                Tensor.manual_seed(1337)
-                a = random_fn(10, 10)
-                Tensor.manual_seed(1337)
-                b = random_fn(10, 10)
-                np.testing.assert_allclose(a.numpy(), b.numpy())
-
-    def test_randn_isnt_inf_on_zero(self):
-        # simulate failure case of rand handing a zero to randn
-        original_rand, Tensor.rand = Tensor.rand, Tensor.zeros
-        try:
-            self.assertNotIn(np.inf, Tensor.randn(16).numpy())
-        except:
-            raise
-        finally:
-            Tensor.rand = original_rand
-
-    def test_zeros_like_has_same_dtype_and_shape(self):
-        for datatype in [dtypes.float16, dtypes.float32, dtypes.int8, dtypes.int32, dtypes.int64, dtypes.uint8]:
-            a = Tensor([1, 2, 3], dtype=datatype)
-            b = Tensor.zeros_like(a)
-            assert a.dtype == b.dtype, f"dtype mismatch {a.dtype=} != {b.dtype}"
-            assert a.shape == b.shape, f"shape mismatch {a.shape} != {b.shape}"
-
-        a = Tensor([1, 2, 3])
-        b = Tensor.zeros_like(a, dtype=dtypes.int8)
-        assert (
-            a.dtype == dtypes.only_int and b.dtype == dtypes.int8
-        ), "a.dtype should be int and b.dtype should be char"
-        assert a.shape == b.shape, f"shape mismatch {a.shape} != {b.shape}"
-
-    def test_ones_like_has_same_dtype_and_shape(self):
-        for datatype in [dtypes.float16, dtypes.float32, dtypes.int8, dtypes.int32, dtypes.int64, dtypes.uint8]:
-            a = Tensor([1, 2, 3], dtype=datatype)
-            b = Tensor.ones_like(a)
-            assert a.dtype == b.dtype, f"dtype mismatch {a.dtype=} != {b.dtype}"
-            assert a.shape == b.shape, f"shape mismatch {a.shape} != {b.shape}"
-
-        a = Tensor([1, 2, 3])
-        b = Tensor.ones_like(a, dtype=dtypes.int8)
-        assert (
-            a.dtype == dtypes.only_int and b.dtype == dtypes.int8
-        ), "a.dtype should be int and b.dtype should be char"
-        assert a.shape == b.shape, f"shape mismatch {a.shape} != {b.shape}"
 
     def test_ndim(self):
         assert Tensor(1).ndim == 0
@@ -191,34 +12,6 @@ def test_ndim(self):
         assert Tensor.randn(2, 2, 2).ndim == 3
         assert Tensor.randn(1, 1, 1, 1, 1, 1).ndim == 6
 
-    def test_argfix(self):
-        self.assertEqual(Tensor.zeros().shape, ())
-        self.assertEqual(Tensor.ones().shape, ())
-
-        self.assertEqual(Tensor.zeros([]).shape, ())
-        self.assertEqual(Tensor.ones([]).shape, ())
-
-        self.assertEqual(Tensor.zeros(tuple()).shape, ())
-        self.assertEqual(Tensor.ones(tuple()).shape, ())
-
-        self.assertEqual(Tensor.zeros(1).shape, (1,))
-        self.assertEqual(Tensor.ones(1).shape, (1,))
-
-        self.assertEqual(Tensor.zeros(1, 10, 20).shape, (1, 10, 20))
-        self.assertEqual(Tensor.ones(1, 10, 20).shape, (1, 10, 20))
-
-        self.assertEqual(Tensor.zeros([1]).shape, (1,))
-        self.assertEqual(Tensor.ones([1]).shape, (1,))
-
-        self.assertEqual(Tensor.zeros([10, 20, 40]).shape, (10, 20, 40))
-        self.assertEqual(Tensor.ones([10, 20, 40]).shape, (10, 20, 40))
-
-        self.assertEqual(Tensor.rand(1, 10, 20).shape, (1, 10, 20))
-        self.assertEqual(Tensor.rand((10, 20, 40)).shape, (10, 20, 40))
-
-        self.assertEqual(Tensor.empty(1, 10, 20).shape, (1, 10, 20))
-        self.assertEqual(Tensor.empty((10, 20, 40)).shape, (10, 20, 40))
-
     def test_numel(self):
         assert Tensor.randn(10, 10).numel() == 100
         assert Tensor.randn(1, 2, 5).numel() == 10
@@ -232,12 +25,6 @@ def test_element_size(self):
             dtype.itemsize == Tensor.randn(3, dtype=dtype).element_size()
         ), f"Tensor.element_size() not matching Tensor.dtype.itemsize for {dtype}"
 
-    def test_deepwalk_ctx_check(self):
-        layer = Tensor.uniform(1, 1, requires_grad=True)
-        x = Tensor.randn(1, 1, 1)
-        x.dot(layer).mean().backward()
-        x = Tensor.randn(1, 1, 1)
-        x.dot(layer).mean().backward()
 
     def test_zerosized_tensors(self):
         np.testing.assert_equal(Tensor([]).numpy(), np.array([]))
@@ -328,74 +115,6 @@ def test_item_to_tensor_to_item(self):
            np.testing.assert_allclose(reshaped_item, a), a
 
 class TestZeroShapeTensor(unittest.TestCase):
-    def test_rand(self):
-        t = Tensor.rand(3, 2, 0)
-        assert t.shape == (3, 2, 0)
-        np.testing.assert_equal(t.numpy(), np.zeros((3, 2, 0)))
-        t = Tensor.rand(0)
-        assert t.shape == (0,)
-        np.testing.assert_equal(t.numpy(), np.zeros((0,)))
-        t = Tensor.rand(0, 0, 0)
-        assert t.shape == (0, 0, 0)
-        np.testing.assert_equal(t.numpy(), np.zeros((0, 0, 0)))
-
-    def test_full(self):
-        t = Tensor.zeros(3, 2, 0)
-        assert t.shape == (3, 2, 0)
-        np.testing.assert_equal(t.numpy(), np.zeros((3, 2, 0)))
-        t = Tensor.full((3, 2, 0), 12)
-        assert t.shape == (3, 2, 0)
-        np.testing.assert_equal(t.numpy(), np.full((3, 2, 0), 12))
-
-    def test_reshape(self):
-        t = Tensor.zeros(3, 2, 0)
-        a = t.reshape(7, 0)
-        assert a.shape == (7, 0)
-        np.testing.assert_equal(a.numpy(), np.zeros((7, 0)))
-        with self.assertRaises(ValueError):
-            # cannot reshape array of size 0 into shape ()
-            a = t.reshape(())
-
-    def test_expand(self):
-        t = Tensor.full((3, 2, 0), 12)
-        # with numpy operands could not be broadcast together with remapped shapes [original->remapped]: (3,2,0)
-        # and requested shape (6,2,0)
-        with self.assertRaises(ValueError):
-            t = t.expand((6, 2, 0))
-        # assert t.shape == (6, 2, 0)
-        # np.testing.assert_equal(t.numpy(), np.full((6, 2, 0), 12))
-
-    def test_pad(self):
-        t = Tensor.rand(3, 2, 0).pad((None, None, (1, 1)), 1)
-        assert t.shape == (3, 2, 2)
-        np.testing.assert_equal(t.numpy(), np.ones((3, 2, 2)))
-
-        # torch does not support padding non-zero dim with 0-size. torch.nn.functional.pad(torch.zeros(3,2,0), [0,0,0,4,0,0])
-        t = Tensor.rand(3, 2, 0).pad((None, (1, 1), None), 1)
-        assert t.shape == (3, 4, 0)
-        np.testing.assert_equal(t.numpy(), np.ones((3, 4, 0)))
-
-        t = Tensor.rand(3, 2, 0).pad(((1, 1), None, None), 1)
-        assert t.shape == (5, 2, 0)
-        np.testing.assert_equal(t.numpy(), np.ones((5, 2, 0)))
-
-    def test_shrink_into_zero(self):
-        t = Tensor.rand(3, 4)
-        assert t.shrink((None, (2, 2))).shape == (3, 0)
-        assert t.shrink(((2, 2), None)).shape == (0, 4)
-        assert t.shrink(((2, 2), (2, 2))).shape == (0, 0)
-
-    def test_cat(self):
-        s = Tensor.rand(3, 2, 2)
-        t = Tensor.rand(3, 2, 0).cat(s, dim=2)
-        assert t.shape == (3, 2, 2)
-        np.testing.assert_equal(t.numpy(), s.numpy())
-
-        # torch does not support padding non-zero dim with 0-size. torch.nn.functional.pad(torch.zeros(3,2,0), [0,0,0,4,0,0])
-        s = Tensor.rand(3, 4, 0)
-        t = Tensor.rand(3, 2, 0).cat(s, dim=1)
-        assert t.shape == (3, 6, 0)
-        np.testing.assert_equal(t.numpy(), np.zeros((3, 6, 0)))
 
     def test_elementwise(self):
         a = Tensor.rand(3, 2, 0)
@@ -415,26 +134,6 @@ def test_elementwise(self):
         assert c.shape == (3, 2, 0)
         np.testing.assert_equal(c.numpy(), np.where(mask.numpy(), a.numpy(), b.numpy()))
 
-    def test_reduce_over_non_zero(self):
-        a = Tensor.ones(3, 2, 0).sum(axis=1)
-        assert a.shape == (3, 0)
-        np.testing.assert_equal(a.numpy(), np.sum(np.zeros((3, 2, 0)), axis=1))
-
-    def test_reduce_over_zero(self):
-        a = Tensor.ones(3, 2, 0).sum(axis=2)
-        assert a.shape == (3, 2)
-        np.testing.assert_equal(a.numpy(), np.sum(np.zeros((3, 2, 0)), axis=2))
-
-        a = Tensor.ones(3, 2, 0).sum(axis=2, keepdim=True)
-        assert a.shape == (3, 2, 1)
-        np.testing.assert_equal(a.numpy(), np.sum(np.zeros((3, 2, 0)), axis=2, keepdims=True))
-
-    def test_reduce_default(self):
-        np.testing.assert_equal(Tensor([]).max().numpy(), -float("inf"))
-        np.testing.assert_equal(Tensor([]).min().numpy(), float("inf"))
-        np.testing.assert_equal(Tensor([]).sum().numpy(), 0)
-        np.testing.assert_equal(Tensor([]).mean().numpy(), 0)
-
 
 if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
+    unittest.main()
diff --git a/tests/test_tensor_combine_segment.py b/tests/test_tensor_combine_segment.py
new file mode 100644
index 0000000..ed272cd
--- /dev/null
+++ b/tests/test_tensor_combine_segment.py
@@ -0,0 +1,17 @@
+import numpy as np
+import unittest
+from edugrad import Tensor
+
+
+class TestZeroShapeTensor(unittest.TestCase):
+    def test_cat(self):
+        s = Tensor.rand(3, 2, 2)
+        t = Tensor.rand(3, 2, 0).cat(s, dim=2)
+        assert t.shape == (3, 2, 2)
+        np.testing.assert_equal(t.numpy(), s.numpy())
+
+        # torch does not support padding non-zero dim with 0-size. torch.nn.functional.pad(torch.zeros(3,2,0), [0,0,0,4,0,0])
+        s = Tensor.rand(3, 4, 0)
+        t = Tensor.rand(3, 2, 0).cat(s, dim=1)
+        assert t.shape == (3, 6, 0)
+        np.testing.assert_equal(t.numpy(), np.zeros((3, 6, 0)))
diff --git a/tests/test_tensor_create.py b/tests/test_tensor_create.py
new file mode 100644
index 0000000..068408c
--- /dev/null
+++ b/tests/test_tensor_create.py
@@ -0,0 +1,90 @@
+import numpy as np
+import unittest
+from edugrad import Tensor
+from edugrad.dtypes import dtypes
+
+
+class TestTinygrad(unittest.TestCase):
+    def test_zerodim_initialization(self):
+        a = Tensor(55)
+        b = Tensor(3.14)
+
+        self.assertEqual(a.shape, ())
+        self.assertEqual(b.shape, ())
+
+    def test_plus_equals(self):
+        a = Tensor.randn(10, 10)
+        b = Tensor.randn(10, 10)
+        c = a + b
+        val1 = c.numpy()
+        a += b
+        val2 = a.numpy()
+        np.testing.assert_allclose(val1, val2)
+
+    def test_random_fns_are_deterministic_with_seed(self):
+        for random_fn in [Tensor.randn, Tensor.normal, Tensor.uniform, Tensor.scaled_uniform]:
+            with self.subTest(msg=f"Tensor.{random_fn.__name__}"):
+                Tensor.manual_seed(1337)
+                a = random_fn(10, 10)
+                Tensor.manual_seed(1337)
+                b = random_fn(10, 10)
+                np.testing.assert_allclose(a.numpy(), b.numpy())
+
+    def test_randn_isnt_inf_on_zero(self):
+        # simulate failure case of rand handing a zero to randn
+        original_rand, Tensor.rand = Tensor.rand, Tensor.zeros
+        try:
+            self.assertNotIn(np.inf, Tensor.randn(16).numpy())
+        except:
+            raise
+        finally:
+            Tensor.rand = original_rand
+
+    def test_zeros_like_has_same_dtype_and_shape(self):
+        for datatype in [dtypes.float16, dtypes.float32, dtypes.int8, dtypes.int32, dtypes.int64, dtypes.uint8]:
+            a = Tensor([1, 2, 3], dtype=datatype)
+            b = Tensor.zeros_like(a)
+            assert a.dtype == b.dtype, f"dtype mismatch {a.dtype=} != {b.dtype}"
+            assert a.shape == b.shape, f"shape mismatch {a.shape} != {b.shape}"
+
+        a = Tensor([1, 2, 3])
+        b = Tensor.zeros_like(a, dtype=dtypes.int8)
+        assert (
+            a.dtype == dtypes.only_int and b.dtype == dtypes.int8
+        ), "a.dtype should be int and b.dtype should be char"
+        assert a.shape == b.shape, f"shape mismatch {a.shape} != {b.shape}"
+
+    def test_ones_like_has_same_dtype_and_shape(self):
+        for datatype in [dtypes.float16, dtypes.float32, dtypes.int8, dtypes.int32, dtypes.int64, dtypes.uint8]:
+            a = Tensor([1, 2, 3], dtype=datatype)
+            b = Tensor.ones_like(a)
+            assert a.dtype == b.dtype, f"dtype mismatch {a.dtype=} != {b.dtype}"
+            assert a.shape == b.shape, f"shape mismatch {a.shape} != {b.shape}"
+
+        a = Tensor([1, 2, 3])
+        b = Tensor.ones_like(a, dtype=dtypes.int8)
+        assert (
+            a.dtype == dtypes.only_int and b.dtype == dtypes.int8
+        ), "a.dtype should be int and b.dtype should be char"
+        assert a.shape == b.shape, f"shape mismatch {a.shape} != {b.shape}"
+
+
+class TestZeroShapeTensor(unittest.TestCase):
+    def test_rand(self):
+        t = Tensor.rand(3, 2, 0)
+        assert t.shape == (3, 2, 0)
+        np.testing.assert_equal(t.numpy(), np.zeros((3, 2, 0)))
+        t = Tensor.rand(0)
+        assert t.shape == (0,)
+        np.testing.assert_equal(t.numpy(), np.zeros((0,)))
+        t = Tensor.rand(0, 0, 0)
+        assert t.shape == (0, 0, 0)
+        np.testing.assert_equal(t.numpy(), np.zeros((0, 0, 0)))
+
+    def test_full(self):
+        t = Tensor.zeros(3, 2, 0)
+        assert t.shape == (3, 2, 0)
+        np.testing.assert_equal(t.numpy(), np.zeros((3, 2, 0)))
+        t = Tensor.full((3, 2, 0), 12)
+        assert t.shape == (3, 2, 0)
+        np.testing.assert_equal(t.numpy(), np.full((3, 2, 0), 12))
diff --git a/tests/test_tensor_reduce.py b/tests/test_tensor_reduce.py
new file mode 100644
index 0000000..258f4b2
--- /dev/null
+++ b/tests/test_tensor_reduce.py
@@ -0,0 +1,27 @@
+import numpy as np
+import unittest, copy
+from edugrad import Tensor
+from edugrad.dtypes import dtypes
+
+
+class TestZeroShapeTensor(unittest.TestCase):
+
+    def test_reduce_over_non_zero(self):
+        a = Tensor.ones(3, 2, 0).sum(axis=1)
+        assert a.shape == (3, 0)
+        np.testing.assert_equal(a.numpy(), np.sum(np.zeros((3, 2, 0)), axis=1))
+
+    def test_reduce_over_zero(self):
+        a = Tensor.ones(3, 2, 0).sum(axis=2)
+        assert a.shape == (3, 2)
+        np.testing.assert_equal(a.numpy(), np.sum(np.zeros((3, 2, 0)), axis=2))
+
+        a = Tensor.ones(3, 2, 0).sum(axis=2, keepdim=True)
+        assert a.shape == (3, 2, 1)
+        np.testing.assert_equal(a.numpy(), np.sum(np.zeros((3, 2, 0)), axis=2, keepdims=True))
+
+    def test_reduce_default(self):
+        np.testing.assert_equal(Tensor([]).max().numpy(), -float("inf"))
+        np.testing.assert_equal(Tensor([]).min().numpy(), float("inf"))
+        np.testing.assert_equal(Tensor([]).sum().numpy(), 0)
+        np.testing.assert_equal(Tensor([]).mean().numpy(), 0)
diff --git a/tests/test_tensor_reshape.py b/tests/test_tensor_reshape.py
new file mode 100644
index 0000000..72c235f
--- /dev/null
+++ b/tests/test_tensor_reshape.py
@@ -0,0 +1,43 @@
+import numpy as np
+import unittest
+from edugrad import Tensor
+
+
+class TestZeroShapeTensor(unittest.TestCase):
+    def test_reshape(self):
+        t = Tensor.zeros(3, 2, 0)
+        a = t.reshape(7, 0)
+        assert a.shape == (7, 0)
+        np.testing.assert_equal(a.numpy(), np.zeros((7, 0)))
+        with self.assertRaises(ValueError):
+            # cannot reshape array of size 0 into shape ()
+            a = t.reshape(())
+
+    def test_expand(self):
+        t = Tensor.full((3, 2, 0), 12)
+        # with numpy operands could not be broadcast together with remapped shapes [original->remapped]: (3,2,0)
+        # and requested shape (6,2,0)
+        with self.assertRaises(ValueError):
+            t = t.expand((6, 2, 0))
+        # assert t.shape == (6, 2, 0)
+        # np.testing.assert_equal(t.numpy(), np.full((6, 2, 0), 12))
+
+    def test_pad(self):
+        t = Tensor.rand(3, 2, 0).pad((None, None, (1, 1)), 1)
+        assert t.shape == (3, 2, 2)
+        np.testing.assert_equal(t.numpy(), np.ones((3, 2, 2)))
+
+        # torch does not support padding non-zero dim with 0-size. torch.nn.functional.pad(torch.zeros(3,2,0), [0,0,0,4,0,0])
+        t = Tensor.rand(3, 2, 0).pad((None, (1, 1), None), 1)
+        assert t.shape == (3, 4, 0)
+        np.testing.assert_equal(t.numpy(), np.ones((3, 4, 0)))
+
+        t = Tensor.rand(3, 2, 0).pad(((1, 1), None, None), 1)
+        assert t.shape == (5, 2, 0)
+        np.testing.assert_equal(t.numpy(), np.ones((5, 2, 0)))
+
+    def test_shrink_into_zero(self):
+        t = Tensor.rand(3, 4)
+        assert t.shrink((None, (2, 2))).shape == (3, 0)
+        assert t.shrink(((2, 2), None)).shape == (0, 4)
+        assert t.shrink(((2, 2), (2, 2))).shape == (0, 0)