Adjust tests to type casting in Tensor(list) and some renamings
tostenzel committed Jan 4, 2024
1 parent b31e41e commit 9c58db1
Showing 7 changed files with 64 additions and 58 deletions.
4 changes: 2 additions & 2 deletions applications/learn_mnist.py
@@ -24,7 +24,7 @@ def parse(file):
return X_train, Y_train, X_test, Y_test


class TinyConvNet:
class ConvNet:
def __init__(self):
# https://keras.io/examples/vision/mnist_convnet/
kernel_sz = 3
@@ -42,7 +42,7 @@ def __call__(self, x: Tensor):

def train_and_evaluate_mnist(num_steps=100, batch_size=128, learning_rate=0.001):
X_train, Y_train, X_test, Y_test = fetch_mnist()
model = TinyConvNet()
model = ConvNet()
opt = optimizer.Adam([model.c1, model.c2, model.l1], lr=learning_rate)

with Tensor.train():
2 changes: 1 addition & 1 deletion edugrad/tensor.py
@@ -74,7 +74,7 @@ def __init__(

# --------------------------------------------------------------------------------------------------------------
# Handles Tensor(x) for x with different data types.
# We cast x = list(y) up to float32 (default_type) for every type that y can have
# We cast x = list(y) up to float32 (default_type) for every type that y can have if no other type is provided

if isinstance(data, TensorData):
assert dtype is None or dtype == data.dtype, "dtype doesn't match, and casting isn't supported"
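To make the casting rule above concrete, here is a minimal sketch of the behaviour the comment describes, assuming the Tensor and dtypes imports used in the test files below; the exact dtype names are taken from those tests.

from edugrad import Tensor
from edugrad.dtypes import dtypes

# Lists are cast up to the default float32 unless a dtype is passed explicitly.
assert Tensor([1, 2, 3]).dtype == dtypes.float32
assert Tensor([True, False]).dtype == dtypes.float32

# An explicit dtype overrides the default cast.
assert Tensor([1, 2, 3], dtype=dtypes.int32).dtype == dtypes.int32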
2 changes: 1 addition & 1 deletion tests/gradcheck.py
@@ -19,7 +19,7 @@ def jacobian(func, input):
input.grad = None
output = func(input)

# tinygrad doesn't support slicing, tiny-hack to select
# edugrad doesn't support slicing, tiny-hack to select
# the needed scalar and backpropagate only through it
o_scalar = Tensor(mask_like(output.numpy(), o, 1.0)).mul(output).sum()
o_scalar.backward()
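The "tiny-hack" in the comment above is how the Jacobian is assembled without slicing: each output scalar is isolated by multiplying the output with a one-hot mask and summing, and only that scalar is backpropagated. A rough sketch of the idea, mirroring the mask_like helper used in the hunk (its implementation here is an assumption) and assuming gradients expose .numpy():

import numpy as np
from edugrad import Tensor

def mask_like(arr, idx, value=1.0):
    # assumed helper: array of zeros with `value` at flat index `idx`
    mask = np.zeros_like(arr).reshape(-1)
    mask[idx] = value
    return mask.reshape(arr.shape)

def jacobian_sketch(func, x: Tensor):
    # x is expected to have requires_grad=True
    out_size = func(x).numpy().size
    in_size = x.numpy().size
    J = np.zeros((out_size, in_size), dtype=np.float32)
    for o in range(out_size):
        x.grad = None
        output = func(x)
        # select the o-th output scalar and backpropagate only through it
        o_scalar = Tensor(mask_like(output.numpy(), o, 1.0)).mul(output).sum()
        o_scalar.backward()
        J[o] = x.grad.numpy().reshape(-1)
    return J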
34 changes: 17 additions & 17 deletions tests/test_autograd.py
@@ -12,9 +12,9 @@
m_init = np.random.randn(1, 3).astype(np.float32)


class TestTinygrad(unittest.TestCase):
class TestEdugrad(unittest.TestCase):
def test_backward_pass(self):
def test_tinygrad():
def test_edugrad():
x = Tensor(x_init, requires_grad=True)
W = Tensor(W_init, requires_grad=True)
m = Tensor(m_init)
@@ -34,11 +34,11 @@ def test_pytorch():
out.backward()
return out.detach().numpy(), x.grad, W.grad

for x, y in zip(test_tinygrad(), test_pytorch()):
for x, y in zip(test_edugrad(), test_pytorch()):
np.testing.assert_allclose(x, y, atol=1e-5)

def test_backward_pass_diamond_model(self):
def test_tinygrad():
def test_edugrad():
u = Tensor(U_init, requires_grad=True)
v = Tensor(V_init, requires_grad=True)
w = Tensor(W_init, requires_grad=True)
@@ -62,7 +62,7 @@ def test_pytorch():
out.backward()
return out.detach().numpy(), u.grad, v.grad, w.grad

for x, y in zip(test_tinygrad(), test_pytorch()):
for x, y in zip(test_edugrad(), test_pytorch()):
np.testing.assert_allclose(x, y, atol=1e-5)

def test_nograd(self):
@@ -92,14 +92,14 @@ def torch_func(x):

PJ = torch.autograd.functional.jacobian(torch_func, torch_x).squeeze().numpy()

tiny_x = Tensor(x, requires_grad=True)
tiny_W = Tensor(W, requires_grad=True)
edugrad_x = Tensor(x, requires_grad=True)
edugrad_W = Tensor(W, requires_grad=True)

def tiny_func(x):
return x.dot(tiny_W).relu().log_softmax()
def func(x):
return x.dot(edugrad_W).relu().log_softmax()

J = jacobian(tiny_func, tiny_x)
NJ = numerical_jacobian(tiny_func, tiny_x)
J = jacobian(func, edugrad_x)
NJ = numerical_jacobian(func, edugrad_x)

np.testing.assert_allclose(PJ, J, atol=1e-5)
np.testing.assert_allclose(PJ, NJ, atol=1e-3)
@@ -108,16 +108,16 @@ def test_gradcheck(self):
W = np.random.RandomState(1337).random((10, 5)).astype(np.float32)
x = np.random.RandomState(7331).random((1, 10)).astype(np.float32)

tiny_x = Tensor(x, requires_grad=True)
tiny_W = Tensor(W, requires_grad=True)
edugrad_x = Tensor(x, requires_grad=True)
edugrad_W = Tensor(W, requires_grad=True)

def tiny_func(x):
return x.dot(tiny_W).relu().log_softmax()
def func(x):
return x.dot(edugrad_W).relu().log_softmax()

self.assertTrue(gradcheck(tiny_func, tiny_x, eps=1e-3))
self.assertTrue(gradcheck(func, edugrad_x, eps=1e-3))

# coarse approximation due to the "big" eps and the non-linearities of the model
self.assertFalse(gradcheck(tiny_func, tiny_x, eps=1e-5))
self.assertFalse(gradcheck(func, edugrad_x, eps=1e-5))


def test_deepwalk_ctx_check(self):
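The eps comment in test_gradcheck points at the usual trade-off in numerical gradient checking: the numerical Jacobian is a finite-difference estimate, so a too-large step suffers truncation error on a non-linear model while a too-small step is dominated by float32 round-off, and either extreme can make the check fail. A generic central-difference sketch, independent of the edugrad helpers (function and parameter names here are illustrative, not the project's API):

import numpy as np

def numerical_jacobian_sketch(f, x, eps=1e-3):
    # central-difference estimate of df/dx for f mapping a NumPy array to a NumPy array
    y = np.asarray(f(x))
    J = np.zeros((y.size, x.size))
    for i in range(x.size):
        x_plus, x_minus = x.copy(), x.copy()
        x_plus.flat[i] += eps
        x_minus.flat[i] -= eps
        J[:, i] = (np.asarray(f(x_plus)) - np.asarray(f(x_minus))).reshape(-1) / (2 * eps)
    return J

# a gradient check then compares this estimate against the backprop Jacobian within a tolerance, e.g.:
# np.allclose(numerical_jacobian_sketch(f, x), analytic_jacobian, atol=1e-3)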
2 changes: 1 addition & 1 deletion tests/test_helpers.py
@@ -2,7 +2,7 @@
from edugrad import Tensor


class TestTinygrad(unittest.TestCase):
class TestEdugrad(unittest.TestCase):

def test_argfix(self):
self.assertEqual(Tensor.zeros().shape, ())
59 changes: 39 additions & 20 deletions tests/test_tensor.py
@@ -3,8 +3,8 @@
from edugrad import Tensor
from edugrad.dtypes import dtypes


class TestTinygrad(unittest.TestCase):
# Tensor(x) casts all types up to float32
class TestEdugrad(unittest.TestCase):

def test_ndim(self):
assert Tensor(1).ndim == 0
@@ -32,29 +32,36 @@ def test_zerosized_tensors(self):

def test_tensor_ndarray_dtype(self):
arr = np.array([1]) # where dtype is implicitly int64
assert Tensor(arr).dtype == dtypes.int64
with self.assertRaises(KeyError):
# DTYPES_DICT[np.dtype(x).name] key not available because dtypes.int64 is not defined
assert Tensor(arr).dtype == dtypes.int64
assert (
Tensor(arr, dtype=dtypes.float32).dtype == dtypes.float32
) # check if ndarray correctly casts to Tensor dtype
assert Tensor(arr, dtype=dtypes.float64).dtype == dtypes.float64 # check that it works for something else
with self.assertRaises(AttributeError):
# dtypes.float64 not defined
assert Tensor(arr, dtype=dtypes.float64).dtype == dtypes.float64 # check that it works for something else

def test_tensor_list_dtype(self):
for arr in ([1], [[[1]]], [[1, 1], [1, 1]], [[[1, 1], [1, 1]], [[1, 1], [1, 1]]]):
x = Tensor(arr)
have = x.dtype
assert Tensor(arr).dtype == dtypes.only_int
with self.assertRaises(AssertionError):
# we always cast up to float32, even if the list contains only ints (int32).
assert Tensor(arr).dtype == dtypes.only_int
assert Tensor(arr, dtype=dtypes.float32).dtype == dtypes.float32
assert Tensor(arr, dtype=dtypes.float64).dtype == dtypes.float64

for arr in (
[True],
[[[False]]],
[[True, False], [True, False]],
[[[False, True], [False, False]], [[True, True], [False, True]]],
):
assert Tensor(arr).dtype == dtypes.bool
with self.assertRaises(AssertionError):
# we always cast up to float32, even for bool.
assert Tensor(arr).dtype == dtypes.bool
assert Tensor(arr, dtype=dtypes.float32).dtype == dtypes.float32
assert Tensor(arr, dtype=dtypes.float64).dtype == dtypes.float64
with self.assertRaises(AttributeError):
# dtypes.float64 not defined
assert Tensor(arr, dtype=dtypes.float64).dtype == dtypes.float64

# empty tensor defaults
for arr in ([], [[[]]], [[], []]):
@@ -65,7 +72,9 @@ def test_tensor_list_dtype(self):
# mixture of bool and int
for arr in ([True, 3], [[True], [3]], [[[True]], [[3]]], [[True, 3], [3, True]]):
t = Tensor(arr)
assert t.dtype == dtypes.only_int
with self.assertRaises(AssertionError):
# we always cast up to float32, even for a mixture of bool and int.
assert t.dtype == dtypes.only_int
np.testing.assert_allclose(t.numpy(), np.array(arr))

# mixture of bool, int and float
@@ -104,15 +113,25 @@ def test_tensor_copy(self):

def test_item_to_tensor_to_item(self):
for a in [0, 1, 2, 3, -1, -100, 100, -101.1, 2.345, 100.1, True, False]:
item = Tensor(a).item()
assert type(item) == type(a), a
np.testing.assert_allclose(item, a), a
buffered_item = Tensor([a]).item()
assert type(buffered_item) == type(a), a
np.testing.assert_allclose(buffered_item, a), a
reshaped_item = Tensor([a]).reshape((1, 1, 1, 1, 1)).item()
assert type(reshaped_item) == type(a), a
np.testing.assert_allclose(reshaped_item, a), a
tensor_item = Tensor(a).item()
buffered_tensor_item = Tensor([a]).item()
reshaped_tensor_item = Tensor([a]).reshape((1, 1, 1, 1, 1)).item()
np.testing.assert_allclose(tensor_item, a)
np.testing.assert_allclose(buffered_tensor_item, a)
np.testing.assert_allclose(reshaped_tensor_item, a)
self.assertEqual(type(tensor_item), type(a))

# For non-floats, the type check raises AssertionError if the Tensor was created from a list
if isinstance(a, float):
# For floats, type should be retained
self.assertEqual(type(tensor_item), float)
self.assertEqual(type(buffered_tensor_item), float)
self.assertEqual(type(reshaped_tensor_item), float)
else:
with self.assertRaises(AssertionError):
self.assertEqual(type(buffered_tensor_item), type(a))
with self.assertRaises(AssertionError):
self.assertEqual(type(reshaped_tensor_item), type(a))

class TestZeroShapeTensor(unittest.TestCase):

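A compact reading of what the adjusted item() test above encodes, under the casting rule stated in the tests (list inputs are always cast up to float32): a scalar-constructed tensor hands back the original Python type, while a list-constructed tensor does not. A small sketch:

from edugrad import Tensor

a = 3  # a Python int
assert type(Tensor(a).item()) is int        # scalar construction keeps the Python type
assert type(Tensor([a]).item()) is not int  # list construction casts up, so the int type is lost (most likely a float, given the float32 cast)

b = 2.345  # a Python float
assert type(Tensor([b]).item()) is float    # floats keep their type either way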
19 changes: 3 additions & 16 deletions tests/test_tensor_create.py
@@ -4,7 +4,7 @@
from edugrad.dtypes import dtypes


class TestTinygrad(unittest.TestCase):
class TestEdugrad(unittest.TestCase):
def test_zerodim_initialization(self):
a = Tensor(55)
b = Tensor(3.14)
@@ -41,33 +41,20 @@ def test_randn_isnt_inf_on_zero(self):
Tensor.rand = original_rand

def test_zeros_like_has_same_dtype_and_shape(self):
for datatype in [dtypes.float16, dtypes.float32, dtypes.int8, dtypes.int32, dtypes.int64, dtypes.uint8]:
for datatype in [dtypes.float32, dtypes.int32]:
a = Tensor([1, 2, 3], dtype=datatype)
b = Tensor.zeros_like(a)
assert a.dtype == b.dtype, f"dtype mismatch {a.dtype=} != {b.dtype}"
assert a.shape == b.shape, f"shape mismatch {a.shape} != {b.shape}"

a = Tensor([1, 2, 3])
b = Tensor.zeros_like(a, dtype=dtypes.int8)
assert (
a.dtype == dtypes.only_int and b.dtype == dtypes.int8
), "a.dtype should be int and b.dtype should be char"
assert a.shape == b.shape, f"shape mismatch {a.shape} != {b.shape}"

def test_ones_like_has_same_dtype_and_shape(self):
for datatype in [dtypes.float16, dtypes.float32, dtypes.int8, dtypes.int32, dtypes.int64, dtypes.uint8]:
for datatype in [dtypes.float32, dtypes.int32]:
a = Tensor([1, 2, 3], dtype=datatype)
b = Tensor.ones_like(a)
assert a.dtype == b.dtype, f"dtype mismatch {a.dtype=} != {b.dtype}"
assert a.shape == b.shape, f"shape mismatch {a.shape} != {b.shape}"

a = Tensor([1, 2, 3])
b = Tensor.ones_like(a, dtype=dtypes.int8)
assert (
a.dtype == dtypes.only_int and b.dtype == dtypes.int8
), "a.dtype should be int and b.dtype should be char"
assert a.shape == b.shape, f"shape mismatch {a.shape} != {b.shape}"


class TestZeroShapeTensor(unittest.TestCase):
def test_rand(self):
