Commit
Distribute tests to match package structure
Showing 8 changed files with 341 additions and 307 deletions.
@@ -0,0 +1,128 @@
import numpy as np
import torch
import unittest
from edugrad import Tensor

from tests.gradcheck import numerical_jacobian, jacobian, gradcheck

x_init = np.random.randn(1, 3).astype(np.float32)
U_init = np.random.randn(3, 3).astype(np.float32)
V_init = np.random.randn(3, 3).astype(np.float32)
W_init = np.random.randn(3, 3).astype(np.float32)
m_init = np.random.randn(1, 3).astype(np.float32)


class TestTinygrad(unittest.TestCase):
    def test_backward_pass(self):
        def test_tinygrad():
            x = Tensor(x_init, requires_grad=True)
            W = Tensor(W_init, requires_grad=True)
            m = Tensor(m_init)
            out = x.dot(W).relu()
            out = out.log_softmax()
            out = out.mul(m).add(m).sum()
            out.backward()
            return out.numpy(), x.grad.numpy(), W.grad.numpy()

        def test_pytorch():
            x = torch.tensor(x_init, requires_grad=True)
            W = torch.tensor(W_init, requires_grad=True)
            m = torch.tensor(m_init)
            out = x.matmul(W).relu()
            out = torch.nn.functional.log_softmax(out, dim=1)
            out = out.mul(m).add(m).sum()
            out.backward()
            return out.detach().numpy(), x.grad, W.grad

        for x, y in zip(test_tinygrad(), test_pytorch()):
            np.testing.assert_allclose(x, y, atol=1e-5)

    def test_backward_pass_diamond_model(self):
        def test_tinygrad():
            u = Tensor(U_init, requires_grad=True)
            v = Tensor(V_init, requires_grad=True)
            w = Tensor(W_init, requires_grad=True)
            x = u.mul(v).relu()
            y = u.mul(w).relu()
            out = x.add(y).mul(y).relu()
            out = out.log_softmax()
            out = out.sum()
            out.backward()
            return out.numpy(), u.grad.numpy(), v.grad.numpy(), w.grad.numpy()

        def test_pytorch():
            u = torch.tensor(U_init, requires_grad=True)
            v = torch.tensor(V_init, requires_grad=True)
            w = torch.tensor(W_init, requires_grad=True)
            x = u.mul(v).relu()
            y = u.mul(w).relu()
            out = x.add(y).mul(y).relu()
            out = torch.nn.functional.log_softmax(out, dim=1)
            out = out.sum()
            out.backward()
            return out.detach().numpy(), u.grad, v.grad, w.grad

        for x, y in zip(test_tinygrad(), test_pytorch()):
            np.testing.assert_allclose(x, y, atol=1e-5)

    def test_nograd(self):
        x = Tensor(x_init, requires_grad=False)
        m = Tensor(m_init, requires_grad=False)
        W = Tensor(W_init, requires_grad=True)
        tmp = x.mul(m)
        mm = tmp.matmul(W)
        out = mm.relu()
        out = out.sum()
        out.backward()
        assert x.grad is None
        assert m.grad is None
        assert tmp.grad is None
        assert mm.grad is not None
        assert W.grad is not None

    def test_jacobian(self):
        W = np.random.RandomState(42069).random((10, 5)).astype(np.float32)
        x = np.random.RandomState(69420).random((1, 10)).astype(np.float32)

        torch_x = torch.tensor(x, requires_grad=True)
        torch_W = torch.tensor(W, requires_grad=True)

        def torch_func(x):
            return torch.nn.functional.log_softmax(x.matmul(torch_W).relu(), dim=1)

        PJ = torch.autograd.functional.jacobian(torch_func, torch_x).squeeze().numpy()

        tiny_x = Tensor(x, requires_grad=True)
        tiny_W = Tensor(W, requires_grad=True)

        def tiny_func(x):
            return x.dot(tiny_W).relu().log_softmax()

        J = jacobian(tiny_func, tiny_x)
        NJ = numerical_jacobian(tiny_func, tiny_x)

        np.testing.assert_allclose(PJ, J, atol=1e-5)
        np.testing.assert_allclose(PJ, NJ, atol=1e-3)

    def test_gradcheck(self):
        W = np.random.RandomState(1337).random((10, 5)).astype(np.float32)
        x = np.random.RandomState(7331).random((1, 10)).astype(np.float32)

        tiny_x = Tensor(x, requires_grad=True)
        tiny_W = Tensor(W, requires_grad=True)

        def tiny_func(x):
            return x.dot(tiny_W).relu().log_softmax()

        self.assertTrue(gradcheck(tiny_func, tiny_x, eps=1e-3))

        # the finite-difference gradient is only a coarse approximation: it needs a
        # "big" eps to cope with float32 precision and the model's non-linearities,
        # so with eps=1e-5 the check is expected to fail
        self.assertFalse(gradcheck(tiny_func, tiny_x, eps=1e-5))

    def test_deepwalk_ctx_check(self):
        layer = Tensor.uniform(1, 1, requires_grad=True)
        x = Tensor.randn(1, 1, 1)
        x.dot(layer).mean().backward()
        x = Tensor.randn(1, 1, 1)
        x.dot(layer).mean().backward()
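These tests rely on numerical_jacobian, jacobian, and gradcheck from tests.gradcheck, whose implementations are not part of this diff. As rough orientation only, a finite-difference checker of this kind might look like the sketch below; the _sketch names, the use of .numpy() for value extraction, and the (output elements, input elements) Jacobian layout are assumptions for illustration, not the repository's actual code.

import numpy as np
from edugrad import Tensor
from tests.gradcheck import jacobian  # analytic Jacobian via backward passes (not sketched here)

def numerical_jacobian_sketch(func, x, eps=1e-3):
    # Central-difference Jacobian of func at x, laid out as (output elements, input elements).
    x_np = x.numpy()
    in_dim = x_np.size
    out_dim = func(x).numpy().size
    NJ = np.zeros((out_dim, in_dim), dtype=np.float32)
    for i in range(in_dim):
        shift = np.zeros(in_dim, dtype=x_np.dtype)
        shift[i] = eps
        f_plus = func(Tensor((x_np.reshape(-1) + shift).reshape(x_np.shape))).numpy()
        f_minus = func(Tensor((x_np.reshape(-1) - shift).reshape(x_np.shape))).numpy()
        NJ[:, i] = (f_plus - f_minus).reshape(-1) / (2 * eps)
    return NJ

def gradcheck_sketch(func, x, eps=1e-3, atol=1e-3, rtol=1e-3):
    # Accept the analytic Jacobian if it agrees with the finite-difference estimate.
    return np.allclose(jacobian(func, x), numerical_jacobian_sketch(func, x, eps),
                       atol=atol, rtol=rtol)

Roughly speaking, this sketch also shows why the eps values in test_gradcheck differ in outcome: each float32 function evaluation carries rounding noise on the order of 1e-7, and dividing a difference of that accuracy by a 2e-5 step leaves per-entry errors around 1e-2, whereas a 2e-3 step keeps them near 1e-4.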
@@ -0,0 +1,33 @@
import unittest
from edugrad import Tensor


class TestTinygrad(unittest.TestCase):

    def test_argfix(self):
        self.assertEqual(Tensor.zeros().shape, ())
        self.assertEqual(Tensor.ones().shape, ())

        self.assertEqual(Tensor.zeros([]).shape, ())
        self.assertEqual(Tensor.ones([]).shape, ())

        self.assertEqual(Tensor.zeros(tuple()).shape, ())
        self.assertEqual(Tensor.ones(tuple()).shape, ())

        self.assertEqual(Tensor.zeros(1).shape, (1,))
        self.assertEqual(Tensor.ones(1).shape, (1,))

        self.assertEqual(Tensor.zeros(1, 10, 20).shape, (1, 10, 20))
        self.assertEqual(Tensor.ones(1, 10, 20).shape, (1, 10, 20))

        self.assertEqual(Tensor.zeros([1]).shape, (1,))
        self.assertEqual(Tensor.ones([1]).shape, (1,))

        self.assertEqual(Tensor.zeros([10, 20, 40]).shape, (10, 20, 40))
        self.assertEqual(Tensor.ones([10, 20, 40]).shape, (10, 20, 40))

        self.assertEqual(Tensor.rand(1, 10, 20).shape, (1, 10, 20))
        self.assertEqual(Tensor.rand((10, 20, 40)).shape, (10, 20, 40))

        self.assertEqual(Tensor.empty(1, 10, 20).shape, (1, 10, 20))
        self.assertEqual(Tensor.empty((10, 20, 40)).shape, (10, 20, 40))
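test_argfix checks that the creation ops (Tensor.zeros, Tensor.ones, Tensor.rand, Tensor.empty) accept a shape passed as positional ints, as a single tuple or list, or as nothing at all. A minimal sketch of the kind of normalization this implies is shown below; the name argfix_sketch and its exact behaviour are assumptions for illustration, not edugrad's actual helper.

def argfix_sketch(*shape):
    # Normalize a shape given either as varargs (zeros(1, 10, 20)) or as a single
    # tuple/list (zeros((10, 20, 40)), zeros([])) into a plain tuple of ints.
    if len(shape) == 1 and isinstance(shape[0], (tuple, list)):
        return tuple(shape[0])
    return tuple(shape)

assert argfix_sketch() == ()
assert argfix_sketch([]) == ()
assert argfix_sketch(tuple()) == ()
assert argfix_sketch(1, 10, 20) == (1, 10, 20)
assert argfix_sketch((10, 20, 40)) == (10, 20, 40)
assert argfix_sketch([10, 20, 40]) == (10, 20, 40)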
@@ -1,9 +1,6 @@
import sys
print(sys.path)

import pytest
from applications.learn_mnist import train_and_evaluate_mnist


def test_mnist_accuracy():
    test_accuracy = train_and_evaluate_mnist()
    assert test_accuracy > 0.93, f"Test accuracy too low: {test_accuracy}"