diff --git a/README.md b/README.md
index f8d1994..eada3c0 100644
--- a/README.md
+++ b/README.md
@@ -1,14 +1,13 @@
 # DASF is an Accelerated and Scalable Framework
 
+[![Continuous Test](https://github.com/discovery-unicamp/dasf-core/actions/workflows/ci.yaml/badge.svg)](https://github.com/discovery-unicamp/dasf-core/actions/workflows/ci.yaml)
+
 DASF is a generic framework specialized in accelerating and scaling common
 Machine Learning techniques. DASF uses methods and functions from the most common
 libraries to speed up most algorithms. Part of this is to use Dask data structures
 to scale computation and RAPIDS AI algorithms to extend the support to GPUs as well.
 
-### CI Results
-[![Continuous Test](https://github.com/discovery-unicamp/dasf-core/actions/workflows/ci.yaml/badge.svg)](https://github.com/discovery-unicamp/dasf-core/actions/workflows/ci.yaml)
-
 ## Installation
 
 For now, the installation can be done using Docker or Singularity
 (if available).
diff --git a/tests/datasets/test_generic.py b/tests/datasets/test_generic.py
index d5ce835..e78e6fb 100644
--- a/tests/datasets/test_generic.py
+++ b/tests/datasets/test_generic.py
@@ -3,6 +3,8 @@
 import os
 import unittest
 
+import numpy as np
+
 from pytest import fixture
 from parameterized import parameterized_class
 
@@ -45,3 +47,105 @@ def test_dataset_load(self):
 
         self.assertTrue(hasattr(dataset, '_metadata'))
         self.assertTrue("size" in dataset._metadata)
+
+
+class TestDatasetArray(unittest.TestCase):
+    def test_shape(self):
+        filename = os.getenv('PYTEST_CURRENT_TEST')
+        test_dir, _ = os.path.splitext(filename)
+        raw_path = os.path.join(test_dir, "simple", "Array.npy")
+
+        dataset = DatasetArray(name="Array", root=raw_path, download=False)
+
+        self.assertEqual(dataset.shape, (40, 40, 40))
+
+    def test_add(self):
+        filename = os.getenv('PYTEST_CURRENT_TEST')
+        test_dir, _ = os.path.splitext(filename)
+        raw_path = os.path.join(test_dir, "simple", "Array.npy")
+
+        dataset1 = DatasetArray(name="Array", root=raw_path, download=False)
+        dataset2 = DatasetArray(name="Array", root=raw_path, download=False)
+
+        dataset1.load()
+        dataset2.load()
+
+        np1 = np.load(raw_path)
+        np2 = np.load(raw_path)
+
+        dataset3 = dataset1 + dataset2
+
+        np3 = np1 + np2
+
+        self.assertTrue(np.array_equal(dataset3, np3))
+
+    def test_sub(self):
+        filename = os.getenv('PYTEST_CURRENT_TEST')
+        test_dir, _ = os.path.splitext(filename)
+        raw_path = os.path.join(test_dir, "simple", "Array.npy")
+
+        dataset1 = DatasetArray(name="Array", root=raw_path, download=False)
+        dataset2 = DatasetArray(name="Array", root=raw_path, download=False)
+
+        dataset1.load()
+        dataset2.load()
+
+        np1 = np.load(raw_path)
+        np2 = np.load(raw_path)
+
+        dataset3 = dataset1 - dataset2
+
+        np3 = np1 - np2
+
+        self.assertTrue(np.array_equal(dataset3, np3))
+
+    def test_mul(self):
+        filename = os.getenv('PYTEST_CURRENT_TEST')
+        test_dir, _ = os.path.splitext(filename)
+        raw_path = os.path.join(test_dir, "simple", "Array.npy")
+
+        dataset1 = DatasetArray(name="Array", root=raw_path, download=False)
+        dataset2 = DatasetArray(name="Array", root=raw_path, download=False)
+
+        dataset1.load()
+        dataset2.load()
+
+        np1 = np.load(raw_path)
+        np2 = np.load(raw_path)
+
+        dataset3 = dataset1 * dataset2
+
+        np3 = np1 * np2
+
+        self.assertTrue(np.array_equal(dataset3, np3))
+
+#    def test_div(self):
+#        filename = os.getenv('PYTEST_CURRENT_TEST')
+#        test_dir, _ = os.path.splitext(filename)
+#        raw_path = os.path.join(test_dir, "simple", "Array.npy")
+#
+#        dataset1 = DatasetArray(name="Array", root=raw_path, download=False)
+#        dataset2 = DatasetArray(name="Array", root=raw_path, download=False)
+#
+#        dataset1.load()
+#        dataset2.load()
+#
+#        np1 = np.load(raw_path)
+#        np2 = np.load(raw_path)
+#
+#        dataset3 = dataset1 / dataset2
+#
+#        np3 = np1 / np2
+#
+#        self.assertTrue(np.array_equal(dataset3, np3))
+
+    def test_avg(self):
+        filename = os.getenv('PYTEST_CURRENT_TEST')
+        test_dir, _ = os.path.splitext(filename)
+        raw_path = os.path.join(test_dir, "simple", "Array.npy")
+
+        dataset = DatasetArray(name="Array", root=raw_path, download=False)
+
+        dataset.load()
+
+        self.assertEqual(dataset.avg(), 0.0)
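
For reference, each new test resolves its on-disk fixture through pytest's `PYTEST_CURRENT_TEST` environment variable rather than `__file__`. Below is a minimal sketch of that lookup, factored into a hypothetical helper; the tests above inline the same three lines, and the helper name is not part of the diff.

```python
import os

def simple_array_path():
    """Hypothetical helper mirroring the path logic inlined in each test."""
    # While a test runs, pytest sets PYTEST_CURRENT_TEST to the test id, e.g.
    # "tests/datasets/test_generic.py::TestDatasetArray::test_shape (call)".
    current_test = os.getenv('PYTEST_CURRENT_TEST')
    # splitext() cuts at the last dot, so the ".py::Class::test (stage)" tail
    # is dropped and "tests/datasets/test_generic" doubles as the fixture dir.
    test_dir, _ = os.path.splitext(current_test)
    return os.path.join(test_dir, "simple", "Array.npy")
```

Hoisting this logic into a `setUp()` method (or a module-level helper like the sketch above) would also remove the repeated boilerplate from each test method.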