tests: Add llvm_not_implemented mark #3075

Merged (4 commits) on Oct 21, 2024

conftest.py (3 additions, 0 deletions)

@@ -50,6 +50,9 @@ def pytest_runtest_setup(item):
         if m in item.keywords and not item.config.getvalue(m):
             pytest.skip('{0} tests not requested'.format(m))
 
+    if 'llvm' in item.keywords and 'llvm_not_implemented' in item.keywords:
+        pytest.skip('LLVM implementation not available')
+
     if 'cuda' in item.keywords and not pnlvm.ptx_enabled:
         pytest.skip('PTX engine not enabled/available')
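
With this hook in place, any test whose keywords include both llvm and the new llvm_not_implemented mark is skipped during setup rather than failing in the compiled run. A minimal sketch of a test that opts in (the test name and body are hypothetical; in this suite the llvm keyword usually arrives via the comp_mode/func_mode parametrization rather than an explicit decorator):

    import pytest

    @pytest.mark.llvm                  # marks the compiled variant
    @pytest.mark.llvm_not_implemented  # declares compiled support as missing
    def test_hypothetical_compiled_feature():
        ...

Because Python-mode variants of a parametrized test do not carry the llvm keyword, they keep running; only the compiled variants are skipped.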

setup.cfg (1 addition, 0 deletions)

@@ -32,6 +32,7 @@ markers =
     acnested
     composition: PsyNeuLink Composition tests
     llvm: Tests using LLVM runtime compiler
+    llvm_not_implemented: Tests that should use LLVM runtime compiler but the functionality is not yet implemented
     cuda: Tests using LLVM runtime compiler and CUDA GPGPU backend
     control: Tests including control mechanism and/or control projection
     state_features: Tests for OptimizationControlMechanism state_features specifications
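
Registering the marker here keeps pytest's strict-marker checks quiet and makes it selectable from the command line; for example, a hypothetical invocation that lists every test still waiting on compiled support:

    pytest -m llvm_not_implemented --collect-only -q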

tests/composition/test_autodiffcomposition.py (126 additions, 253 deletions)

Large diffs are not rendered by default.

tests/composition/test_composition.py (1 addition, 3 deletions)

@@ -5569,11 +5569,9 @@ def test_partially_overlapping_local_and_control_mech_control_specs_in_unnested_
 class TestImportComposition:
     @pytest.mark.pytorch
     @pytest.mark.composition
+    @pytest.mark.llvm_not_implemented
     def test_import_composition(self, comp_mode):
-
-        if comp_mode != pnl.ExecutionMode.Python:
-            pytest.skip('Compilation not yet support for Composition.import.')
 
         em = EMComposition(memory_template=(2,5), memory_capacity=4)
 
         i1 = ProcessingMechanism()
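
This is the pattern the new mark enables across the suite: the run-time skip inside the test body is replaced by a mark, so conftest.py skips the compiled variants at setup time while the Python variant still runs. Schematically (a sketch, not the literal diff):

    # before: skip decided inside the test body at run time
    def test_import_composition(self, comp_mode):
        if comp_mode != pnl.ExecutionMode.Python:
            pytest.skip('Compilation not yet support for Composition.import.')
        ...

    # after: compiled variants are skipped during setup via the mark
    @pytest.mark.llvm_not_implemented
    def test_import_composition(self, comp_mode):
        ...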

tests/functions/test_integrator.py (69 additions, 33 deletions)

@@ -1,9 +1,7 @@

 import numpy as np
 import pytest
 
 import psyneulink as pnl
-import psyneulink.core.llvm as pnlvm
 import psyneulink.core.components.functions.stateful.integratorfunctions as Functions
 from psyneulink.core.components.functions.function import FunctionError
 from psyneulink.core.components.functions.nonstateful.transferfunctions import Angle
@@ -25,64 +23,75 @@ def SimpleIntFun(init, value, iterations, noise, rate, offset, **kwargs):
if "initializer" in kwargs:
return [4.91845218, 4.78766907, 4.73758993, 5.04920442, 4.09842889,
4.2909061, 4.05866892, 5.23154257, 5.23413599, 4.86548903]

else:
return [4.12672714, 4.25877415, 4.16954537, 4.12360778, 4.02739283,
4.2037768, 4.03845052, 4.39892272, 4.45597924, 3.99547688]
elif isinstance(noise, pnl.DistributionFunction):
if "initializer" in kwargs:
return [6.07047464, 1.45183492, 2.13615798, 3.22296925, 3.29867927,
0.9734048, 2.54011924, 3.21213761, 1.54651058, 2.7026355, ]

else:
return [5.2787496, 0.92294, 1.56811342, 2.29737262, 3.22764321,
0.8862755, 2.51990084, 2.37951776, 0.76835383, 1.83262335]
else:
if "initializer" in kwargs:
return [5.53160614, 4.86244369, 3.79932695, 5.06809088, 2.1305511,
3.8879681, 2.16602771, 5.74284825, 4.47697989, 3.78677378]

else:
return [4.7398811, 4.33354877, 3.23128239, 4.14249424, 2.05951504,
3.8008388, 2.14580932, 4.9102284, 3.69882314, 2.91676163]

def AdaptiveIntFun(init, value, iterations, noise, rate, offset, **kwargs):
assert iterations == 3

if np.isscalar(noise):
if "initializer" in kwargs:
return [3.44619156, 3.44183529, 3.38970396, 3.49707692, 3.08413924,
3.22437653, 3.07231498, 3.66899395, 3.69062231, 3.37774376]
else:
return [3.13125441, 3.23144828, 3.16374378, 3.12888752, 3.05588209,
3.18971771, 3.06427238, 3.33778941, 3.38108243, 3.03166509]

elif isinstance(noise, pnl.DistributionFunction):
if "initializer" in kwargs:
return [4.18870661, 1.3561085, 1.69287182, 1.94643064, 2.12581409,
1.05242466, 2.05628752, 1.90164378, 1.18394637, 1.39578569]

else:
return [3.87376946, 1.14572149, 1.46691163, 1.57824123, 2.09755694,
1.01776584, 2.04824492, 1.57043925, 0.8744065, 1.04970702]
else:
if "initializer" in kwargs:
return [3.91143701, 3.49857235, 2.67777415, 3.51140748, 1.59096419,
2.91863753, 1.63622751, 4.05695955, 3.11611173, 2.55924237]

else:
return [3.59649986, 3.28818534, 2.45181396, 3.14321808, 1.56270704,
2.88397872, 1.62818492, 3.72575501, 2.80657186, 2.2131637]

def DriftIntFun(init, value, iterations, noise, **kwargs):
assert iterations == 3

if np.isscalar(noise):
if "initializer" not in kwargs:
return ([0.35782281, 4.03326927, 4.90427264, 0.90944534, 1.45943493,
2.31791882, 3.05580281, 1.20089146, 2.8408554 , 1.93964773],
[3., 3., 3., 3., 3., 3., 3., 3., 3., 3.])

else:
return ([1.14954785, 4.56216419, 5.4723172 , 1.83504198, 1.53047099,
2.40504812, 3.07602121, 2.0335113 , 3.61901215, 2.80965988],
[3., 3., 3., 3., 3., 3., 3., 3., 3., 3.])

else:
if "initializer" not in kwargs:
return ([0.17810305, 4.06675934, 4.20730295, 0.90582833, 1.60883329,
2.27822395, 2.2923697 , 1.10933472, 2.71418965, 1.86808107],
[3., 3., 3., 3., 3., 3., 3., 3., 3., 3.])

else:
return ([0.96982809, 4.59565426, 4.77534751, 1.83142497, 1.67986935,
2.36535325, 2.3125881 , 1.94195457, 3.4923464 , 2.73809322],
@@ -96,11 +105,13 @@ def LeakyFun(init, value, iterations, noise, **kwargs):
             return [2.20813608, 2.25674001, 2.22389663, 2.2069879, 2.17157305, 2.23649656, 2.17564317, 2.30832598, 2.32932737, 2.15982541]
         else:
             return [2.93867224, 2.74475902, 2.74803958, 3.06104933, 2.23711905, 2.31689203, 2.19429898, 3.07659637, 3.04734388, 2.96259823]
+
     elif isinstance(noise, pnl.DistributionFunction):
         if "initializer" not in kwargs:
             return [2.55912037, 1.24455938, 1.43417309, 1.638423, 1.91298882, 1.22700281, 1.71226825, 1.67794471, 1.20395947, 1.48326449]
         else:
             return [3.28965653, 1.73257839, 1.95831604, 2.49248443, 1.97853482, 1.30739828, 1.73092406, 2.4462151, 1.92197598, 2.28603731]
+
     else:
         if "initializer" not in kwargs:
             return [2.39694798, 2.27976578, 1.9349721, 2.21280371, 1.5655935, 2.11241762, 1.59283164, 2.46577518, 2.09617208, 1.82765063]
@@ -115,27 +126,56 @@ def AccumulatorFun(init, value, iterations, noise, **kwargs):
             # variable is not used in Accumulator
             return [[1.38631136, 1.38631136, 1.38631136, 1.38631136, 1.38631136,
                      1.38631136, 1.38631136, 1.38631136, 1.38631136, 1.38631136]]
+
         else:
             return [[1.40097107, 1.39610447, 1.39682937, 1.40344986, 1.38762668,
                      1.38792466, 1.38668573, 1.40172829, 1.40071984, 1.40242065]]
+
     elif isinstance(noise, pnl.DistributionFunction):
         if "initializer" not in kwargs:
             return [[1.46381634, 0.97440038, 0.54931704, 0.28681701, 0.26162584,
                      0.66800459, 1.1010486, 0.02587729, 0.38761176, -0.56452977]]
+
         else:
             return [[1.47847605, 0.98419348, 0.55983505, 0.30395551, 0.26294116,
                      0.66961789, 1.10142297, 0.04129421, 0.40202024, -0.54842049]]
+
     else:
         if "initializer" not in kwargs:
             return [[1.65907194, 1.41957474, 0.96892655, 1.39471298, 0.51090402,
                      1.20706503, 0.5443729, 1.61376489, 1.04949166, 0.90644658]]
+
         else:
             return [[1.67373165, 1.42936784, 0.97944456, 1.41185147, 0.51221934,
                      1.20867833, 0.54474727, 1.62918182, 1.06390014, 0.92255587]]
 
+def DriftOnASphereFun(init, value, iterations, noise, **kwargs):
+    assert iterations == 3
 
-GROUP_PREFIX="IntegratorFunction "
+    if np.isscalar(noise):
+        if "initializer" not in kwargs:
+            return [1.10030505e-01, 6.77893188e-06, 4.36876221e-06, -4.83568579e-06,
+                    4.55349584e-05, 1.77044532e-04, 1.27797893e-03, -1.92233627e-02,
+                    9.74815346e-01, -2.22179738e-01, -6.97708243e-06]
+
+        else:
+            return [-1.32269048e-01, 4.35051787e-05, 3.87398441e-05, -3.95620568e-06,
+                    1.27324586e-04, -5.01625256e-04, -8.37794371e-04, 1.25048720e-01,
+                    7.47570336e-01, -6.52303943e-01, -6.57270465e-05]
+
+    else:
+        if "initializer" not in kwargs:
+            return [ 0.23690849, 0.00140115, 0.0020072, -0.00128063,
+                    -0.00096267, -0.01620475, -0.02644836, 0.46090672,
+                     0.82875571, -0.31584261, -0.00132534]
+
+        else:
+            return [-3.72900858e-03, -3.38148799e-04, -6.43154678e-04, 4.36274120e-05,
+                    6.67038983e-04, -2.87440868e-03, -2.08163440e-03, 4.41976901e-01,
+                    5.31162110e-01, -7.22848147e-01, 4.66808385e-04]
+
 
+GROUP_PREFIX="IntegratorFunction "
 
 @pytest.mark.function
 @pytest.mark.integrator_function
@@ -146,53 +186,49 @@ def AccumulatorFun(init, value, iterations, noise, **kwargs):
@pytest.mark.parametrize("noise", [RAND2, test_noise_arr, pnl.NormalDist],
ids=["SNOISE", "VNOISE", "FNOISE"])
@pytest.mark.parametrize("func", [
(Functions.AdaptiveIntegrator, AdaptiveIntFun),
(Functions.SimpleIntegrator, SimpleIntFun),
(Functions.DriftDiffusionIntegrator, DriftIntFun),
(Functions.LeakyCompetingIntegrator, LeakyFun),
(Functions.AccumulatorIntegrator, AccumulatorFun),
(pnl.AdaptiveIntegrator, AdaptiveIntFun),
(pnl.SimpleIntegrator, SimpleIntFun),
(pnl.DriftDiffusionIntegrator, DriftIntFun),
(pnl.LeakyCompetingIntegrator, LeakyFun),
(pnl.AccumulatorIntegrator, AccumulatorFun),
pytest.param((pnl.DriftOnASphereIntegrator, DriftOnASphereFun), marks=pytest.mark.llvm_not_implemented),
], ids=lambda x: x[0])
@pytest.mark.benchmark
def test_execute(func, func_mode, variable, noise, params, benchmark):
benchmark.group = GROUP_PREFIX + func[0].componentName
func_class, func_res = func
benchmark.group = GROUP_PREFIX + func_class.componentName

try:
noise = noise()
except TypeError as e:
if "object is not callable" not in str(e):
raise e from None
else:
assert isinstance(noise, pnl.DistributionFunction)
if func[1] == DriftIntFun:
pytest.skip("DriftDiffusionIntegrator doesn't support functional noise")
if issubclass(func_class, (pnl.DriftDiffusionIntegrator, pnl.DriftOnASphereIntegrator)):
pytest.skip("{} doesn't support functional noise".format(func_class.componentName))

if 'DriftOnASphereIntegrator' in func[0].componentName:
if func_mode != 'Python':
pytest.skip("DriftOnASphereIntegrator not yet compiled")
params.update({'dimension':len(variable) + 1})
else:
if 'dimension' in params:
params.pop('dimension')

if 'AccumulatorIntegrator' in func[0].componentName:
params = {
**params,
'increment': RAND0_1,
}
params.pop('offset')

# If we are dealing with a DriftDiffusionIntegrator, noise and time_step_size defaults
# have changed since this test was created. Hard code their old values.
if 'DriftDiffusionIntegrator' in str(func[0]):
f = func[0](default_variable=variable, noise=np.sqrt(noise), time_step_size=1.0, **params)
else:
f = func[0](default_variable=variable, noise=noise, **params)
params = {**params, 'dimension': len(variable) + 1}

elif issubclass(func_class, pnl.AccumulatorIntegrator):
params = {**params, 'increment': RAND0_1}
params.pop('offset', None)

elif issubclass(func_class, pnl.DriftDiffusionIntegrator):
# If we are dealing with a DriftDiffusionIntegrator, noise and
# time_step_size defaults have changed since this test was created.
# Hard code their old values.
params = {**params, 'time_step_size': 1.0}
noise = np.sqrt(noise)

f = func_class(default_variable=variable, noise=noise, **params)
ex = pytest.helpers.get_func_execution(f, func_mode)

ex(variable)
ex(variable)
res = benchmark(ex, variable)
expected = func[1](f.initializer, variable, 3, noise, **params)

expected = func_res(f.initializer, variable, 3, noise, **params)
np.testing.assert_allclose(res, expected, rtol=1e-5, atol=1e-8)
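
The DriftOnASphereIntegrator entry shows the finer-grained use of the mark: attached to a single pytest.param, it skips only that parametrization's compiled variants while the other integrators keep exercising the LLVM path. A generic sketch of the idiom (names are illustrative):

    import pytest

    @pytest.mark.parametrize("case", [
        "supported_everywhere",
        pytest.param("compiled_path_missing", marks=pytest.mark.llvm_not_implemented),
    ])
    def test_sketch(case):
        ...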


tests/functions/test_memory.py (12 additions, 6 deletions)

@@ -34,7 +34,9 @@
     # (Functions.Buffer, test_var, {'rate':RAND1}, [[0.0],[0.0]]),
     pytest.param(Functions.Buffer, test_var[0], {'history':512, 'rate':RAND1, 'initializer':[test_var[0]]},
                  # TODO: Why is the first result using rate^2 ?
-                 [test_var[0] * RAND1 * RAND1, test_var[0] * RAND1], id="Buffer"),
+                 [test_var[0] * RAND1 * RAND1, test_var[0] * RAND1],
+                 marks=pytest.mark.llvm_not_implemented,
+                 id="Buffer"),
 
     # Tests using Mersenne-Twister as function PRNG
     pytest.param(Functions.DictionaryMemory, test_var, {'seed': module_seed},
@@ -71,15 +73,19 @@
     # ContentAddressableMemory
     pytest.param(Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.1, 'seed': module_seed},
                  np.zeros_like(test_var),
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory Low Retrieval"),
     pytest.param(Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'storage_prob':0.1, 'seed': module_seed},
                  np.zeros_like(test_var),
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory Low Storage"),
     pytest.param(Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.9, 'storage_prob':0.9, 'seed': module_seed},
                  [test_var[0], test_var[1]],
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory High Storage/Retrieval"),
     pytest.param(Functions.ContentAddressableMemory, test_var, {'initializer':test_initializer, 'rate':RAND1, 'seed': module_seed},
                  [test_var[0], test_var[1]],
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory Initializer"),
 
     # Tests using philox var
@@ -117,15 +123,19 @@
     # ContentAddressableMemory
     pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.1, 'seed': module_seed},
                  np.zeros_like(philox_var),
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory Low Retrieval Philox"),
     pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'storage_prob':0.01, 'seed': module_seed},
                  np.zeros_like(philox_var),
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory Low Storage Philox"),
     pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.98, 'storage_prob':0.98, 'seed': module_seed},
                  [philox_var[0], philox_var[1]],
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory High Storage/Retrieval Philox"),
     pytest.param(Functions.ContentAddressableMemory, philox_var, {'initializer':philox_initializer, 'rate':RAND1, 'seed': module_seed},
                  [philox_var[0], philox_var[1]],
+                 marks=pytest.mark.llvm_not_implemented,
                  id="ContentAddressableMemory Initializer Philox"),
 ]

@@ -134,11 +144,6 @@
 @pytest.mark.benchmark
 @pytest.mark.parametrize("func, variable, params, expected", test_data)
 def test_basic(func, variable, params, expected, benchmark, func_mode):
-    if func is Functions.Buffer and func_mode != 'Python':
-        pytest.skip("Not implemented")
-    if func is Functions.ContentAddressableMemory and func_mode != 'Python':
-        pytest.skip("Not implemented")
-
     benchmark.group = func.componentName
     f = func(default_variable=variable, **params)
     if variable is philox_var:
@@ -153,6 +158,7 @@ def test_basic(func, variable, params, expected, benchmark, func_mode):
# "duplicate_keys"
if len(variable) == 2:
EX([variable[0], variable[1] * 4])

res = benchmark(EX, variable)

# This still needs to use "allclose" as the key gets manipulated before
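
Moving the marks onto the individual cases lets the blanket skips above be deleted without losing coverage: the DictionaryMemory cases still run in compiled modes, while the Buffer and ContentAddressableMemory cases are skipped one by one. pytest.param also accepts a list when a case needs several marks, e.g. (a hypothetical combination):

    pytest.param(Functions.Buffer, ..., marks=[pytest.mark.llvm_not_implemented, pytest.mark.benchmark])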