diff --git a/.github/actions/cleanup-pip-cache/action.yml b/.github/actions/cleanup-pip-cache/action.yml index 029dec1b84d..657a7ad0fa5 100644 --- a/.github/actions/cleanup-pip-cache/action.yml +++ b/.github/actions/cleanup-pip-cache/action.yml @@ -13,6 +13,6 @@ runs: for P in $CACHED; do # Remove cached and not installed if [ `echo $INSTALLED | grep -o $P | wc -l` == "0" ] ; then - pip cache remove -v $P + pip cache remove -v $P || true fi done diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index ea8f14e659d..3a2158c80c0 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -3,20 +3,33 @@ name: PsyNeuLink Docs CI on: push jobs: - build: + docs-build: runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: - python-version: [3.6, 3.7] # Doesn't work in 3.8 or 3.9 + python-version: [3.6, 3.7, 3.8] python-architecture: ['x64'] os: [ubuntu-latest, macos-latest, windows-latest] + outputs: + on_master: ${{ steps.on_master.outputs.on_master }} + steps: - name: Checkout sources uses: actions/checkout@v2 with: - fetch-depth: 10 + fetch-depth: 0 + + - name: Check if on master + id: on_master + shell: bash + run: | + git branch -a --contains $GITHUB_REF + git describe --always --tags + export ON_MASTER=$(git branch -a --contains $GITHUB_REF | grep -q '^ remotes/origin/master$' && echo "master" || echo "") + echo "Found out: ${ON_MASTER}" + echo ::set-output name=on_master::$ON_MASTER - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2.2.1 @@ -76,3 +89,61 @@ jobs: name: Documentation-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} retention-days: 1 path: pnl-html + + docs-deploy: + strategy: + fail-fast: false + matrix: + python-version: [3.7] + os: [ubuntu-latest] + + runs-on: ${{ matrix.os }} + needs: [docs-build] + if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/devel' || github.ref == 'refs/heads/docs' || (contains(github.ref, 'tags') && contains(needs.*.outputs.on_master, 'master')) + + steps: + - name: Checkout docs + uses: actions/checkout@v2 + with: + ref: gh-pages + + - name: Download branch docs + uses: actions/download-artifact@v2 + with: + name: Documentation-${{ matrix.os }}-${{ matrix.python-version }}-x64 + path: _built_docs/${{ github.ref }} + if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/devel' || github.ref == 'refs/heads/docs' + + - name: Update branch docs + shell: bash + run: | + mkdir -p branch + rm -rf "branch/${GITHUB_REF##*/}" + # Remove '.doctrees' and move to correct location + rm -rf "_built_docs/${GITHUB_REF}/.doctrees" + mv -f "_built_docs/${GITHUB_REF}" branch/ + if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/devel' || github.ref == 'refs/heads/docs' + + - name: Download main docs + uses: actions/download-artifact@v2 + with: + name: Documentation-${{ matrix.os }}-${{ matrix.python-version }}-x64 + # This overwrites files in current directory + if: contains(github.ref, 'tags') && contains(needs.*.outputs.on_master, 'master') + + - name: Update main docs + shell: bash + run: | + # Remove '.doctrees' + rm -rf ".doctrees" + if: contains(github.ref, 'tags') && contains(needs.*.outputs.on_master, 'master') + + - name: Commit docs changes + shell: bash + run: | + # Commit changes to git + git add . 
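+          # commit and push as the documentation bot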
+ git config user.name "Documentation Bot" + git config user.email "doc-bot@psyneulink.princeton.edu" + git commit -m "Docs changes for $GITHUB_REF $GITHUB_SHA" + git push diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index aed55b727ed..cdeaafaa8a9 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -21,7 +21,7 @@ jobs: os: [ubuntu-latest, macos-latest, windows-latest] include: # add 32-bit build on windows - - python-version: 3.6 + - python-version: 3.8 python-architecture: 'x86' os: windows-latest diff --git a/Scripts/Examples/LC Control Mechanism Composition.py b/Scripts/Examples/LC Control Mechanism Composition.py index 06b896d775c..7de913a8166 100644 --- a/Scripts/Examples/LC Control Mechanism Composition.py +++ b/Scripts/Examples/LC Control Mechanism Composition.py @@ -7,7 +7,7 @@ A = TransferMechanism(function=Logistic(gain=user_specified_gain), name='A') B = TransferMechanism(function=Logistic(gain=user_specified_gain), name='B') -# B.output_ports[0].value *= 0.0 # Reset after init | Doesn't matter here b/c default var = zero, no intercept +# B.output_ports[0].parameters.value.set(0.0, override=True) # Reset after init | Doesn't matter here b/c default var = zero, no intercept LC = LCControlMechanism( modulated_mechanisms=[A, B], diff --git a/dev_requirements.txt b/dev_requirements.txt index f81b68e74d0..9e88bd859c6 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,7 +1,7 @@ jupyter<=1.0.0 -pytest<6.2.2 +pytest<6.2.3 pytest-benchmark<=3.2.3 -pytest-cov<=2.10.1 +pytest-cov<2.11.2 pytest-helpers-namespace<=2019.1.8 pytest-profiling<=1.7.0 pytest-pycodestyle<=2.2.0 diff --git a/docs/source/Preferences.rst b/docs/source/Preferences.rst index 2920100d1f4..4f2c244404b 100644 --- a/docs/source/Preferences.rst +++ b/docs/source/Preferences.rst @@ -7,7 +7,8 @@ Standard prefereces: - paramValidation (bool): enables/disables run-time validation of the execute method of a Function object -- reportOutput (bool): enables/disables reporting of execution of execute method +- reportOutput ([bool, str]): enables/disables reporting of execution of execute method: + True prints input/output, 'params' or 'parameters' includes parameter values - log (bool): sets LogCondition for a given Component diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index ccad0a756fa..cd0630d8e4c 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -520,7 +520,7 @@ from psyneulink.core.globals.utilities import \ ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared,\ is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, prune_unused_args, \ - get_all_explicit_arguments, call_with_pruned_args, safe_equals + get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len from psyneulink.core.scheduling.condition import Never __all__ = [ @@ -650,7 +650,14 @@ def setter(self, value): f' for example, .{param.name}.base = {value}', FutureWarning, ) - getattr(self.parameters, p.name)._set(value, self.most_recent_context) + try: + getattr(self.parameters, p.name).set(value, self.most_recent_context) + except ParameterError as e: + if 'Pass override=True to force set.' in str(e): + raise ParameterError( + f"Parameter '{p.name}' is read-only. Set at your own risk." + f' Use .parameters.{p.name}.set with override=True to force set.' 
+ ) from None return property(getter).setter(setter) @@ -1002,7 +1009,10 @@ def _validate_variable(self, variable): return None def _parse_modulable(self, param_name, param_value): - from psyneulink.core.components.functions.distributionfunctions import DistributionFunction + from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base + from psyneulink.core.components.ports.modulatorysignals import ModulatorySignal + from psyneulink.core.components.projections.modulatory.modulatoryprojection import ModulatoryProjection_Base + # assume 2-tuple with class/instance as second item is a proper # modulatory spec, can possibly add in a flag on acceptable # classes in the future @@ -1018,14 +1028,10 @@ def _parse_modulable(self, param_name, param_value): ) ): value = param_value[0] - # assume a DistributionFunction is allowed to persist, for noise elif ( - ( - is_instance_or_subclass(param_value, Component) - and not is_instance_or_subclass( - param_value, - DistributionFunction - ) + is_instance_or_subclass( + param_value, + (ModulatoryMechanism_Base, ModulatorySignal, ModulatoryProjection_Base) ) or ( isinstance(param_value, str) @@ -1286,9 +1292,9 @@ def _get_compilation_state(self): if not hasattr(self, 'ports'): blacklist.add("value") def _is_compilation_state(p): - val = p.get() # memoize for this function - return val is not None and p.name not in blacklist and \ - (p.name in whitelist or isinstance(val, Component)) + #FIXME: This should use defaults instead of 'p.get' + return p.name not in blacklist and \ + (p.name in whitelist or isinstance(p.get(), Component)) return filter(_is_compilation_state, self.parameters) @@ -2040,8 +2046,9 @@ def _is_user_specified(parameter): if isinstance(val, Function): val.owner = self + val = p._parse(val) p._validate(val) - p.set(val, context=context, skip_history=True, override=True) + p._set(val, context=context, skip_history=True, override=True) if isinstance(p.default_value, Function): p.default_value.owner = p @@ -2704,7 +2711,7 @@ def _get_param_value_from_tuple(self, param_spec): return value - def _validate_function(self, function): + def _validate_function(self, function, context=None): """Check that either params[FUNCTION] and/or self.execute are implemented # FROM _validate_params: @@ -2752,7 +2759,7 @@ def _validate_function(self, function): or isinstance(function, types.MethodType) or is_instance_or_subclass(function, Function) ): - self.function = function + self.parameters.function._set(function, context) return # self.function is NOT OK, so raise exception else: @@ -2813,7 +2820,7 @@ def _instantiate_function(self, function, function_params=None, context=None): # Specification is a standard python function, so wrap as a UserDefnedFunction # Note: parameter_ports for function's parameters will be created in_instantiate_attributes_after_function if isinstance(function, types.FunctionType): - self.function = UserDefinedFunction(default_variable=function_variable, + function = UserDefinedFunction(default_variable=function_variable, custom_function=function, owner=self, context=context) @@ -2840,9 +2847,7 @@ def _instantiate_function(self, function, function_params=None, context=None): # class default functions should always be copied, otherwise anything this component # does with its function will propagate to anything else that wants to use # the default - if function.owner is None: - self.function = function - elif function.owner is self: + if function.owner is self: try: if 
function._is_pnl_inherent:
                        # This will most often occur if a Function instance is
@@ -2860,15 +2865,15 @@ def _instantiate_function(self, function, function_params=None, context=None):
                             ' psyneulinkhelp@princeton.edu or'
                             ' https://github.com/PrincetonUniversity/PsyNeuLink/issues'
                         )
-                    self.function = copy.deepcopy(function)
+                    function = copy.deepcopy(function)
                 except AttributeError:
-                    self.function = function
-            else:
-                self.function = copy.deepcopy(function)
+                    pass
+            elif function.owner is not None:
+                function = copy.deepcopy(function)

             # set owner first because needed for is_initializing calls
-            self.function.owner = self
-            self.function._update_default_variable(function_variable, context)
+            function.owner = self
+            function._update_default_variable(function_variable, context)

         # Specification is Function class
         # Note: parameter_ports for function's parameters will be created in_instantiate_attributes_after_function
@@ -2899,11 +2904,13 @@ def _instantiate_function(self, function, function_params=None, context=None):
                     pass

             _, kwargs = prune_unused_args(function.__init__, args=[], kwargs=kwargs_to_instantiate)
-            self.function = function(default_variable=function_variable, owner=self, **kwargs)
+            function = function(default_variable=function_variable, owner=self, **kwargs)

         else:
             raise ComponentError(f'Unsupported function type: {type(function)}, function={function}.')

+        self.parameters.function._set(function, context)
+
         # KAM added 6/14/18 for functions that do not pass their has_initializers status up to their owner via property
         # FIX: need comprehensive solution for has_initializers; need to determine whether ports affect mechanism's
         #      has_initializers status
@@ -3155,6 +3162,77 @@ def _get_current_parameter_value(self, parameter, context=None):

         return parameter._get(context)

+    def _try_execute_param(self, param, var, context=None):
+        def fill_recursively(arr, value, indices=()):
+            if arr.ndim == 0:
+                try:
+                    value = value(context=context)
+                except TypeError:
+                    try:
+                        value = value()
+                    except TypeError:
+                        pass
+                return value
+
+            try:
+                len_value = len(value)
+                len_arr = safe_len(arr)
+
+                if len_value > len_arr:
+                    if len_arr == len_value - 1:
+                        ignored_items_str = f'Item {len_value - 1}'
+                    else:
+                        ignored_items_str = f'The items {len_arr} to {len_value - 1}'
+
+                    warnings.warn(
+                        f'The length of {value} is greater than that of {arr}. '
+                        f'{ignored_items_str} will be ignored for index {indices}'
+                    )
+            except TypeError:
+                # if noise value is not an iterable, ignore shape warnings
+                pass
+
+            for i, _ in enumerate(arr):
+                new_indices = indices + (i,)  # for error reporting
+                try:
+                    arr[i] = fill_recursively(arr[i], value[i], new_indices)
+                except (IndexError, TypeError):
+                    arr[i] = fill_recursively(arr[i], value, new_indices)
+
+            return arr
+
+        var = convert_all_elements_to_np_array(var, cast_from=np.integer, cast_to=float)
+
+        # handle simple wrapping of a Component (e.g.
from ParameterPort in + # case of set after Component instantiation) + if ( + (isinstance(param, list) and len(param) == 1) + or (isinstance(param, np.ndarray) and param.shape == (1,)) + ): + if isinstance(param[0], Component): + param = param[0] + + # Currently most noise functions do not return noise in the same + # shape as their variable: + if isinstance(param, Component): + try: + if param.defaults.value.shape == var.shape: + return param(context=context) + except AttributeError: + pass + + # special case where var is shaped same as param, but with extra dims + # assign param elements to deepest dim of var (ex: param [1, 2, 3], var [[0, 0, 0]]) + try: + if param.shape != var.shape: + if param.shape == np.squeeze(var).shape: + param = param.reshape(var.shape) + except AttributeError: + pass + + fill_recursively(var, param) + return var + def _increment_execution_count(self, count=1): self.parameters.execution_count.set(self.execution_count + count, override=True) return self.execution_count @@ -3768,4 +3846,4 @@ def base(self): @base.setter def base(self, value): - self._parameter._set(value, self._owner.most_recent_context) + self._parameter.set(value, self._owner.most_recent_context) diff --git a/psyneulink/core/components/functions/statefulfunctions/memoryfunctions.py b/psyneulink/core/components/functions/statefulfunctions/memoryfunctions.py index 4a6e24e2dc4..980eb504e2a 100644 --- a/psyneulink/core/components/functions/statefulfunctions/memoryfunctions.py +++ b/psyneulink/core/components/functions/statefulfunctions/memoryfunctions.py @@ -25,6 +25,7 @@ from collections import deque import numpy as np +import numbers import typecheck as tc import warnings @@ -317,6 +318,8 @@ def _function(self, # Apply rate and/or noise, if they are specified, to all stored items if len(previous_value): + # TODO: remove this shape hack when buffer shapes made consistent + noise = np.reshape(noise, np.asarray(previous_value[0]).shape) previous_value = convert_to_np_array(previous_value) * rate + noise previous_value = deque(previous_value, maxlen=self.parameters.history._get(context)) @@ -1097,8 +1100,15 @@ def _function(self, # SO, WOULD HAVE TO RETURN ZEROS ON INIT AND THEN SUPPRESS AFTERWARDS, AS MOCKED UP BELOW memory = [[0]* self.parameters.key_size._get(context), [0]* self.parameters.val_size._get(context)] # Store variable to dict: - if noise: - key += noise + if noise is not None: + key = np.asarray(key, dtype=float) + if isinstance(noise, numbers.Number): + key += noise + else: + # assume array with same shape as variable + # TODO: does val need noise? 
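+                    # noise[KEYS] applies just the key portion of the [keys, vals]-shaped noise spec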
+ key += noise[KEYS] + if storage_prob == 1.0 or (storage_prob > 0.0 and storage_prob > random_state.rand()): self._store_memory(variable, context) diff --git a/psyneulink/core/components/functions/statefulfunctions/statefulfunction.py b/psyneulink/core/components/functions/statefulfunctions/statefulfunction.py index 4c2ab566931..89e67c059dc 100644 --- a/psyneulink/core/components/functions/statefulfunctions/statefulfunction.py +++ b/psyneulink/core/components/functions/statefulfunctions/statefulfunction.py @@ -30,7 +30,7 @@ from psyneulink.core.components.functions.distributionfunctions import DistributionFunction from psyneulink.core.globals.keywords import STATEFUL_FUNCTION_TYPE, STATEFUL_FUNCTION, NOISE, RATE from psyneulink.core.globals.parameters import Parameter -from psyneulink.core.globals.utilities import parameter_spec, iscompatible, object_has_single_value, convert_to_np_array, contains_type +from psyneulink.core.globals.utilities import parameter_spec, iscompatible, convert_to_np_array, contains_type from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.context import ContextFlags, handle_external_context @@ -199,7 +199,7 @@ class Parameters(Function_Base.Parameters): def _validate_noise(self, noise): if ( - isinstance(noise, collections.Iterable) + isinstance(noise, collections.abc.Iterable) # assume ComponentsMeta are functions and contains_type(noise, ComponentsMeta) ): @@ -387,58 +387,6 @@ def _validate_noise(self, noise): raise FunctionError("The elements of a noise list or array must be scalars or functions. " "{} is not a valid noise element for {}".format(noise[i], self.name)) - def _try_execute_param(self, param, var, context=None): - - # FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D BELOW] - param_shape = np.array(param).shape - if not len(param_shape): - param_shape = np.array(var).shape - # param is a list; if any element is callable, execute it - if isinstance(param, (np.ndarray, list)): - # NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths - # FIX: WHY FORCE 2d?? - param = np.atleast_2d(param) - for i in range(len(param)): - for j in range(len(param[i])): - try: - param[i][j] = param[i][j](context=context) - except TypeError: - try: - param[i][j] = param[i][j]() - except TypeError: - pass - try: - param = param.reshape(param_shape) - except ValueError: - if object_has_single_value(param): - param = np.full(param_shape, float(param)) - - # param is one function - elif callable(param): - # NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths - new_param = [] - # FIX: WHY FORCE 2d?? 
- for row in np.atleast_2d(var): - # for row in np.atleast_1d(var): - # for row in var: - new_row = [] - for item in row: - try: - val = param(context=context) - except TypeError: - val = param() - new_row.append(val) - new_param.append(new_row) - param = np.asarray(new_param) - # FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D ABOVE] - try: - if len(np.squeeze(param)): - param = param.reshape(param_shape) - except TypeError: - pass - - return param - def _instantiate_attributes_before_function(self, function=None, context=None): if not self.parameters.initializer._user_specified: self._initialize_previous_value(np.zeros_like(self.defaults.variable), context) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index 3b27e704331..14a133f3d04 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -2141,7 +2141,7 @@ def _instantiate_function(self, function, function_params=None, context=None): with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=UserWarning) self.function.output_type = FunctionOutputType.NP_2D_ARRAY - self.function.enable_output_type_conversion = True + self.function.parameters.enable_output_type_conversion._set(True, context) self.function._instantiate_value(context) @@ -2536,8 +2536,7 @@ def execute(self, if self.prefs.reportOutputPref and (context.execution_phase & ContextFlags.PROCESSING | ContextFlags.LEARNING): self._report_mechanism_execution( self.get_input_values(context), - self.parameters.values(), - self.output_port.parameters.value._get(context), + output=self.output_port.parameters.value._get(context), context=context ) @@ -3138,7 +3137,7 @@ def _report_mechanism_execution(self, input_val=None, params=None, output=None, input_val = self.get_input_values(context) if output is None: output = self.output_port.parameters.value._get(context) - params = params or self.parameters.values() + params = params or {p.name: p._get(context) for p in self.parameters} if 'mechanism' in self.name or 'Mechanism' in self.name: mechanism_string = ' ' @@ -3154,7 +3153,12 @@ def _report_mechanism_execution(self, input_val=None, params=None, output=None, print("\n\'{}\'{} executed:\n- input: {}".format(self.name, mechanism_string, input_string)) - if params: + try: + include_params = re.match('param(eter)?s?', self.reportOutputPref, flags=re.IGNORECASE) + except TypeError: + include_params = False + + if include_params: print("- params:") # Sort for consistency of output params_keys_sorted = sorted(params.keys()) @@ -3184,7 +3188,7 @@ def _report_mechanism_execution(self, input_val=None, params=None, output=None, for fct_param_name in func_params_keys_sorted: print("\t\t{}: {}". 
format(fct_param_name, - str(getattr(self.function.parameters, fct_param_name)).__str__().strip("[]"))) + str(getattr(self.function.parameters, fct_param_name)._get(context)).__str__().strip("[]"))) # FIX: kmantel: previous version would fail on anything but iterables of things that can be cast to floats # if you want more specific output, you can add conditional tests here diff --git a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py index f90579a3635..ea5895a27ed 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py @@ -1325,17 +1325,17 @@ def _instantiate_objective_mechanism(self, context=None): monitored_output_ports = [] - self.monitor_for_control = self.monitor_for_control or [] - if not isinstance(self.monitor_for_control, list): - self.monitor_for_control = [self.monitor_for_control] + monitor_for_control = self.monitor_for_control or [] + if not isinstance(monitor_for_control, list): + monitor_for_control = [monitor_for_control] # If objective_mechanism is used to specify OutputPorts to be monitored (legacy feature) # move them to monitor_for_control if isinstance(self.objective_mechanism, list): - self.monitor_for_control.extend(self.objective_mechanism) + monitor_for_control.extend(self.objective_mechanism) # Add items in monitor_for_control to monitored_output_ports - for i, item in enumerate(self.monitor_for_control): + for i, item in enumerate(monitor_for_control): # If it is already in the list received from System, ignore if item in monitored_output_ports: # NOTE: this can happen if ControlMechanisms is being constructed by System @@ -1389,7 +1389,7 @@ def _instantiate_objective_mechanism(self, context=None): # ASSIGN ATTRIBUTES self._objective_projection = projection_from_objective - self.monitor_for_control = self.monitored_output_ports + self.parameters.monitor_for_control._set(self.monitored_output_ports, context) def _instantiate_input_ports(self, context=None): @@ -1483,7 +1483,7 @@ def _instantiate_control_signal(self, control_signal, context=None): """ if self.output_ports is None: - self.output_ports = [] + self.parameters.output_ports._set([], context) control_signal = self._instantiate_control_signal_type(control_signal, context) control_signal.owner = self diff --git a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py index 0fb66d3214b..fcc39b33911 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py @@ -794,13 +794,15 @@ def _instantiate_input_ports(self, context=None): # If any features were specified (assigned to self.input_ports in __init__): if self.input_ports: - self.input_ports = _parse_shadow_inputs(self, self.input_ports) - self.input_ports = self._parse_feature_specs(self.input_ports, self.feature_function) + input_ports = _parse_shadow_inputs(self, self.input_ports) + input_ports = self._parse_feature_specs(input_ports, self.feature_function) # Insert primary InputPort for outcome from ObjectiveMechanism; # assumes this will be a single scalar value and must be named OUTCOME by convention of ControlSignal - self.input_ports.insert(0, outcome_input_port), + 
input_ports.insert(0, outcome_input_port), else: - self.input_ports = [outcome_input_port] + input_ports = [outcome_input_port] + + self.parameters.input_ports._set(input_ports, context) # Configure default_variable to comport with full set of input_ports self.defaults.variable, _ = self._handle_arg_input_ports(self.input_ports) diff --git a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py index 15ce9b4659e..a7cfc5a827a 100644 --- a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py @@ -1180,7 +1180,10 @@ def _instantiate_attributes_before_function(self, function=None, context=None): """ if self._error_sources: - self.input_ports = self.input_ports[:2] + [ERROR_SIGNAL] * len(self._error_sources) + self.parameters.input_ports._set( + self.input_ports[:2] + [ERROR_SIGNAL] * len(self._error_sources), + context + ) super()._instantiate_attributes_before_function(function=function, context=context) @@ -1247,9 +1250,13 @@ def _instantiate_output_ports(self, context=None): # Reassign learning_signals to capture any user_defined LearningSignals instantiated in call to super # and assign them to a ContentAddressableList - self.learning_signals = ContentAddressableList(component_type=LearningSignal, - list=[port for port in self.output_ports if - isinstance(port, LearningSignal)]) + self.parameters.learning_signals._set( + ContentAddressableList( + component_type=LearningSignal, + list=[port for port in self.output_ports if isinstance(port, LearningSignal)] + ), + context + ) # Initialize _error_signals; this is assigned for efficiency (rather than just using the property) # since it is used by the execute method diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index c9ab1644b59..2d6dac2c541 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -640,7 +640,7 @@ from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import \ - all_within_range, append_type_to_name, iscompatible, is_comparison_operator, convert_to_np_array + all_within_range, append_type_to_name, iscompatible, is_comparison_operator, convert_to_np_array, safe_equals from psyneulink.core.scheduling.condition import TimeScale from psyneulink.core.globals.registry import remove_instance_from_registry, register_instance @@ -1304,37 +1304,6 @@ def _validate_noise(self, noise): "function, or array/list of these.".format(noise, self.name)) - def _try_execute_param(self, param, var, context=None): - - # param is a list; if any element is callable, execute it - if isinstance(param, (np.ndarray, list)): - # NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths - param = np.atleast_2d(param) - for i in range(len(param)): - for j in range(len(param[i])): - if callable(param[i][j]): - try: - param[i][j] = param[i][j](context=context) - except TypeError: - param[i][j] = param[i][j]() - - # param is one function - elif callable(param): - # NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths - new_param = [] - for row in 
np.atleast_2d(var): - new_row = [] - for item in row: - try: - val = param(context=context) - except TypeError: - val = param() - new_row.append(val) - new_param.append(new_row) - param = new_param - - return param - def _instantiate_parameter_ports(self, function=None, context=None): # If function is a logistic, and clip has not been specified, bound it between 0 and 1 @@ -1364,11 +1333,12 @@ def _instantiate_output_ports(self, context=None): # then assign one OutputPort (with the default name, indexed by the number of the item) per item of variable if len(self.output_ports) == 1 and self.output_ports[0] == RESULTS: if len(self.defaults.variable) == 1: - self.output_ports = [RESULT] + output_ports = [RESULT] else: - self.output_ports = [] + output_ports = [] for i, item in enumerate(self.defaults.variable): - self.output_ports.append({NAME: f'{RESULT}-{i}', VARIABLE: (OWNER_VALUE, i)}) + output_ports.append({NAME: f'{RESULT}-{i}', VARIABLE: (OWNER_VALUE, i)}) + self.parameters.output_ports._set(output_ports, context) super()._instantiate_output_ports(context=context) # # Relabel first output_port: @@ -1382,7 +1352,7 @@ def _instantiate_output_ports(self, context=None): def _get_instantaneous_function_input(self, function_variable, noise, context=None): noise = self._try_execute_param(noise, function_variable, context=context) - if (np.array(noise) != 0).any(): + if noise is not None and not safe_equals(noise, 0): current_input = function_variable + noise else: current_input = function_variable @@ -1626,18 +1596,21 @@ def _instantiate_attributes_after_function(self, context=None): self.parameters.value.history_min_length = self._termination_measure_num_items_expected - 1 - def _report_mechanism_execution(self, input, params, output, context=None): + def _report_mechanism_execution(self, input, params=None, output=None, context=None): """Override super to report previous_input rather than input, and selected params """ # KAM Changed 8/29/17 print_input = self.previous_input --> print_input = input # because self.previous_input is not a valid attrib of TransferMechanism print_input = input - print_params = params.copy() - # Suppress reporting of range (not currently used) - del print_params[CLIP] + try: + params = params.copy() + # Suppress reporting of range (not currently used) + del params[CLIP] + except (AttributeError, KeyError): + pass - super()._report_mechanism_execution(input_val=print_input, params=print_params, context=context) + super()._report_mechanism_execution(input_val=print_input, params=params, context=context) @handle_external_context() def is_finished(self, context=None): diff --git a/psyneulink/core/components/ports/inputport.py b/psyneulink/core/components/ports/inputport.py index e7488b11904..0081447359a 100644 --- a/psyneulink/core/components/ports/inputport.py +++ b/psyneulink/core/components/ports/inputport.py @@ -992,7 +992,7 @@ def _parse_port_specific_specs(self, owner, port_dict, port_specific_spec): (port_spec, weights, exponents, connections) See Port._parse_port_specific_spec for additional info. -. + Returns: - port_spec: 1st item of tuple if it is a numeric value; otherwise None - params dict with WEIGHT, EXPONENT and/or PROJECTIONS entries if any of these was specified. 
@@ -1360,7 +1360,7 @@ def _instantiate_input_ports(owner, input_ports=None, reference_value=None, cont
     if context.source & (ContextFlags.METHOD | ContextFlags.COMMAND_LINE):
         owner.input_ports.extend(port_list)
     else:
-        owner.input_ports = port_list
+        owner.parameters.input_ports._set(port_list, context)

     # Assign value of require_projection_in_composition
     for port in owner.input_ports:
diff --git a/psyneulink/core/components/ports/outputport.py b/psyneulink/core/components/ports/outputport.py
index 1d4edd5d8ad..fde28f85ed3 100644
--- a/psyneulink/core/components/ports/outputport.py
+++ b/psyneulink/core/components/ports/outputport.py
@@ -1500,7 +1500,7 @@ def _instantiate_output_ports(owner, output_ports=None, context=None):
     if context.source & (ContextFlags.COMMAND_LINE | ContextFlags.METHOD):
         owner.output_ports.extend(port_list)
     else:
-        owner.output_ports = port_list
+        owner.parameters.output_ports._set(port_list, context)

     # Assign value of require_projection_in_composition
     for port in owner.output_ports:
diff --git a/psyneulink/core/components/projections/projection.py b/psyneulink/core/components/projections/projection.py
index f12f9fe464b..4c909a573b4 100644
--- a/psyneulink/core/components/projections/projection.py
+++ b/psyneulink/core/components/projections/projection.py
@@ -2052,13 +2052,18 @@ def _add_projection_to(receiver, port, projection_spec, context=None):

     # Update InputPort and input_ports
     if receiver.input_ports:
-        receiver.input_ports[input_port.name] = input_port
+        receiver.parameters.input_ports._get(context)[input_port.name] = input_port

     # No InputPort(s) yet, so create them
     else:
-        receiver.input_ports = ContentAddressableList(component_type=Port_Base,
-                                                      list=[input_port],
-                                                      name=receiver.name + '.input_ports')
+        receiver.parameters.input_ports._set(
+            ContentAddressableList(
+                component_type=Port_Base,
+                list=[input_port],
+                name=receiver.name + '.input_ports'
+            ),
+            context
+        )

     return input_port._instantiate_projections_to_port(projections=projection_spec, context=context)

@@ -2160,8 +2165,13 @@ def _add_projection_from(sender, port, projection_spec, receiver, context=None):

     # No OutputPort(s) yet, so create them
     except AttributeError:
         from psyneulink.core.components.ports.port import Port_Base
-        sender.output_ports = ContentAddressableList(component_type=Port_Base,
-                                                     list=[output_port],
-                                                     name=sender.name + '.output_ports')
+        sender.parameters.output_ports._set(
+            ContentAddressableList(
+                component_type=Port_Base,
+                list=[output_port],
+                name=sender.name + '.output_ports'
+            ),
+            context
+        )

     output_port._instantiate_projections_to_port(projections=projection_spec, context=context)
diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py
index 3d95f68679b..0b9e7d813b5 100644
--- a/psyneulink/core/compositions/composition.py
+++ b/psyneulink/core/compositions/composition.py
@@ -9568,6 +9568,15 @@ def parse_params_dict(params_dict):

     def _after_agent_rep_execution(self, context=None):
         pass

+    def _update_default_variable(self, *args, **kwargs):
+        # NOTE: Composition should not really have a default_variable,
+        # but does as a result of subclassing from Component.
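+        # (a Composition's input structure is determined by its INPUT nodes, not by a default_variable)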
+ # Subclassing may not be necessary anymore + raise TypeError(f'_update_default_variable unsupported for {self.__class__.__name__}') + + def _get_parsed_variable(self, *args, **kwargs): + raise TypeError(f'_get_parsed_variable unsupported for {self.__class__.__name__}') + # ****************************************************************************************************************** # LLVM diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 4917f3808a8..db2dfaffda1 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1235,10 +1235,35 @@ def set(self, value, context=None, override=False, skip_history=False, skip_log= kwargs any additional arguments to be passed to this `Parameter`'s `setter` if it exists """ + from psyneulink.core.components.component import Component + if not override and self.read_only: raise ParameterError('Parameter \'{0}\' is read-only. Set at your own risk. Pass override=True to force set.'.format(self.name)) - return self._set(self._parse(value), context, skip_history, skip_log, **kwargs) + value = self._set(self._parse(value), context, skip_history, skip_log, **kwargs) + + try: + value = value.__self__ + except AttributeError: + pass + + if isinstance(value, Component): + owner = self._owner._owner + if value not in owner._parameter_components: + if not owner.is_initializing: + value._initialize_from_context(context) + owner._parameter_components.add(value) + + try: + value._update_default_variable(owner._get_parsed_variable(self, context=context), context) + except TypeError as e: + if ( + f'unsupported for {value.__class__.__name__}' not in str(e) + and f'unsupported for {owner.__class__.__name__}' not in str(e) + ): + raise + + return value def _set(self, value, context, skip_history=False, skip_log=False, **kwargs): if not self.stateful: @@ -1283,23 +1308,6 @@ def _set_value(self, value, execution_id=None, context=None, skip_history=False, # set value self.values[execution_id] = value - try: - value = value.__self__ - except AttributeError: - pass - - try: - if ( - value in self._owner._owner._parameter_components - or context.execution_phase is ContextFlags.IDLE - ): - pass - else: - value._initialize_from_context(context) - self._owner._owner._parameter_components.add(value) - except (AttributeError, TypeError): - pass - @handle_external_context() def delete(self, context=None): try: diff --git a/psyneulink/core/globals/preferences/basepreferenceset.py b/psyneulink/core/globals/preferences/basepreferenceset.py index 9243531eafe..9f53d9467b5 100644 --- a/psyneulink/core/globals/preferences/basepreferenceset.py +++ b/psyneulink/core/globals/preferences/basepreferenceset.py @@ -132,7 +132,8 @@ class BasePreferenceSet(PreferenceSet): Implement the following preferences: - verbose (bool): enables/disables reporting of (non-exception) warnings and system function - paramValidation (bool): enables/disables run-time validation of the execute method of a Function object - - reportOutput (bool): enables/disables reporting of execution of execute method + - reportOutput ([bool, str]): enables/disables reporting of execution of execute method: + True prints input/output, 'params' or 'parameters' includes parameter values - log (bool): sets LogCondition for a given Component - functionRunTimeParams (Modulation): uses run-time params to modulate execute method params Implement the following preference levels: diff --git a/psyneulink/core/globals/utilities.py 
b/psyneulink/core/globals/utilities.py
index fb90ee4d198..a3c3c7714e8 100644
--- a/psyneulink/core/globals/utilities.py
+++ b/psyneulink/core/globals/utilities.py
@@ -1793,7 +1793,7 @@ def gen_friendly_comma_str(items):


 def contains_type(
-    arr: collections.Iterable,
+    arr: collections.abc.Iterable,
     typ: typing.Union[type, typing.Tuple[type, ...]]
 ) -> bool:
     """
diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py
index b9adbbd08e9..c332f9ced97 100644
--- a/psyneulink/core/llvm/codegen.py
+++ b/psyneulink/core/llvm/codegen.py
@@ -10,6 +10,7 @@
 import ast
 import warnings
 import numpy as np
+from functools import reduce

 from llvmlite import ir
 from contextlib import contextmanager
@@ -61,7 +62,9 @@ def _exp(x):
             helpers.call_elementwise_operation(self.ctx, self.builder, x, helpers.exp, output_ptr)
             return output_ptr

-        def _max(x):
+        # numpy's max function differs greatly from python's builtin max
+        # see: https://numpy.org/doc/stable/reference/generated/numpy.amax.html#numpy.amax
+        def _max_numpy(x):
             assert helpers.is_vector(x) or helpers.is_2d_matrix(x), "Attempted to call max on invalid variable! Only 1-d and 2-d lists are supported!"
             curr = builder.alloca(ctx.float_ty)
             builder.store(ctx.float_ty('NaN'), curr)
@@ -72,6 +75,28 @@ def _max(x):
                     builder.store(element, curr)
             return curr

+        # see: https://docs.python.org/3/library/functions.html#max
+        def _max(*args):
+            if len(args) == 1 and helpers.is_vector(args[0]):
+                curr = builder.alloca(ctx.float_ty)
+                builder.store(ctx.float_ty('NaN'), curr)
+                for (element_ptr,) in helpers.recursive_iterate_arrays(ctx, builder, args[0]):
+                    element = builder.load(element_ptr)
+                    greater = builder.fcmp_unordered('>', element, builder.load(curr))
+                    with builder.if_then(greater):
+                        builder.store(element, curr)
+                return curr
+            elif len(args) > 1 and all(a.type == args[0].type for a in args):
+                curr = builder.alloca(ctx.float_ty)
+                builder.store(ctx.float_ty('NaN'), curr)
+                for element in args:
+                    if helpers.is_pointer(element):
+                        element = builder.load(element)
+                    greater = builder.fcmp_unordered('>', element, builder.load(curr))
+                    with builder.if_then(greater):
+                        builder.store(element, curr)
+                return curr
+            assert False, "Attempted to call max with invalid arguments!"
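+        # e.g. builtin semantics: max([1.0, 3.0, 2.0]) == 3.0 and max(1, 2, 3) == 3,
+        # whereas np.max (see _max_numpy above) reduces over every element of a 1-d or 2-d array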
self.register = { "sum": _list_sum, "len": _len, @@ -90,7 +115,7 @@ def _max(x): 'less_equal': self._generate_fcmp_handler(self.ctx, self.builder, "<="), 'greater': self._generate_fcmp_handler(self.ctx, self.builder, ">"), 'greater_equal': self._generate_fcmp_handler(self.ctx, self.builder, ">="), - "max": _max, + "max": _max_numpy, } for k, v in func_globals.items(): @@ -351,6 +376,23 @@ def visit_Attribute(self, node): if node.attr == "shape": shape = helpers.get_array_shape(val) return ir.ArrayType(self.ctx.float_ty, len(shape))(shape) + elif node.attr == "flatten": + def flatten(): + shape = helpers.get_array_shape(val) + flattened_size = reduce(lambda x, y: x * y, shape) + flattened_ty = ir.ArrayType(self.ctx.float_ty, flattened_size) + flattened_array = self.builder.alloca(flattened_ty) + index_var = self.builder.alloca(self.ctx.int32_ty, name="flattened_index_var_loc") + self.builder.store(self.ctx.int32_ty(0), index_var) + for (array_ptr,) in helpers.recursive_iterate_arrays(self.ctx, self.builder, val): + index = self.builder.load(index_var, name="flattened_index_var") + flattened_array_ptr = self.builder.gep(flattened_array, [self.ctx.int32_ty(0), index]) + array_val = self.builder.load(array_ptr) + self.builder.store(array_val, flattened_array_ptr) + index = self.builder.add(index, self.ctx.int32_ty(1), name="flattened_index_var_inc") + self.builder.store(index, index_var) + return flattened_array + return flatten elif node.attr == "astype": def astype(ty): def _convert(ctx, builder, x): diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index 0752cfbe8df..67264f01f37 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -12,6 +12,7 @@ from contextlib import contextmanager from ctypes import util +from ..scheduling.condition import All, AllHaveRun, Always, AtPass, AtTrial, EveryNCalls, BeforeNCalls, AtNCalls, AfterNCalls, Never, Not, WhenFinished, WhenFinishedAny, WhenFinishedAll from .debug import debug_env @@ -196,15 +197,15 @@ def is_pointer(x): def is_floating_point(x): type_t = getattr(x, "type", x) # dereference pointer - if is_pointer(x): - type_t = x.type.pointee + if is_pointer(type_t): + type_t = type_t.pointee return isinstance(type_t, (ir.DoubleType, ir.FloatType, ir.HalfType)) def is_integer(x): type_t = getattr(x, "type", x) # dereference pointer - if is_pointer(x): - type_t = x.type.pointee + if is_pointer(type_t): + type_t = type_t.pointee return isinstance(type_t, ir.IntType) def is_scalar(x): @@ -212,20 +213,20 @@ def is_scalar(x): def is_vector(x): type_t = getattr(x, "type", x) - if is_pointer(x): - type_t = x.type.pointee + if is_pointer(type_t): + type_t = type_t.pointee return isinstance(type_t, ir.ArrayType) and is_scalar(type_t.element) def is_2d_matrix(x): type_t = getattr(x, "type", x) - if is_pointer(x): - type_t = x.type.pointee + if is_pointer(type_t): + type_t = type_t.pointee return isinstance(type_t, ir.ArrayType) and is_vector(type_t.element) def is_boolean(x): type_t = getattr(x, "type", x) - if is_pointer(x): - type_t = x.type.pointee + if is_pointer(type_t): + type_t = type_t.pointee return isinstance(type_t, ir.IntType) and type_t.width == 1 def get_array_shape(x): @@ -321,9 +322,9 @@ def __init__(self, ctx, composition): self._zero = ctx.int32_ty(0) if ctx is not None else None def get_private_condition_struct_type(self, composition): - time_stamp_struct = ir.LiteralStructType([self.ctx.int32_ty, - self.ctx.int32_ty, - self.ctx.int32_ty]) + time_stamp_struct = 
ir.LiteralStructType([self.ctx.int32_ty, # Run + self.ctx.int32_ty, # Pass + self.ctx.int32_ty]) # Step structure = ir.LiteralStructType([ time_stamp_struct, # current time stamp @@ -361,15 +362,16 @@ def bump_ts(self, builder, cond_ptr, count=(0, 0, 1)): Indices greater than that of the one are zeroed. """ - # Validate count tuple + # Only one element should be non-zero assert count.count(0) == len(count) - 1 # Get timestruct pointer ts_ptr = builder.gep(cond_ptr, [self._zero, self._zero, self._zero]) ts = builder.load(ts_ptr) + assert len(ts.type) == len(count) # Update run, pass, step of ts - for idx in range(3): + for idx in range(len(ts.type)): if all(v == 0 for v in count[:idx]): el = builder.extract_value(ts, idx) el = builder.add(el, self.ctx.int32_ty(count[idx])) @@ -382,22 +384,27 @@ def bump_ts(self, builder, cond_ptr, count=(0, 0, 1)): def ts_compare(self, builder, ts1, ts2, comp): assert comp == '<' - part_eq = [] - part_cmp = [] - for element in range(3): + # True if all elements to the left of the current one are equal + prefix_eq = self.ctx.bool_ty(1) + result = self.ctx.bool_ty(0) + + assert ts1.type == ts2.type + for element in range(len(ts1.type)): a = builder.extract_value(ts1, element) b = builder.extract_value(ts2, element) - part_eq.append(builder.icmp_signed('==', a, b)) - part_cmp.append(builder.icmp_signed(comp, a, b)) - trial = builder.and_(builder.not_(part_eq[0]), part_cmp[0]) - run = builder.and_(part_eq[0], - builder.and_(builder.not_(part_eq[1]), part_cmp[1])) - step = builder.and_(builder.and_(part_eq[0], part_eq[1]), - part_cmp[2]) + # Use existing prefix_eq to construct expression + # for the current element + element_comp = builder.icmp_signed(comp, a, b) + current_comp = builder.and_(prefix_eq, element_comp) + result = builder.or_(result, current_comp) - return builder.or_(trial, builder.or_(run, step)) + # Update prefix_eq + element_eq = builder.icmp_signed('==', a, b) + prefix_eq = builder.and_(prefix_eq, element_eq) + + return result def __get_node_status_ptr(self, builder, cond_ptr, node): node_idx = self.ctx.int32_ty(self.composition.nodes.index(node)) @@ -449,7 +456,6 @@ def generate_ran_this_trial(self, builder, cond_ptr, node): def generate_sched_condition(self, builder, condition, cond_ptr, node, is_finished_callbacks): - from psyneulink.core.scheduling.condition import All, AllHaveRun, Always, AtPass, AtTrial, EveryNCalls, BeforeNCalls, AtNCalls, AfterNCalls, Never, Not, WhenFinished, WhenFinishedAny, WhenFinishedAll if isinstance(condition, Always): return ir.IntType(1)(1) @@ -458,8 +464,8 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, is_finish return ir.IntType(1)(0) elif isinstance(condition, Not): - condition = condition.condition - return builder.not_(self.generate_sched_condition(builder, condition, cond_ptr, node, is_finished_callbacks)) + orig_condition = self.generate_sched_condition(builder, condition.condition, cond_ptr, node, is_finished_callbacks) + return builder.not_(orig_condition) elif isinstance(condition, All): agg_cond = ir.IntType(1)(1) @@ -475,7 +481,6 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, is_finish dependencies = condition.args run_cond = ir.IntType(1)(1) - array_ptr = builder.gep(cond_ptr, [self._zero, self._zero, self.ctx.int32_ty(1)]) for node in dependencies: node_ran = self.generate_ran_this_trial(builder, cond_ptr, node) run_cond = builder.and_(run_cond, node_ran) @@ -499,10 +504,7 @@ def generate_sched_condition(self, builder, condition, cond_ptr, 
node, is_finish elif isinstance(condition, EveryNCalls): target, count = condition.args - target_idx = self.ctx.int32_ty(self.composition.nodes.index(target)) - - array_ptr = builder.gep(cond_ptr, [self._zero, self._zero, self.ctx.int32_ty(1)]) - target_status = builder.load(builder.gep(array_ptr, [self._zero, target_idx])) + target_status = builder.load(self.__get_node_status_ptr(builder, cond_ptr, target)) # Check number of runs target_runs = builder.extract_value(target_status, 0, target.name + " runs") @@ -522,42 +524,20 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, is_finish elif isinstance(condition, BeforeNCalls): target, count = condition.args - target_idx = self.ctx.int32_ty(self.composition.nodes.index(target)) - - array_ptr = builder.gep(cond_ptr, [self._zero, self._zero, self.ctx.int32_ty(1)]) - target_status = builder.load(builder.gep(array_ptr, [self._zero, target_idx])) + target_status = builder.load(self.__get_node_status_ptr(builder, cond_ptr, target)) # Check number of runs target_runs = builder.extract_value(target_status, 0, target.name + " runs") - less_than_call_count = builder.icmp_unsigned('<', target_runs, self.ctx.int32_ty(count)) - - # Check that we have not run yet - my_time_stamp = self.__get_node_ts(builder, cond_ptr, node) - target_time_stamp = self.__get_node_ts(builder, cond_ptr, target) - ran_after_me = self.ts_compare(builder, my_time_stamp, target_time_stamp, '<') - - # Return: target.calls % N == 0 AND me.last_time < target.last_time - return builder.and_(less_than_call_count, ran_after_me) + return builder.icmp_unsigned('<', target_runs, self.ctx.int32_ty(count)) elif isinstance(condition, AtNCalls): target, count = condition.args - target_idx = self.ctx.int32_ty(self.composition.nodes.index(target)) - - array_ptr = builder.gep(cond_ptr, [self._zero, self._zero, self.ctx.int32_ty(1)]) - target_status = builder.load(builder.gep(array_ptr, [self._zero, target_idx])) + target_status = builder.load(self.__get_node_status_ptr(builder, cond_ptr, target)) # Check number of runs target_runs = builder.extract_value(target_status, 0, target.name + " runs") - less_than_call_count = builder.icmp_unsigned('==', target_runs, self.ctx.int32_ty(count)) - - # Check that we have not run yet - my_time_stamp = self.__get_node_ts(builder, cond_ptr, node) - target_time_stamp = self.__get_node_ts(builder, cond_ptr, target) - ran_after_me = self.ts_compare(builder, my_time_stamp, target_time_stamp, '<') - - # Return: target.calls % N == 0 AND me.last_time < target.last_time - return builder.and_(less_than_call_count, ran_after_me) + return builder.icmp_unsigned('==', target_runs, self.ctx.int32_ty(count)) elif isinstance(condition, AfterNCalls): target, count = condition.args @@ -569,15 +549,7 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, is_finish # Check number of runs target_runs = builder.extract_value(target_status, 0, target.name + " runs") - less_than_call_count = builder.icmp_unsigned('>=', target_runs, self.ctx.int32_ty(count)) - - # Check that we have not run yet - my_time_stamp = self.__get_node_ts(builder, cond_ptr, node) - target_time_stamp = self.__get_node_ts(builder, cond_ptr, target) - ran_after_me = self.ts_compare(builder, my_time_stamp, target_time_stamp, '<') - - # Return: target.calls % N == 0 AND me.last_time < target.last_time - return builder.and_(less_than_call_count, ran_after_me) + return builder.icmp_unsigned('>=', target_runs, self.ctx.int32_ty(count)) elif isinstance(condition, 
WhenFinished): # The first argument is the target node diff --git a/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py b/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py index 749412881c0..63ffa31415d 100644 --- a/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py @@ -798,7 +798,7 @@ def _instantiate_output_ports(self, context=None): ctl_sig_projs = [] for mech, mult_param_name in zip(self.modulated_mechanisms, multiplicative_param_names): ctl_sig_projs.append((mult_param_name, mech)) - self.control = [{PROJECTIONS: ctl_sig_projs}] + self.parameters.control._set([{PROJECTIONS: ctl_sig_projs}], context) self.parameters.control_allocation.default_value = self.value[0] super()._instantiate_output_ports(context=context) diff --git a/psyneulink/library/models/Nieuwenhuis2005Model.py b/psyneulink/library/models/Nieuwenhuis2005Model.py index e5ef9d79d0b..08e35d82837 100644 --- a/psyneulink/library/models/Nieuwenhuis2005Model.py +++ b/psyneulink/library/models/Nieuwenhuis2005Model.py @@ -75,7 +75,7 @@ decision_layer.set_log_conditions('value') # Log value of the decision layer for output_port in decision_layer.output_ports: - output_port.value *= 0.0 # Set initial output values for decision layer to 0 + output_port.parameters.value.set(output_port.value * 0.0, override=True) # Set initial output values for decision layer to 0 # Create Response Layer --- [ Target1, Target2 ] response_layer = pnl.LCAMechanism( @@ -93,7 +93,7 @@ response_layer.set_log_conditions('RESULT') # Log RESULT of the response layer for output_port in response_layer.output_ports: - output_port.value *= 0.0 # Set initial output values for response layer to 0 + output_port.parameters.value.set(output_port.value * 0.0, override=True) # Set initial output values for response layer to 0 # Connect mechanisms -------------------------------------------------------------------------------------------------- # Weight matrix from Input Layer --> Decision Layer @@ -164,7 +164,7 @@ # Set initial gain to G + k*initial_w, when the System runs the very first time, # since the decison layer executes before the LC and hence needs one initial gain value to start with. 
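+# NOTE: OutputPort.value is a read-only Parameter, so it is set via parameters.value.set(..., override=True) below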
for output_port in LC.output_ports: - output_port.value *= G + k * initial_w + output_port.parameters.value.set(output_port.value * (G + k * initial_w), override=True) task = pnl.Composition() task.add_linear_processing_pathway(decision_pathway) @@ -212,10 +212,10 @@ LC_results = LC.log.nparray()[1][1] # get logged results LC_results_w = np.zeros([trials]) # get LC_results_w for i in range(trials): - LC_results_w[i] = LC_results[4][i + 1][2][0][0] + LC_results_w[i] = LC_results[5][i + 1][2][0][0] LC_results_v = np.zeros([trials]) # get LC_results_v for i in range(trials): - LC_results_v[i] = LC_results[4][i + 1][1][0][0] + LC_results_v[i] = LC_results[5][i + 1][1][0][0] def h_v(v, C, d): # Compute h(v) diff --git a/setup.cfg b/setup.cfg index 192250dc538..a45aae96561 100644 --- a/setup.cfg +++ b/setup.cfg @@ -17,6 +17,7 @@ addopts = --pydocstyle --pycodestyle --strict-markers + --strict-config --ignore=Scripts markers = @@ -57,7 +58,8 @@ markers = model: Tests based on existing models pytorch: Tests using Torch -pytest_plugins = ['pytest_profiling', 'helpers_namespace', 'benchmark'] +# These are needed for test fixtures or default parameters +required_plugins = pytest-benchmark pytest-cov pytest-helpers-namespace pytest-pycodestyle pytest-pydocstyle pytest-xdist xfail_strict = True diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 25a751ad2ed..6fe921ee9e7 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -1797,7 +1797,7 @@ def test_simple_loop(self): D.set_log_conditions("OutputPort-0") cycle_nodes = [B, C, D] for cycle_node in cycle_nodes: - cycle_node.output_ports[0].value = [1.0] + cycle_node.output_ports[0].parameters.value.set([1.0], override=True) comp.run(inputs={A: [1.0]}) expected_values = {A: 1.0, diff --git a/tests/control/test_gilzenrat.py b/tests/control/test_gilzenrat.py index ce5535cd997..a2dab0b6f39 100644 --- a/tests/control/test_gilzenrat.py +++ b/tests/control/test_gilzenrat.py @@ -49,7 +49,7 @@ def test_previous_value_stored(self): initial_value=np.array([[1.0]])) C = Composition(pathways=[G]) - G.output_port.value = [0.0] + G.output_port.parameters.value.set([0.0], override=True) # - - - - - LCAMechanism integrator functions - - - - - # X = previous_value + (rate * previous_value + variable) * self.time_step_size + noise diff --git a/tests/functions/test_accumulator_integrator.py b/tests/functions/test_accumulator_integrator.py index 5f73d06bb69..12045180d7c 100644 --- a/tests/functions/test_accumulator_integrator.py +++ b/tests/functions/test_accumulator_integrator.py @@ -160,7 +160,7 @@ def test_accumulator_standalone_noise_function_in_array(self): A() A() val = A() - expected_val = [[40.0, 0.2480800486427607, 80.0]] + expected_val = [[40.0, -0.43300219, 80.0]] for i in range(len(val)): for j in range(len(val[i])): assert np.allclose(expected_val[i][j], val[i][j]) diff --git a/tests/functions/test_buffer.py b/tests/functions/test_buffer.py index ed017f2e898..f56ee3b94df 100644 --- a/tests/functions/test_buffer.py +++ b/tests/functions/test_buffer.py @@ -154,6 +154,7 @@ def test_buffer_as_function_of_processing_mech(self, benchmark): history=3)) val = P.execute(1.0) + # NOTE: actual output is [0, [[1]]] assert np.allclose(np.asfarray(val), [[0., 1.]]) if benchmark.enabled: benchmark(P.execute, 5.0) diff --git a/tests/functions/test_user_defined_func.py b/tests/functions/test_user_defined_func.py index 9573c2cd0ba..a74f90c1769 100644 --- 
diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py
index 25a751ad2ed..6fe921ee9e7 100644
--- a/tests/composition/test_composition.py
+++ b/tests/composition/test_composition.py
@@ -1797,7 +1797,7 @@ def test_simple_loop(self):
         D.set_log_conditions("OutputPort-0")
         cycle_nodes = [B, C, D]
         for cycle_node in cycle_nodes:
-            cycle_node.output_ports[0].value = [1.0]
+            cycle_node.output_ports[0].parameters.value.set([1.0], override=True)
 
         comp.run(inputs={A: [1.0]})
         expected_values = {A: 1.0,
diff --git a/tests/control/test_gilzenrat.py b/tests/control/test_gilzenrat.py
index ce5535cd997..a2dab0b6f39 100644
--- a/tests/control/test_gilzenrat.py
+++ b/tests/control/test_gilzenrat.py
@@ -49,7 +49,7 @@ def test_previous_value_stored(self):
                          initial_value=np.array([[1.0]]))
         C = Composition(pathways=[G])
 
-        G.output_port.value = [0.0]
+        G.output_port.parameters.value.set([0.0], override=True)
 
         # - - - - - LCAMechanism integrator functions - - - - -
         # X = previous_value + (rate * previous_value + variable) * self.time_step_size + noise
diff --git a/tests/functions/test_accumulator_integrator.py b/tests/functions/test_accumulator_integrator.py
index 5f73d06bb69..12045180d7c 100644
--- a/tests/functions/test_accumulator_integrator.py
+++ b/tests/functions/test_accumulator_integrator.py
@@ -160,7 +160,7 @@ def test_accumulator_standalone_noise_function_in_array(self):
         A()
         A()
         val = A()
-        expected_val = [[40.0, 0.2480800486427607, 80.0]]
+        expected_val = [[40.0, -0.43300219, 80.0]]
         for i in range(len(val)):
             for j in range(len(val[i])):
                 assert np.allclose(expected_val[i][j], val[i][j])
diff --git a/tests/functions/test_buffer.py b/tests/functions/test_buffer.py
index ed017f2e898..f56ee3b94df 100644
--- a/tests/functions/test_buffer.py
+++ b/tests/functions/test_buffer.py
@@ -154,6 +154,7 @@ def test_buffer_as_function_of_processing_mech(self, benchmark):
                                                   history=3))
         val = P.execute(1.0)
 
+        # NOTE: actual output is [0, [[1]]]
         assert np.allclose(np.asfarray(val), [[0., 1.]])
         if benchmark.enabled:
             benchmark(P.execute, 5.0)
diff --git a/tests/functions/test_user_defined_func.py b/tests/functions/test_user_defined_func.py
index 9573c2cd0ba..a74f90c1769 100644
--- a/tests/functions/test_user_defined_func.py
+++ b/tests/functions/test_user_defined_func.py
@@ -443,6 +443,10 @@ def myFunction(variable):
     ("SHAPE", [[1, 3]], [1, 2]),
     ("ASTYPE_FLOAT", [1], [1.0]),
     ("ASTYPE_INT", [-1.5], [-1.0]),
+    ("NP_MAX", [0.0, 0.0], 0),
+    ("NP_MAX", [1.0, 2.0], 2),
+    ("NP_MAX", [[2.0, 1.0], [6.0, 2.0]], 6),
+    ("FLATTEN", [[1.0, 2.0], [3.0, 4.0]], [1.0, 2.0, 3.0, 4.0])
])
@pytest.mark.parametrize("bin_execute", ['Python',
                                          pytest.param('LLVM', marks=pytest.mark.llvm),
@@ -466,7 +470,12 @@ def myFunction(variable):
         # return types cannot be integers, so we cast back to float and check for truncation
         def myFunction(variable):
             return variable.astype(int).astype(float)
-
+    elif op == "NP_MAX":
+        def myFunction(variable):
+            return np.max(variable)
+    elif op == "FLATTEN":
+        def myFunction(variable):
+            return variable.flatten()
     U = UserDefinedFunction(custom_function=myFunction, default_variable=variable)
     if bin_execute == 'LLVM':
         e = pnlvm.execution.FuncExecution(U).execute
@@ -504,9 +513,8 @@ def myFunction(variable, param1, param2):
     ("LEN", [1.0, 3.0], 2),
     ("LEN", [[1.0], [3.0]], 2),
     ("LEN_TUPLE", [0, 0], 2),
-    ("MAX", [0.0, 0.0], 0),
-    ("MAX", [1.0, 2.0], 2),
-    ("MAX", [[2.0, 1.0], [6.0, 2.0]], 6),
+    ("MAX_MULTI", [1,], 6),
+    ("MAX", [1.0, 3.0, 2.0], 3.0),
])
@pytest.mark.parametrize("bin_execute", ['Python',
                                          pytest.param('LLVM', marks=pytest.mark.llvm),
@@ -525,7 +533,11 @@ def myFunction(variable):
             return len((1,2))
     elif op == "MAX":
         def myFunction(variable):
-            return np.max(variable)
+            return max(variable)
+    elif op == "MAX_MULTI":
+        # special cased, since passing in multiple variables without a closure is hard
+        def myFunction(_):
+            return max(1, 2, 3, 4, 5, 6, -1, -2)
 
     U = UserDefinedFunction(custom_function=myFunction, default_variable=variable)
     if bin_execute == 'LLVM':
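The reshuffled MAX cases above separate two operations with different semantics: np.max reduces over every element of an array, while the builtin max iterates over the first axis only. A short numpy-only sketch of the behaviors the new NP_MAX, MAX, and FLATTEN cases pin down (values taken from the parametrizations):

import numpy as np

v = np.array([[2.0, 1.0], [6.0, 2.0]])
assert np.max(v) == 6.0                            # NP_MAX: reduces across both axes
assert max([1.0, 3.0, 2.0]) == 3.0                 # MAX: builtin max over a 1d sequence
assert list(v.flatten()) == [2.0, 1.0, 6.0, 2.0]   # FLATTEN
# builtin max over the 2d array would compare whole rows elementwise and raise
# an "ambiguous truth value" error, which is presumably why the 2d cases moved to NP_MAX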
diff --git a/tests/llvm/test_helpers.py b/tests/llvm/test_helpers.py
index 7ce013332ae..e3f771f749a 100644
--- a/tests/llvm/test_helpers.py
+++ b/tests/llvm/test_helpers.py
@@ -228,18 +228,16 @@ class TestHelperTypegetters:
     FLOAT_PTR_TYPE = pnlvm.ir.PointerType(FLOAT_TYPE)
     DOUBLE_TYPE = pnlvm.ir.DoubleType()
     DOUBLE_PTR_TYPE = pnlvm.ir.PointerType(DOUBLE_TYPE)
-    DOUBLE_VECTOR_TYPE = pnlvm.ir.ArrayType(pnlvm.ir.DoubleType(), 1)
+    DOUBLE_VECTOR_TYPE = pnlvm.ir.ArrayType(DOUBLE_TYPE, 1)
     DOUBLE_VECTOR_PTR_TYPE = pnlvm.ir.PointerType(DOUBLE_VECTOR_TYPE)
-    DOUBLE_MATRIX_TYPE = pnlvm.ir.ArrayType(pnlvm.ir.ArrayType(pnlvm.ir.DoubleType(), 1), 1)
+    DOUBLE_MATRIX_TYPE = pnlvm.ir.ArrayType(pnlvm.ir.ArrayType(DOUBLE_TYPE, 1), 1)
     DOUBLE_MATRIX_PTR_TYPE = pnlvm.ir.PointerType(DOUBLE_MATRIX_TYPE)
     INT_TYPE = pnlvm.ir.IntType(32)
-    INT_PTR_TYPE = pnlvm.ir.PointerType(pnlvm.ir.IntType(32))
+    INT_PTR_TYPE = pnlvm.ir.PointerType(INT_TYPE)
     BOOL_TYPE = pnlvm.ir.IntType(1)
     BOOL_PTR_TYPE = pnlvm.ir.PointerType(BOOL_TYPE)
 
     @pytest.mark.llvm
-    @pytest.mark.parametrize('mode', ['CPU',
-                                      pytest.param('PTX', marks=pytest.mark.cuda)])
     @pytest.mark.parametrize('ir_type,expected', [
         (FLOAT_TYPE, 0),
         (FLOAT_PTR_TYPE, 1),
@@ -254,39 +252,11 @@ class TestHelperTypegetters:
         (BOOL_TYPE, 0),
         (BOOL_PTR_TYPE, 1),
     ], ids=str)
-    def test_helper_is_pointer(self, mode, ir_type, expected):
-        with pnlvm.LLVMBuilderContext() as ctx:
-            func_ty = ir.FunctionType(ir.VoidType(), [ir.IntType(32).as_pointer()])
-
-            custom_name = ctx.get_unique_name("is_pointer")
-            function = ir.Function(ctx.module, func_ty, name=custom_name)
-            out = function.args[0]
-            block = function.append_basic_block(name="entry")
-            builder = ir.IRBuilder(block)
-
-            variable = builder.load(builder.alloca(ir_type))
-            if pnlvm.helpers.is_pointer(variable):
-                builder.store(out.type.pointee(1), out)
-            else:
-                builder.store(out.type.pointee(0), out)
-
-            builder.ret_void()
-
-        bin_f = pnlvm.LLVMBinaryFunction.get(custom_name)
-        if mode == 'CPU':
-            res = bin_f.byref_arg_types[0](-1)
-            bin_f(ctypes.byref(res))
-            res = res.value
-        else:
-            res = np.array([-1], dtype=np.int32)
-            bin_f.cuda_wrap_call(res)
-            res = res[0]
-
-        assert res == expected
+    def test_helper_is_pointer(self, ir_type, expected):
+        assert pnlvm.helpers.is_pointer(ir_type) == expected
+        assert pnlvm.helpers.is_pointer(ir_type(None)) == expected
 
     @pytest.mark.llvm
-    @pytest.mark.parametrize('mode', ['CPU',
-                                      pytest.param('PTX', marks=pytest.mark.cuda)])
     @pytest.mark.parametrize('ir_type,expected', [
         (FLOAT_TYPE, 1),
         (FLOAT_PTR_TYPE, 1),
@@ -301,39 +271,11 @@ def test_helper_is_pointer(self, mode, ir_type, expected):
         (BOOL_TYPE, 1),
         (BOOL_PTR_TYPE, 1),
     ], ids=str)
-    def test_helper_is_scalar(self, mode, ir_type, expected):
-        with pnlvm.LLVMBuilderContext() as ctx:
-            func_ty = ir.FunctionType(ir.VoidType(), [ir.IntType(32).as_pointer()])
-
-            custom_name = ctx.get_unique_name("is_scalar")
-            function = ir.Function(ctx.module, func_ty, name=custom_name)
-            out = function.args[0]
-            block = function.append_basic_block(name="entry")
-            builder = ir.IRBuilder(block)
-
-            variable = builder.load(builder.alloca(ir_type))
-            if pnlvm.helpers.is_scalar(variable):
-                builder.store(out.type.pointee(1), out)
-            else:
-                builder.store(out.type.pointee(0), out)
-
-            builder.ret_void()
-
-        bin_f = pnlvm.LLVMBinaryFunction.get(custom_name)
-        if mode == 'CPU':
-            res = bin_f.byref_arg_types[0](-1)
-            bin_f(ctypes.byref(res))
-            res = res.value
-        else:
-            res = np.array([-1], dtype=np.int32)
-            bin_f.cuda_wrap_call(res)
-            res = res[0]
-
-        assert res == expected
+    def test_helper_is_scalar(self, ir_type, expected):
+        assert pnlvm.helpers.is_scalar(ir_type) == expected
+        assert pnlvm.helpers.is_scalar(ir_type(None)) == expected
 
     @pytest.mark.llvm
-    @pytest.mark.parametrize('mode', ['CPU',
-                                      pytest.param('PTX', marks=pytest.mark.cuda)])
     @pytest.mark.parametrize('ir_type,expected', [
         (FLOAT_TYPE, 1),
         (FLOAT_PTR_TYPE, 1),
@@ -348,39 +290,30 @@ def test_helper_is_scalar(self, mode, ir_type, expected):
         (BOOL_TYPE, 0),
         (BOOL_PTR_TYPE, 0),
     ], ids=str)
-    def test_helper_is_floating_point(self, mode, ir_type, expected):
-        with pnlvm.LLVMBuilderContext() as ctx:
-            func_ty = ir.FunctionType(ir.VoidType(), [ir.IntType(32).as_pointer()])
-
-            custom_name = ctx.get_unique_name("is_floating_point")
-            function = ir.Function(ctx.module, func_ty, name=custom_name)
-            out = function.args[0]
-            block = function.append_basic_block(name="entry")
-            builder = ir.IRBuilder(block)
-
-            variable = builder.load(builder.alloca(ir_type))
-            if pnlvm.helpers.is_floating_point(variable):
-                builder.store(out.type.pointee(1), out)
-            else:
-                builder.store(out.type.pointee(0), out)
-
-            builder.ret_void()
-
-        bin_f = pnlvm.LLVMBinaryFunction.get(custom_name)
-        if mode == 'CPU':
-            res = bin_f.byref_arg_types[0](-1)
-            bin_f(ctypes.byref(res))
-            res = res.value
-        else:
-            res = np.array([-1], dtype=np.int32)
-            bin_f.cuda_wrap_call(res)
-            res = res[0]
-
-        assert res == expected
+    def test_helper_is_floating_point(self, ir_type, expected):
+        assert pnlvm.helpers.is_floating_point(ir_type) == expected
+        assert pnlvm.helpers.is_floating_point(ir_type(None)) == expected
+
+    @pytest.mark.llvm
+    @pytest.mark.parametrize('ir_type,expected', [
+        (FLOAT_TYPE, 0),
+        (FLOAT_PTR_TYPE, 0),
+        (DOUBLE_TYPE, 0),
+        (DOUBLE_PTR_TYPE, 0),
+        (DOUBLE_VECTOR_TYPE, 0),
+        (DOUBLE_VECTOR_PTR_TYPE, 0),
+        (DOUBLE_MATRIX_TYPE, 0),
+        (DOUBLE_MATRIX_PTR_TYPE, 0),
+        (INT_TYPE, 1),
+        (INT_PTR_TYPE, 1),
+        (BOOL_TYPE, 1),
+        (BOOL_PTR_TYPE, 1),
+    ], ids=str)
+    def test_helper_is_integer(self, ir_type, expected):
+        assert pnlvm.helpers.is_integer(ir_type) == expected
+        assert pnlvm.helpers.is_integer(ir_type(None)) == expected
 
     @pytest.mark.llvm
-    @pytest.mark.parametrize('mode', ['CPU',
-                                      pytest.param('PTX', marks=pytest.mark.cuda)])
     @pytest.mark.parametrize('ir_type,expected', [
         (FLOAT_TYPE, 0),
         (FLOAT_PTR_TYPE, 0),
@@ -395,39 +328,11 @@ def test_helper_is_floating_point(self, mode, ir_type, expected):
         (BOOL_TYPE, 0),
         (BOOL_PTR_TYPE, 0),
     ], ids=str)
-    def test_helper_is_vector(self, mode, ir_type, expected):
-        with pnlvm.LLVMBuilderContext() as ctx:
-            func_ty = ir.FunctionType(ir.VoidType(), [ir.IntType(32).as_pointer()])
-
-            custom_name = ctx.get_unique_name("is_vector")
-            function = ir.Function(ctx.module, func_ty, name=custom_name)
-            out = function.args[0]
-            block = function.append_basic_block(name="entry")
-            builder = ir.IRBuilder(block)
-
-            variable = builder.load(builder.alloca(ir_type))
-            if pnlvm.helpers.is_vector(variable):
-                builder.store(out.type.pointee(1), out)
-            else:
-                builder.store(out.type.pointee(0), out)
-
-            builder.ret_void()
-
-        bin_f = pnlvm.LLVMBinaryFunction.get(custom_name)
-        if mode == 'CPU':
-            res = bin_f.byref_arg_types[0](-1)
-            bin_f(ctypes.byref(res))
-            res = res.value
-        else:
-            res = np.array([-1], dtype=np.int32)
-            bin_f.cuda_wrap_call(res)
-            res = res[0]
-
-        assert res == expected
+    def test_helper_is_vector(self, ir_type, expected):
+        assert pnlvm.helpers.is_vector(ir_type) == expected
+        assert pnlvm.helpers.is_vector(ir_type(None)) == expected
 
     @pytest.mark.llvm
-    @pytest.mark.parametrize('mode', ['CPU',
-                                      pytest.param('PTX', marks=pytest.mark.cuda)])
     @pytest.mark.parametrize('ir_type,expected', [
         (FLOAT_TYPE, 0),
         (FLOAT_PTR_TYPE, 0),
@@ -442,39 +347,11 @@ def test_helper_is_vector(self, mode, ir_type, expected):
         (BOOL_TYPE, 0),
         (BOOL_PTR_TYPE, 0),
     ], ids=str)
-    def test_helper_is_2d_matrix(self, mode, ir_type, expected):
-        with pnlvm.LLVMBuilderContext() as ctx:
-            func_ty = ir.FunctionType(ir.VoidType(), [ir.IntType(32).as_pointer()])
-
-            custom_name = ctx.get_unique_name("is_2d_matrix")
-            function = ir.Function(ctx.module, func_ty, name=custom_name)
-            out = function.args[0]
-            block = function.append_basic_block(name="entry")
-            builder = ir.IRBuilder(block)
-
-            variable = builder.load(builder.alloca(ir_type))
-            if pnlvm.helpers.is_2d_matrix(variable):
-                builder.store(out.type.pointee(1), out)
-            else:
-                builder.store(out.type.pointee(0), out)
-
-            builder.ret_void()
-
-        bin_f = pnlvm.LLVMBinaryFunction.get(custom_name)
-        if mode == 'CPU':
-            res = bin_f.byref_arg_types[0](-1)
-            bin_f(ctypes.byref(res))
-            res = res.value
-        else:
-            res = np.array([-1], dtype=np.int32)
-            bin_f.cuda_wrap_call(res)
-            res = res[0]
-
-        assert res == expected
+    def test_helper_is_2d_matrix(self, ir_type, expected):
+        assert pnlvm.helpers.is_2d_matrix(ir_type) == expected
+        assert pnlvm.helpers.is_2d_matrix(ir_type(None)) == expected
 
     @pytest.mark.llvm
-    @pytest.mark.parametrize('mode', ['CPU',
-                                      pytest.param('PTX', marks=pytest.mark.cuda)])
     @pytest.mark.parametrize('ir_type,expected', [
         (FLOAT_TYPE, 0),
         (FLOAT_PTR_TYPE, 0),
@@ -489,35 +366,9 @@ def test_helper_is_2d_matrix(self, mode, ir_type, expected):
         (BOOL_TYPE, 1),
         (BOOL_PTR_TYPE, 1),
     ], ids=str)
-    def test_helper_is_boolean(self, mode, ir_type, expected):
-        with pnlvm.LLVMBuilderContext() as ctx:
-            func_ty = ir.FunctionType(ir.VoidType(), [ir.IntType(32).as_pointer()])
-
-            custom_name = ctx.get_unique_name("is_boolean")
-            function = ir.Function(ctx.module, func_ty, name=custom_name)
-            out = function.args[0]
-            block = function.append_basic_block(name="entry")
-            builder = ir.IRBuilder(block)
-
-            variable = builder.load(builder.alloca(ir_type))
-            if pnlvm.helpers.is_boolean(variable):
-                builder.store(out.type.pointee(1), out)
-            else:
-                builder.store(out.type.pointee(0), out)
-
-            builder.ret_void()
-
-        bin_f = pnlvm.LLVMBinaryFunction.get(custom_name)
-        if mode == 'CPU':
-            res = bin_f.byref_arg_types[0](-1)
-            bin_f(ctypes.byref(res))
-            res = res.value
-        else:
-            res = np.array([-1], dtype=np.int32)
-            bin_f.cuda_wrap_call(res)
-            res = res[0]
-
-        assert res == expected
+    def test_helper_is_boolean(self, ir_type, expected):
+        assert pnlvm.helpers.is_boolean(ir_type) == expected
+        assert pnlvm.helpers.is_boolean(ir_type(None)) == expected
 
     @pytest.mark.llvm
     @pytest.mark.parametrize('ir_type,expected', [
@@ -535,8 +386,7 @@ def test_helper_get_array_shape(self, ir_type, expected):
         (DOUBLE_MATRIX_TYPE, (1,1)),
     ], ids=str)
     def test_helper_array_from_shape(self, ir_type, shape):
-        with pnlvm.LLVMBuilderContext() as ctx:
-            assert ir_type == pnlvm.helpers.array_from_shape(shape, ctx.float_ty)
+        assert ir_type == pnlvm.helpers.array_from_shape(shape, self.DOUBLE_TYPE)
 
     @pytest.mark.llvm
     @pytest.mark.parametrize('mode', ['CPU',
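The rewrite above drops the per-type probe kernels (and the CPU/PTX mode axis) in favor of calling the type predicates directly, both on llvmlite IR types and on constants of those types, which is what the ir_type(None) form constructs. A minimal llvmlite-only sketch of the two forms; the helper calls in the comments mirror the parametrizations above rather than being run here:

from llvmlite import ir

double_vec = ir.ArrayType(ir.DoubleType(), 4)  # the LLVM type [4 x double]
zero_vec = double_vec(None)                    # a zeroinitializer constant of that type
# per the tests above, the helpers now accept either form:
#   pnlvm.helpers.is_vector(double_vec) -> True
#   pnlvm.helpers.is_vector(zero_vec)   -> True
print(double_vec, zero_vec.type)  # both print [4 x double]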
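The three tests added above pin down the accepted shapes for function-valued noise: a one-element list broadcast over the variable, one generator per element, and a generator shaped like the variable itself. A sketch of the middle case, assuming a PsyNeuLink install; the printed numbers are whatever the installed version's seeded RNG yields (the test above expects [[10.302846, 10.302846, 10.302846]]):

import psyneulink as pnl

I = pnl.IntegratorMechanism(
    function=pnl.SimpleIntegrator(
        default_variable=[[0, 0, 0]],
        # identically seeded generators, so every element receives the same draw
        noise=[pnl.NormalDist(seed=0), pnl.NormalDist(seed=0), pnl.NormalDist(seed=0)],
    ),
)
print(I.execute([10, 10, 10]))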
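test_noise_variations hinges on a basic RNG property: two generators constructed from the same seed produce identical streams, so once matching RandomState objects are assigned, the two mechanisms' noisy outputs must agree draw for draw. The property itself, in plain numpy:

import numpy as np

rs1, rs2 = np.random.RandomState([0]), np.random.RandomState([0])
for _ in range(5):
    # identical seeds -> identical draws, which is what makes the
    # assert_equal over repeated execute() calls in the test sound
    assert rs1.normal() == rs2.normal()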
diff --git a/tests/mechanisms/test_transfer_mechanism.py b/tests/mechanisms/test_transfer_mechanism.py
index 5d5b79ac5bb..987de3d7450 100644
--- a/tests/mechanisms/test_transfer_mechanism.py
+++ b/tests/mechanisms/test_transfer_mechanism.py
@@ -210,7 +210,7 @@ def test_transfer_mech_array_var_normal_array_noise(self):
         )
         T.reset_stateful_function_when = Never()
         val = T.execute([0, 0, 0, 0])
-        expected = [0.6202001216069017, 0.840166034615641, 0.7279826246296707, -1.5678942459349325]
+        expected = [[-1.56404341, -3.01320403, -1.22503678, 1.3093712]]
         assert np.allclose(np.asfarray(val[0]), expected)
 
     @pytest.mark.mechanism