
Commit

  - add PROBABILISTIC option for softmax_choice
jdcpni committed Oct 8, 2024
1 parent cd67070 commit 7f7117f
Showing 3 changed files with 32 additions and 26 deletions.
@@ -3188,7 +3188,7 @@ def _validate_adapt_entropy_weighting(self, adapt_entropy_weighting):
return f'must be a scalar greater than 0'

def _validate_output(self, output):
options = {ALL, MAX_VAL, MAX_INDICATOR, PROB}
options = {ALL, MAX_VAL, MAX_INDICATOR, PROB, PROB_INDICATOR}
if output in options:
return None
else:
@@ -3277,7 +3277,6 @@ def apply_softmax(self, input_value, gain, mask_threshold, output_type):
sm = v / np.sum(v, axis=0)

# Generate one-hot encoding based on selected output_type

if output_type in {MAX_VAL, MAX_INDICATOR}:
return self.one_hot_function(sm)
elif output_type in {PROB, PROB_INDICATOR}:
@@ -3316,8 +3315,8 @@ def _function(self,
if isinstance(gain, str) and gain == ADAPTIVE:
gain = self.adapt_gain(variable, context)
per_item = self._get_current_parameter_value(PER_ITEM, context)
# Compute softmax and assign to sm

# Compute softmax and assign to sm
if per_item and len(np.shape(variable)) > 1:
output = []
for item in variable:
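For reference, a minimal sketch of the distinction the branch above implements — deterministic one-hot outputs (*MAX_VAL*, *MAX_INDICATOR*) versus probabilistic ones (*PROB*, *PROB_INDICATOR*). The function name and the mask-threshold handling here are illustrative assumptions, not the library's exact implementation:

import numpy as np

def softmax_choice_sketch(x, gain=1.0, mask_threshold=None, output_type='ALL', rng=None):
    # Illustrative masked softmax (assumption: not PsyNeuLink's exact code)
    rng = rng or np.random.default_rng()
    x = np.asarray(x, dtype=float)
    v = np.exp(gain * (x - np.max(x)))                    # subtract max for numerical stability
    if mask_threshold is not None:
        v = np.where(np.abs(x) > mask_threshold, v, 0.0)  # zero out sub-threshold entries
    sm = v / np.sum(v)                                    # normalized softmax distribution
    one_hot = np.zeros_like(sm)
    if output_type == 'MAX_INDICATOR':                    # deterministic: 1 at the argmax
        one_hot[np.argmax(sm)] = 1.0
        return one_hot
    if output_type == 'PROB_INDICATOR':                   # stochastic: 1 at an index sampled from sm
        one_hot[rng.choice(len(sm), p=sm)] = 1.0
        return one_hot
    return sm                                             # ALL: return the full distribution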
36 changes: 16 additions & 20 deletions psyneulink/library/compositions/emcomposition.py
@@ -540,17 +540,16 @@
* *ARG_MAX*: entry with the largest dot product.
* *PROBABILISTIC*: probabilistically chosen entry, based on the softmax-transformed distribution of dot products.
.. warning::
Use of the *ARG_MAX* option is not compatible with learning, as it implements a discrete choice and thus is not
differentiable; use of this with `enable_learning <EMComposition.enable_learning>` set to ``True`` will generate
an error.
Use of the *ARG_MAX* and *PROBABILISTIC* options is not compatible with learning, as these implement a discrete
choice and thus are not differentiable; use of these with `enable_learning <EMComposition.enable_learning>` set
to ``True`` will generate an error.
.. technical_note::
The *WEIGHTED* option is passed as *ALL* to the **output** argument of the `SoftMax` Function, and
*ARG_MAX* is passed as *MAX_INDICATOR*; the *MAX_VAL* and *PROB* arguments are not currently supported.
COMMENT:
* *PROB*: probabilistically-chosen entry, based on the softmax transformation of the dot products.
COMMENT
The *WEIGHTED* option is passed as *ALL* to the **output** argument of the `SoftMax` Function, *ARG_MAX* is
passed as *MAX_INDICATOR*; *PROBABILISTIC* is passed as *PROB_INDICATOR*; and *MAX_VAL* is not currently supported.
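To make the three options concrete, a usage sketch (the memory template is taken from the tests below; the query-input access pattern `em.query_input_nodes[0]` is an assumption based on EMComposition's documented node names):

import psyneulink as pnl

memory = [[[1, .1, .1]], [[.1, 1, .1]], [[.1, .1, 1]]]
for choice in [pnl.WEIGHTED, pnl.ARG_MAX, pnl.PROBABILISTIC]:
    em = pnl.EMComposition(memory_template=memory,
                           softmax_choice=choice,
                           enable_learning=False)  # required for ARG_MAX / PROBABILISTIC
    # WEIGHTED: softmax-weighted combination of entries;
    # ARG_MAX: the single best-matching entry;
    # PROBABILISTIC: an entry sampled from the softmax distribution.
    result = em.run(inputs={em.query_input_nodes[0]: [[1, 0, 0]]})
    print(choice, result)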
.. _EMComposition_Learning:
@@ -1010,18 +1009,19 @@
from psyneulink.core.globals.parameters import Parameter, check_user_specified
from psyneulink.core.globals.keywords import \
(ADAPTIVE, ALL, AUTO, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX,
GAIN, IDENTITY_MATRIX, MAX_INDICATOR, MULTIPLICATIVE_PARAM, NAME, PARAMS, PRODUCT, PROJECTIONS,
GAIN, IDENTITY_MATRIX, MAX_INDICATOR, MULTIPLICATIVE_PARAM, NAME, PARAMS, PROB_INDICATOR, PRODUCT, PROJECTIONS,
RANDOM, SIZE, VARIABLE)
from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric_scalar
from psyneulink.core.globals.context import ContextFlags
from psyneulink.core.llvm import ExecutionMode


__all__ = ['EMComposition', 'WEIGHTED', 'ARG_MAX']
__all__ = ['EMComposition', 'WEIGHTED', 'ARG_MAX', 'PROBABILISTIC']

STORAGE_PROB = 'storage_prob'
WEIGHTED = ALL
ARG_MAX = MAX_INDICATOR
PROBABILISTIC = PROB_INDICATOR

QUERY_AFFIX = ' [QUERY]'
VALUE_AFFIX = ' [VALUE]'
@@ -1143,7 +1143,7 @@ class EMComposition(AutodiffComposition):
specifies the threshold used to mask out small values in the softmax calculation;
see *mask_threshold* under `Thresholding and Adaptive Gain <SoftMax_AdaptGain>` for details).
softmax_choice : WEIGHTED, ARG_MAX : default WEIGHTED
softmax_choice : WEIGHTED, ARG_MAX, PROBABILISTIC : default WEIGHTED
specifies how the softmax over dot products of keys and memories is used for retrieval; see `Softmax
normalize matches over fields <EMComposition_Processing>` description of each option.
@@ -1241,7 +1241,7 @@ class EMComposition(AutodiffComposition):
determines the threshold used to mask out small values in the softmax calculation;
see *mask_threshold* under `Thresholding and Adaptive Gain <SoftMax_AdaptGain>` for details).
softmax_choice : WEIGHTED or ARG_MAX
softmax_choice : WEIGHTED, ARG_MAX or PROBABILISTIC
determines how the softmax over dot products of keys and memories is used for retrieval; see `Softmax
normalize matches over fields <EMComposition_Processing>` description of each option.
@@ -1573,7 +1573,7 @@ def __init__(self,
normalize_memories:bool=True,
softmax_gain:Union[float, ADAPTIVE, CONTROL]=1.0,
softmax_threshold:Optional[float]=.001,
softmax_choice:Optional[Union[WEIGHTED, ARG_MAX]]=WEIGHTED,
softmax_choice:Optional[Union[WEIGHTED, ARG_MAX, PROBABILISTIC]]=WEIGHTED,
storage_prob:float=1.0,
memory_decay_rate:Union[float,AUTO]=AUTO,
enable_learning:Union[bool,list]=True,
@@ -2203,9 +2203,9 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_k
return match_nodes

def _validate_softmax_choice(self, softmax_choice, enable_learning):
if softmax_choice == ARG_MAX and enable_learning:
raise EMCompositionError(f"The ARG_MAX option for the 'softmax_choice' arg of '{self.name}' "
f"can not be used when 'enable_learning' is set to True; "
if softmax_choice in {ARG_MAX, PROBABILISTIC} and enable_learning:
raise EMCompositionError(f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg "
f"of '{self.name}' cannot be used when 'enable_learning' is set to True; "
f"use WEIGHTED or set 'enable_learning' to False.")
def _construct_softmax_nodes(self, memory_capacity, field_weights,
softmax_gain, softmax_threshold, softmax_choice)->list:
@@ -2515,10 +2515,6 @@ def _encode_memory(self, context=None):
# Assign updated matrix to Projection
self.retrieved_nodes[i].path_afferents[0].parameters.matrix.set(field_memories, context)

# 7/10/24 - FIX: WHY BOTHER WITH OVERRIDE IF NOTHING IS DONE:
def learn(self, *args, **kwargs)->list:
return super().learn(*args, **kwargs)

def _get_execution_mode(self, execution_mode):
"""Parse execution_mode argument and return a valid execution mode for the learn() method"""
if execution_mode is None:
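With this change, an incompatible combination fails at construction for both options. A sketch of the resulting behavior (the memory template is the one used in the tests below; the tests confirm the error is a `pnl.ComponentError`):

import psyneulink as pnl

try:
    pnl.EMComposition(memory_template=[[[1, .1, .1]], [[.1, 1, .1]], [[.1, .1, 1]]],
                      softmax_choice=pnl.PROBABILISTIC,
                      enable_learning=True)  # incompatible: discrete choice is not differentiable
except pnl.ComponentError as err:
    print(err)  # "...cannot be used when 'enable_learning' is set to True..."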
17 changes: 14 additions & 3 deletions tests/composition/test_emcomposition.py
@@ -233,7 +233,7 @@ def test_memory_fill(start, memory_fill):
test_memory_fill(start=repeat, memory_fill=memory_fill)

def test_softmax_choice(self):
for softmax_choice in [pnl.WEIGHTED, pnl.ARG_MAX]:
for softmax_choice in [pnl.WEIGHTED, pnl.ARG_MAX, pnl.PROBABILISTIC]:
em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]],
softmax_choice=softmax_choice,
enable_learning=False)
@@ -242,14 +242,25 @@ def test_softmax_choice(self):
np.testing.assert_allclose(result, [[0.21330295, 0.77339411, 0.21330295]])
if softmax_choice == pnl.ARG_MAX:
np.testing.assert_allclose(result, [[.1, 1, .1]])
if softmax_choice == pnl.PROBABILISTIC: # NOTE: actual stochasticity not tested here
np.testing.assert_allclose(result, [[.1, 1, .1]])

with pytest.raises(pnl.ComponentError) as error_text:
em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]],
softmax_choice=pnl.ARG_MAX)
assert ("The ARG_MAX option for the 'softmax_choice' arg of 'EM_Composition-2' can not be used "
"when 'enable_learning' is set to True; use WEIGHTED or set 'enable_learning' to False."
assert ("The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg of 'EM_Composition-3' cannot "
"be used when 'enable_learning' is set to True; use WEIGHTED or set 'enable_learning' to False."
in str(error_text.value))

for softmax_choice in [pnl.ARG_MAX, pnl.PROBABILISTIC]:
with pytest.raises(pnl.ComponentError) as error_text:
em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]],
softmax_choice=softmax_choice)
assert ("The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg of 'EM_Composition-5' cannot "
"be used when 'enable_learning' is set to True; use WEIGHTED or set 'enable_learning' to False."
in str(error_text.value))
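As the NOTE above observes, the PROBABILISTIC branch asserts a fixed retrieval and does not exercise the stochasticity itself. A hypothetical follow-up test, not part of this commit (storage_prob=0.0 keeps memory fixed across runs, and the assertion could flake for a sharply peaked softmax):

def test_probabilistic_choice_is_stochastic():
    em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]],
                       softmax_choice=pnl.PROBABILISTIC,
                       enable_learning=False,
                       storage_prob=0.0)  # avoid writing queries back into memory
    results = set()
    for _ in range(50):
        result = em.run(inputs={em.query_input_nodes[0]: [[1, 0, 0]]})
        results.add(tuple(np.round(np.ravel(result), 6)))
    assert len(results) > 1  # more than one distinct entry should be retrieved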


@pytest.mark.pytorch
class TestExecution:
