Commit 05e6a6a

emcomposition.py
  WEIGHTED -> WEIGHTED_AVG

jdcpni committed Oct 10, 2024
1 parent ff518ef, commit 05e6a6a

Showing 2 changed files with 23 additions and 22 deletions.
29 changes: 15 additions & 14 deletions psyneulink/library/compositions/emcomposition.py
@@ -536,7 +536,7 @@
   <EMComposition.softmax_nodes>` is used, with the dot products of queries and keys, to generate a retrieved item;
   the following are the options that can be used and the retrieved value they produce:
-  * *WEIGHTED*: softmax-weighted average of entries, based on their dot products with the key(s); this is the default;
+  * *WEIGHTED_AVG* (default): softmax-weighted average of entries, based on their dot products with the key(s).
   * *ARG_MAX*: entry with the largest dot product.
@@ -545,12 +545,13 @@
   .. warning::
      Use of the *ARG_MAX* and *PROBABILISTIC* options is not compatible with learning, as these implement a discrete
      choice and thus are not differentiable. Constructing an EMComposition with **softmax_choice** set to either of
-     these options and **enable_learning** set to True will generate a warning, and calling the EMComposition's
-     `learn <Composition.learn>` method will generate an error; it must be changed to *WEIGHTED* to execute learning.
+     these options and **enable_learning** set to True will generate a warning, and calling the EMComposition's `learn
+     <Composition.learn>` method will generate an error; it must be changed to *WEIGHTED_AVG* to execute learning.

   .. technical_note::
-     The *WEIGHTED* option is passed as *ALL* to the **output** argument of the `SoftMax` Function, *ARG_MAX* is
-     passed as *MAX_INDICATOR*; *PROBALISTIC* is passed as *PROB_INDICATOR*; and *MAX_VAL* is not currently supported.
+     The *WEIGHTED_AVG* option is passed as *ALL* to the **output** argument of the `SoftMax` Function, *ARG_MAX* is
+     passed as *ARG_MAX_INDICATOR*; and *PROBABILISTIC* is passed as *PROB_INDICATOR*; the other SoftMax options are
+     not currently supported.

 .. _EMComposition_Learning:
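For reference, a minimal sketch of the renamed option in use, mirroring the updated test at the end of this commit (the memory_template, query, and expected values are copied from tests/composition/test_emcomposition.py; assumes a build that includes this change)::

    import numpy as np
    import psyneulink as pnl
    from psyneulink.library.compositions.emcomposition import EMComposition

    # WEIGHTED_AVG (the renamed default) retrieves a softmax-weighted
    # average of all entries, so the result need not equal any one entry.
    em = EMComposition(memory_template=[[[1, .1, .1]], [[1, .1, .1]], [[.1, .1, 1]]],
                       softmax_choice=pnl.WEIGHTED_AVG,
                       enable_learning=False)
    result = em.run(inputs={em.query_input_nodes[0]: [[1, 0, 0]]})
    np.testing.assert_allclose(result, [[0.93016008, 0.1, 0.16983992]])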
@@ -1019,10 +1020,10 @@
 from psyneulink.core.llvm import ExecutionMode


-__all__ = ['EMComposition', 'WEIGHTED', 'PROBABILISTIC']
+__all__ = ['EMComposition', 'WEIGHTED_AVG', 'PROBABILISTIC']

 STORAGE_PROB = 'storage_prob'
-WEIGHTED = ALL
+WEIGHTED_AVG = ALL
 PROBABILISTIC = PROB_INDICATOR

 QUERY_AFFIX = ' [QUERY]'
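Since the module defines WEIGHTED_AVG as a direct alias of the SoftMax keyword ALL (and PROBABILISTIC of PROB_INDICATOR), the rename changes only the public name, not the value forwarded to SoftMax. A hedged sketch, assuming these keywords are re-exported at the package top level as with pnl.ARG_MAX in the tests below::

    import psyneulink as pnl

    # The aliases above pass through unchanged to SoftMax's 'output' argument
    # (see the technical_note in the first hunk of this diff).
    assert pnl.WEIGHTED_AVG == pnl.ALL
    assert pnl.PROBABILISTIC == pnl.PROB_INDICATOR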
@@ -1145,7 +1146,7 @@ class EMComposition(AutodiffComposition):
     specifies the threshold used to mask out small values in the softmax calculation;
     see *mask_threshold* under `Thresholding and Adaptive Gain <SoftMax_AdaptGain>` for details).

-    softmax_choice : WEIGHTED, ARG_MAX, PROBABILISTIC : default WEIGHTED
+    softmax_choice : WEIGHTED_AVG, ARG_MAX, PROBABILISTIC : default WEIGHTED_AVG
     specifies how the softmax over dot products of keys and memories is used for retrieval;
     see `softmax_choice <EMComposition_Softmax_Choice>` for a description of each option.
@@ -1243,7 +1244,7 @@ class EMComposition(AutodiffComposition):
     determines the threshold used to mask out small values in the softmax calculation;
     see *mask_threshold* under `Thresholding and Adaptive Gain <SoftMax_AdaptGain>` for details).

-    softmax_choice : WEIGHTED, ARG_MAX or PROBABILISTIC
+    softmax_choice : WEIGHTED_AVG, ARG_MAX or PROBABILISTIC
     determines how the softmax over dot products of keys and memories is used for retrieval;
     see `softmax_choice <EMComposition_Softmax_Choice>` for a description of each option.
@@ -1473,7 +1474,7 @@ class Parameters(AutodiffComposition.Parameters):
     softmax_choice
     see `softmax_choice <EMComposition.softmax_choice>`

-    :default value: WEIGHTED
+    :default value: WEIGHTED_AVG
     :type: ``keyword``

     softmax_threshold
@@ -1497,7 +1498,7 @@ class Parameters(AutodiffComposition.Parameters):
     normalize_memories = Parameter(True)
     softmax_gain = Parameter(1.0, modulable=True)
     softmax_threshold = Parameter(.001, modulable=True, specify_none=True)
-    softmax_choice = Parameter(WEIGHTED, modulable=False, specify_none=True)
+    softmax_choice = Parameter(WEIGHTED_AVG, modulable=False, specify_none=True)
     storage_prob = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM])
     memory_decay_rate = Parameter(AUTO, modulable=True)
     enable_learning = Parameter(True, structural=True)
@@ -1575,7 +1576,7 @@ def __init__(self,
              normalize_memories:bool=True,
              softmax_gain:Union[float, ADAPTIVE, CONTROL]=1.0,
              softmax_threshold:Optional[float]=.001,
-             softmax_choice:Optional[Union[WEIGHTED, ARG_MAX, PROBABILISTIC]]=WEIGHTED,
+             softmax_choice:Optional[Union[WEIGHTED_AVG, ARG_MAX, PROBABILISTIC]]=WEIGHTED_AVG,
              storage_prob:float=1.0,
              memory_decay_rate:Union[float,AUTO]=AUTO,
              enable_learning:Union[bool,list]=True,
@@ -2208,7 +2209,7 @@ def _validate_softmax_choice(self, softmax_choice, enable_learning):
     if softmax_choice in {ARG_MAX, PROBABILISTIC} and enable_learning:
         warnings.warn(f"The 'softmax_choice' arg of '{self.name}' is set to {softmax_choice} with "
                       f"'enable_learning' set to True; this will generate an error if its 'learn' "
-                      f"method is called; set 'softmax_choice' to WEIGHTED to use learning.")
+                      f"method is called; set 'softmax_choice' to WEIGHTED_AVG to use learning.")

 def _construct_softmax_nodes(self, memory_capacity, field_weights,
                              softmax_gain, softmax_threshold, softmax_choice)->list:
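A sketch of the warning path this hunk renames, using construction values from the test file below; the asserted substring is the new warning text::

    import warnings
    import psyneulink as pnl
    from psyneulink.library.compositions.emcomposition import EMComposition

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # ARG_MAX with learning enabled is allowed at construction, but warns
        # that a subsequent call to learn() will fail:
        em = EMComposition(memory_template=[[[1, .1, .1]], [[.1, 1, .1]], [[.1, .1, 1]]],
                           softmax_choice=pnl.ARG_MAX,
                           enable_learning=True)
    assert any("set 'softmax_choice' to WEIGHTED_AVG" in str(w.message) for w in caught)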
@@ -2529,7 +2530,7 @@ def learn(self, *args, **kwargs)->list:
     arg = self.parameters.softmax_choice.get(kwargs[CONTEXT])
     if arg in {ARG_MAX, PROBABILISTIC}:
         raise EMCompositionError(f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg "
-                                 f"of '{self.name}' cannot be used during learning; change to WEIGHTED.")
+                                 f"of '{self.name}' cannot be used during learning; change to WEIGHTED_AVG.")
     return super().learn(*args, **kwargs)

 def _get_execution_mode(self, execution_mode):
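And the corresponding error path in learn(), condensed from the updated test at the end of this commit::

    import pytest
    import psyneulink as pnl
    from psyneulink.library.compositions.emcomposition import EMComposition

    em = EMComposition(memory_template=[[[1, .1, .1]], [[.1, 1, .1]], [[.1, .1, 1]]])
    with pytest.raises(pnl.ComponentError) as error_text:
        # Setting a discrete-choice option and then calling learn() raises:
        em.parameters.softmax_choice.set(pnl.ARG_MAX)
        em.learn()
    assert "change to WEIGHTED_AVG" in str(error_text.value)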
16 changes: 8 additions & 8 deletions tests/composition/test_emcomposition.py
@@ -233,25 +233,25 @@ def test_memory_fill(start, memory_fill):
                 test_memory_fill(start=repeat, memory_fill=memory_fill)

     def test_softmax_choice(self):
-        for softmax_choice in [pnl.WEIGHTED, pnl.ARG_MAX, pnl.PROBABILISTIC]:
-            em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]],
+        for softmax_choice in [pnl.WEIGHTED_AVG, pnl.ARG_MAX, pnl.PROBABILISTIC]:
+            em = EMComposition(memory_template=[[[1,.1,.1]], [[1,.1,.1]], [[.1,.1,1]]],
                                softmax_choice=softmax_choice,
                                enable_learning=False)
-            result = em.run(inputs={em.query_input_nodes[0]:[[0,1,0]]})
-            if softmax_choice == pnl.WEIGHTED:
-                np.testing.assert_allclose(result, [[0.21330295, 0.77339411, 0.21330295]])
+            result = em.run(inputs={em.query_input_nodes[0]:[[1,0,0]]})
+            if softmax_choice == pnl.WEIGHTED_AVG:
+                np.testing.assert_allclose(result, [[0.93016008, 0.1, 0.16983992]])
             if softmax_choice == pnl.ARG_MAX:
-                np.testing.assert_allclose(result, [[.1, 1, .1]])
+                np.testing.assert_allclose(result, [[1, .1, .1]])
             if softmax_choice == pnl.PROBABILISTIC: # NOTE: actual stochasticity not tested here
-                np.testing.assert_allclose(result, [[.1, 1, .1]])
+                np.testing.assert_allclose(result, [[1, .1, .1]])

         em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]])
         for softmax_choice in [pnl.ARG_MAX, pnl.PROBABILISTIC]:
             with pytest.raises(pnl.ComponentError) as error_text:
                 em.parameters.softmax_choice.set(softmax_choice)
                 em.learn()
             assert (f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg "
-                    f"of '{em.name}' cannot be used during learning; change to WEIGHTED." in str(error_text.value))
+                    f"of '{em.name}' cannot be used during learning; change to WEIGHTED_AVG." in str(error_text.value))


 @pytest.mark.pytorch
